| prompt (string, 15 to 655k chars) | completion (string, 3 to 32.4k chars) | api (string, 8 to 52 chars) |
---|---|---|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
import numpy as np
import os
from openvino.inference_engine import TensorDesc, Blob, IECore
from conftest import image_path, create_encoder
import ngraph as ng
path_to_image = image_path()
def test_init_with_tensor_desc():
tensor_desc = TensorDesc("FP32", [1, 3, 127, 127], "NHWC")
blob = Blob(tensor_desc)
assert isinstance(blob.buffer, np.ndarray)
assert blob.tensor_desc == tensor_desc
@pytest.mark.parametrize("shape, layout", [
([1, 3, 127, 127], "NCHW"),
([], "SCALAR"),
])
def test_init_with_numpy(shape, layout):
tensor_desc = TensorDesc("FP32", shape, layout)
array = np.ones(shape=shape, dtype=np.float32)
blob = Blob(tensor_desc, array)
assert isinstance(blob.buffer, np.ndarray)
assert np.shares_memory(blob.buffer, array)
assert blob.tensor_desc == tensor_desc
def test_get_tensor_desc():
tensor_desc = TensorDesc("FP32", [1, 127, 127, 3], "NHWC")
blob = Blob(tensor_desc)
assert blob.tensor_desc == tensor_desc
def test_get_buffer():
tensor_desc = TensorDesc("FP32", [1, 3, 127, 127], "NCHW")
array = np.ones(shape=(1, 3, 127, 127), dtype=np.float32)
blob = Blob(tensor_desc, array)
assert np.array_equal(blob.buffer, array)
@pytest.mark.parametrize("precision, numpy_precision", [
("FP32", np.float32),
("FP64", np.float64),
("FP16", np.float16),
("I8", np.int8),
("U8", np.uint8),
("I32", np.int32),
("I16", np.int16),
("U16", np.uint16),
("I64", np.int64),
("BOOL", np.uint8),
("BIN", np.int8),
("BF16", np.float16),
])
def test_write_to_buffer(precision, numpy_precision):
tensor_desc = TensorDesc(precision, [1, 3, 127, 127], "NCHW")
array = np.zeros(shape=(1, 3, 127, 127), dtype=numpy_precision)
blob = Blob(tensor_desc, array)
ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=numpy_precision)
blob.buffer[:] = ones_arr
assert
| np.array_equal(blob.buffer, ones_arr) | numpy.array_equal |
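The completion above fills in a call to numpy.array_equal. A minimal, self-contained sketch of what that assertion checks (plain NumPy arrays stand in for the Blob buffer here):

```python
import numpy as np

buffer = np.ones((1, 3, 127, 127), dtype=np.float32)  # stands in for blob.buffer
ones_arr = np.ones((1, 3, 127, 127), dtype=np.float32)

# array_equal is True only when both shape and every element match exactly
assert np.array_equal(buffer, ones_arr)
assert not np.array_equal(buffer, np.zeros_like(buffer))
```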
import os
import gym
from gym import wrappers, logger
import gym_panda
from gym_panda.wrapper_env.wrapper import *
# import gym_circle_move
import numpy as np
import matplotlib.pyplot as plt
from stable_baselines import DDPG,PPO2,TRPO
from stable_baselines.common.policies import MlpPolicy
from stable_baselines import results_plotter
from stable_baselines.bench import Monitor
from stable_baselines.results_plotter import load_results, ts2xy
#from stable_baselines.common.noise import AdaptiveParamNoiseSpec
from stable_baselines.common.callbacks import BaseCallback
from stable_baselines.ddpg.policies import MlpPolicy as ddpg_MlpPolicy
from stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec
from datetime import datetime
class SaveOnBestTrainingRewardCallback(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
:param check_freq: (int)
:param log_dir: (str) Path to the folder where the model will be saved.
      It must contain the file created by the ``Monitor`` wrapper.
:param verbose: (int)
"""
def __init__(self, check_freq: int, log_dir: str, verbose=1):
super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)
self.check_freq = check_freq
self.log_dir = log_dir
self.save_path = os.path.join(log_dir, 'best_model')
self.latest_path = os.path.join(log_dir, 'latest_model')
self.best_mean_reward = -np.inf
self.reward = []
def _init_callback(self) -> None:
# Create folder if needed
if self.save_path is not None:
os.makedirs(self.save_path, exist_ok=True)
if self.latest_path is not None:
os.makedirs(self.latest_path, exist_ok=True)
def _on_step(self) -> bool:
# print("h------------------------------------------------------g")
if self.n_calls % self.check_freq == 0:
# Retrieve training reward
x, y = ts2xy(load_results(self.log_dir), 'timesteps')
if len(x) > 0:
# Mean training reward over the last 100 episodes
mean_reward = np.mean(y[-100:])
if self.verbose > 0:
print("Num timesteps: {}".format(self.num_timesteps))
print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(self.best_mean_reward, mean_reward))
# New best model, you could save the agent here
if mean_reward > self.best_mean_reward:
self.best_mean_reward = mean_reward
# Example for saving best model
if self.verbose > 0:
print("Saving new best model to {}".format(self.save_path))
self.model.save(self.save_path)
if self.n_calls % 1e4 == 0:
self.model.save(self.latest_path)
return True
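# Illustrative usage sketch (not part of the original script; values are
# hypothetical): the callback is passed to model.learn so _on_step runs on
# every training step, e.g.
#   callback = SaveOnBestTrainingRewardCallback(check_freq=1000, log_dir=log_dir)
#   model = TRPO(MlpPolicy, env, verbose=1, tensorboard_log=tensorboard_dir)
#   model.learn(total_timesteps=int(1e6), callback=callback)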
if __name__ == "__main__":
# make env
env_name = "feasibilitypanda-v0"
#env_name = "disabledpanda-v0"
env = gym.make(env_name)
#pdb.set_trace()
#env = SkipStepsWrapperVAE(env)
#env = infeasibleWrapper(env)
# Create log dir
#log_dir = "/iliad/u/yilunhao/logs/models/sb-trpo-joint-target-diffdynamics-{}/".format(datetime.now().strftime("%Y-%m-%d"))
log_dir = "../logs/models/sb-trpo-joint-target-diffdynamics-{}/".format(datetime.now().strftime("%Y-%m-%d"))
tensorboard_dir = "../logs/logs"
os.makedirs(tensorboard_dir, exist_ok=True)
os.makedirs(log_dir, exist_ok=True)
env = Monitor(env, log_dir)
env.reset()
# print(env.n)
n_actions = env.action_space.shape[-1]
param_noise = AdaptiveParamNoiseSpec(initial_stddev=0.01, desired_action_stddev=0.01)
action_noise = OrnsteinUhlenbeckActionNoise(mean=
| np.zeros(n_actions) | numpy.zeros |
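The completion fills in numpy.zeros for the noise mean. A small sketch of how that call is typically combined with the Ornstein-Uhlenbeck noise object (the action dimension and sigma below are hypothetical, not taken from the script):

```python
import numpy as np
from stable_baselines.common.noise import OrnsteinUhlenbeckActionNoise

n_actions = 7  # hypothetical action dimension
# np.zeros(n_actions) gives a zero-mean noise vector, one entry per action
action_noise = OrnsteinUhlenbeckActionNoise(
    mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions))
```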
from abc import abstractmethod, ABC
from typing import Optional, Tuple, Any, cast, Union, Sequence
import PIL
import gym
import numpy as np
from torchvision import transforms
from allenact.base_abstractions.misc import EnvType
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import SubTaskType
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact.utils.tensor_utils import ScaleBothSides
class VisionSensor(Sensor[EnvType, SubTaskType]):
def __init__(
self,
mean: Optional[np.ndarray] = None,
stdev: Optional[np.ndarray] = None,
height: Optional[int] = None,
width: Optional[int] = None,
uuid: str = "vision",
output_shape: Optional[Tuple[int, ...]] = None,
output_channels: Optional[int] = None,
unnormalized_infimum: float = -np.inf,
unnormalized_supremum: float = np.inf,
scale_first: bool = True,
**kwargs: Any
):
"""Initializer.
# Parameters
mean : The images will be normalized with the given mean
stdev : The images will be normalized with the given standard deviations.
        height : If it's a non-negative integer and `width` is also a non-negative integer, the image returned from the
environment will be rescaled to have `height` rows and `width` columns using bilinear sampling.
        width : If it's a non-negative integer and `height` is also a non-negative integer, the image returned from the
environment will be rescaled to have `height` rows and `width` columns using bilinear sampling.
uuid : The universally unique identifier for the sensor.
output_shape : Optional observation space shape (alternative to `output_channels`).
output_channels : Optional observation space number of channels (alternative to `output_shape`).
unnormalized_infimum : Lower limit(s) for the observation space range.
unnormalized_supremum : Upper limit(s) for the observation space range.
scale_first : Whether to scale image before normalization (if needed).
kwargs : Extra kwargs. Currently unused.
"""
self._norm_means = mean
self._norm_sds = stdev
assert (self._norm_means is None) == (self._norm_sds is None), (
"In VisionSensor's config, "
"either both mean/stdev must be None or neither."
)
self._should_normalize = self._norm_means is not None
self._height = height
self._width = width
assert (self._width is None) == (self._height is None), (
"In VisionSensor's config, "
"either both height/width must be None or neither."
)
self._scale_first = scale_first
self.scaler: Optional[ScaleBothSides] = None
if self._width is not None:
self.scaler = ScaleBothSides(
width=cast(int, self._width), height=cast(int, self._height)
)
self.to_pil = transforms.ToPILImage() # assumes mode="RGB" for 3 channels
self._observation_space = self._make_observation_space(
output_shape=output_shape,
output_channels=output_channels,
unnormalized_infimum=unnormalized_infimum,
unnormalized_supremum=unnormalized_supremum,
)
assert int(PIL.__version__.split(".")[0]) != 7, (
"We found that Pillow version >=7.* has broken scaling,"
" please downgrade to version 6.2.1 or upgrade to >=8.0.0"
)
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _make_observation_space(
self,
output_shape: Optional[Tuple[int, ...]],
output_channels: Optional[int],
unnormalized_infimum: float,
unnormalized_supremum: float,
) -> gym.spaces.Box:
assert output_shape is None or output_channels is None, (
"In VisionSensor's config, "
"only one of output_shape and output_channels can be not None."
)
shape: Optional[Tuple[int, ...]] = None
if output_shape is not None:
shape = output_shape
elif self._height is not None and output_channels is not None:
shape = (
cast(int, self._height),
cast(int, self._width),
cast(int, output_channels),
)
if not self._should_normalize or shape is None or len(shape) == 1:
return gym.spaces.Box(
low=np.float32(unnormalized_infimum),
high=np.float32(unnormalized_supremum),
shape=shape,
)
else:
out_shape = shape[:-1] + (1,)
low = np.tile(
(unnormalized_infimum - cast(np.ndarray, self._norm_means))
/ cast(np.ndarray, self._norm_sds),
out_shape,
)
high = np.tile(
(unnormalized_supremum - cast(np.ndarray, self._norm_means))
/ cast(np.ndarray, self._norm_sds),
out_shape,
)
return gym.spaces.Box(low=np.float32(low), high=np.float32(high))
def _get_observation_space(self):
return self._observation_space
@property
def height(self) -> Optional[int]:
"""Height that input image will be rescale to have.
# Returns
The height as a non-negative integer or `None` if no rescaling is done.
"""
return self._height
@property
def width(self) -> Optional[int]:
"""Width that input image will be rescale to have.
# Returns
The width as a non-negative integer or `None` if no rescaling is done.
"""
return self._width
@abstractmethod
def frame_from_env(self, env: EnvType, task: Optional[SubTaskType]) -> np.ndarray:
raise NotImplementedError
def get_observation(
self, env: EnvType, task: Optional[SubTaskType], *args: Any, **kwargs: Any
) -> Any:
im = self.frame_from_env(env=env, task=task)
assert (
im.dtype == np.float32 and (len(im.shape) == 2 or im.shape[-1] == 1)
) or (im.shape[-1] == 3 and im.dtype == np.uint8), (
"Input frame must either have 3 channels and be of"
" type np.uint8 or have one channel and be of type np.float32"
)
if self._scale_first:
if self.scaler is not None and im.shape[:2] != (self._height, self._width):
im = np.array(self.scaler(self.to_pil(im)), dtype=im.dtype) # hwc
assert im.dtype in [np.uint8, np.float32]
if im.dtype == np.uint8:
im = im.astype(np.float32) / 255.0
if self._should_normalize:
im -= self._norm_means
im /= self._norm_sds
if not self._scale_first:
if self.scaler is not None and im.shape[:2] != (self._height, self._width):
im = np.array(self.scaler(self.to_pil(im)), dtype=np.float32) # hwc
return im
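# Illustrative sketch (not part of the original module): a concrete sensor only
# needs to implement frame_from_env; scaling, normalization, and observation
# space construction are handled by VisionSensor. Assuming a hypothetical
# environment that exposes `env.current_frame` as an HxWx3 uint8 array:
#
#   class MyRGBSensor(RGBSensor[MyEnv, MyTask]):
#       def frame_from_env(self, env: MyEnv, task: Optional[MyTask]) -> np.ndarray:
#           return env.current_frame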
class RGBSensor(VisionSensor[EnvType, SubTaskType], ABC):
def __init__(
self,
use_resnet_normalization: bool = False,
mean: Optional[Union[np.ndarray, Sequence[float]]] = (0.485, 0.456, 0.406),
stdev: Optional[Union[np.ndarray, Sequence[float]]] = (0.229, 0.224, 0.225),
height: Optional[int] = None,
width: Optional[int] = None,
uuid: str = "rgb",
output_shape: Optional[Tuple[int, ...]] = None,
output_channels: int = 3,
unnormalized_infimum: float = 0.0,
unnormalized_supremum: float = 1.0,
scale_first: bool = True,
**kwargs: Any
):
"""Initializer.
# Parameters
use_resnet_normalization : Whether to apply image normalization with the given `mean` and `stdev`.
mean : The images will be normalized with the given mean if `use_resnet_normalization` is True (default
`[0.485, 0.456, 0.406]`, i.e. the standard resnet normalization mean).
stdev : The images will be normalized with the given standard deviation if `use_resnet_normalization` is True
(default `[0.229, 0.224, 0.225]`, i.e. the standard resnet normalization standard deviation).
height: If it's a non-negative integer and `width` is also non-negative integer, the image returned from the
environment will be rescaled to have `height` rows and `width` columns using bilinear sampling.
width: If it's a non-negative integer and `height` is also non-negative integer, the image returned from the
environment will be rescaled to have `height` rows and `width` columns using bilinear sampling.
uuid: The universally unique identifier for the sensor.
output_shape: Optional observation space shape (alternative to `output_channels`).
output_channels: Optional observation space number of channels (alternative to `output_shape`).
unnormalized_infimum: Lower limit(s) for the observation space range.
unnormalized_supremum: Upper limit(s) for the observation space range.
scale_first: Whether to scale image before normalization (if needed).
kwargs : Extra kwargs. Currently unused.
"""
if not use_resnet_normalization:
mean, stdev = None, None
if isinstance(mean, tuple):
mean = np.array(mean, dtype=np.float32).reshape(1, 1, len(mean))
if isinstance(stdev, tuple):
stdev =
| np.array(stdev, dtype=np.float32) | numpy.array |
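The completion fills in numpy.array. A standalone sketch of how the mean/stdev tuples become broadcastable (1, 1, C) arrays for per-channel normalization (the constants are the ImageNet values already shown above; the input frame is random data for illustration):

```python
import numpy as np

mean = (0.485, 0.456, 0.406)
stdev = (0.229, 0.224, 0.225)

mean = np.array(mean, dtype=np.float32).reshape(1, 1, len(mean))
stdev = np.array(stdev, dtype=np.float32).reshape(1, 1, len(stdev))

im = np.random.rand(224, 224, 3).astype(np.float32)  # hypothetical HWC frame in [0, 1]
im = (im - mean) / stdev  # broadcasts across height and width
```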
from math import ceil
from moviepy.editor import VideoFileClip, concatenate_videoclips
from moviepy.video.fx.all import speedx
from moviepy.audio.fx.all import volumex
import numpy as np
class Clip:
def __init__(self, clip_path):
self.clip = VideoFileClip(clip_path)
self.audio = Audio(self.clip.audio)
def get_duration(self):
return self.clip.duration
def jumpcut(self, magnitude_threshold_ratio, duration_threshold_in_seconds,
failure_tolerance_ratio, space_on_edges, silence_part_speed,
min_loud_part_duration):
intervals_to_cut = self.audio.get_intervals_to_cut(magnitude_threshold_ratio,
duration_threshold_in_seconds,
failure_tolerance_ratio,
space_on_edges)
jumpcutted_clips = []
previous_stop = 0
for start, stop in intervals_to_cut:
clip_before = self.clip.subclip(previous_stop, start)
if clip_before.duration > min_loud_part_duration:
jumpcutted_clips.append(clip_before)
if silence_part_speed is not None:
silence_clip = self.clip.subclip(start, stop)
silence_clip = speedx(silence_clip, silence_part_speed).without_audio()
jumpcutted_clips.append(silence_clip)
previous_stop = stop
last_clip = self.clip.subclip(stop, self.clip.duration)
jumpcutted_clips.append(last_clip)
return concatenate_videoclips(jumpcutted_clips), intervals_to_cut
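# Illustrative usage sketch (not part of the original module; parameter values
# are hypothetical):
#   clip = Clip("input.mp4")
#   edited, cut_intervals = clip.jumpcut(
#       magnitude_threshold_ratio=0.02, duration_threshold_in_seconds=0.5,
#       failure_tolerance_ratio=0.1, space_on_edges=0.1,
#       silence_part_speed=4, min_loud_part_duration=0.1)
#   edited.write_videofile("output.mp4")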
class Audio:
def __init__(self, audio):
self.audio = audio
self.fps = audio.fps
self.signal = self.audio.to_soundarray()
if len(self.signal.shape) == 1:
self.signal = self.signal.reshape(-1, 1)
def get_intervals_to_cut(self, magnitude_threshold_ratio, duration_threshold_in_seconds,
failure_tolerance_ratio, space_on_edges):
min_magnitude = min(abs(np.min(self.signal)),
| np.max(self.signal) | numpy.max |
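The completion fills in numpy.max. A standalone sketch of the magnitude computation the truncated line is building (the signal and threshold ratio are hypothetical stand-ins):

```python
import numpy as np

signal = np.random.uniform(-1.0, 1.0, size=(44100, 2))  # hypothetical stereo samples

# the smaller of the peak magnitudes on either side of zero, mirroring
# min(abs(np.min(...)), np.max(...)) from the truncated line above
min_magnitude = min(abs(np.min(signal)), np.max(signal))
threshold = min_magnitude * 0.02  # hypothetical magnitude_threshold_ratio
```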
"""
Make sure the isochronal likelihood function looks the same as stardate with
isochrones only
"""
import emcee
import numpy as np
from stardate.lhf import lnprob, lnlike
import stardate as sd
import time
from isochrones import SingleStarModel, get_ichrone
bands = ["B", "V", "J", "H", "K", "BP", "RP"]
mist = get_ichrone("mist", bands=bands)
def iso_lnprob(params, *args):
linparams = params*1
linparams[3] = np.exp(linparams[3])
mod, iso_params = args
like, prior = mod.lnlike(linparams), mod.lnprior(linparams)
prob = like + prior
if not np.isfinite(prob):
prob = -np.inf
return prob
def test_iso_lnlike():
iso_params = {"teff": (5770, 10),
"feh": (0, .01),
"logg": (4.44, .1),
"parallax": (1, 1)}
mod = SingleStarModel(mist, **iso_params) # StarModel isochrones obj
params = [354, np.log10(4.56*1e9), 0., 1000, 0.]
lnparams = [354, np.log10(4.56*1e9), 0., np.log(1000), 0.]
lnpr = mod.lnprior(params)
# lnparams = [350, 9, 0., 6, 0.]
args1 = [mod, iso_params]
# Calculate the lnprob above
iso_lp = iso_lnprob(lnparams, *args1)
start = time.time()
for i in range(100):
iso_lp = iso_lnprob(lnparams, *args1)
end = time.time()
# Calculate the stardate lnprob
args2 = [mod, None, None, True, False, True, "praesepe"]
start = time.time()
for i in range(100):
lp = lnprob(lnparams, *args2)[0]
end = time.time()
# print("time = ", end - start)
ll = lnlike(lnparams, *args2)
lnprior = lp - ll
assert iso_lp == lp
assert np.isclose(iso_lp - lnpr, ll)
assert lnpr == lnprior
# THIS TEST
np.random.seed(42)
nwalkers, ndim, nsteps = 50, 5, 100
p0 = [np.random.randn(ndim)*1e-4 + lnparams for j in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, iso_lnprob, args=args1);
start = time.time()
sampler.run_mcmc(p0, nsteps);
end = time.time()
samples = np.reshape(sampler.chain, (nwalkers*nsteps, ndim))
test_median =
| np.median(samples[:, 1]) | numpy.median |
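The completion fills in numpy.median. A minimal sketch of taking the median of one parameter column from flattened MCMC samples (shapes mirror the test; the chain is random noise here rather than a real emcee run):

```python
import numpy as np

nwalkers, ndim, nsteps = 50, 5, 100
# stand-in for sampler.chain reshaped to (nwalkers * nsteps, ndim)
samples = np.random.randn(nwalkers * nsteps, ndim)

# median of the second parameter column (log10 age in the test above)
test_median = np.median(samples[:, 1])
```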
"""Pollinator service model for InVEST."""
import itertools
import collections
import re
import os
import logging
import hashlib
import inspect
from osgeo import gdal
from osgeo import ogr
import pygeoprocessing
import numpy
import taskgraph
from . import utils
from . import validation
from . import MODEL_METADATA
LOGGER = logging.getLogger(__name__)
ARGS_SPEC = {
"model_name": MODEL_METADATA["pollination"].model_title,
"pyname": MODEL_METADATA["pollination"].pyname,
"userguide_html": MODEL_METADATA["pollination"].userguide,
"args": {
"workspace_dir": validation.WORKSPACE_SPEC,
"results_suffix": validation.SUFFIX_SPEC,
"n_workers": validation.N_WORKERS_SPEC,
"landcover_raster_path": {
"type": "raster",
"required": True,
"validation_options": {
"projected": True,
},
"about": (
"This is the landcover map that's used to map biophysical "
"properties about habitat and floral resources of landcover "
"types to a spatial layout."),
"name": "Land Cover Map"
},
"guild_table_path": {
"validation_options": {
"required_fields": ["species", "alpha", "relative_abundance"],
},
"type": "csv",
"required": True,
"about": (
"A table indicating the bee species to analyze in this model "
"run. Table headers must include:<br/>* 'species': a bee "
"species whose column string names will be referred to in "
"other tables and the model will output analyses per species."
"<br/> * any number of columns matching "
"_NESTING_SUITABILITY_PATTERN with values in the range "
"[0.0, 1.0] indicating the suitability of the given species "
"to nest in a particular substrate.<br/>* any number of "
"_FORAGING_ACTIVITY_PATTERN columns with values in the range "
"[0.0, 1.0] indicating the relative level of foraging "
"activity for that species during a particular season."
"<br/>* 'alpha': the sigma average flight distance of that "
"bee species in meters.<br/>* 'relative_abundance': a weight "
"indicating the relative abundance of the particular species "
"with respect to the sum of all relative abundance weights "
"in the table."),
"name": "Guild Table"
},
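        # Illustrative (hypothetical) guild table satisfying the spec above,
        # assuming a single 'cavity' substrate and a single 'spring' season:
        #   species,nesting_suitability_cavity_index,foraging_activity_spring_index,alpha,relative_abundance
        #   apis,1.0,0.9,500,0.75
        #   bombus,0.8,0.6,1500,0.25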
"landcover_biophysical_table_path": {
"validation_options": {
"required_fields": ["lucode"],
},
"type": "csv",
"required": True,
"about": (
"A CSV table mapping landcover codes in the landcover raster "
"to indexes of nesting availability for each nesting "
"substrate referenced in guilds table as well as indexes of "
"abundance of floral resources on that landcover type per "
"season in the bee activity columns of the guild table."
"<br/>All indexes are in the range [0.0, 1.0].<br/>Columns "
"in the table must be at least<br/>* 'lucode': representing "
"all the unique landcover codes in the raster st "
"`args['landcover_path']`<br/>* For every nesting matching "
"_NESTING_SUITABILITY_PATTERN in the guild stable, a column "
"matching the pattern in `_LANDCOVER_NESTING_INDEX_HEADER`."
"<br/>* For every season matching _FORAGING_ACTIVITY_PATTERN "
"in the guilds table, a column matching the pattern in "
"`_LANDCOVER_FLORAL_RESOURCES_INDEX_HEADER`."),
"name": "Land Cover Biophysical Table"
},
"farm_vector_path": {
"validation_options": {
"required_fields": ["crop_type", "half_sat", "season", "p_dep",
"p_managed"],
},
"type": "vector",
"required": False,
"about": (
"This is a layer of polygons representing farm sites to be "
"analyzed. The vector must have at least the following "
"fields:<br/><br/>* season (string): season in which the "
"farm needs pollination.<br/>* half_sat (float): a real in "
"the range [0.0, 1.0] representing the proportion of wild "
"pollinators to achieve a 50% yield of that crop.<br/>* "
"p_wild_dep (float): a number in the range [0.0, 1.0] "
"representing the proportion of yield dependent on "
"pollinators.<br/>* p_managed (float): proportion of "
"pollinators that come from non-native/managed hives.<br/>"
"* f_[season] (float): any number of fields that match this "
"pattern such that `season` also matches the season headers "
"in the biophysical and guild table. Any areas that overlap "
"the landcover map will replace seasonal floral resources "
"with this value. Ranges from 0..1.<br/>* n_[substrate] "
"(float): any number of fields that match this pattern such "
"that `substrate` also matches the nesting substrate headers "
"in the biophysical and guild table. Any areas that "
"overlap the landcover map will replace nesting substrate "
"suitability with this value. Ranges from 0..1."),
"name": "Farm Vector"
}
}
}
_INDEX_NODATA = -1.0
# These patterns are expected in the biophysical table
_NESTING_SUBSTRATE_PATTERN = 'nesting_([^_]+)_availability_index'
_FLORAL_RESOURCES_AVAILABLE_PATTERN = 'floral_resources_([^_]+)_index'
_EXPECTED_BIOPHYSICAL_HEADERS = [
'lucode', _NESTING_SUBSTRATE_PATTERN, _FLORAL_RESOURCES_AVAILABLE_PATTERN]
# These are patterns expected in the guilds table
_NESTING_SUITABILITY_PATTERN = 'nesting_suitability_([^_]+)_index'
# replace with season
_FORAGING_ACTIVITY_PATTERN = 'foraging_activity_%s_index'
_FORAGING_ACTIVITY_RE_PATTERN = _FORAGING_ACTIVITY_PATTERN % '([^_]+)'
_RELATIVE_SPECIES_ABUNDANCE_FIELD = 'relative_abundance'
_ALPHA_HEADER = 'alpha'
_EXPECTED_GUILD_HEADERS = [
'species', _NESTING_SUITABILITY_PATTERN, _FORAGING_ACTIVITY_RE_PATTERN,
_ALPHA_HEADER, _RELATIVE_SPECIES_ABUNDANCE_FIELD]
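# Illustrative note (not part of the original module): these patterns are
# matched against table headers with re.match/re.findall, and group(1) extracts
# the substrate or season token, e.g. with hypothetical headers:
#   re.match(_NESTING_SUITABILITY_PATTERN, 'nesting_suitability_cavity_index').group(1) == 'cavity'
#   re.match(_FORAGING_ACTIVITY_RE_PATTERN, 'foraging_activity_spring_index').group(1) == 'spring'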
_NESTING_SUBSTRATE_INDEX_FILEPATTERN = 'nesting_substrate_index_%s%s.tif'
# this is used if there is a farm polygon present
_FARM_NESTING_SUBSTRATE_INDEX_FILEPATTERN = (
'farm_nesting_substrate_index_%s%s.tif')
# replaced by (species, file_suffix)
_HABITAT_NESTING_INDEX_FILE_PATTERN = 'habitat_nesting_index_%s%s.tif'
# replaced by (season, file_suffix)
_RELATIVE_FLORAL_ABUNDANCE_INDEX_FILE_PATTERN = (
'relative_floral_abundance_index_%s%s.tif')
# this is used if there's a farm polygon present
_FARM_RELATIVE_FLORAL_ABUNDANCE_INDEX_FILE_PATTERN = (
'farm_relative_floral_abundance_index_%s%s.tif')
# used as an intermediate step for floral resources calculation
# replace (species, file_suffix)
_LOCAL_FORAGING_EFFECTIVENESS_FILE_PATTERN = (
'local_foraging_effectiveness_%s%s.tif')
# for intermediate output of floral resources replace (species, file_suffix)
_FLORAL_RESOURCES_INDEX_FILE_PATTERN = (
'floral_resources_%s%s.tif')
# pollinator supply raster replace (species, file_suffix)
_POLLINATOR_SUPPLY_FILE_PATTERN = 'pollinator_supply_%s%s.tif'
# name of reprojected farm vector replace (file_suffix)
_PROJECTED_FARM_VECTOR_FILE_PATTERN = 'reprojected_farm_vector%s.shp'
# used to store the 2D decay kernel for a given distance replace
# (alpha, file suffix)
_KERNEL_FILE_PATTERN = 'kernel_%f%s.tif'
# PA(x,s,j) replace (species, season, file_suffix)
_POLLINATOR_ABUNDANCE_FILE_PATTERN = 'pollinator_abundance_%s_%s%s.tif'
# PAT(x,j) total pollinator abundance per season replace (season, file_suffix)
_TOTAL_POLLINATOR_ABUNDANCE_FILE_PATTERN = (
'total_pollinator_abundance_%s%s.tif')
# used for RA(l(x),j)*fa(s,j) replace (species, season, file_suffix)
_FORAGED_FLOWERS_INDEX_FILE_PATTERN = (
'foraged_flowers_index_%s_%s%s.tif')
# used for convolving PS over alpha s replace (species, file_suffix)
_CONVOLVE_PS_FILE_PATH = 'convolve_ps_%s%s.tif'
# half saturation raster replace (season, file_suffix)
_HALF_SATURATION_FILE_PATTERN = 'half_saturation_%s%s.tif'
# blank raster as a basis to rasterize on replace (file_suffix)
_BLANK_RASTER_FILE_PATTERN = 'blank_raster%s.tif'
# raster to hold seasonal farm pollinator replace (season, file_suffix)
_FARM_POLLINATOR_SEASON_FILE_PATTERN = 'farm_pollinator_%s%s.tif'
# total farm pollinators replace (file_suffix)
_FARM_POLLINATOR_FILE_PATTERN = 'farm_pollinators%s.tif'
# managed pollinator indexes replace (file_suffix)
_MANAGED_POLLINATOR_FILE_PATTERN = 'managed_pollinators%s.tif'
# total pollinator raster replace (file_suffix)
_TOTAL_POLLINATOR_YIELD_FILE_PATTERN = 'total_pollinator_yield%s.tif'
# wild pollinator raster replace (file_suffix)
_WILD_POLLINATOR_YIELD_FILE_PATTERN = 'wild_pollinator_yield%s.tif'
# final aggregate farm shapefile file pattern replace (file_suffix)
_FARM_VECTOR_RESULT_FILE_PATTERN = 'farm_results%s.shp'
# output field on target shapefile if farms are enabled
_TOTAL_FARM_YIELD_FIELD_ID = 'y_tot'
# output field for wild pollinators on farms if farms are enabled
_WILD_POLLINATOR_FARM_YIELD_FIELD_ID = 'y_wild'
# output field for proportion of wild pollinators over the pollinator
# dependent part of the yield
_POLLINATOR_PROPORTION_FARM_YIELD_FIELD_ID = 'pdep_y_w'
# output field for pollinator abundance on farm for the season of pollination
_POLLINATOR_ABUNDANCE_FARM_FIELD_ID = 'p_abund'
# expected pattern for seasonal floral resources in input shapefile (season)
_FARM_FLORAL_RESOURCES_HEADER_PATTERN = 'fr_%s'
# regular expression version of _FARM_FLORAL_RESOURCES_PATTERN
_FARM_FLORAL_RESOURCES_PATTERN = (
_FARM_FLORAL_RESOURCES_HEADER_PATTERN % '([^_]+)')
# expected pattern for nesting substrate in input shapefile (substrate)
_FARM_NESTING_SUBSTRATE_HEADER_PATTERN = 'n_%s'
# regular expression version of _FARM_NESTING_SUBSTRATE_HEADER_PATTERN
_FARM_NESTING_SUBSTRATE_RE_PATTERN = (
_FARM_NESTING_SUBSTRATE_HEADER_PATTERN % '([^_]+)')
_HALF_SATURATION_FARM_HEADER = 'half_sat'
_CROP_POLLINATOR_DEPENDENCE_FIELD = 'p_dep'
_MANAGED_POLLINATORS_FIELD = 'p_managed'
_FARM_SEASON_FIELD = 'season'
_EXPECTED_FARM_HEADERS = [
_FARM_SEASON_FIELD, 'crop_type', _HALF_SATURATION_FARM_HEADER,
_MANAGED_POLLINATORS_FIELD, _FARM_FLORAL_RESOURCES_PATTERN,
_FARM_NESTING_SUBSTRATE_RE_PATTERN, _CROP_POLLINATOR_DEPENDENCE_FIELD]
def execute(args):
"""Pollination.
Args:
args['workspace_dir'] (string): a path to the output workspace folder.
Will overwrite any files that exist if the path already exists.
args['results_suffix'] (string): string appended to each output
file path.
args['landcover_raster_path'] (string): file path to a landcover
raster.
args['guild_table_path'] (string): file path to a table indicating
the bee species to analyze in this model run. Table headers
must include:
* 'species': a bee species whose column string names will
be referred to in other tables and the model will output
analyses per species.
* one or more columns matching _NESTING_SUITABILITY_PATTERN
with values in the range [0.0, 1.0] indicating the
suitability of the given species to nest in a particular
substrate.
* one or more columns matching _FORAGING_ACTIVITY_RE_PATTERN
with values in the range [0.0, 1.0] indicating the
relative level of foraging activity for that species
during a particular season.
* _ALPHA_HEADER the sigma average flight distance of that bee
species in meters.
* 'relative_abundance': a weight indicating the relative
abundance of the particular species with respect to the
sum of all relative abundance weights in the table.
args['landcover_biophysical_table_path'] (string): path to a table
mapping landcover codes in `args['landcover_path']` to indexes of
nesting availability for each nesting substrate referenced in
guilds table as well as indexes of abundance of floral resources
on that landcover type per season in the bee activity columns of
the guild table.
All indexes are in the range [0.0, 1.0].
Columns in the table must be at least
* 'lucode': representing all the unique landcover codes in
                  the raster at `args['landcover_path']`
* For every nesting matching _NESTING_SUITABILITY_PATTERN
                  in the guild table, a column matching the pattern in
`_LANDCOVER_NESTING_INDEX_HEADER`.
* For every season matching _FORAGING_ACTIVITY_RE_PATTERN
in the guilds table, a column matching
the pattern in `_LANDCOVER_FLORAL_RESOURCES_INDEX_HEADER`.
args['farm_vector_path'] (string): (optional) path to a single layer
polygon shapefile representing farms. If present will trigger the
farm yield component of the model.
The layer must have at least the following fields:
* season (string): season in which the farm needs pollination
* crop_type (string): a text field to identify the crop type for
summary statistics.
* half_sat (float): a real in the range [0.0, 1.0] representing
the proportion of wild pollinators to achieve a 50% yield
of that crop.
* p_dep (float): a number in the range [0.0, 1.0]
representing the proportion of yield dependent on pollinators.
* p_managed (float): proportion of pollinators that come from
non-native/managed hives.
* fr_[season] (float): one or more fields that match this pattern
such that `season` also matches the season headers in the
biophysical and guild table. Any areas that overlap the
landcover map will replace seasonal floral resources with
this value. Ranges from 0..1.
* n_[substrate] (float): One or more fields that match this
pattern such that `substrate` also matches the nesting
substrate headers in the biophysical and guild table. Any
areas that overlap the landcover map will replace nesting
substrate suitability with this value. Ranges from 0..1.
args['n_workers'] (int): (optional) The number of worker processes to
use for processing this model. If omitted, computation will take
place in the current process.
Returns:
None
"""
# create initial working directories and determine file suffixes
intermediate_output_dir = os.path.join(
args['workspace_dir'], 'intermediate_outputs')
work_token_dir = os.path.join(
intermediate_output_dir, '_taskgraph_working_dir')
output_dir = os.path.join(args['workspace_dir'])
utils.make_directories(
[output_dir, intermediate_output_dir])
file_suffix = utils.make_suffix_string(args, 'results_suffix')
if 'farm_vector_path' in args and args['farm_vector_path'] != '':
# we set the vector path to be the projected vector that we'll create
# later
farm_vector_path = os.path.join(
intermediate_output_dir,
_PROJECTED_FARM_VECTOR_FILE_PATTERN % file_suffix)
else:
farm_vector_path = None
# parse out the scenario variables from a complicated set of two tables
# and possibly a farm polygon. This function will also raise an exception
# if any of the inputs are malformed.
scenario_variables = _parse_scenario_variables(args)
landcover_raster_info = pygeoprocessing.get_raster_info(
args['landcover_raster_path'])
try:
n_workers = int(args['n_workers'])
except (KeyError, ValueError, TypeError):
# KeyError when n_workers is not present in args
# ValueError when n_workers is an empty string.
# TypeError when n_workers is None.
n_workers = -1 # Synchronous mode.
task_graph = taskgraph.TaskGraph(work_token_dir, n_workers)
if farm_vector_path is not None:
# ensure farm vector is in the same projection as the landcover map
reproject_farm_task = task_graph.add_task(
task_name='reproject_farm_task',
func=pygeoprocessing.reproject_vector,
args=(
args['farm_vector_path'],
landcover_raster_info['projection_wkt'], farm_vector_path),
target_path_list=[farm_vector_path])
# calculate nesting_substrate_index[substrate] substrate maps
# N(x, n) = ln(l(x), n)
scenario_variables['nesting_substrate_index_path'] = {}
landcover_substrate_index_tasks = {}
reclass_error_details = {
'raster_name': 'LULC', 'column_name': 'lucode',
'table_name': 'Biophysical'}
for substrate in scenario_variables['substrate_list']:
nesting_substrate_index_path = os.path.join(
intermediate_output_dir,
_NESTING_SUBSTRATE_INDEX_FILEPATTERN % (substrate, file_suffix))
scenario_variables['nesting_substrate_index_path'][substrate] = (
nesting_substrate_index_path)
landcover_substrate_index_tasks[substrate] = task_graph.add_task(
task_name='reclassify_to_substrate_%s' % substrate,
func=utils.reclassify_raster,
args=(
(args['landcover_raster_path'], 1),
scenario_variables['landcover_substrate_index'][substrate],
nesting_substrate_index_path, gdal.GDT_Float32,
_INDEX_NODATA, reclass_error_details),
target_path_list=[nesting_substrate_index_path])
# calculate farm_nesting_substrate_index[substrate] substrate maps
# dependent on farm substrate rasterized over N(x, n)
if farm_vector_path is not None:
scenario_variables['farm_nesting_substrate_index_path'] = (
collections.defaultdict(dict))
farm_substrate_rasterize_task_list = []
for substrate in scenario_variables['substrate_list']:
farm_substrate_id = (
_FARM_NESTING_SUBSTRATE_HEADER_PATTERN % substrate)
farm_nesting_substrate_index_path = os.path.join(
intermediate_output_dir,
_FARM_NESTING_SUBSTRATE_INDEX_FILEPATTERN % (
substrate, file_suffix))
scenario_variables['farm_nesting_substrate_index_path'][
substrate] = farm_nesting_substrate_index_path
farm_substrate_rasterize_task_list.append(
task_graph.add_task(
task_name='rasterize_nesting_substrate_%s' % substrate,
func=_rasterize_vector_onto_base,
args=(
scenario_variables['nesting_substrate_index_path'][
substrate],
farm_vector_path, farm_substrate_id,
farm_nesting_substrate_index_path),
target_path_list=[farm_nesting_substrate_index_path],
dependent_task_list=[
landcover_substrate_index_tasks[substrate],
reproject_farm_task]))
habitat_nesting_tasks = {}
scenario_variables['habitat_nesting_index_path'] = {}
for species in scenario_variables['species_list']:
# calculate habitat_nesting_index[species] HN(x, s) = max_n(N(x, n) ns(s,n))
if farm_vector_path is not None:
dependent_task_list = farm_substrate_rasterize_task_list
substrate_path_map = scenario_variables[
'farm_nesting_substrate_index_path']
else:
dependent_task_list = landcover_substrate_index_tasks.values()
substrate_path_map = scenario_variables[
'nesting_substrate_index_path']
scenario_variables['habitat_nesting_index_path'][species] = (
os.path.join(
intermediate_output_dir,
_HABITAT_NESTING_INDEX_FILE_PATTERN % (species, file_suffix)))
calculate_habitat_nesting_index_op = _CalculateHabitatNestingIndex(
substrate_path_map,
scenario_variables['species_substrate_index'][species],
scenario_variables['habitat_nesting_index_path'][species])
habitat_nesting_tasks[species] = task_graph.add_task(
task_name='calculate_habitat_nesting_%s' % species,
func=calculate_habitat_nesting_index_op,
dependent_task_list=dependent_task_list,
target_path_list=[
scenario_variables['habitat_nesting_index_path'][species]])
scenario_variables['relative_floral_abundance_index_path'] = {}
relative_floral_abudance_task_map = {}
reclass_error_details = {
'raster_name': 'LULC', 'column_name': 'lucode',
'table_name': 'Biophysical'}
for season in scenario_variables['season_list']:
# calculate relative_floral_abundance_index[season] per season
# RA(l(x), j)
relative_floral_abundance_index_path = os.path.join(
intermediate_output_dir,
_RELATIVE_FLORAL_ABUNDANCE_INDEX_FILE_PATTERN % (
season, file_suffix))
relative_floral_abudance_task = task_graph.add_task(
task_name='reclassify_to_floral_abundance_%s' % season,
func=utils.reclassify_raster,
args=(
(args['landcover_raster_path'], 1),
scenario_variables['landcover_floral_resources'][season],
relative_floral_abundance_index_path, gdal.GDT_Float32,
_INDEX_NODATA, reclass_error_details),
target_path_list=[relative_floral_abundance_index_path])
# if there's a farm, rasterize floral resources over the top
if farm_vector_path is not None:
farm_relative_floral_abundance_index_path = os.path.join(
intermediate_output_dir,
_FARM_RELATIVE_FLORAL_ABUNDANCE_INDEX_FILE_PATTERN % (
season, file_suffix))
# this is the shapefile header for the farm seasonal floral
# resources
farm_floral_resources_id = (
_FARM_FLORAL_RESOURCES_HEADER_PATTERN % season)
# override the relative floral task because we'll need this one
relative_floral_abudance_task = task_graph.add_task(
task_name='relative_floral_abudance_task_%s' % season,
func=_rasterize_vector_onto_base,
args=(
relative_floral_abundance_index_path,
farm_vector_path, farm_floral_resources_id,
farm_relative_floral_abundance_index_path),
target_path_list=[
farm_relative_floral_abundance_index_path],
dependent_task_list=[
relative_floral_abudance_task, reproject_farm_task])
# override the relative floral abundance index path since we'll
# need the farm one
relative_floral_abundance_index_path = (
farm_relative_floral_abundance_index_path)
scenario_variables['relative_floral_abundance_index_path'][season] = (
relative_floral_abundance_index_path)
relative_floral_abudance_task_map[season] = (
relative_floral_abudance_task)
scenario_variables['foraged_flowers_index_path'] = {}
foraged_flowers_index_task_map = {}
for species in scenario_variables['species_list']:
for season in scenario_variables['season_list']:
# calculate foraged_flowers_species_season = RA(l(x),j)*fa(s,j)
foraged_flowers_index_path = os.path.join(
intermediate_output_dir,
_FORAGED_FLOWERS_INDEX_FILE_PATTERN % (
species, season, file_suffix))
relative_abundance_path = (
scenario_variables['relative_floral_abundance_index_path'][
season])
mult_by_scalar_op = _MultByScalar(
scenario_variables['species_foraging_activity'][
(species, season)])
foraged_flowers_index_task_map[(species, season)] = (
task_graph.add_task(
task_name='calculate_foraged_flowers_%s_%s' % (
species, season),
func=pygeoprocessing.raster_calculator,
args=(
[(relative_abundance_path, 1)],
mult_by_scalar_op, foraged_flowers_index_path,
gdal.GDT_Float32, _INDEX_NODATA),
dependent_task_list=[
relative_floral_abudance_task_map[season]],
target_path_list=[foraged_flowers_index_path]))
scenario_variables['foraged_flowers_index_path'][
(species, season)] = foraged_flowers_index_path
pollinator_abundance_path_map = {}
pollinator_abundance_task_map = {}
floral_resources_index_path_map = {}
floral_resources_index_task_map = {}
for species in scenario_variables['species_list']:
# calculate foraging_effectiveness[species]
# FE(x, s) = sum_j [RA(l(x), j) * fa(s, j)]
foraged_flowers_path_band_list = [
(scenario_variables['foraged_flowers_index_path'][
(species, season)], 1)
for season in scenario_variables['season_list']]
local_foraging_effectiveness_path = os.path.join(
intermediate_output_dir,
_LOCAL_FORAGING_EFFECTIVENESS_FILE_PATTERN % (
species, file_suffix))
local_foraging_effectiveness_task = task_graph.add_task(
task_name='local_foraging_effectiveness_%s' % species,
func=pygeoprocessing.raster_calculator,
args=(
foraged_flowers_path_band_list,
_SumRasters(), local_foraging_effectiveness_path,
gdal.GDT_Float32, _INDEX_NODATA),
target_path_list=[
local_foraging_effectiveness_path],
dependent_task_list=[
foraged_flowers_index_task_map[(species, season)]
for season in scenario_variables['season_list']])
landcover_pixel_size_tuple = landcover_raster_info['pixel_size']
try:
landcover_mean_pixel_size = utils.mean_pixel_size_and_area(
landcover_pixel_size_tuple)[0]
except ValueError:
landcover_mean_pixel_size = numpy.min(numpy.absolute(
landcover_pixel_size_tuple))
LOGGER.debug(
                'Land Cover Raster has unequal x, y pixel sizes: %s. Using '
'%s as the mean pixel size.' % (
landcover_pixel_size_tuple, landcover_mean_pixel_size))
# create a convolution kernel for the species flight range
alpha = (
scenario_variables['alpha_value'][species] /
landcover_mean_pixel_size)
kernel_path = os.path.join(
intermediate_output_dir, _KERNEL_FILE_PATTERN % (
alpha, file_suffix))
alpha_kernel_raster_task = task_graph.add_task(
task_name='decay_kernel_raster_%s' % alpha,
func=utils.exponential_decay_kernel_raster,
args=(alpha, kernel_path),
target_path_list=[kernel_path])
# convolve FE with alpha_s
floral_resources_index_path = os.path.join(
intermediate_output_dir, _FLORAL_RESOURCES_INDEX_FILE_PATTERN % (
species, file_suffix))
floral_resources_index_path_map[species] = floral_resources_index_path
floral_resources_task = task_graph.add_task(
task_name='convolve_%s' % species,
func=pygeoprocessing.convolve_2d,
args=(
(local_foraging_effectiveness_path, 1), (kernel_path, 1),
floral_resources_index_path),
kwargs={
'ignore_nodata_and_edges': True,
'mask_nodata': True,
'normalize_kernel': False,
},
dependent_task_list=[
alpha_kernel_raster_task, local_foraging_effectiveness_task],
target_path_list=[floral_resources_index_path])
floral_resources_index_task_map[species] = floral_resources_task
# calculate
# pollinator_supply_index[species] PS(x,s) = FR(x,s) * HN(x,s) * sa(s)
pollinator_supply_index_path = os.path.join(
output_dir, _POLLINATOR_SUPPLY_FILE_PATTERN % (
species, file_suffix))
ps_index_op = _PollinatorSupplyIndexOp(
scenario_variables['species_abundance'][species])
pollinator_supply_task = task_graph.add_task(
task_name='calculate_pollinator_supply_%s' % species,
func=pygeoprocessing.raster_calculator,
args=(
[(scenario_variables['habitat_nesting_index_path'][species],
1),
(floral_resources_index_path, 1)], ps_index_op,
pollinator_supply_index_path, gdal.GDT_Float32,
_INDEX_NODATA),
dependent_task_list=[
floral_resources_task, habitat_nesting_tasks[species]],
target_path_list=[pollinator_supply_index_path])
        # calculate the convolution of PS over alpha_s
convolve_ps_path = os.path.join(
intermediate_output_dir, _CONVOLVE_PS_FILE_PATH % (
species, file_suffix))
convolve_ps_task = task_graph.add_task(
task_name='convolve_ps_%s' % species,
func=pygeoprocessing.convolve_2d,
args=(
(pollinator_supply_index_path, 1), (kernel_path, 1),
convolve_ps_path),
kwargs={
'ignore_nodata_and_edges': True,
'mask_nodata': True,
'normalize_kernel': False,
},
dependent_task_list=[
alpha_kernel_raster_task, pollinator_supply_task],
target_path_list=[convolve_ps_path])
for season in scenario_variables['season_list']:
# calculate pollinator activity as
# PA(x,s,j)=RA(l(x),j)fa(s,j) convolve(ps, alpha_s)
foraged_flowers_index_path = (
scenario_variables['foraged_flowers_index_path'][
(species, season)])
pollinator_abundance_path = os.path.join(
output_dir, _POLLINATOR_ABUNDANCE_FILE_PATTERN % (
species, season, file_suffix))
pollinator_abundance_task_map[(species, season)] = (
task_graph.add_task(
                    task_name='calculate_poll_abundance_%s' % species,
func=pygeoprocessing.raster_calculator,
args=(
[(foraged_flowers_index_path, 1),
(floral_resources_index_path_map[species], 1),
(convolve_ps_path, 1)],
_PollinatorSupplyOp(), pollinator_abundance_path,
gdal.GDT_Float32, _INDEX_NODATA),
dependent_task_list=[
foraged_flowers_index_task_map[(species, season)],
floral_resources_index_task_map[species],
convolve_ps_task],
target_path_list=[pollinator_abundance_path]))
pollinator_abundance_path_map[(species, season)] = (
pollinator_abundance_path)
# calculate total abundance of all pollinators for each season
total_pollinator_abundance_task = {}
for season in scenario_variables['season_list']:
# total_pollinator_abundance_index[season] PAT(x,j)=sum_s PA(x,s,j)
total_pollinator_abundance_index_path = os.path.join(
output_dir, _TOTAL_POLLINATOR_ABUNDANCE_FILE_PATTERN % (
season, file_suffix))
pollinator_abundance_season_path_band_list = [
(pollinator_abundance_path_map[(species, season)], 1)
for species in scenario_variables['species_list']]
total_pollinator_abundance_task[season] = task_graph.add_task(
            task_name='calculate_poll_abundance_%s_%s' % (species, season),
func=pygeoprocessing.raster_calculator,
args=(
pollinator_abundance_season_path_band_list, _SumRasters(),
total_pollinator_abundance_index_path, gdal.GDT_Float32,
_INDEX_NODATA),
dependent_task_list=[
pollinator_abundance_task_map[(species, season)]
for species in scenario_variables['species_list']],
target_path_list=[total_pollinator_abundance_index_path])
# next step is farm vector calculation, if no farms then okay to quit
if farm_vector_path is None:
task_graph.close()
task_graph.join()
return
# blank raster used for rasterizing all the farm parameters/fields later
blank_raster_path = os.path.join(
intermediate_output_dir, _BLANK_RASTER_FILE_PATTERN % file_suffix)
blank_raster_task = task_graph.add_task(
task_name='create_blank_raster',
func=pygeoprocessing.new_raster_from_base,
args=(
args['landcover_raster_path'], blank_raster_path,
gdal.GDT_Float32, [_INDEX_NODATA]),
kwargs={'fill_value_list': [_INDEX_NODATA]},
target_path_list=[blank_raster_path])
farm_pollinator_season_path_list = []
farm_pollinator_season_task_list = []
for season in scenario_variables['season_list']:
half_saturation_raster_path = os.path.join(
intermediate_output_dir, _HALF_SATURATION_FILE_PATTERN % (
season, file_suffix))
half_saturation_task = task_graph.add_task(
task_name='half_saturation_rasterize_%s' % season,
func=_rasterize_vector_onto_base,
args=(
blank_raster_path, farm_vector_path,
_HALF_SATURATION_FARM_HEADER, half_saturation_raster_path),
kwargs={'filter_string': "%s='%s'" % (_FARM_SEASON_FIELD, season)},
dependent_task_list=[blank_raster_task],
target_path_list=[half_saturation_raster_path])
# calc on farm pollinator abundance i.e. FP_season
farm_pollinator_season_path = os.path.join(
intermediate_output_dir, _FARM_POLLINATOR_SEASON_FILE_PATTERN % (
season, file_suffix))
farm_pollinator_season_task_list.append(task_graph.add_task(
task_name='farm_pollinator_%s' % season,
func=pygeoprocessing.raster_calculator,
args=(
[(half_saturation_raster_path, 1),
(total_pollinator_abundance_index_path, 1)],
_OnFarmPollinatorAbundance(), farm_pollinator_season_path,
gdal.GDT_Float32, _INDEX_NODATA),
dependent_task_list=[
half_saturation_task, total_pollinator_abundance_task[season]],
target_path_list=[farm_pollinator_season_path]))
farm_pollinator_season_path_list.append(farm_pollinator_season_path)
# sum farm pollinators
farm_pollinator_path = os.path.join(
output_dir, _FARM_POLLINATOR_FILE_PATTERN % file_suffix)
farm_pollinator_task = task_graph.add_task(
task_name='sum_farm_pollinators',
func=pygeoprocessing.raster_calculator,
args=(
[(path, 1) for path in farm_pollinator_season_path_list],
_SumRasters(), farm_pollinator_path, gdal.GDT_Float32,
_INDEX_NODATA),
dependent_task_list=farm_pollinator_season_task_list,
target_path_list=[farm_pollinator_path])
# rasterize managed pollinators
managed_pollinator_path = os.path.join(
intermediate_output_dir,
_MANAGED_POLLINATOR_FILE_PATTERN % file_suffix)
managed_pollinator_task = task_graph.add_task(
task_name='rasterize_managed_pollinators',
func=_rasterize_vector_onto_base,
args=(
blank_raster_path, farm_vector_path, _MANAGED_POLLINATORS_FIELD,
managed_pollinator_path),
dependent_task_list=[reproject_farm_task, blank_raster_task],
target_path_list=[managed_pollinator_path])
# calculate PYT
total_pollinator_yield_path = os.path.join(
output_dir, _TOTAL_POLLINATOR_YIELD_FILE_PATTERN % file_suffix)
pyt_task = task_graph.add_task(
task_name='calculate_total_pollinators',
func=pygeoprocessing.raster_calculator,
args=(
[(managed_pollinator_path, 1), (farm_pollinator_path, 1)],
_PYTOp(), total_pollinator_yield_path, gdal.GDT_Float32,
_INDEX_NODATA),
dependent_task_list=[farm_pollinator_task, managed_pollinator_task],
target_path_list=[total_pollinator_yield_path])
# calculate PYW
wild_pollinator_yield_path = os.path.join(
output_dir, _WILD_POLLINATOR_YIELD_FILE_PATTERN % file_suffix)
wild_pollinator_task = task_graph.add_task(
        task_name='calculate_wild_pollinators',
func=pygeoprocessing.raster_calculator,
args=(
[(managed_pollinator_path, 1), (total_pollinator_yield_path, 1)],
_PYWOp(), wild_pollinator_yield_path, gdal.GDT_Float32,
_INDEX_NODATA),
dependent_task_list=[pyt_task, managed_pollinator_task],
target_path_list=[wild_pollinator_yield_path])
# aggregate yields across farms
target_farm_result_path = os.path.join(
output_dir, _FARM_VECTOR_RESULT_FILE_PATTERN % file_suffix)
if os.path.exists(target_farm_result_path):
os.remove(target_farm_result_path)
reproject_farm_task.join()
_create_farm_result_vector(
farm_vector_path, target_farm_result_path)
# aggregate wild pollinator yield over farm
wild_pollinator_task.join()
wild_pollinator_yield_aggregate = pygeoprocessing.zonal_statistics(
(wild_pollinator_yield_path, 1), target_farm_result_path)
# aggregate yield over a farm
pyt_task.join()
total_farm_results = pygeoprocessing.zonal_statistics(
(total_pollinator_yield_path, 1), target_farm_result_path)
# aggregate the pollinator abundance results over the farms
pollinator_abundance_results = {}
for season in scenario_variables['season_list']:
total_pollinator_abundance_index_path = os.path.join(
output_dir, _TOTAL_POLLINATOR_ABUNDANCE_FILE_PATTERN % (
season, file_suffix))
total_pollinator_abundance_task[season].join()
pollinator_abundance_results[season] = (
pygeoprocessing.zonal_statistics(
(total_pollinator_abundance_index_path, 1),
target_farm_result_path))
target_farm_vector = gdal.OpenEx(target_farm_result_path, 1)
target_farm_layer = target_farm_vector.GetLayer()
# aggregate results per farm
for farm_feature in target_farm_layer:
nu = float(farm_feature.GetField(_CROP_POLLINATOR_DEPENDENCE_FIELD))
fid = farm_feature.GetFID()
if total_farm_results[fid]['count'] > 0:
            # total pollinator farm yield is 1 - nu * (1 - tot_pollination_coverage)
# this is YT from the user's guide (y_tot)
farm_feature.SetField(
_TOTAL_FARM_YIELD_FIELD_ID,
1 - nu * (
1 - total_farm_results[fid]['sum'] /
float(total_farm_results[fid]['count'])))
# this is PYW ('pdep_y_w')
farm_feature.SetField(
_POLLINATOR_PROPORTION_FARM_YIELD_FIELD_ID,
(wild_pollinator_yield_aggregate[fid]['sum'] /
float(wild_pollinator_yield_aggregate[fid]['count'])))
# this is YW ('y_wild')
farm_feature.SetField(
_WILD_POLLINATOR_FARM_YIELD_FIELD_ID,
nu * (wild_pollinator_yield_aggregate[fid]['sum'] /
float(wild_pollinator_yield_aggregate[fid]['count'])))
# this is PAT ('p_abund')
farm_season = farm_feature.GetField(_FARM_SEASON_FIELD)
farm_feature.SetField(
_POLLINATOR_ABUNDANCE_FARM_FIELD_ID,
pollinator_abundance_results[farm_season][fid]['sum'] /
float(pollinator_abundance_results[farm_season][fid]['count']))
target_farm_layer.SetFeature(farm_feature)
target_farm_layer.SyncToDisk()
target_farm_layer = None
target_farm_vector = None
task_graph.close()
task_graph.join()
def _rasterize_vector_onto_base(
base_raster_path, base_vector_path, attribute_id,
target_raster_path, filter_string=None):
"""Rasterize attribute from vector onto a copy of base.
Args:
        base_raster_path (string): path to a base raster file
        base_vector_path (string): path to the vector whose `attribute_id`
            values will be rasterized onto a copy of the base raster.
        attribute_id (string): id in `base_vector_path` to rasterize.
target_raster_path (string): a copy of `base_raster_path` with
`base_vector_path[attribute_id]` rasterized on top.
filter_string (string): filtering string to select from farm layer
Returns:
None.
"""
base_raster = gdal.OpenEx(base_raster_path, gdal.OF_RASTER)
raster_driver = gdal.GetDriverByName('GTiff')
target_raster = raster_driver.CreateCopy(target_raster_path, base_raster)
base_raster = None
vector = gdal.OpenEx(base_vector_path)
layer = vector.GetLayer()
if filter_string is not None:
layer.SetAttributeFilter(str(filter_string))
gdal.RasterizeLayer(
target_raster, [1], layer,
options=['ATTRIBUTE=%s' % attribute_id])
target_raster.FlushCache()
target_raster = None
layer = None
vector = None
def _create_farm_result_vector(
base_vector_path, target_vector_path):
"""Create a copy of `base_vector_path` and add FID field to it.
Args:
base_vector_path (string): path to vector to copy
        target_vector_path (string): path to target vector that is a copy
            of the base. This path must not already exist. The copy gets new
            fields for all the result values:
                _POLLINATOR_ABUNDANCE_FARM_FIELD_ID
                _TOTAL_FARM_YIELD_FIELD_ID
                _POLLINATOR_PROPORTION_FARM_YIELD_FIELD_ID
                _WILD_POLLINATOR_FARM_YIELD_FIELD_ID
Returns:
None.
"""
base_vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)
driver = gdal.GetDriverByName('ESRI Shapefile')
target_vector = driver.CreateCopy(
target_vector_path, base_vector)
target_layer = target_vector.GetLayer()
farm_pollinator_abundance_defn = ogr.FieldDefn(
_POLLINATOR_ABUNDANCE_FARM_FIELD_ID, ogr.OFTReal)
farm_pollinator_abundance_defn.SetWidth(25)
farm_pollinator_abundance_defn.SetPrecision(11)
target_layer.CreateField(farm_pollinator_abundance_defn)
total_farm_yield_field_defn = ogr.FieldDefn(
_TOTAL_FARM_YIELD_FIELD_ID, ogr.OFTReal)
total_farm_yield_field_defn.SetWidth(25)
total_farm_yield_field_defn.SetPrecision(11)
target_layer.CreateField(total_farm_yield_field_defn)
pol_proportion_farm_yield_field_defn = ogr.FieldDefn(
_POLLINATOR_PROPORTION_FARM_YIELD_FIELD_ID, ogr.OFTReal)
pol_proportion_farm_yield_field_defn.SetWidth(25)
pol_proportion_farm_yield_field_defn.SetPrecision(11)
target_layer.CreateField(pol_proportion_farm_yield_field_defn)
wild_pol_farm_yield_field_defn = ogr.FieldDefn(
_WILD_POLLINATOR_FARM_YIELD_FIELD_ID, ogr.OFTReal)
wild_pol_farm_yield_field_defn.SetWidth(25)
wild_pol_farm_yield_field_defn.SetPrecision(11)
target_layer.CreateField(wild_pol_farm_yield_field_defn)
target_layer = None
target_vector.FlushCache()
target_vector = None
def _parse_scenario_variables(args):
"""Parse out scenario variables from input parameters.
This function parses through the guild table, biophysical table, and
    farm polygons (if available) to generate the scenario variables listed
    under Returns.
    Args:
args (dict): this is the args dictionary passed in to the `execute`
function, requires a 'guild_table_path', and
'landcover_biophysical_table_path' key and optional
'farm_vector_path' key.
Returns:
A dictionary with the keys:
* season_list (list of string)
* substrate_list (list of string)
* species_list (list of string)
* alpha_value[species] (float)
* landcover_substrate_index[substrate][landcover] (float)
* landcover_floral_resources[season][landcover] (float)
* species_abundance[species] (string->float)
* species_foraging_activity[(species, season)] (string->float)
* species_substrate_index[(species, substrate)] (tuple->float)
* foraging_activity_index[(species, season)] (tuple->float)
"""
guild_table_path = args['guild_table_path']
landcover_biophysical_table_path = args['landcover_biophysical_table_path']
if 'farm_vector_path' in args and args['farm_vector_path'] != '':
farm_vector_path = args['farm_vector_path']
else:
farm_vector_path = None
guild_table = utils.build_lookup_from_csv(
guild_table_path, 'species', to_lower=True)
LOGGER.info('Checking to make sure guild table has all expected headers')
guild_headers = list(guild_table.values())[0].keys()
for header in _EXPECTED_GUILD_HEADERS:
matches = re.findall(header, " ".join(guild_headers))
if len(matches) == 0:
raise ValueError(
"Expected a header in guild table that matched the pattern "
"'%s' but was unable to find one. Here are all the headers "
"from %s: %s" % (
header, guild_table_path,
guild_headers))
landcover_biophysical_table = utils.build_lookup_from_csv(
landcover_biophysical_table_path, 'lucode', to_lower=True)
biophysical_table_headers = (
list(landcover_biophysical_table.values())[0].keys())
for header in _EXPECTED_BIOPHYSICAL_HEADERS:
matches = re.findall(header, " ".join(biophysical_table_headers))
if len(matches) == 0:
raise ValueError(
"Expected a header in biophysical table that matched the "
"pattern '%s' but was unable to find one. Here are all the "
"headers from %s: %s" % (
header, landcover_biophysical_table_path,
biophysical_table_headers))
# this dict to dict will map seasons to guild/biophysical headers
# ex season_to_header['spring']['guilds']
season_to_header = collections.defaultdict(dict)
# this dict to dict will map substrate types to guild/biophysical headers
# ex substrate_to_header['cavity']['biophysical']
substrate_to_header = collections.defaultdict(dict)
for header in guild_headers:
match = re.match(_FORAGING_ACTIVITY_RE_PATTERN, header)
if match:
season = match.group(1)
season_to_header[season]['guild'] = match.group()
match = re.match(_NESTING_SUITABILITY_PATTERN, header)
if match:
substrate = match.group(1)
substrate_to_header[substrate]['guild'] = match.group()
farm_vector = None
if farm_vector_path is not None:
LOGGER.info('Checking that farm polygon has expected headers')
farm_vector = gdal.OpenEx(farm_vector_path)
farm_layer = farm_vector.GetLayer()
if farm_layer.GetGeomType() not in [
ogr.wkbPolygon, ogr.wkbMultiPolygon]:
farm_layer = None
farm_vector = None
raise ValueError("Farm layer not a polygon type")
farm_layer_defn = farm_layer.GetLayerDefn()
farm_headers = [
farm_layer_defn.GetFieldDefn(i).GetName()
for i in range(farm_layer_defn.GetFieldCount())]
for header in _EXPECTED_FARM_HEADERS:
matches = re.findall(header, " ".join(farm_headers))
if not matches:
raise ValueError(
"Missing an expected headers '%s'from %s.\n"
"Got these headers instead %s" % (
header, farm_vector_path, farm_headers))
for header in farm_headers:
match = re.match(_FARM_FLORAL_RESOURCES_PATTERN, header)
if match:
season = match.group(1)
season_to_header[season]['farm'] = match.group()
match = re.match(_FARM_NESTING_SUBSTRATE_RE_PATTERN, header)
if match:
substrate = match.group(1)
substrate_to_header[substrate]['farm'] = match.group()
for header in biophysical_table_headers:
match = re.match(_FLORAL_RESOURCES_AVAILABLE_PATTERN, header)
if match:
season = match.group(1)
season_to_header[season]['biophysical'] = match.group()
match = re.match(_NESTING_SUBSTRATE_PATTERN, header)
if match:
substrate = match.group(1)
substrate_to_header[substrate]['biophysical'] = match.group()
for table_type, lookup_table in itertools.chain(
season_to_header.items(), substrate_to_header.items()):
if len(lookup_table) != 3 and farm_vector is not None:
raise ValueError(
"Expected a biophysical, guild, and farm entry for '%s' but "
"instead found only %s. Ensure there are corresponding "
"entries of '%s' in both the guilds, biophysical "
"table, and farm fields." % (
table_type, lookup_table, table_type))
elif len(lookup_table) != 2 and farm_vector is None:
raise ValueError(
"Expected a biophysical, and guild entry for '%s' but "
"instead found only %s. Ensure there are corresponding "
"entries of '%s' in both the guilds and biophysical "
"table." % (
table_type, lookup_table, table_type))
if farm_vector_path is not None:
farm_season_set = set()
for farm_feature in farm_layer:
farm_season_set.add(farm_feature.GetField(_FARM_SEASON_FIELD))
if len(farm_season_set.difference(season_to_header)) > 0:
raise ValueError(
"Found seasons in farm polygon that were not specified in the"
"biophysical table: %s. Expected only these: %s" % (
farm_season_set.difference(season_to_header),
season_to_header))
result = {}
# * season_list (list of string)
result['season_list'] = sorted(season_to_header)
# * substrate_list (list of string)
result['substrate_list'] = sorted(substrate_to_header)
# * species_list (list of string)
result['species_list'] = sorted(guild_table)
result['alpha_value'] = dict()
for species in result['species_list']:
result['alpha_value'][species] = float(
guild_table[species][_ALPHA_HEADER])
# * species_abundance[species] (string->float)
total_relative_abundance = numpy.sum([
guild_table[species][_RELATIVE_SPECIES_ABUNDANCE_FIELD]
for species in result['species_list']])
result['species_abundance'] = {}
for species in result['species_list']:
result['species_abundance'][species] = (
guild_table[species][_RELATIVE_SPECIES_ABUNDANCE_FIELD] /
float(total_relative_abundance))
# map the relative foraging activity of a species during a certain season
# (species, season)
result['species_foraging_activity'] = dict()
for species in result['species_list']:
total_activity = numpy.sum([
guild_table[species][_FORAGING_ACTIVITY_PATTERN % season]
for season in result['season_list']])
for season in result['season_list']:
result['species_foraging_activity'][(species, season)] = (
guild_table[species][_FORAGING_ACTIVITY_PATTERN % season] /
float(total_activity))
# * landcover_substrate_index[substrate][landcover] (float)
result['landcover_substrate_index'] = collections.defaultdict(dict)
for raw_landcover_id in landcover_biophysical_table:
landcover_id = int(raw_landcover_id)
for substrate in result['substrate_list']:
substrate_biophysical_header = (
substrate_to_header[substrate]['biophysical'])
result['landcover_substrate_index'][substrate][landcover_id] = (
landcover_biophysical_table[landcover_id][
substrate_biophysical_header])
# * landcover_floral_resources[season][landcover] (float)
result['landcover_floral_resources'] = collections.defaultdict(dict)
for raw_landcover_id in landcover_biophysical_table:
landcover_id = int(raw_landcover_id)
for season in result['season_list']:
            floral_resources_header = season_to_header[season]['biophysical']
            result['landcover_floral_resources'][season][landcover_id] = (
                landcover_biophysical_table[landcover_id][
                    floral_resources_header])
# * species_substrate_index[(species, substrate)] (tuple->float)
result['species_substrate_index'] = collections.defaultdict(dict)
for species in result['species_list']:
for substrate in result['substrate_list']:
substrate_guild_header = substrate_to_header[substrate]['guild']
result['species_substrate_index'][species][substrate] = (
guild_table[species][substrate_guild_header])
# * foraging_activity_index[(species, season)] (tuple->float)
result['foraging_activity_index'] = {}
for species in result['species_list']:
for season in result['season_list']:
key = (species, season)
            foraging_guild_header = season_to_header[season]['guild']
            result['foraging_activity_index'][key] = (
                guild_table[species][foraging_guild_header])
return result
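# The block below is a minimal, self-contained sketch (not part of the model) of the
# header-matching idea used by the parser above. The real module-level regexes
# (e.g. _FORAGING_ACTIVITY_RE_PATTERN) are defined elsewhere in this file; the pattern
# used here is a hypothetical stand-in for illustration only.
def _demo_header_matching():
    """Show how a season name can be pulled out of a guild table header."""
    demo_pattern = 'foraging_activity_([^_]+)_index'  # hypothetical pattern
    headers = ['foraging_activity_spring_index', 'nesting_cavity_availability_index']
    return [
        re.match(demo_pattern, header).group(1)
        for header in headers if re.match(demo_pattern, header)]  # -> ['spring']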
class _CalculateHabitatNestingIndex(object):
"""Closure for HN(x, s) = max_n(N(x, n) ns(s,n)) calculation."""
def __init__(
self, substrate_path_map, species_substrate_index_map,
target_habitat_nesting_index_path):
"""Define parameters necessary for HN(x,s) calculation.
Args:
substrate_path_map (dict): map substrate name to substrate index
raster path. (N(x, n))
species_substrate_index_map (dict): map substrate name to
scalar value of species substrate suitability. (ns(s,n))
target_habitat_nesting_index_path (string): path to target
raster
"""
# try to get the source code of __call__ so task graph will recompute
# if the function has changed
try:
self.__name__ = hashlib.sha1(inspect.getsource(
_CalculateHabitatNestingIndex.__call__
).encode('utf-8')).hexdigest()
except IOError:
# default to the classname if it doesn't work
self.__name__ = _CalculateHabitatNestingIndex.__name__
self.__name__ += str([
substrate_path_map, species_substrate_index_map,
target_habitat_nesting_index_path])
self.substrate_path_list = [
substrate_path_map[substrate_id]
for substrate_id in sorted(substrate_path_map)]
self.species_substrate_suitability_index_array = numpy.array([
species_substrate_index_map[substrate_id]
for substrate_id in sorted(substrate_path_map)]).reshape(
(len(species_substrate_index_map), 1))
self.target_habitat_nesting_index_path = (
target_habitat_nesting_index_path)
def __call__(self):
"""Calculate HN(x, s) = max_n(N(x, n) ns(s,n))."""
def max_op(*substrate_index_arrays):
"""Return the max of index_array[n] * ns[n]."""
result = numpy.max(
numpy.stack([x.flatten() for x in substrate_index_arrays]) *
self.species_substrate_suitability_index_array, axis=0)
result = result.reshape(substrate_index_arrays[0].shape)
result[substrate_index_arrays[0] == _INDEX_NODATA] = _INDEX_NODATA
return result
pygeoprocessing.raster_calculator(
[(x, 1) for x in self.substrate_path_list], max_op,
self.target_habitat_nesting_index_path,
gdal.GDT_Float32, _INDEX_NODATA)
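# A toy numpy sketch (not used by the model) of the max_op nodata handling above:
# HN(x, s) = max_n(N(x, n) * ns(s, n)), with pixels that are nodata in the first
# substrate array passed through as nodata. It assumes _INDEX_NODATA is the nodata
# value used throughout this file.
def _demo_habitat_nesting_max():
    cavity = numpy.array([[0.2, 0.9], [_INDEX_NODATA, 0.5]])
    ground = numpy.array([[0.8, 0.1], [_INDEX_NODATA, 0.4]])
    # per-substrate suitability for one species, shape (n_substrates, 1)
    suitability = numpy.array([[1.0], [0.5]])
    stacked = numpy.stack([x.flatten() for x in (cavity, ground)])
    result = numpy.max(stacked * suitability, axis=0).reshape(cavity.shape)
    result[cavity == _INDEX_NODATA] = _INDEX_NODATA
    return result  # [[0.4, 0.9], [_INDEX_NODATA, 0.5]]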
class _SumRasters(object):
"""Sum all rasters where nodata is 0 unless the entire stack is nodata."""
def __init__(self):
# try to get the source code of __call__ so task graph will recompute
# if the function has changed
try:
self.__name__ = hashlib.sha1(
inspect.getsource(
_SumRasters.__call__
).encode('utf-8')).hexdigest()
except IOError:
# default to the classname if it doesn't work
self.__name__ = (
_SumRasters.__name__)
def __call__(self, *array_list):
"""Calculate sum of array_list and account for nodata."""
valid_mask = numpy.zeros(array_list[0].shape, dtype=bool)
result = numpy.empty_like(array_list[0])
result[:] = 0
for array in array_list:
local_valid_mask = array != _INDEX_NODATA
result[local_valid_mask] += array[local_valid_mask]
valid_mask |= local_valid_mask
result[~valid_mask] = _INDEX_NODATA
return result
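# A toy sketch (not part of the model) of the _SumRasters semantics above: nodata
# pixels contribute 0 to the sum, and an output pixel is nodata only when it is
# nodata in every input. Assumes _INDEX_NODATA is this module's nodata value.
def _demo_sum_rasters():
    a = numpy.array([1.0, _INDEX_NODATA, _INDEX_NODATA])
    b = numpy.array([2.0, 3.0, _INDEX_NODATA])
    return _SumRasters()(a, b)  # -> [3.0, 3.0, _INDEX_NODATA]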
class _PollinatorSupplyOp(object):
"""Calc PA=RA*fa/FR * convolve(PS)."""
def __init__(self):
# try to get the source code of __call__ so task graph will recompute
# if the function has changed
try:
self.__name__ = hashlib.sha1(
inspect.getsource(
_PollinatorSupplyOp.__call__
).encode('utf-8')).hexdigest()
except IOError:
# default to the classname if it doesn't work
self.__name__ = (
_PollinatorSupplyOp.__name__)
def __call__(
self, foraged_flowers_array, floral_resources_array,
convolve_ps_array):
"""Calculating (RA*fa)/FR * convolve(PS)."""
valid_mask = foraged_flowers_array != _INDEX_NODATA
result = numpy.empty_like(foraged_flowers_array)
result[:] = _INDEX_NODATA
zero_mask = floral_resources_array == 0
result[zero_mask & valid_mask] = 0.0
result_mask = valid_mask & ~zero_mask
result[result_mask] = (
foraged_flowers_array[result_mask] /
floral_resources_array[result_mask] *
convolve_ps_array[result_mask])
return result
class _PollinatorSupplyIndexOp(object):
"""Calculate PS(x,s) = FR(x,s) * HN(x,s) * sa(s)."""
def __init__(self, species_abundance):
"""Create a closure for species abundance to multiply later.
Args:
species_abundance (float): value to use in `__call__` when
calculating pollinator abundance.
Returns:
None.
"""
self.species_abundance = species_abundance
# try to get the source code of __call__ so task graph will recompute
# if the function has changed
try:
self.__name__ = hashlib.sha1(
inspect.getsource(
_PollinatorSupplyIndexOp.__call__
).encode('utf-8')).hexdigest()
except IOError:
# default to the classname if it doesn't work
self.__name__ = (
_PollinatorSupplyIndexOp.__name__)
self.__name__ += str(species_abundance)
def __call__(
self, floral_resources_array, habitat_nesting_suitability_array):
"""Calculate f_r * h_n * self.species_abundance."""
result = numpy.empty_like(floral_resources_array)
result[:] = _INDEX_NODATA
valid_mask = floral_resources_array != _INDEX_NODATA
result[valid_mask] = (
self.species_abundance * floral_resources_array[valid_mask] *
habitat_nesting_suitability_array[valid_mask])
return result
class _MultByScalar(object):
"""Calculate a raster * scalar. Mask through nodata."""
def __init__(self, scalar):
"""Create a closure for multiplying an array by a scalar.
Args:
scalar (float): value to use in `__call__` when multiplying by
its parameter.
Returns:
None.
"""
self.scalar = scalar
# try to get the source code of __call__ so task graph will recompute
# if the function has changed
try:
self.__name__ = hashlib.sha1(
inspect.getsource(
_MultByScalar.__call__
).encode('utf-8')).hexdigest()
except IOError:
# default to the classname if it doesn't work
self.__name__ = (
_MultByScalar.__name__)
self.__name__ += str(scalar)
def __call__(self, array):
"""Return array * self.scalar accounting for nodata."""
result = numpy.empty_like(array)
result[:] = _INDEX_NODATA
valid_mask = array != _INDEX_NODATA
result[valid_mask] = array[valid_mask] * self.scalar
return result
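# A toy numeric check (not part of the model) of the on-farm pollinator abundance
# formula FP(x) = (pat * (1 - h)) / (h * (1 - 2*pat) + pat) implemented by
# _OnFarmPollinatorAbundance below, ignoring nodata handling for brevity.
def _demo_on_farm_abundance(h=0.5, pat=0.4):
    return (pat * (1.0 - h)) / (h * (1.0 - 2.0 * pat) + pat)  # -> 0.4 for the defaults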
class _OnFarmPollinatorAbundance(object):
"""Calculate FP(x) = (PAT * (1 - h)) / (h * (1 - 2*pat)+pat))."""
def __init__(self):
# try to get the source code of __call__ so task graph will recompute
# if the function has changed
try:
self.__name__ = hashlib.sha1(
inspect.getsource(
_OnFarmPollinatorAbundance.__call__
).encode('utf-8')).hexdigest()
except IOError:
# default to the classname if it doesn't work
self.__name__ = (
_OnFarmPollinatorAbundance.__name__)
def __call__(self, h_array, pat_array):
"""Return (pad * (1 - h)) / (h * (1 - 2*pat)+pat)) tolerate nodata."""
result =
| numpy.empty_like(h_array) | numpy.empty_like |
#! /usr/bin/env python
from __future__ import print_function
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import os
import sys
import re
from tempfile import *
import colorsys
import numpy as np
from collections import Counter
from astrom_common import *
from astrom_intra import intrabrickshift
from astrom_inter import findAffine
from astrometry.util.file import *
from astrometry.util.fits import *
from astrometry.util.util import *
from astrometry.util.miscutils import *
#from astrometry.blind.plotstuff import *
from astrometry.util.multiproc import multiproc
def find_overlaps(outlines):
'''
outlines: Outlines of the images in RA,Dec
Returns: overlaps, areas
    overlaps: 2-d numpy array, overlaps[i,j] is the fraction of the area of
    image i that also falls within image j.
areas: approximate area of each image, in square degrees
'''
N = len(outlines)
areas = np.zeros(N)
overlaps = np.zeros((N,N))
for i,out in enumerate(outlines):
areas[i] = polygon_area(out)
outlines2 = [np.array(list(zip(*out))) for out in outlines]
for i,out1 in enumerate(outlines):
for j,out2 in enumerate(outlines):
if j <= i:
continue
rr1,dd1 = out1
rr2,dd2 = out2
if min(rr1) > max(rr2) or min(rr2) > max(rr1):
continue
if min(dd1) > max(dd2) or min(dd2) > max(dd1):
continue
outA,outB = outlines2[i], outlines2[j]
if not polygons_intersect(outA, outB):
continue
cp = clip_polygon(outA, outB)
if len(cp) == 0:
continue
cp = np.array(cp)
area = polygon_area((cp[:,0], cp[:,1]))
overlaps[i,j] = area / areas[i]
overlaps[j,i] = area / areas[j]
return overlaps, areas
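# Hedged usage sketch: two 1x1 degree squares offset by 0.5 deg in RA. Assuming the
# astrometry.util polygon helpers imported above behave as find_overlaps() uses them,
# each square should overlap the other by roughly half of its area.
def _demo_find_overlaps():
    sq1 = (np.array([0., 1., 1., 0.]), np.array([0., 0., 1., 1.]))
    sq2 = (np.array([0.5, 1.5, 1.5, 0.5]), np.array([0., 0., 1., 1.]))
    overlaps, areas = find_overlaps([sq1, sq2])
    # expect overlaps[0,1] ~= overlaps[1,0] ~= 0.5 and areas ~= [1., 1.]
    return overlaps, areas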
def wavelength(f):
'''
Maps a string "F110W" to wavelength in int nanometers: 1100
'''
fmap = { 110:1100, 160:1600 }
# trim F...W
if not ((f[0] in ['F','f']) and (f[-1] in ['w','W','N','n'])):
print('WARNING: wavelength("%s"): expected F###W' % f)
f = int(f[1:-1])
f = fmap.get(f, f)
return f
def argsort_filters(filters):
# Reorder them by wavelength...
nf = []
for f in filters:
if f == 'ref':
nf.append(0.)
else:
nf.append(wavelength(f))
I = np.argsort(nf)
return I
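# Quick illustrative check (not called anywhere): 'ref' sorts first (wavelength 0),
# then the remaining filters in order of increasing wavelength.
def _demo_argsort_filters():
    filters = ['F160W', 'ref', 'F110W']
    return argsort_filters(filters)  # -> array([1, 2, 0]): ref, F110W, F160W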
def filters_legend(lp, filters): #, **kwa):
I = argsort_filters(filters)
#plt.legend([lp[i] for i in I], [filters[i] for i in I], **kwa)
return [lp[i] for i in I], [filters[i] for i in I]
def _alfunc(args):
(Ti, Tj, R, i, j) = args
print('Matching', i, 'to', j)
M = Match(Ti, Tj, R)
if len(M.I) == 0:
return None
A = Alignment(Ti, Tj, R, cutrange=R, match=M)
if A.shift() is None:
return None
return A
def readfltgst(fltfn, gstfn, wcsexts):
info = parse_flt_filename(fltfn)
chip = info.get('chip')
filt = info.get('filt')
filt = filt.upper()
name = info.get('name')
hdr = read_header_as_dict(fltfn, 0)
exptime = hdr['EXPTIME']
if chip:
cname = '%s_%i' % (name,chip)
else:
cname = name
wcs = None
for ext in wcsexts:
try:
wcs = Tan(fltfn, ext)
break
except:
print('Failed to read WCS header from extension', ext, 'of', fltfn)
#import traceback
#traceback.print_exc()
print('Read WCS header from', fltfn)
outline = getwcsoutline(wcs)
try:
T = fits_table(gstfn)
print('Read gst file', gstfn, '->', len(T), 'stars')
except:
print('WARNING: failed to read FITS file', gstfn)
import traceback
traceback.print_exc()
return None
cols = T.get_columns()
cols = [c.lower() for c in cols]
#print 'Columns:', cols
if 'mag1_acs' in cols:
magnm = 'mag1_acs'
elif 'mag1_uvis' in cols:
magnm = 'mag1_uvis'
elif 'mag1_ir' in cols:
magnm = 'mag1_ir'
elif 'mag1_wfpc2' in cols:
magnm = 'mag1_wfpc2'
else:
assert(False)
T.magnm = magnm
T.mag = T.get(magnm)
return T, outline, (chip, filt, name, cname, exptime)
def readfltgsts(fltfns, gstfns, wcsexts, Nkeep, Nuniform):
TT = []
outlines = []
chips = []
names = []
cnames = []
filts = []
exptimes = []
Nall = []
for fltfn,gstfn in zip(fltfns, gstfns):
print('gst', gstfn)
print('flt', fltfn)
T, outline, meta = readfltgst(fltfn, gstfn, wcsexts)
(chip, filt, name, cname, exptime) = meta
if Nkeep and len(T) < Nkeep:
print('WARNING: gst file', gstfn, 'contains "only"', len(T), 'stars')
print('outline in RA,Dec:', outline)
rr,dd = outline
ra,dec =
| np.mean(rr) | numpy.mean |
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.axes
from sklearn.decomposition import FastICA #, PCA
from sklearn.cluster import AgglomerativeClustering
from sklearn.utils import as_float_array , check_array
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn import manifold
import umap
from tqdm.notebook import tqdm
from joblib import Parallel, delayed
import warnings
from picard import picard
from ._whitening import whitening
"""
Introduction
------------
Given an input matrix X (n_observations x n_variables) the ICA algorithm decomposes X into :
X = S*A with S (n_observations x n_variables) and A (n_variables x n_variables)
where the columns of S are independent.
In practice, we are often interested in dimension reduction and we can thus set a reduced number
of components such that :
X ~ S*A with S (n_observations x n_components) and A (n_components x n_variables)
where the columns of S are independent.
In signal processing the problem of blind source separation can be solved by ICA with an input
matrix X (observations = times of registration x variables = initial signals).
In gene expression studies, we typically want to unravel independent biological sources. To do so,
we can apply ICA on a matrix X (observations = genes x variables = biological samples).
Important note
--------------
PLEASE NOTE THAT IN THE FOLLOWING THE MATRICES S AND A WILL BE GIVEN IN THEIR TRANSPOSED SHAPE
SIMPLY BY CONVENTION.
"""
def _check_algorithm(algorithm , fun):
all_algorithms = ['fastica_par' , 'fastica_def' , 'fastica_picard' , 'infomax' , 'infomax_ext' , 'infomax_orth']
if algorithm not in all_algorithms:
raise ValueError("Stabilized ICA supports only algorithms in %s, got"
" %s." % (all_algorithms, algorithm))
all_funs = ['exp' , 'cube' , 'logcosh' , 'tanh']
if (isinstance(fun , str)) and (fun not in all_funs):
raise ValueError("Stabilized ICA supports only string functions in %s, got"
" %s. Please see sklearn.FastICA or picard for alternatives (customed functions)" % (all_algorithms, algorithm))
if fun == 'tanh' and algorithm in ['fastica_par' , 'fastica_def']:
warnings.warn(" 'tanh' is not available for sklearn.FastICA. By default, we assumed 'logcosh' was the desired function")
fun = 'logcosh'
if fun == 'logcosh' and algorithm in ['fastica_picard' , 'infomax' , 'infomax_ext' , 'infomax_orth']:
warnings.warn("'logcosh' is not available for picard. By default, we assumed 'tanh' was the desired function")
fun = 'tanh'
if fun != 'tanh' and algorithm in ['fastica_picard' , 'infomax_ext'] :
warnings.warn("Using a different density than `'tanh'` may lead to erratic behavior of the picard algorithm"
"when extended=True (see picard package for more explanations)")
if algorithm == 'fastica_par' :
return 'fastica' , {'algorithm' : 'parallel', 'fun' : fun}
elif algorithm == 'fastica_def' :
return 'fastica' , {'algorithm' : 'deflation', 'fun' : fun}
elif algorithm == 'fastica_picard':
return 'picard' , {'ortho' : True , 'extended' : True , 'fun' : fun}
elif algorithm == 'infomax':
return 'picard' , {'ortho' : False , 'extended' : False , 'fun' : fun}
elif algorithm == 'infomax_ext' :
return 'picard' , {'ortho' : False , 'extended' : True , 'fun' : fun}
elif algorithm == 'infomax_orth' :
return 'picard' , {'ortho' : True , 'extended' : False , 'fun' : fun}
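# Examples of the mapping performed by _check_algorithm (values read directly from the
# branches above):
#   _check_algorithm('infomax_ext', 'tanh')
#       -> ('picard', {'ortho': False, 'extended': True, 'fun': 'tanh'})
#   _check_algorithm('fastica_par', 'logcosh')
#       -> ('fastica', {'algorithm': 'parallel', 'fun': 'logcosh'})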
def _ICA_decomposition(X , dict_params , method , max_iter):
""" Apply FastICA or infomax (picard package) algorithm to the matrix X to solve the ICA problem.
Parameters
----------
X : 2D array-like, shape (n_observations , n_components)
Whitened matrix.
dict_params : dict
dictionary of keyword arguments for the functions FastICA or picard. See _check algorithm.
method : str {'picard' , 'fastica'}
python algorithm to solve the ICA problem. Either FastICA from scikit-learn or infomax and its extensions
from picard package. See _check_algorithm.
max_iter : int
see https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.FastICA.html
Returns
-------
2D array , shape (n_components , n_observations)
components obtained from the ICA decomposition of X
"""
if method == 'picard' :
_, _, S = picard(X.T , max_iter = max_iter , whiten = False , centering = False , **dict_params)
else :
ica = FastICA(max_iter = max_iter , whiten = False , **dict_params)
S = ica.fit_transform(X).T
return S
def _centrotype(X , Sim , cluster_labels):
"""Compute the centrotype of the cluster of ICA components defined by cluster_labels
centrotype : component of the cluster which is the most similar to the other components
of the cluster
Parameters
----------
X : 2D array, shape (n_components , n_observations)
matrix of independent ICA components
Sim : 2D array, shape (n_components , n_components)
similarity matrix for ICA components (i.e rows of X)
cluster_labels : list of integers
indexes of the cluster of components (ex:[0 , 1 , 7] refers to the rows 0, 1 and 7 of X)
Returns
-------
1D array, shape (n_observations)
centrotype of the cluster of ICA components defined by cluster_labels
"""
temp = np.argmax(np.sum(Sim[np.ix_(cluster_labels , cluster_labels)] , axis=0))
return X[cluster_labels[temp] , :]
def _stability_index(Sim , cluster_labels):
"""Compute the stability index for the cluster of ICA components defined by cluster_labels.
Note : Please refer to https://bmcgenomics.biomedcentral.com/track/pdf/10.1186/s12864-017-4112-9
(section "Method") for the exact formula of the stability index.
Parameters
----------
Sim : 2D array, shape (n_components , n_components)
similarity matrix for ICA components
cluster_labels : list of integers
indexes of the cluster of components (ex:[0 , 1 , 7] refers to the rows 0, 1 and 7 of X)
Returns
-------
Float between 0 and 1
stability index for the cluster of ICA components defined by cluster_labels
"""
temp = Sim[np.ix_(cluster_labels , cluster_labels)]
ex_cluster = list(set(range(Sim.shape[1])) - set(cluster_labels))
#aics = average intra-cluster similarities
aics = (1/len(cluster_labels)**2)*np.sum(temp)
#aecs = average extra-cluster similarities
aecs = (1/(len(ex_cluster)*len(cluster_labels)))*np.sum(Sim[np.ix_(cluster_labels , ex_cluster)])
return aics - aecs
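# A toy numeric check (illustration only) of the two cluster metrics above, using a
# 4 x 4 similarity matrix with two well-separated clusters {0, 1} and {2, 3}.
def _demo_stability_index():
    Sim = np.array([[1.0, 0.9, 0.1, 0.2],
                    [0.9, 1.0, 0.2, 0.1],
                    [0.1, 0.2, 1.0, 0.8],
                    [0.2, 0.1, 0.8, 1.0]])
    # average intra-cluster similarity = 3.8 / 4 = 0.95
    # average extra-cluster similarity = 0.6 / 4 = 0.15
    return _stability_index(Sim, [0, 1])  # -> 0.95 - 0.15 = 0.8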
class StabilizedICA(object):
""" Implement a stabilized version of the Independent Component Analysis algorithm
Parameters
----------
n_components : int
number of ICA components
max_iter : int
maximum number of iteration for the FastICA algorithm
n_jobs : int
number of jobs to run in parallel. -1 means using all processors.
See the joblib package documentation for more explanations. Default is 1.
verbose: int
control the verbosity: the higher, the more messages. Default is 0.
Attributes
----------
S_: 2D array, shape (n_components , n_observations)
array of sources/metagenes, each line corresponds to a stabilized ICA component (i.e the centrotype of
a cluster of components)
A_: 2D array, shape (n_variables , n_components)
pseudo-inverse of S_, each column corresponds to a metasample
stability_indexes_ : 1D array, shape (n_components)
stability indexes for the stabilized ICA components
Notes
----------
    n_runs is the number of times we repeat the ICA decomposition; see the fit method
"""
def __init__(self , n_components , max_iter , n_jobs = 1 , verbose = 0):
self.n_components = n_components
self.max_iter = max_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.S_ = None
self.A_ = None
self.stability_indexes_ = None
def fit(self, X , n_runs , fun = 'logcosh' , algorithm = 'fastica_par' , plot = False , normalize = True , reorientation = True , whiten = True
, pca_solver = 'full' , chunked = False , chunk_size = None , zero_center = True):
"""1. Compute the ICA components of X n_runs times
2. Cluster all the components (N = self.n_components*n_runs) with agglomerative
hierarchical clustering (average linkage) into self.n_components clusters
3. For each cluster compute its stability index and return its centrotype as the
final ICA component
Note : Please refer to ICASSO method for more details about the process
(see https://www.cs.helsinki.fi/u/ahyvarin/papers/Himberg03.pdf)
Parameters
----------
X : 2D array-like, shape (n_observations , n_variables) or (n_observations , n_components) if whiten is False.
n_runs : int
number of times we run the FastICA algorithm
fun : str {'cube' , 'exp' , 'logcosh' , 'tanh'} or function, optional.
If algorithm is in {'fastica_par' , 'fastica_def'}, it represents the functional form of the G function used in
the approximation to neg-entropy. Could be either ‘logcosh’, ‘exp’, or ‘cube’.
If algorithm is in {'fastica_picard' , 'infomax' , 'infomax_ext' , 'infomax_orth'}, it is associated with the choice of
a density model for the sources. See supplementary explanations for more details.
The default is 'logcosh'.
algorithm : str {'fastica_par' , 'fastica_def' , 'fastica_picard' , 'infomax' , 'infomax_ext' , 'infomax_orth'}, optional.
            The algorithm applied for solving the ICA problem at each run. Please see the supplementary explanations for more details.
The default is 'fastica_par', i.e FastICA from sklearn with parallel implementation.
plot : boolean, optional
if True plot the stability indexes for each cluster in decreasing order.
The default is False.
normalize : boolean, optional
if True normalize the rows of S_ (i.e the stabilized ICA components) to unit standard deviation.
The default is True.
reorientation : boolean,optional
if True re-oriente the rows of S_ towards positive heavy tail.
The default is True.
whiten : boolean, optional
if True the matrix X is whitened, i.e centered then projected in the space defined by its
first self.n_components PCA components and reduced to unit variance along each of these axis.
If False the input X matrix must be already whitened.
The default is True.
pca_solver : str {‘auto’, ‘full’, ‘arpack’, ‘randomized’ , 'lobpcg'}, optional
solver for the different PCA methods. Please note that some solvers may not be compatible with
some of the PCA methods. See _whitening.py for more details.
The default is "full" (i.e SVD decomposition)
chunked : boolean, optional
Parameter for the whitening step, see _whitening.py for more details.
The default is False.
chunk_size : int, optional
Parameter for the whitening step, see _whitening.py for more details.
The default is None.
zero_center : boolean, optional
Parameter for the whitening step, see _whitening.py for more details.
The default is True.
Returns
-------
None.
Note
------
If whiten is False, we suppose that X results from a whitening pre-processing step. The columns must be
centered, scaled to unit variance and uncorrelated.
"""
## Initialisation
n_observations , n_variables = X.shape
Centrotypes = np.zeros((self.n_components , n_observations))
Index = np.zeros(self.n_components)
method , dict_params = _check_algorithm(algorithm, fun)
X = check_array(X , dtype=FLOAT_DTYPES , accept_sparse=True , copy=whiten)
## Pre-processing (whitening)
if whiten :
# pca = PCA(n_components = self.n_components , whiten=True , svd_solver = pca_solver)
# X_w = pca.fit_transform(X)
X_w = whitening(X , n_components = self.n_components , svd_solver = pca_solver , chunked = chunked , chunk_size = chunk_size
, zero_center = zero_center)
else :
X_w = as_float_array(X, copy=False)
## Compute the self.n_components*n_runs ICA components and store into array Components
parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)
# We noticed some numerical instabilities with FastICA from sklearn. We deal with this with the following lines.
if algorithm in ['fastica_par' , 'fastica_def'] :
maxtrials = 10
for i in range(maxtrials):
try:
decomposition = parallel(delayed(_ICA_decomposition)(X_w , dict_params = dict_params ,
method = method, max_iter = self.max_iter)
for _ in range(n_runs))
except ValueError:
if i < maxtrials - 1:
print("FastICA from sklearn did not converge due to numerical instabilities - Retrying...")
continue
else:
print("Too many attempts : FastICA did not converge !")
raise
break
else :
decomposition = parallel(delayed(_ICA_decomposition)(X_w , dict_params = dict_params , method = method, max_iter = self.max_iter)
for _ in range(n_runs))
self._Components = np.vstack(decomposition)
## Compute Similarity matrix between ICA components (Pearson correlation)
self._Sim = np.abs(np.corrcoef(x=self._Components , rowvar=True))
## Cluster the components with hierarchical clustering
clustering = AgglomerativeClustering(n_clusters = self.n_components , affinity = "precomputed"
,linkage = 'average' ).fit(1 - self._Sim)
self._clusters = clustering.labels_
## For each cluster compute the stability index and the centrotype
for i in range(self.n_components):
cluster_labels = list(np.argwhere(clustering.labels_ == i ).flatten())
Centrotypes[i , :] = _centrotype(self._Components , self._Sim , cluster_labels)
Index[i] = _stability_index(self._Sim , cluster_labels)
## Sort the centrotypes (i.e final components) by stability index
indices = np.argsort(-1*Index)
Centrotypes , Index = Centrotypes[indices , :] , Index[indices]
# Re-oriente the stabilized ICA components towards positive heaviest tails
if reorientation :
self.S_ = (np.where(stats.skew(Centrotypes , axis = 1) >= 0 , 1 , -1).reshape(-1 , 1))*Centrotypes
else :
self.S_ = Centrotypes
# Normalize the stabilized ICA components to unit variance
if normalize :
self.S_ = self.S_/(np.std(self.S_ , axis = 1).reshape(-1 ,1))
self.stability_indexes_ = Index
self.A_ = (X.T).dot(np.linalg.pinv(self.S_))
#self.A_ = np.dot(X.T , np.linalg.pinv(self.S_))
if plot:
plt.figure(figsize=(10 , 7))
plt.plot(range(1 , self.n_components + 1) , self.stability_indexes_ , linestyle='--', marker='o')
plt.title("Stability of ICA components")
plt.xlabel("ICA components")
plt.ylabel("Stability index")
return
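    # Hedged usage sketch (toy sizes; parameter values are illustrative only):
    #   X = np.random.rand(200, 50)                    # 200 observations, 50 variables
    #   sica = StabilizedICA(n_components=5, max_iter=2000)
    #   sica.fit(X, n_runs=10)
    #   sica.S_.shape                  # -> (5, 200)
    #   sica.A_.shape                  # -> (50, 5)
    #   sica.stability_indexes_.shape  # -> (5,)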
def projection(self , method = "mds" , ax = None):
"""Plot the ICA components computed during fit() (N = self.n_components*n_runs) in 2D.
Approximate the original dissimilarities between components by Euclidean distance.
Each cluster is represented with a different color.
Note : We use the dissimilarity measure sqrt(1 - |rho_ij|) (rho the Pearson correlation)
instead of 1 - |rho_ij| to reduce overlapping.
Parameters
----------
method : string, optional
name of the dimensionality reduction method (e.g "tsne" , "mds" or "umap")
The default is "umap".
ax : matplotlib.axes, optional
The default is None.
Returns
-------
None.
Note
-------
Please note that multidimensional scaling (MDS) is more computationally demanding than t-SNE or UMAP.
However it takes into account the global structures of the data set while the others don't. For t-SNE or
UMAP one cannot really interpret the inter-cluster distances.
For more details about the UMAP (Uniform Manifold Approximation and Projection),
see https://pypi.org/project/umap-learn/
"""
if ax is None :
fig , ax = plt.subplots(figsize = (10 , 6))
elif not isinstance(ax , matplotlib.axes.Axes) :
warnings.warn("ax should be a matplotlib.axes.Axes object. It was redefined by default.")
fig , ax = plt.subplots(figsize = (10 , 6))
if method == "tsne":
embedding = manifold.TSNE(n_components = 2 , metric = "precomputed")
elif method == "mds" :
embedding = manifold.MDS(n_components=2, dissimilarity= "precomputed" , n_jobs = -1)
elif method == "umap" :
embedding = umap.UMAP(n_components = 2 , metric = "precomputed" )
P = embedding.fit_transform(
| np.sqrt(1 - self._Sim) | numpy.sqrt |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import openvino.opset8 as ov
from openvino.impl import Shape, Type
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
@pytest.mark.parametrize(
"ng_api_fn, numpy_fn, range_start, range_end",
[
(ov.absolute, np.abs, -1, 1),
(ov.abs, np.abs, -1, 1),
(ov.acos, np.arccos, -1, 1),
(ov.acosh, np.arccosh, 1, 2),
(ov.asin, np.arcsin, -1, 1),
(ov.asinh, np.arcsinh, -1, 1),
(ov.atan, np.arctan, -100.0, 100.0),
(ov.atanh, np.arctanh, 0.0, 1.0),
(ov.ceiling, np.ceil, -100.0, 100.0),
(ov.ceil, np.ceil, -100.0, 100.0),
(ov.cos, np.cos, -100.0, 100.0),
(ov.cosh, np.cosh, -100.0, 100.0),
(ov.exp, np.exp, -100.0, 100.0),
(ov.floor, np.floor, -100.0, 100.0),
(ov.log, np.log, 0, 100.0),
(ov.relu, lambda x: np.maximum(0, x), -100.0, 100.0),
(ov.sign, np.sign, -100.0, 100.0),
(ov.sin, np.sin, -100.0, 100.0),
(ov.sinh, np.sinh, -100.0, 100.0),
(ov.sqrt, np.sqrt, 0.0, 100.0),
(ov.tan, np.tan, -1.0, 1.0),
(ov.tanh, np.tanh, -100.0, 100.0),
],
)
def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
np.random.seed(133391)
input_data = (range_start + np.random.rand(2, 3, 4) * (range_end - range_start)).astype(np.float32)
expected = numpy_fn(input_data)
result = run_op_node([input_data], ng_api_fn)
assert np.allclose(result, expected, rtol=0.001)
@pytest.mark.parametrize(
"ng_api_fn, numpy_fn, input_data",
[
pytest.param(ov.absolute, np.abs, np.float32(-3)),
pytest.param(ov.abs, np.abs, np.float32(-3)),
pytest.param(ov.acos, np.arccos, np.float32(-0.5)),
pytest.param(ov.asin, np.arcsin, np.float32(-0.5)),
pytest.param(ov.atan, np.arctan, np.float32(-0.5)),
pytest.param(ov.ceiling, np.ceil, np.float32(1.5)),
pytest.param(ov.ceil, np.ceil, np.float32(1.5)),
pytest.param(ov.cos, np.cos, np.float32(np.pi / 4.0)),
pytest.param(ov.cosh, np.cosh, np.float32(np.pi / 4.0)),
pytest.param(ov.exp, np.exp, np.float32(1.5)),
pytest.param(ov.floor, np.floor, np.float32(1.5)),
pytest.param(ov.log, np.log, np.float32(1.5)),
pytest.param(ov.relu, lambda x: np.maximum(0, x), np.float32(-0.125)),
pytest.param(ov.sign, np.sign, np.float32(0.0)),
pytest.param(ov.sin, np.sin, np.float32(np.pi / 4.0)),
pytest.param(ov.sinh, np.sinh,
| np.float32(0.0) | numpy.float32 |
from re import T
import matplotlib.pyplot as plt
import random
import numpy as np
import pandas as pd
import concurrent
import pandas as pd
import seaborn as sns
from sklearn import datasets
from sklearn import manifold
import sklearn
import tqdm
import glob
from PIL import Image
from typing import *
from concurrent.futures import ProcessPoolExecutor
from types import SimpleNamespace
import os
from pathlib import Path
def ifnone(a, b):
"""
Return if None
"""
return b if a is None else a
def listify(o):
"""
Convert to list
"""
if o is None:
return []
if isinstance(o, list):
return o
if isinstance(o, str):
return [o]
if isinstance(o, Iterable):
return list(o)
return [o]
def num_cpus() -> int:
"Get number of cpus"
try:
return len(os.sched_getaffinity(0))
except AttributeError:
return os.cpu_count()
_default_cpus = max(16, num_cpus())
defaults = SimpleNamespace(
cpus=_default_cpus, cmap="viridis", return_fig=False, silent=False
)
def parallel(func, arr: Collection, max_workers: int = None, leave=False): # %t
"Call `func` on every element of `arr` in parallel using `max_workers`."
max_workers = ifnone(max_workers, defaults.cpus)
if max_workers < 2:
results = [
func(o, i)
for i, o in tqdm.tqdm(enumerate(arr), total=len(arr), leave=leave)
]
else:
with ProcessPoolExecutor(max_workers=max_workers) as ex:
futures = [ex.submit(func, o, i) for i, o in enumerate(arr)]
results = []
for f in tqdm.tqdm(
concurrent.futures.as_completed(futures), total=len(arr), leave=leave
):
results.append(f.result())
if any([o is not None for o in results]):
return results
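# Hedged usage sketch: `func` must accept (element, index). With max_workers=1 the
# calls run serially and in order; with more workers, results arrive in completion order.
#   squares = parallel(lambda o, i: o * o, [1, 2, 3], max_workers=1)  # -> [1, 4, 9]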
def get_label(fname, *args, **kwargs):
return Path(fname).parent.name
def run_clustering(mnist_tr, files,nclust):
from sklearn.cluster import KMeans
km = KMeans(n_clusters = nclust).fit_predict(mnist_tr)
df = pd.DataFrame.from_dict({x:km[i] for i,x in enumerate(files)}.items())
df.to_csv("output_cluster.csv")
return df
def plot_unsupervised(mnist_tr, features, image_size):
tx, ty = mnist_tr[:, 0], mnist_tr[:, 1]
tx = (tx-np.min(tx)) / (np.max(tx) - np.min(tx))
ty = (ty-np.min(ty)) / (np.max(ty) -
| np.min(ty) | numpy.min |
from __future__ import print_function, division, absolute_import
import time
import multiprocessing
import pickle
from collections import defaultdict
import warnings
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import numpy as np
import six.moves as sm
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import imgaug as ia
import imgaug.multicore as multicore
import imgaug.random as iarandom
from imgaug import augmenters as iaa
from imgaug.testutils import reseed
from imgaug.augmentables.batches import Batch, UnnormalizedBatch
class TestPool(unittest.TestCase):
def setUp(self):
reseed()
def test___init___seed_out_of_bounds(self):
augseq = iaa.Noop()
with self.assertRaises(AssertionError) as context:
_ = multicore.Pool(augseq, seed=iarandom.SEED_MAX_VALUE + 100)
assert "Expected `seed` to be" in str(context.exception)
def test_property_pool(self):
mock_Pool = mock.MagicMock()
mock_Pool.return_value = mock_Pool
mock_Pool.close.return_value = None
mock_Pool.join.return_value = None
with mock.patch("multiprocessing.Pool", mock_Pool):
augseq = iaa.Noop()
pool_config = multicore.Pool(
augseq, processes=1, maxtasksperchild=4, seed=123)
with pool_config as pool:
assert pool.processes == 1
assert pool._pool is None
assert mock_Pool.call_count == 1
assert mock_Pool.close.call_count == 1
assert mock_Pool.join.call_count == 1
assert mock_Pool.call_args[0][0] == 1 # processes
assert mock_Pool.call_args[1]["initargs"] == (augseq, 123)
assert mock_Pool.call_args[1]["maxtasksperchild"] == 4
def test_processes(self):
augseq = iaa.Noop()
mock_Pool = mock.MagicMock()
mock_cpu_count = mock.Mock()
patch_pool = mock.patch("multiprocessing.Pool", mock_Pool)
patch_cpu_count = mock.patch("multiprocessing.cpu_count",
mock_cpu_count)
with patch_pool, patch_cpu_count:
# (cpu cores available, processes requested, processes started)
combos = [
(1, 1, 1),
(2, 1, 1),
(3, 1, 1),
(1, 2, 2),
(3, 2, 2),
(1, None, None),
(2, None, None),
(3, None, None),
(1, -1, 1),
(2, -1, 1),
(3, -1, 2),
(4, -2, 2)
]
for cores_available, processes_req, expected in combos:
with self.subTest(cpu_count_available=cores_available,
processes_requested=processes_req):
mock_cpu_count.return_value = cores_available
with multicore.Pool(augseq,
processes=processes_req) as _pool:
pass
if expected is None:
assert mock_Pool.call_args[0][0] is None
else:
assert mock_Pool.call_args[0][0] == expected
@mock.patch("multiprocessing.cpu_count")
@mock.patch("multiprocessing.Pool")
def test_cpu_count_does_not_exist(self, mock_pool, mock_cpu_count):
def _side_effect():
raise NotImplementedError
mock_cpu_count.side_effect = _side_effect
augseq = iaa.Noop()
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
with multicore.Pool(augseq, processes=-1):
pass
assert mock_cpu_count.call_count == 1
assert mock_pool.call_count == 1
# 'processes' arg to Pool was expected to be set to None as cpu_count
# produced an error
assert mock_pool.call_args_list[0][0][0] is None
assert len(caught_warnings) == 1
assert (
"Could not find method multiprocessing.cpu_count(). "
in str(caught_warnings[-1].message))
@classmethod
def _test_map_batches_both(cls, call_async):
for clazz in [Batch, UnnormalizedBatch]:
augseq = iaa.Noop()
mock_Pool = mock.MagicMock()
mock_Pool.return_value = mock_Pool
mock_Pool.map.return_value = "X"
mock_Pool.map_async.return_value = "X"
with mock.patch("multiprocessing.Pool", mock_Pool):
batches = [
clazz(images=[ia.quokka()]),
clazz(images=[ia.quokka()+1])
]
with multicore.Pool(augseq, processes=1) as pool:
if call_async:
_ = pool.map_batches_async(batches)
else:
_ = pool.map_batches(batches)
if call_async:
to_check = mock_Pool.map_async
else:
to_check = mock_Pool.map
assert to_check.call_count == 1
# args, arg 0
assert to_check.call_args[0][0] == multicore._Pool_starworker
# args, arg 1 (batches with ids), tuple 0,
# entry 0 in tuple (=> batch id)
assert to_check.call_args[0][1][0][0] == 0
# args, arg 1 (batches with ids), tuple 0,
# entry 1 in tuple (=> batch)
assert np.array_equal(
to_check.call_args[0][1][0][1].images_unaug,
batches[0].images_unaug)
# args, arg 1 (batches with ids), tuple 1,
# entry 0 in tuple (=> batch id)
assert to_check.call_args[0][1][1][0] == 1
# args, arg 1 (batches with ids), tuple 1,
# entry 1 in tuple (=> batch)
assert np.array_equal(
to_check.call_args[0][1][1][1].images_unaug,
batches[1].images_unaug)
def test_map_batches(self):
self._test_map_batches_both(call_async=False)
def test_map_batches_async(self):
self._test_map_batches_both(call_async=True)
@classmethod
def _test_imap_batches_both(cls, call_unordered):
for clazz in [Batch, UnnormalizedBatch]:
batches = [clazz(images=[ia.quokka()]),
clazz(images=[ia.quokka()+1])]
def _generate_batches():
for batch in batches:
yield batch
augseq = iaa.Noop()
mock_Pool = mock.MagicMock()
mock_Pool.return_value = mock_Pool
mock_Pool.imap.return_value = batches
mock_Pool.imap_unordered.return_value = batches
with mock.patch("multiprocessing.Pool", mock_Pool):
with multicore.Pool(augseq, processes=1) as pool:
gen = _generate_batches()
if call_unordered:
_ = list(pool.imap_batches_unordered(gen))
else:
_ = list(pool.imap_batches(gen))
if call_unordered:
to_check = mock_Pool.imap_unordered
else:
to_check = mock_Pool.imap
assert to_check.call_count == 1
assert to_check.call_args[0][0] == multicore._Pool_starworker
# convert generator to list, make it subscriptable
arg_batches = list(to_check.call_args[0][1])
# args, arg 1 (batches with ids), tuple 0,
# entry 0 in tuple (=> batch id)
assert arg_batches[0][0] == 0
# tuple 0, entry 1 in tuple (=> batch)
assert np.array_equal(
arg_batches[0][1].images_unaug,
batches[0].images_unaug)
# tuple 1, entry 0 in tuple (=> batch id)
assert arg_batches[1][0] == 1
# tuple 1, entry 1 in tuple (=> batch)
assert np.array_equal(
arg_batches[1][1].images_unaug,
batches[1].images_unaug)
@classmethod
def _test_imap_batches_both_output_buffer_size(cls, call_unordered,
timeout=0.075):
batches = [
ia.Batch(images=[np.full((1, 1), i, dtype=np.uint8)])
for i in range(8)]
def _generate_batches(times):
for batch in batches:
yield batch
times.append(time.time())
def callfunc(pool, gen, output_buffer_size):
func = (
pool.imap_batches_unordered
if call_unordered
else pool.imap_batches
)
for v in func(gen, output_buffer_size=output_buffer_size):
yield v
def contains_all_ids(inputs):
arrs = np.uint8([batch.images_aug for batch in inputs])
ids_uq = np.unique(arrs)
return (
len(ids_uq) == len(batches)
and np.all(0 <= ids_uq)
and np.all(ids_uq < len(batches))
)
augseq = iaa.Noop()
with multicore.Pool(augseq, processes=1) as pool:
# no output buffer limit, there should be no noteworthy lag
# for any batch requested from _generate_batches()
times = []
gen = callfunc(pool, _generate_batches(times), None)
result = next(gen)
time.sleep(timeout)
result = [result] + list(gen)
times = np.float64(times)
times_diffs = times[1:] - times[0:-1]
assert np.all(times_diffs < timeout)
assert contains_all_ids(result)
# with output buffer limit, but set to the number of batches,
# i.e. should again not lead to any lag
times = []
gen = callfunc(pool, _generate_batches(times), len(batches))
result = next(gen)
time.sleep(timeout)
result = [result] + list(gen)
times = np.float64(times)
times_diffs = times[1:] - times[0:-1]
assert np.all(times_diffs < timeout)
assert contains_all_ids(result)
# With output buffer limit of #batches/2 (=4), followed by a
# timeout after starting the loading process. This should quickly
# load batches until the buffer is full, then wait until the
# batches are requested from the buffer (i.e. after the timeout
# ended) and then proceed to produce batches at the speed at which
            # they are requested. This should lead to a measurable lag between
# batch 4 and 5 (matching the timeout).
times = []
gen = callfunc(pool, _generate_batches(times), 4)
result = next(gen)
time.sleep(timeout)
result = [result] + list(gen)
times = np.float64(times)
times_diffs = times[1:] - times[0:-1]
# use -1 here because we have N-1 times for N batches as
# diffs denote diffs between Nth and N+1th batch
assert np.all(times_diffs[0:4-1] < timeout)
assert np.all(times_diffs[4-1:4-1+1] >= timeout)
assert np.all(times_diffs[4-1+1:] < timeout)
assert contains_all_ids(result)
def test_imap_batches(self):
self._test_imap_batches_both(call_unordered=False)
def test_imap_batches_unordered(self):
self._test_imap_batches_both(call_unordered=True)
def test_imap_batches_output_buffer_size(self):
self._test_imap_batches_both_output_buffer_size(call_unordered=False)
def test_imap_batches_unordered_output_buffer_size(self):
self._test_imap_batches_both_output_buffer_size(call_unordered=True)
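    # Hedged usage sketch of the behaviour exercised above (outside of the mocks):
    #   with multicore.Pool(augseq, processes=1) as pool:
    #       for batch_aug in pool.imap_batches(batch_generator, output_buffer_size=4):
    #           ...  # at most ~4 augmented batches are buffered ahead of consumption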
@classmethod
def _assert_each_augmentation_not_more_than_once(cls, batches_aug):
sum_to_vecs = defaultdict(list)
for batch in batches_aug:
assert not np.array_equal(batch.images_aug[0], batch.images_aug[1])
vec = batch.images_aug.flatten()
vecsum = int(np.sum(vec))
if vecsum in sum_to_vecs:
for other_vec in sum_to_vecs[vecsum]:
assert not np.array_equal(vec, other_vec)
else:
sum_to_vecs[vecsum].append(vec)
def test_augmentations_with_seed_match(self):
augseq = iaa.AddElementwise((0, 255))
image = np.zeros((10, 10, 1), dtype=np.uint8)
batch = ia.Batch(images=np.uint8([image, image]))
batches = [batch.deepcopy() for _ in sm.xrange(60)]
# seed=1
with multicore.Pool(augseq, processes=2, maxtasksperchild=30,
seed=1) as pool:
batches_aug1 = pool.map_batches(batches, chunksize=2)
# seed=1
with multicore.Pool(augseq, processes=2, seed=1) as pool:
batches_aug2 = pool.map_batches(batches, chunksize=1)
# seed=2
with multicore.Pool(augseq, processes=2, seed=2) as pool:
batches_aug3 = pool.map_batches(batches, chunksize=1)
assert len(batches_aug1) == 60
assert len(batches_aug2) == 60
assert len(batches_aug3) == 60
for b1, b2, b3 in zip(batches_aug1, batches_aug2, batches_aug3):
# images were augmented
assert not np.array_equal(b1.images_unaug, b1.images_aug)
assert not np.array_equal(b2.images_unaug, b2.images_aug)
assert not np.array_equal(b3.images_unaug, b3.images_aug)
# original images still the same
assert np.array_equal(b1.images_unaug, batch.images_unaug)
assert np.array_equal(b2.images_unaug, batch.images_unaug)
assert np.array_equal(b3.images_unaug, batch.images_unaug)
# augmentations for same seed are the same
assert np.array_equal(b1.images_aug, b2.images_aug)
# augmentations for different seeds are different
assert not np.array_equal(b1.images_aug, b3.images_aug)
# make sure that batches for the two pools with same seed did not
# repeat within results (only between the results of the two pools)
for batches_aug in [batches_aug1, batches_aug2, batches_aug3]:
self._assert_each_augmentation_not_more_than_once(batches_aug)
def test_augmentations_with_seed_match_for_images_and_keypoints(self):
augseq = iaa.AddElementwise((0, 255))
image = np.zeros((10, 10, 1), dtype=np.uint8)
# keypoints here will not be changed by augseq, but they will induce
# deterministic mode to start in augment_batches() as each batch
# contains images AND keypoints
kps = ia.KeypointsOnImage([ia.Keypoint(x=2, y=0)], shape=(10, 10, 1))
batch = ia.Batch(images=np.uint8([image, image]), keypoints=[kps, kps])
batches = [batch.deepcopy() for _ in sm.xrange(60)]
# seed=1
with multicore.Pool(augseq, processes=2, maxtasksperchild=30,
seed=1) as pool:
batches_aug1 = pool.map_batches(batches, chunksize=2)
# seed=1
with multicore.Pool(augseq, processes=2, seed=1) as pool:
batches_aug2 = pool.map_batches(batches, chunksize=1)
# seed=2
with multicore.Pool(augseq, processes=2, seed=2) as pool:
batches_aug3 = pool.map_batches(batches, chunksize=1)
assert len(batches_aug1) == 60
assert len(batches_aug2) == 60
assert len(batches_aug3) == 60
for batches_aug in [batches_aug1, batches_aug2, batches_aug3]:
for batch in batches_aug:
for keypoints_aug in batch.keypoints_aug:
assert keypoints_aug.keypoints[0].x == 2
assert keypoints_aug.keypoints[0].y == 0
for b1, b2, b3 in zip(batches_aug1, batches_aug2, batches_aug3):
# images were augmented
assert not np.array_equal(b1.images_unaug, b1.images_aug)
assert not
| np.array_equal(b2.images_unaug, b2.images_aug) | numpy.array_equal |
import warnings
from general.base import Struct
import matplotlib.pylab as pl
import numpy as np
import scipy.stats as st
from uq.gpc import GPC
from uq.samples import Samples
rv_list={
'arcsine': Struct(name='arcsine',
fun=st.arcsine,
supp=
| np.array([0, 1], dtype=np.float) | numpy.array |
import numpy as np
import os
import matplotlib
import tkinter as tk
import tkinter.messagebox
# Set the matplotlib backend to tk figures.
# If imported, cannot plot to regular matplotlib figures!
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from seas.signalanalysis import lag_n_autocorr, sort_noise, short_time_fourier_transform
from seas.waveletAnalysis import waveletAnalysis
from seas.hdf5manager import hdf5manager
from seas.defaults import config
from seas.ica import rebuild_eigenbrain
from seas.domains import get_domain_map, domain_map, get_domain_edges
def run_gui(components, rotate=0, savepath=None, default_assignment=None):
'''
Create a tkinter GUI to select noise components from ica-decomposition components file.
Returns toggle, a boolean array
of either True or False. Components that have been determined to be
noise are 'True', components to keep are 'False'.
Optional toggle input is a boolean of starting values
for noise_components.
'''
print('\nStarting ICA Component Selection GUI\n-----------------------')
print(
'If you experience problems with this GUI, check your tk version with the following terminal command:'
)
print('\tpython -m tkinter')
print(
'This will launch a GUI that tells you your tk version. Make sure it is >=8.6'
)
print(
"If installed using conda, use 'conda install -c anaconda tk' to update"
)
if type(components) is str:
f = hdf5manager(components)
print('loading components...')
components = f.load()
load_hdf5 = True
else:
load_hdf5 = False
if savepath is not None:
f = hdf5manager(savepath)
load_hdf5 = True
assert type(components) is dict, 'Components were not in expected format'
# load all components from dict
eig_vec = components['eig_vec']
if 'thresholds' in components.keys():
thresholds = components['thresholds']
roimask = components['roimask']
shape = components['shape']
t, x, y = shape
eigb_shape = (x, y)
# Find number of components
n_components = eig_vec.shape[1]
print('number of components:', n_components)
# start timecourses variable for storing rebuilt timecourses of PCs
if 'timecourses' in components:
print('timecourses found')
timecourses = components['timecourses']
else:
print('timecourses not found in components')
print('Initializing empty timecourse vector')
timecourses = components['timecourses']
# start noise_components variable for listing which components are noise
if ('noise_components' in components) and ('cutoff' in components):
noise_components = components['noise_components']
cutoff = components['cutoff']
else:
print('calculating noise components...')
noise_components, cutoff = sort_noise(timecourses)
maxval = np.argmax(noise_components == 1)
if 'lag1_full' in components:
lag1 = components['lag1_full']
_, _, log_pdf = sort_noise(timecourses, lag1=lag1, return_logpdf=True)
else:
lag1 = lag_n_autocorr(timecourses, 1, verbose=False)
_, _, log_pdf = sort_noise(timecourses, return_logpdf=True)
# start toggle variable for checking which components shouldn't
# be included
if 'artifact_components' in components:
toggle = components['artifact_components']
else:
print('initializing artifact_components toggle')
toggle = np.zeros((n_components,), dtype='uint8')
if 'flipped' in components:
flipped = components['flipped']
timecourses = timecourses * flipped[:, None]
eig_vec = eig_vec * flipped
if 'domain_ROIs' in components:
domain_ROIs = components['domain_ROIs']
if rotate > 0:
domain_ROIs = np.rot90(domain_ROIs, rotate)
if 'region_assignment' in components:
region_assignment = components['region_assignment']
else:
print('initializing region_assignment vector')
n_domains = int(np.nanmax(domain_ROIs) + 1)
region_assignment = np.zeros((n_domains,))
if default_assignment is not None:
try:
region_assignment += default_assignment
except:
raise TypeError('Region Assignment was invalid {0}'.format(
default_assignment))
else:
region_assignment[:] = np.nan
else:
print('domain_ROIs not found')
domain_ROIs = None
# Load mask indexing for faster rebuilding of eigenbrains
if roimask is not None:
maskind = np.where(roimask.flat == 1)
else:
maskind = None
region_cm = config['colormap']['domains']
component_cmap = config['colormap']['components']
corr_cmap = config['colormap']['correlation']
regions = config['regions']
# convert from defaults dict to sorted list of tuples
keylist = []
valuelist = []
for key in regions:
# print(key, regions[key])
keylist.append(key)
valuelist.append(regions[key])
sortindex = [i[0] for i in sorted(enumerate(valuelist), key=lambda x: x[1])]
regions = []
for i in sortindex:
regions.append((keylist[i], valuelist[i]))
# general font settings
LARGE_FONT = ('Verdana', 12)
togglesave = [True] # default on for easy click-to-save figures
toggledebug = [False] # default off -- don't load ICA pixel properties
def saveFigure(fig_handle):
print('figure was pressed!')
# callback for clicks on image.
# Change color and toggle value
if togglesave[0]:
print('trying to save..')
file_path = tk.filedialog.asksaveasfilename()
print(file_path)
if type(file_path) is str: # If path was provided
# If there was no extension, add .png
if os.path.splitext(file_path)[1] == '':
file_path += '.png'
if os.path.isfile(file_path):
print('file already exists!')
yn = tk.messagebox.askquestion(
'Overwriting File',
'The following file already exists, would you '
'like to overwrite it?' + '\n' + file_path)
if yn == 'yes':
fig_handle.savefig(file_path)
print('File saved to:', file_path)
else:
print('Not overwriting')
else:
                    print("file doesn't exist (yet)")
fig_handle.savefig(file_path)
print('File saved to:', file_path)
else:
print('No file selected')
else:
print('Saving functionality is turned off')
# Create main application as tk.Tk
class PCAgui(tk.Tk): #<- inherits from Tk class
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs) # initialize the tk class
tk.Tk.wm_title(self, 'Component Viewer')
# every page initializes a container to hold its content
container = tk.Frame(self)
container.pack(side='top', fill='both', expand=True)
# set page expansion properties
container.grid_rowconfigure(0, weight=1)
# row/columns expand equally
container.grid_columnconfigure(0, weight=1)
# Make the menu bar (top banner)
menubar = tk.Menu(container)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_separator()
filemenu.add_command(label='save', command=self.quit)
filemenu.add_command(label='exit',
command=lambda: self.cancelcallback(toggle))
menubar.add_cascade(label='file', menu=filemenu)
def toggleFigSaving():
togglesave[0] = not togglesave[0]
def toggleIcaDebug():
toggledebug[0] = not toggledebug[0]
editmenu = tk.Menu(menubar, tearoff=0)
editmenu.add_separator()
editmenu.add_command(label='toggle figure saving',
command=lambda: toggleFigSaving())
editmenu.add_command(label='toggle ica pixel debug',
command=lambda: toggleIcaDebug())
menubar.add_cascade(label='edit', menu=editmenu)
pagemenu = tk.Menu(menubar, tearoff=1)
pagemenu.add_separator()
pagemenu.add_command(label='view components',
command=lambda: self.show_frame(PCpage))
pagemenu.add_command(label='PC information',
command=lambda: self.show_frame(PCinfo))
pagemenu.add_command(label='Domain Correlations',
command=lambda: self.show_frame(DomainROIs))
pagemenu.add_command(label='Domain Region Assignment',
command=lambda: self.show_frame(DomainRegions))
pagemenu.add_command(label='Domain Autocorrelations',
command=lambda: self.show_frame(PCautocorr))
menubar.add_cascade(label='view', menu=pagemenu)
tk.Tk.config(self, menu=menubar)
# Create container to hold for all pages for page switching
self.frames = {}
# List all pages here:
for F in (PCpage, PCinfo, DomainROIs, DomainRegions, PCautocorr):
frame = F(container, self)
self.frames[F] = frame
# set currently active frame to StartPage
frame.grid(row=0, column=0, sticky='nsew')
# Initialize default page
default_page = PCpage
self.show_frame(default_page)
# Global event binding commands
self.bind("<Escape>", lambda event: self.cancelcallback(toggle))
self.bind("s", lambda event: self.quit())
self.bind("<F1>", lambda event: self.show_frame(PCpage))
self.bind("<F2>", lambda event: self.show_frame(PCinfo))
self.bind("<F3>", lambda event: self.show_frame(DomainROIs))
self.bind("<F4>", lambda event: self.show_frame(DomainRegions))
self.bind("<F5>", lambda event: self.show_frame(PCautocorr))
# Use global focus to capture bindings,
# send them to local callback manager
self.bind("<Right>",
lambda event: self.current_page.callback_manager(event))
self.bind("<Left>",
lambda event: self.current_page.callback_manager(event))
self.bind("<Up>",
lambda event: self.current_page.callback_manager(event))
self.bind("<Down>",
lambda event: self.current_page.callback_manager(event))
# When window is closed, cancel and don't save results.
self.protocol('WM_DELETE_WINDOW',
lambda: self.cancelcallback(toggle))
# Methods for main app:
def show_frame(self, cont):
# selects frame, raises it
frame = self.frames[cont]
self.current_page = frame
frame.tkraise()
def cancelcallback(self, toggle):
# quits the app, doesn't rebuild the movie
print('Operation was cancelled.')
toggle[0] = 100
self.quit()
# Create each frame and components as individual classes:
class PCpage(tk.Frame):
# Page to view all principal component images, toggle via clicks
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent) # parent is controller
# Create frame for PCs, initialize PC indices
self.nimages = 15
self.PCpage = 0
# initialize the title
label = tk.Label(self,
text='Select Components to Remove:',
font=LARGE_FONT)
# initialize navigation buttons
navbuttons = tk.Frame(self)
self.llbutton = tk.Button(
navbuttons,
text='<<< {0} PCs'.format(4 * self.nimages),
command=lambda: self.changePCpage(self.PCpage - 4))
self.lbutton = tk.Button(
navbuttons,
text='<<< {0} PCs'.format(self.nimages),
command=lambda: self.changePCpage(self.PCpage - 1))
self.homebutton = tk.Button(navbuttons,
text='Home',
command=lambda: self.changePCpage(0))
self.rbutton = tk.Button(
navbuttons,
text='{0} PCs >>>'.format(self.nimages),
command=lambda: self.changePCpage(self.PCpage + 1))
self.rrbutton = tk.Button(
navbuttons,
text='{0} PCs >>>'.format(4 * self.nimages),
command=lambda: self.changePCpage(self.PCpage + 4))
# Make frame for pc max value controller
pccontrol = tk.Frame(self)
upper_limit = tk.StringVar()
upper_limit.set(str(maxval))
maxentry = tk.Entry(pccontrol, textvariable=upper_limit, width=5)
entrylabel = tk.Label(pccontrol, text='Noise Floor:')
# place the title
label.grid(column=0, row=0)
# place the navigation buttons and load panel
self.llbutton.grid(column=0, row=0)
self.lbutton.grid(column=1, row=0)
self.homebutton.grid(column=2, row=0)
self.rbutton.grid(column=3, row=0)
self.rrbutton.grid(column=4, row=0)
navbuttons.grid(column=0, row=2)
# place max pc control panel
entrylabel.grid(column=0, row=0)
maxentry.grid(column=1, row=0)
pccontrol.grid(column=0, row=3)
self.loadPCpage()
def callback_manager(self, event):
if event.keysym == 'Right':
if len(toggle) - self.nimages * (self.PCpage + 1) > 0:
self.changePCpage(self.PCpage + 1)
elif event.keysym == 'Left':
if self.PCpage != 0:
self.changePCpage(self.PCpage - 1)
else:
print('No callback defined for:', event.keysym)
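    # PCframe (below) lays out a grid of imButton thumbnails, ncol per row;
    # clicking a thumbnail flips the corresponding entry in `toggle`, i.e.
    # marks that component for removal or keeps it.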
class PCframe(tk.Frame):
# Create a frame to hold all eigenbrains
def __init__(self, parent, indices):
tk.Frame.__init__(self, parent)
# variables to hold image buttons
self.ncol = 5
self.PCplotframe = tk.Frame(self, borderwidth=2)
self.imagebutton = []
# initialize and grid image buttons
for i, n in enumerate(indices):
frame = (tk.Frame(self.PCplotframe))
self.imagebutton.append(self.imButton(frame, n))
c = i % self.ncol
r = i // self.ncol
frame.grid(row=r, column=c)
self.PCplotframe.grid(column=0, row=1) # grid main frame
def update(self, parent, indices):
# update each image button to contain indices given
imagebutton = self.imagebutton
#if there isn't a full set of indices, append None
if len(indices) < len(imagebutton):
print('Not enough indices to fill page')
lendiff = len(imagebutton) - len(indices)
indices.extend([None] * lendiff)
# update each image with new PC index
for i, buttonhandle in enumerate(imagebutton):
buttonhandle.update(indices[i])
class imButton(tk.Frame):
# Image button. When clicked, toggle[index] is switched.
# Colormap is also switched to see status of PC
def __init__(self, parent, pc_id):
self.pc_id = pc_id
f = Figure(figsize=(3, 3), dpi=100, frameon=False)
self.ax = f.add_subplot(111)
f.subplots_adjust(left=0.05,
bottom=0.05,
right=0.98,
top=0.98)
self.canvas = FigureCanvasTkAgg(f, parent)
self.canvas.get_tk_widget().grid(row=1, column=1)
self.canvas.mpl_connect(
'button_press_event',
lambda event: self.on_key_press(event))
self.imgplot = self.ax.imshow(np.zeros((x, y)))
self.ax.axis('off')
self.update(pc_id)
def update(self, pc_id):
self.pc_id = pc_id
self.ax.cla()
self.ax.axis('off')
if pc_id is None: # clear image
im = np.empty((x, y))
im[:] = np.NAN
self.imgplot = self.ax.imshow(im)
self.canvas.draw()
return ()
eigenbrain = rebuild_eigenbrain(eig_vec, pc_id, roimask,
eigb_shape, maskind)
if rotate > 0:
eigenbrain = np.rot90(eigenbrain, rotate)
mean = np.nanmean(eigenbrain)
std = np.abs(np.nanstd(eigenbrain))
self.imgplot = self.ax.imshow(eigenbrain,
cmap=corr_cmap,
vmin=mean - 4 * std,
vmax=mean + 4 * std)
if toggle[pc_id] == 0:
self.imgplot.set_cmap(component_cmap)
else:
self.imgplot.set_cmap('Greys')
self.ax.annotate('Component {0}'.format(pc_id),
color='grey',
fontsize=10,
xy=(1, 1))
self.canvas.draw()
def on_key_press(self, event):
# callback for clicks on image.
# Change color and toggle value
try:
if toggle[self.pc_id] == 0:
self.imgplot.set_cmap('Greys_r')
toggle[self.pc_id] = 1
elif toggle[self.pc_id] == 1:
self.imgplot.set_cmap(component_cmap)
toggle[self.pc_id] = 0
self.canvas.draw()
except:
print('Index was out of range')
def changePCpage(self, newpage):
# callback for buttons to change page
self.PCpage = newpage
self.loadPCpage()
def loadPCpage(self):
# load a PC page based on self.PCpage
# Reenable all buttons, disable any that shouldn't be allowed
self.llbutton.config(state=['normal'])
self.lbutton.config(state=['normal'])
self.homebutton.config(state=['normal'])
self.rbutton.config(state=['normal'])
self.rrbutton.config(state=['normal'])
if self.PCpage == 0: # if on start page, disable home, -n
self.homebutton.config(state=['disabled'])
self.lbutton.config(state=['disabled'])
self.llbutton.config(state=['disabled'])
elif self.PCpage < 4:
self.llbutton.config(state=['disabled'])
if len(toggle) - (self.nimages * (self.PCpage + 1)) <= 0:
# if total images - next page's max <=0, disable + n
self.rbutton.config(state=['disabled'])
self.rrbutton.config(state=['disabled'])
elif len(toggle) - self.nimages * self.PCpage \
- 4*self.nimages <= 0:
self.rrbutton.config(state=['disabled'])
# Refresh the PC Grid
startPC = self.PCpage * self.nimages
endPC = startPC + self.nimages
if endPC > len(toggle):
endPC = len(toggle)
PCindices = list(range(startPC, endPC))
if hasattr(self, 'PCfigure'):
self.PCfigure.update(self, PCindices)
self.PCfigure.grid(column=0, row=1)
else:
self.PCfigure = self.PCframe(self, PCindices)
self.PCfigure.grid(column=0, row=1)
# grid PCfigure into StartPage
class PCinfo(tk.Frame):
# Page for viewing information about individual PCs
def __init__(self, parent, controller):
# Change the PC text box using the +/- keys.
# This triggers updatePCvalue to run
def updatePCval_button(delta):
newval = int(self.selected_pc.get()) + delta
self.selected_pc.set(newval)
# Change the PC index using the text box
def updatePCval():
newvalue = self.selected_pc.get()
print('Changing PC index to: {0}'.format(newvalue))
if newvalue == '': # empty value
print('Text box is blank. Not updating')
else:
try: # make sure entry is an int, inside range of PCs
assert int(newvalue) < n_components - 1, (
'Index exceeds range')
assert int(newvalue) >= 0, 'Index below 0'
self.pc_id[0] = int(newvalue)
self.selected_pc.set(str(self.pc_id[0]))
fig.updateFigures(self.pc_id[0])
except:
print('Not changing upper PC cutoff.')
self.selected_pc.set(str(self.pc_id[0]))
# reset text box to previous value
# Initialize PC info page
tk.Frame.__init__(self, parent)
label = tk.Label(self, text='Component Viewer:', font=LARGE_FONT)
label.pack(pady=10, padx=10)
# Two components to page:
pcviewer = tk.Frame(self)
# grid of figures about selected PC
pc_toolbar = tk.Frame(pcviewer)
# toolbar for selecting PC index
# Make PC selection toolbar
self.pc_id = [0]
self.selected_pc = tk.StringVar()
self.selected_pc.set(str(self.pc_id[0]))
pc_entry = tk.Entry(pc_toolbar,
textvariable=self.selected_pc,
width=5)
self.selected_pc.trace('w',
lambda nm, idx, mode, var=0: updatePCval())
pc_entry_label = tk.Label(pc_toolbar, text='Component:')
pm_toolbar = tk.Frame(pcviewer)
inc = tk.Button(pm_toolbar,
text='+',
command=lambda: updatePCval_button(1))
dec = tk.Button(pm_toolbar,
text='-',
command=lambda: updatePCval_button(-1))
# grid pc selector frame
pc_entry_label.grid(column=0, row=0)
pc_entry.grid(column=1, row=0)
inc.grid(column=0, row=0)
dec.grid(column=1, row=0)
# grid pcviewer frame
pc_toolbar.pack()
pm_toolbar.pack()
pcviewer.pack()
fig = self.PCfigures(self, self.pc_id)
fig.pack()
def callback_manager(self, event):
if event.keysym == 'Right':
newval = int(self.selected_pc.get()) + 1
self.selected_pc.set(newval)
elif event.keysym == 'Left':
newval = int(self.selected_pc.get()) - 1
self.selected_pc.set(newval)
else:
print('No callback defined for:', event.keysym)
class PCfigures(tk.Frame):
# Create class to hold and update all figures
def __init__(self, parent, controller):
# Create main frame, child of PCinfo page
tk.Frame.__init__(self, parent)
ncol = 2 # number of columns of figures
self.figures = {}
# List desired figures
figlist = [
self.pcImage, self.timeCourse, self.fourierWaveletHistogram,
self.waveletSpectrum
]
for i, Fig in enumerate(figlist):
# Not being used: self.fourierTimecourse
# self.fourierHistogram
figure = Fig(self) # initialize each figure
c = i % ncol
r = i // ncol
figure.grid(row=r, column=c) # grid it
self.figures[Fig] = figure
# store each handle in self.figures
# initialize figures for PC #0
self.updateFigures(0)
def updateFigures(self, pc_id):
# Update all figures in self.figures
self.timecourse = timecourses[pc_id]
eigenbrain = rebuild_eigenbrain(eig_vec, pc_id, roimask,
eigb_shape, maskind)
if rotate > 0:
eigenbrain = np.rot90(eigenbrain, rotate)
mean = np.nanmean(eigenbrain)
std = np.abs(np.nanstd(eigenbrain))
eigenbrain[np.where(
|
np.isnan(eigenbrain)
|
numpy.isnan
|
"""This module implements Experiments, which add analysis to types of Measurements
All experiments should have a background-subtracted ixdat ECMSMeasurement with
calibrated current and potential, and a mdict containing ixdat MSCalResult objects
which correctly calibrate the MS data of the meas.
The StandardExperiment is a constant-potential OER (or composite thereof) with
ICP-MS samples taken during the measurement. The sample is a labeled test sample or
un-labeled control sample and the electrolyte is natural, so all m/z=34 and m/z=36 signal is
excess lattice O. The StandardExperiment is only calibrated for O2, though at all
three isotopes, and gets the calibration factor from the trend in the project's
CalibrationSeries. """
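# Hedged usage sketch (not part of the original module): assuming an experiment
# with id 1 has been saved to EXPERIMENT_DIR, it could be reloaded and used as
#   exp = Experiment.open(1)
#   t, flux_34 = exp.calc_flux("O2_M34", tspan=exp.tspan_plot)
# where "O2_M34" is one of the calibrated molecules in exp.mdict.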
from pathlib import Path
import json
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import gridspec
from ixdat.techniques.ec_ms import MSCalResult
from .constants import (
EXPERIMENT_DIR,
EXPERIMENT_ID_FILE,
STANDARD_ALPHA,
STANDARD_EXPERIMENT_TAGS,
FARADAY_CONSTANT,
)
from .tools import singleton_decorator, CounterWithFile
from .measurement import Measurement
from .calibration import CalibrationSeries
from .calc import calc_current
calibration_series = CalibrationSeries.load()
def all_experiments(experiment_dir=EXPERIMENT_DIR):
"""returns an iterator that yields experiments in order of their id"""
N_experiments = ExperimentCounter().last()
for n in range(1, N_experiments):
try:
measurement = Experiment.open(n, experiment_dir=experiment_dir)
except FileNotFoundError as e:
print(f"itermeasurement skipping {n} due to error = \n{e}")
else:
yield measurement
def all_standard_experiments(experiment_dir=EXPERIMENT_DIR):
N_experiments = ExperimentCounter().last()
for n in range(1, N_experiments):
try:
standard_experiment = StandardExperiment.open(
n, experiment_dir=experiment_dir
)
if standard_experiment.experiment_type.startswith("a"):
# then it is an activity experiment
raise TypeError("wrong type of experiment")
except (FileNotFoundError, TypeError) as e:
print(f"itermeasurement skipping {n} due to error = \n{e}")
else:
yield standard_experiment
def all_activity_experiments(experiment_dir=EXPERIMENT_DIR):
N_experiments = ExperimentCounter().last()
for n in range(1, N_experiments):
try:
activity_experiment = ActExperiment.open(n, experiment_dir=experiment_dir)
if not activity_experiment.experiment_type.startswith("a"):
raise TypeError("wrong type of experiment.")
except (FileNotFoundError, TypeError) as e:
print(f"itermeasurement skipping {n} due to error = \n{e}")
else:
yield activity_experiment
@singleton_decorator
class ExperimentCounter(CounterWithFile):
"""Counts measurements. 'id' increments the counter. 'last()' retrieves last id"""
_file = EXPERIMENT_ID_FILE
def open_experiment(e_id, experiment_dir=EXPERIMENT_DIR):
"""Open as the appropriate type of Experiment based on the experiment_type field"""
try:
path_to_file = next(
path
for path in Path(experiment_dir).iterdir()
if path.stem.startswith(f"e{e_id}")
)
except StopIteration:
raise FileNotFoundError(f"no standard experiment with id = e{e_id}")
return Experiment.open(e_id)
class Experiment:
"""Joins a pyOER measurement with extra metadata and methods for deriving results
This is a base class for more complex experiments, with methods and saveable
metadata (typically tspans) for subtracting background, plotting nicely,
calibrating signals, determining electrolyte isotopic composition, etc.
It also has a list of TOFs, which (separately) contain the metadata for deriving
specific results.
    Inheriting classes will contain methods to derive those specific results.
"""
def __init__(
self,
m_id,
experiment_type=None,
tspan_plot=None,
F=None,
alpha=None,
cap=None,
tspan_bg=None,
tspan_bg_current=None,
tspan_F=None,
tspan_alpha=None,
tspan_cap=None,
V_DL=(1.22, 1.3),
e_id=None,
**kwargs,
):
"""Initiate an experiment
Args:
m_id (int): The measurement id
experiment_type (str): Tag for the type of standard experiment. Options are
- standard experiments (activity + exchange + dissolution):
"y": "yes, purely systematic", # 30 minutes at one current density
"s": "starts systematic",
"k": "shortened systematic (<30 minutes)",
"c": "composite systematic" # one sample multiple current densities
- activity experiments (constant potential steps):
tspan_plot (timespan): The timespan in which to make the experiment
plot. If not given, the plot will use the measurement's tspan
F (float): The O2 sensitivity in [C/mol]. By default the experiment will
use the O2 sensitivity given by the CalibrationSeries represented in
TREND.json in the calibration directory
            alpha (float): The ^{16}O portion in the electrolyte. By default it takes
the natural value of 99.80%
cap (float): The capacitance- in [Farads], if known.
tspan_bg (timespan): The timespan to consider the background
tspan_F (timespan): The timespan from which O2 sensitivity (F) can be
calculated from the measurement
tspan_alpha (timespan): The timespan from which the isotopic composition of
                the electrolyte (alpha) can be calculated from the measurement
tspan_cap (timespan): The timespan from which capacitance can be measured
V_DL (list of float): The voltage range for capacitance calculation / [V]
plot_specs (dict): Additional specs for the plot, e.g. axis limits ("ylims")
            e_id (int): The StandardExperiment's principal key
"""
self.m_id = m_id
self.experiment_type = experiment_type
self.measurement = Measurement.open(m_id)
self._meas = None
self.tspan_plot = tspan_plot
self.tspan_bg = tspan_bg
self.tspan_bg_current = tspan_bg_current
self.tspan_F = tspan_F
self.F_0 = F # for saving, so that if no F is given and the CalibrationSeries
# is updated, the updated CalibrationSeries will determine F upon loading.
self._F = None
self._mdict = {}
self.tspan_alpha = tspan_alpha
self.alpha_0 = alpha # for saving, so that if no alpha is given and the
# natural ratio is updated, this will determine alpha upon loading.
self._alpha = None
self._cap = cap
self.tspan_cap = tspan_cap
self.V_DL = V_DL
self._cap = None
self._icpms_points = None
self.id = e_id or ExperimentCounter().id
self.default_masses = ["M32", "M34", "M36"]
self._tofs = None
self.extra_stuff = kwargs
def as_dict(self):
return dict(
m_id=self.m_id,
experiment_type=self.experiment_type,
tspan_plot=self.tspan_plot,
tspan_bg=self.tspan_bg,
tspan_bg_current=self.tspan_bg_current,
tspan_F=self.tspan_F,
tspan_cap=self.tspan_cap,
tspan_alpha=self.tspan_alpha,
F=self.F_0,
alpha=self.alpha_0,
e_id=self.id,
)
def __repr__(self):
return f"e{self.id} is from m{self.m_id} of {self.measurement.sample_name}"
def save(self):
self_as_dict = self.as_dict()
file = EXPERIMENT_DIR / f"{self}.json"
with open(file, "w") as f:
json.dump(self_as_dict, f, indent=4)
@classmethod
def load(cls, file):
"""Load a standard experiment given the path to its json file."""
with open(file, "r") as f:
self_as_dict = json.load(f)
if "plot_specs" in self_as_dict and "ylims" in self_as_dict["plot_specs"]:
            # json turns integer keys into strings. This converts them back to int.
self_as_dict["plot_specs"]["ylims"] = {
int(s): ylim for s, ylim in self_as_dict["plot_specs"]["ylims"].items()
}
experiment_class = cls
if "experiment_type" in self_as_dict:
if self_as_dict["experiment_type"].startswith("a"):
experiment_class = ActExperiment
elif self_as_dict["experiment_type"] in STANDARD_EXPERIMENT_TAGS:
experiment_class = StandardExperiment
return experiment_class(**self_as_dict)
@classmethod
def open(cls, e_id, experiment_dir=EXPERIMENT_DIR):
try:
path_to_file = next(
path
for path in Path(experiment_dir).iterdir()
if path.stem.startswith(f"e{e_id}")
)
except StopIteration:
raise FileNotFoundError(f"no standard experiment with id = e{e_id}")
return cls.load(path_to_file)
@property
def meas(self):
"""The ixdat measurement with the experimental data"""
if not self._meas:
meas = self.measurement.meas
meas.calibrate(
RE_vs_RHE=self.measurement.RE_vs_RHE,
A_el=0.196,
)
if self.measurement.R_Ohm:
meas.correct_ohmic_drop(self.measurement.R_Ohm)
if self.tspan_bg:
meas.set_bg(self.tspan_bg)
self._meas = meas
return self._meas
@property
def beta(self):
"""Float: The m/z=34 to m/z=32 signal ratio from oxidation of the electrolyte"""
return 2 * (1 - self.alpha) / self.alpha
@property
def gamma(self):
"""Float: The m/z=34 to m/z=36 signal ratio from oxidation of the electrolyte"""
return 2 * self.alpha / (1 - self.alpha)
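    # For the natural-abundance default (alpha close to 0.998), these ratios from
    # electrolyte oxidation are roughly beta ~ 2*0.002/0.998 ~ 4e-3 and
    # gamma ~ 2*0.998/0.002 ~ 1e3, i.e. m/z=34 is a small correction to m/z=32
    # but dominates m/z=36.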
@property
def sample(self):
return self.measurement.sample
@property
def sample_name(self):
return self.measurement.sample_name
@property
def tofs(self):
if not self._tofs:
self.load_tofs()
return self._tofs
def load_tofs(self):
from .tof import all_tofs
self._tofs = [tof for tof in all_tofs() if tof.e_id == self.id]
@property
def mol_list(self):
return [f"O2_{mass}" for mass in self.mass_list]
@property
def mass_list(self):
if "18" in (self.measurement.isotope or ""):
mass_list = ["M34", "M36"]
elif "16" in (self.measurement.isotope or ""):
mass_list = ["M32"]
else:
print(f"The electrolyte isotope for '{self}' is not known!")
mass_list = ["M32", "M34", "M36"]
return mass_list
@property
def tof_sets(self):
from .tof import all_tof_sets
return [t_set for t_set in all_tof_sets() if t_set.experiment.id == self.id]
def calc_alpha(self, tspan=None):
"""Return fraction ^{16}O in the electrolyte based on tspan with steady OER"""
tspan = tspan or self.tspan_alpha
x_32, y_32 = self.meas.get_signal(mass="M32", tspan=tspan)
x_34, y_34 = self.meas.get_signal(mass="M34", tspan=tspan)
gamma = np.mean(y_34) / np.mean(y_32)
alpha = 2 / (2 + gamma)
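        # Inverting the beta expression above: y34/y32 = 2*(1 - alpha)/alpha,
        # so alpha = 2/(2 + y34/y32). The local name `gamma` is that measured
        # m/z=34 to m/z=32 ratio, not the gamma property defined above.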
return alpha
@property
def cap(self):
"""Capacitance in Farads"""
if not self._cap:
cap_cv = self.meas.cut(self.tspan_cap).as_cv()
self._cap = cap_cv.get_capacitance(V_DL=self.V_DL) * self.meas.A_el
# Farad/cm^2 * cm^2
return self._cap
@property
def ECSA(self):
"""Electrochemical surface area in [cm^2]"""
return self.cap / self.sample.specific_capacitance # Farad / (Farad/cm^2)
@property
def n_sites(self):
"""Number of sites in [mol]"""
return self.ECSA * self.sample.site_density # cm^2 * mol/cm^2
def populate_mdict(self):
"""Fill in self.mdict with the EC-MS.Molecules O2_M32, O2_M34, and O2_M36"""
for mass in ["M32", "M34", "M36"]:
m = MSCalResult(mol="O2", mass=mass, F=self.F)
self._mdict[f"O2_{mass}"] = m
@property
def mdict(self):
if not self._mdict:
self.populate_mdict()
return self._mdict
@property
def F(self):
if not self._F:
if self.tspan_F:
F = 0
for mass in self.mass_list:
try:
x, y = self.meas.grab(mass, tspan=self.tspan_F)
I = calc_current(self, tspan=self.tspan_F)
F_M = np.mean(y) / (I / (4 * FARADAY_CONSTANT))
F += F_M
except KeyError:
continue
elif self.F_0:
F = self.F_0
else:
F = calibration_series.F_of_tstamp(self.meas.tstamp)
self._F = F
return self._F
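    # When tspan_F is given, each mass' sensitivity is estimated as the mean MS
    # signal divided by the molar O2 flux I/(4*FARADAY_CONSTANT), i.e. assuming
    # 4 electrons per O2; the contributions of the available masses are summed.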
@property
def alpha(self):
if not self._alpha:
if self.tspan_alpha:
alpha = self.calc_alpha()
else:
alpha = self.alpha_0
self._alpha = alpha or STANDARD_ALPHA
return self._alpha
def calc_flux(self, mol, tspan, remove_background=True, **kwargs):
"""Return the flux for a calibrated mol (a key to self.mdict)"""
m = self.mdict[mol]
return self.meas.grab_flux(
m, tspan=tspan, remove_background=remove_background, **kwargs
)
def get_tofs(self):
"""Return a list of TOFS from the experiment"""
from .tof import all_tofs
tofs = [] # icpms points
for tof in all_tofs():
if tof.e_id == self.id:
tofs += [tof]
return tofs
def calc_background_current(self):
cap_cv = self.meas.cut(self.tspan_cap).as_cv()
sweep_1 = cap_cv.select_sweep(vspan=self.V_DL)
sweep_2 = cap_cv.select_sweep(vspan=[self.V_DL[-1], self.V_DL[0]])
I_1 = np.mean(sweep_1.grab("raw_current")[1]) * 1e-3 # [mA] -> [A]
I_2 = np.mean(sweep_2.grab("raw_current")[1]) * 1e-3 # [mA] -> [A]
I_bg = (I_1 + I_2) / 2 # the background current is the center of these two.
return I_bg
def correct_current(self):
I_bg = self.calc_background_current()
I_name = self.meas.I_name
J_name = self.meas.J_name
A_el = self.meas.A_el
print(f"subtracting {I_bg * 1e3 / A_el} from '{J_name}'")
self.meas.correct_data(I_name, self.meas[I_name].data - I_bg * 1e3)
self.meas.correct_data(J_name, self.meas[J_name].data - I_bg * 1e3 / A_el)
class StandardExperiment(Experiment):
"""This class describes the experiments from which 3x TOF measurements are derived
These are EC-MS measurements of a labeled (or control) sample in non-labeled
    electrolyte at constant current, with ICP-MS samples taken during or between
measurements. The class wraps the corresponding measurement with extra functions.
They are best represented as an EC-MS-ICPMS plot where the MS panel has left and
    right y-axes representing labeled and non-labeled O2, respectively. Such a plot
    is made with StandardExperiment.plot_EC_MS_ICPMS
"""
def __init__(
self,
m_id,
experiment_type=None,
tspan_plot=None,
F=None,
alpha=None,
tspan_bg=None,
tspan_F=None,
tspan_alpha=None,
e_id=None,
plot_specs=None,
**kwargs,
):
super().__init__(
m_id=m_id,
experiment_type=experiment_type,
tspan_plot=tspan_plot,
F=F,
tspan_F=tspan_F,
alpha=alpha,
tspan_alpha=tspan_alpha,
tspan_bg=tspan_bg,
e_id=e_id,
**kwargs,
)
        if experiment_type not in STANDARD_EXPERIMENT_TAGS:
raise TypeError(
f"Cannot make StandardExperiment of '{self.measurement}' "
f"with experiment_type='{experiment_type}'"
)
self.plot_specs = plot_specs
self._icpms_points = None
def as_dict(self):
self_as_dict = super().as_dict()
self_as_dict.update(plot_specs=self.plot_specs)
return self_as_dict
@property
def icpms_points(self):
"""List of ICPMSPoint: The ICPMS samples from the experiment"""
if not self._icpms_points:
self._icpms_points = self.measurement.get_icpms_points()
return self._icpms_points
def get_dissolution_points(self):
"""Return the ICPMS sampling times (t_vec) and molar amounts (n_vec)"""
icpms_points = self.icpms_points
t_vec = np.array([icpms_point.sampling_time for icpms_point in icpms_points])
n_vec = np.array([icpms_point.amount for icpms_point in icpms_points])
return t_vec, n_vec
def get_dissolution_rates(self):
"""Return the ICPMS sampling times (t_vec) and dissolution raties (n_dot_vec)"""
t_points, n_points = self.get_dissolution_points()
t_last = 0
t_vec = np.array([])
n_dot_vec = np.array([])
for (
t,
n,
) in zip(t_points, n_points):
if t == 0:
continue
if t == t_last:
input(f"Waring! {self.measurement} has two ICPMS samples at t={t}.")
continue
t_vec =
|
np.append(t_vec, t)
|
numpy.append
|
"""
Test for file IO
"""
import os
import pytest
import numpy as np
import biorbd_casadi as biorbd
from bioptim import OdeSolver
from .utils import TestUtils
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.COLLOCATION, OdeSolver.IRK])
def test_muscle_activations_and_states_tracking(ode_solver):
# Load muscle_activations_tracker
from bioptim.examples.muscle_driven_ocp import muscle_activations_tracker as ocp_module
bioptim_folder = os.path.dirname(ocp_module.__file__)
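    # Test pattern (shared by the tests below): generate reference data from a
    # seeded random simulation, build an OCP that tracks it, solve, and
    # regression-check the cost, constraints and selected state/control values
    # for each ODE solver.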
# Define the problem
model_path = bioptim_folder + "/models/arm26.bioMod"
biorbd_model = biorbd.Model(model_path)
final_time = 0.1
n_shooting = 5
use_residual_torque = True
# Generate random data to fit
np.random.seed(42)
t, markers_ref, x_ref, muscle_activations_ref = ocp_module.generate_data(
biorbd_model, final_time, n_shooting, use_residual_torque=use_residual_torque
)
biorbd_model = biorbd.Model(model_path) # To allow for non free variable, the model must be reloaded
ocp = ocp_module.prepare_ocp(
biorbd_model,
final_time,
n_shooting,
markers_ref,
muscle_activations_ref,
x_ref[: biorbd_model.nbQ(), :],
use_residual_torque=use_residual_torque,
kin_data_to_track="q",
ode_solver=ode_solver(),
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
# Check constraints
g = np.array(sol.constraints)
if ode_solver == OdeSolver.COLLOCATION:
np.testing.assert_equal(g.shape, (20 * 5, 1))
np.testing.assert_almost_equal(g, np.zeros((20 * 5, 1)), decimal=6)
else:
np.testing.assert_equal(g.shape, (20, 1))
np.testing.assert_almost_equal(g, np.zeros((20, 1)), decimal=6)
# Check some of the results
q, qdot, tau, mus = sol.states["q"], sol.states["qdot"], sol.controls["tau"], sol.controls["muscles"]
if ode_solver == OdeSolver.IRK:
np.testing.assert_almost_equal(f[0, 0], 3.624795808383824e-08)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([-1.26294409e-05, -5.94685627e-06]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.10541975, -0.48577985]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.00074118, -0.00036854]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([-4.21473881, 7.26398638]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-3.19231945e-08, 1.78181204e-06]))
np.testing.assert_almost_equal(tau[:, -2], np.array([2.55285701e-06, -5.12710950e-06]))
np.testing.assert_almost_equal(
mus[:, 0], np.array([0.37451645, 0.95067812, 0.73199474, 0.59864193, 0.15601703, 0.15600089])
)
np.testing.assert_almost_equal(
mus[:, -2], np.array([0.4559321, 0.78521782, 0.19970124, 0.51419847, 0.59238012, 0.04656187])
)
elif ode_solver == OdeSolver.COLLOCATION:
np.testing.assert_almost_equal(f[0, 0], 3.6846293820760475e-08)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([-1.26294409e-05, -5.94685627e-06]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.10541975, -0.48577985]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.00074233, -0.00037249]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([-4.21473503, 7.26397692]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([-3.19231945e-08, 1.78181204e-06]))
np.testing.assert_almost_equal(tau[:, -2], np.array([2.55285701e-06, -5.12710950e-06]))
np.testing.assert_almost_equal(
mus[:, 0], np.array([0.37451633, 0.95067815, 0.73199481, 0.5986417, 0.15601682, 0.15600081])
)
np.testing.assert_almost_equal(
mus[:, -2], np.array([0.4559318, 0.78521793, 0.19970129, 0.51419838, 0.59238004, 0.04656203])
)
elif ode_solver == OdeSolver.RK4:
np.testing.assert_almost_equal(f[0, 0], 3.624795808383824e-08)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([-1.24603457e-05, -5.56567245e-06]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.10542008, -0.48578046]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.00071319, -0.00034956]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([-4.21476386, 7.26402641]))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([7.86364319e-08, 1.43718933e-06]))
np.testing.assert_almost_equal(tau[:, -2], np.array([2.33336715e-06, -4.52483197e-06]))
np.testing.assert_almost_equal(
mus[:, 0], np.array([0.3745183, 0.9506776, 0.7319939, 0.59864459, 0.15601947, 0.15600189])
)
np.testing.assert_almost_equal(
mus[:, -2], np.array([0.45594578, 0.78521284, 0.19969902, 0.51420259, 0.5923839, 0.04655438])
)
else:
raise ValueError("Test not implemented")
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol, decimal_value=5)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.COLLOCATION, OdeSolver.IRK])
def test_muscle_activation_no_residual_torque_and_markers_tracking(ode_solver):
# Load muscle_activations_tracker
from bioptim.examples.muscle_driven_ocp import muscle_activations_tracker as ocp_module
bioptim_folder = os.path.dirname(ocp_module.__file__)
# Define the problem
model_path = bioptim_folder + "/models/arm26.bioMod"
biorbd_model = biorbd.Model(model_path)
final_time = 0.1
n_shooting = 5
use_residual_torque = False
# Generate random data to fit
np.random.seed(42)
t, markers_ref, x_ref, muscle_activations_ref = ocp_module.generate_data(
biorbd_model, final_time, n_shooting, use_residual_torque=use_residual_torque
)
biorbd_model = biorbd.Model(model_path) # To allow for non free variable, the model must be reloaded
ocp = ocp_module.prepare_ocp(
biorbd_model,
final_time,
n_shooting,
markers_ref,
muscle_activations_ref,
x_ref[: biorbd_model.nbQ(), :],
use_residual_torque=use_residual_torque,
kin_data_to_track="q",
ode_solver=ode_solver(),
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 3.634248634056222e-08)
# Check constraints
g = np.array(sol.constraints)
if ode_solver == OdeSolver.COLLOCATION:
np.testing.assert_equal(g.shape, (20 * 5, 1))
np.testing.assert_almost_equal(g, np.zeros((20 * 5, 1)), decimal=6)
else:
np.testing.assert_equal(g.shape, (20, 1))
np.testing.assert_almost_equal(g, np.zeros((20, 1)), decimal=6)
# Check some of the results
q, qdot, mus = sol.states["q"], sol.states["qdot"], sol.controls["muscles"]
if ode_solver == OdeSolver.IRK:
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([-1.26502327e-05, -5.98498658e-06]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.10541969, -0.48577983]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.00074251, -0.00036937]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([-4.21474217, 7.26398954]))
# initial and final controls
np.testing.assert_almost_equal(
mus[:, 0], np.array([0.37451604, 0.95067823, 0.73199494, 0.59864126, 0.15601641, 0.15600064])
)
np.testing.assert_almost_equal(
mus[:, -2], np.array([0.45593194, 0.78521787, 0.19970125, 0.51419844, 0.5923801, 0.04656193])
)
elif ode_solver == OdeSolver.COLLOCATION:
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([-1.26434090e-05, -5.99992755e-06]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.10541971, -0.48577986]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.00074381, -0.00037358]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([-4.21473839, 7.26398039]))
# initial and final controls
np.testing.assert_almost_equal(
mus[:, 0], np.array([0.37451604, 0.95067823, 0.73199495, 0.59864125, 0.1560164, 0.15600064])
)
np.testing.assert_almost_equal(
mus[:, -2], np.array([0.45593167, 0.78521797, 0.1997013, 0.51419836, 0.59238002, 0.04656208])
)
elif ode_solver == OdeSolver.RK4:
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([-1.24679103e-05, -5.63685028e-06]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.10542003, -0.48578047]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([0.00071458, -0.00035055]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([-4.21476717, 7.26402945]))
# initial and final controls
np.testing.assert_almost_equal(
mus[:, 0], np.array([0.3745179, 0.95067771, 0.7319941, 0.59864394, 0.15601888, 0.15600164])
)
np.testing.assert_almost_equal(
mus[:, -2], np.array([0.45594564, 0.78521289, 0.19969903, 0.51420257, 0.59238388, 0.04655442])
)
else:
raise ValueError("Test not ready")
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol, decimal_value=6)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.COLLOCATION, OdeSolver.IRK])
def test_muscle_excitation_with_torque_and_markers_tracking(ode_solver):
# Load muscle_excitations_tracker
from bioptim.examples.muscle_driven_ocp import muscle_excitations_tracker as ocp_module
bioptim_folder = os.path.dirname(ocp_module.__file__)
# Define the problem
model_path = bioptim_folder + "/models/arm26.bioMod"
biorbd_model = biorbd.Model(model_path)
final_time = 0.1
n_shooting = 5
# Generate random data to fit
np.random.seed(42)
t, markers_ref, x_ref, muscle_excitations_ref = ocp_module.generate_data(biorbd_model, final_time, n_shooting)
biorbd_model = biorbd.Model(model_path) # To allow for non free variable, the model must be reloaded
ocp = ocp_module.prepare_ocp(
biorbd_model,
final_time,
n_shooting,
markers_ref,
muscle_excitations_ref,
x_ref[: biorbd_model.nbQ(), :].T,
use_residual_torque=True,
kin_data_to_track="markers",
ode_solver=ode_solver(),
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
# Check constraints
g = np.array(sol.constraints)
if ode_solver == OdeSolver.COLLOCATION:
np.testing.assert_equal(g.shape, (50 * 5, 1))
np.testing.assert_almost_equal(g, np.zeros((50 * 5, 1)), decimal=6)
else:
np.testing.assert_equal(g.shape, (50, 1))
np.testing.assert_almost_equal(g, np.zeros((50, 1)), decimal=6)
# Check some of the results
q, qdot, mus_states, tau, mus_controls = (
sol.states["q"],
sol.states["qdot"],
sol.states["muscles"],
sol.controls["tau"],
sol.controls["muscles"],
)
if ode_solver == OdeSolver.IRK:
np.testing.assert_almost_equal(f[0, 0], 3.9377280548492226e-05)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([-0.00351782, 0.01702219]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.14352637, -0.72030433]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([1.02984019, -3.91364352]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([-3.67284629, 3.62405443]))
# initial and final muscle state
np.testing.assert_almost_equal(
mus_states[:, 0], np.array([0.37454012, 0.95071431, 0.73199394, 0.59865848, 0.15601864, 0.15599452])
)
np.testing.assert_almost_equal(
mus_states[:, -1], np.array([0.51285729, 0.69943619, 0.40390569, 0.48032451, 0.53752346, 0.31437668])
)
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([5.42775569e-05, -3.45713249e-04]))
np.testing.assert_almost_equal(tau[:, -2], np.array([-2.73167136e-05, -3.83494902e-05]))
np.testing.assert_almost_equal(
mus_controls[:, 0], np.array([0.37743387, 0.95055777, 0.73174428, 0.60093014, 0.15924303, 0.15866534])
)
np.testing.assert_almost_equal(
mus_controls[:, -2], np.array([0.4560975, 0.78519158, 0.19973384, 0.51408083, 0.59227422, 0.04659415])
)
elif ode_solver == OdeSolver.COLLOCATION:
np.testing.assert_almost_equal(f[0, 0], 3.9378422266498184e-05)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([-0.00351729, 0.01701928]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.14352497, -0.72030059]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([1.02972633, -3.91317111]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([-3.6728683, 3.62413508]))
# initial and final muscle state
np.testing.assert_almost_equal(
mus_states[:, 0], np.array([0.37454012, 0.95071431, 0.73199394, 0.59865848, 0.15601864, 0.15599452])
)
np.testing.assert_almost_equal(
mus_states[:, -1], np.array([0.51285285, 0.69943161, 0.40390586, 0.48032585, 0.53752527, 0.31437738])
)
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([5.42926592e-05, -3.45716906e-04]))
np.testing.assert_almost_equal(tau[:, -2], np.array([-2.72776735e-05, -3.84479459e-05]))
np.testing.assert_almost_equal(
mus_controls[:, 0], np.array([0.37744597, 0.95044549, 0.73173082, 0.60092211, 0.15932209, 0.15869578])
)
np.testing.assert_almost_equal(
mus_controls[:, -2], np.array([0.45609644, 0.78518702, 0.19973488, 0.51408246, 0.59227441, 0.04659677])
)
elif ode_solver == OdeSolver.RK4:
np.testing.assert_almost_equal(f[0, 0], 3.9163147567423305e-05)
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array([-0.00352334, 0.01700853]))
np.testing.assert_almost_equal(q[:, -1], np.array([0.14350606, -0.72027301]))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array([1.02920952, -3.91032827]))
np.testing.assert_almost_equal(qdot[:, -1], np.array([-3.67351448, 3.62485659]))
# initial and final muscle state
np.testing.assert_almost_equal(
mus_states[:, 0], np.array([0.37454012, 0.95071431, 0.73199394, 0.59865848, 0.15601864, 0.15599452])
)
np.testing.assert_almost_equal(
mus_states[:, -1], np.array([0.51283945, 0.6994339, 0.40390624, 0.48031161, 0.53750849, 0.31441088])
)
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array([5.44773721e-05, -3.45454293e-04]))
np.testing.assert_almost_equal(tau[:, -2], np.array([-2.68029143e-05, -3.90467765e-05]))
np.testing.assert_almost_equal(
mus_controls[:, 0], np.array([0.37740553, 0.95056685, 0.73174651, 0.60092669, 0.15924254, 0.15856357])
)
np.testing.assert_almost_equal(
mus_controls[:, -2], np.array([0.45609247, 0.7851955, 0.19973458, 0.51407787, 0.59227145, 0.04659596])
)
else:
raise ValueError("Test not ready")
# save and load
TestUtils.save_and_load(sol, ocp, False)
# simulate
TestUtils.simulate(sol)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK4, OdeSolver.COLLOCATION, OdeSolver.IRK])
def test_muscle_excitation_no_residual_torque_and_markers_tracking(ode_solver):
# Load muscle_excitations_tracker
from bioptim.examples.muscle_driven_ocp import muscle_excitations_tracker as ocp_module
bioptim_folder = os.path.dirname(ocp_module.__file__)
# Define the problem
model_path = bioptim_folder + "/models/arm26.bioMod"
biorbd_model = biorbd.Model(model_path)
final_time = 0.1
n_shooting = 5
# Generate random data to fit
np.random.seed(42)
t, markers_ref, x_ref, muscle_excitations_ref = ocp_module.generate_data(biorbd_model, final_time, n_shooting)
biorbd_model = biorbd.Model(model_path) # To allow for non free variable, the model must be reloaded
ocp = ocp_module.prepare_ocp(
biorbd_model,
final_time,
n_shooting,
markers_ref,
muscle_excitations_ref,
x_ref[: biorbd_model.nbQ(), :].T,
use_residual_torque=False,
kin_data_to_track="markers",
ode_solver=ode_solver(),
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol.cost)
np.testing.assert_equal(f.shape, (1, 1))
# Check constraints
g = np.array(sol.constraints)
if ode_solver == OdeSolver.COLLOCATION:
np.testing.assert_equal(g.shape, (50 * 5, 1))
np.testing.assert_almost_equal(g, np.zeros((50 * 5, 1)), decimal=6)
else:
np.testing.assert_equal(g.shape, (50, 1))
np.testing.assert_almost_equal(g, np.zeros((50, 1)), decimal=6)
# Check some of the results
q, qdot, mus_states, mus_controls = (
sol.states["q"],
sol.states["qdot"],
sol.states["muscles"],
sol.controls["muscles"],
)
if ode_solver == OdeSolver.IRK:
|
np.testing.assert_almost_equal(f[0, 0], 3.939617534835209e-05)
|
numpy.testing.assert_almost_equal
|
import os
import pdb
import time
import torch
import laspy
import argparse
import numpy as np
from tqdm import tqdm
from typing import Tuple, Union
from pynvml import nvmlInit, nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo
def apply_offset(pc: np.ndarray, offset: np.ndarray, scale: float = 1.0, nodata_value: float = None) -> np.ndarray:
"""
Apply offset and scaling to the point cloud data.
@param pc: [N, X] point cloud. (X >= 3, the fourth and later columns are non-coordinates).
@param offset: [3] offset vector.
@param scale: Float number for scaling.
@param nodata_value: Float number for nodata value.
return Point cloud w/ offset and scaling.
"""
pc = pc.copy()
if nodata_value is None:
pc[:, :3] -= offset.reshape(1, 3)
pc[:, :3] /= scale
else:
nodata_value = float(nodata_value)
pc[pc[:, 0] != nodata_value, :3] -= offset.reshape(1, 3)
pc[pc[:, 0] != nodata_value, :3] /= scale
return pc
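# Hedged usage sketch (not in the original script): with a dummy 2-point cloud
#   _pc = np.array([[10., 20., 30., 1.], [12., 22., 32., 2.]])
#   apply_offset(_pc, offset=np.array([10., 20., 30.]), scale=2.0)
# the XYZ columns of the returned copy become [[0, 0, 0], [1, 1, 1]] while the
# label column is left untouched.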
def pc_local_search(big_pc: torch.Tensor, ref_patch: torch.Tensor, nodata_value: float = -1.0) -> torch.Tensor:
"""
Point cloud local search based on XYZ boundaries.
@param big_pc: [N, D] point cloud. (D >= 3, the fourth and later columns are non-coordinates).
@param ref_patch: [K, 3] point cloud as reference.
@param nodata_value: Float number for nodata value.
return A subset of pertinent point clouds.
"""
if ref_patch.numel() == torch.sum(ref_patch == nodata_value).item():
selected_pc = torch.empty(0)
else:
xyz_max, _ = ref_patch[ref_patch[:, 1] != nodata_value].max(dim=0) # [3]
xyz_min, _ = ref_patch[ref_patch[:, 1] != nodata_value].min(dim=0) # [3]
flag = torch.logical_and(big_pc[:, :3] <= xyz_max, big_pc[:, :3] >= xyz_min).sum(dim=1) == 3
if torch.sum(flag).item() == 0:
margin = max(20.0, np.sqrt(len(ref_patch) / 5.0))
flag = torch.logical_and(big_pc[:, :3] <= xyz_max + margin,
big_pc[:, :3] >= xyz_min - margin).sum(dim=1) == 3
selected_pc = big_pc[flag]
if len(selected_pc) == 0:
selected_pc = torch.empty(0)
return selected_pc
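# The bounding-box filter above keeps only points of `big_pc` that fall inside
# the XYZ extent of the valid reference-patch points; if that selects nothing,
# the box is grown by a margin (at least 20 units, or sqrt(K/5) for a K-point
# patch) and the search is retried once before returning an empty tensor.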
def split_scene_coord(sc: np.ndarray, block_h: int, block_w: int) -> np.ndarray:
"""
Split the scene coordinate associated with image pixels.
@param sc: [H, W, 3] scene coordinates.
@param block_h: Block size in height direction.
@param block_w: Block size in width direction.
return an array of block-wise coordinates, [rows, cols, block_h, block_w, 3].
"""
h, w = sc.shape[:2]
assert h // block_h == h / block_h
assert w // block_w == w / block_w
sc_split_h_ls = np.vsplit(sc, np.arange(h)[::block_h][1:]) # vertical split in height direction
sc_split = [[] for _ in range(len(sc_split_h_ls))]
for row, sc_split_h in enumerate(sc_split_h_ls):
sc_split_w = np.hsplit(sc_split_h, np.arange(w)[::block_w][1:]) # horizontal split in width direction
sc_split[row] = sc_split_w
return np.array(sc_split)
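# Example (for illustration): a (480, 720, 3) scene-coordinate map split with
# block_h=240, block_w=180 yields an array of shape (2, 4, 240, 180, 3).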
def convert_to_tensor(data: np.ndarray, cuda=False, retain_tensor=False, float16=False) \
-> Tuple[bool, Union[None, torch.Tensor]]:
"""
Try making tensor from numpy array.
"""
if float16:
data_tensor = torch.tensor(data).bfloat16()
else:
data_tensor = torch.tensor(data).float()
flag_ok = torch.isnan(data_tensor).sum() == 0 and torch.isinf(data_tensor).sum() == 0
data_tensor = data_tensor if retain_tensor else torch.zeros(1)
if flag_ok:
data_tensor = data_tensor.cuda() if cuda else data_tensor
return True, data_tensor
else:
del data_tensor
return False, None
def sc_query(sc: torch.Tensor, pc: torch.Tensor, nodata_value: float = -1.0) -> np.ndarray:
"""
Query the scene coords' semantic labels in the given point cloud.
@param sc: [H, W, 3] scene coordinates.
@param pc: [N, 4] point cloud w/ semantic labels.
@param nodata_value: Float number for nodata value.
@return [H, W] semantic label.
"""
h, w = sc.shape[:2]
pc = pc.clone()
sc = sc.reshape(-1, 3) # [K, 3]
mask_nodata = sc[:, 0] == nodata_value
sc_cdist = sc[torch.logical_not(mask_nodata)] # [K', 3]
pc_cdist = pc[:, :3] # [N, 3]
# torch cdist for distance computation, only p=2 is supported as of pytorch 1.9!
# See issue: https://github.com/pytorch/pytorch/issues/49928
    query2pc_dist = torch.cdist(sc_cdist, pc_cdist, p=2.0)  # [K', N]
    # matrix multiplication, too much GPU memory, don't use.
    # query2pc_dist = torch.mm(sc_cdist, pc_cdist.transpose(1, 0))  # [K', N]
    # l1 distance, too much GPU memory, don't use.
    # query2pc_dist = (sc_cdist[:, None, :] - pc_cdist[None, :, :]).abs().sum(dim=-1)  # [K', N]
    closest_dist, idx_closest_pt = query2pc_dist.min(dim=1)  # [K'] + [K']
    semantics_label = -torch.ones(h * w, 2).to(sc.device)  # [H*W, 2]
    semantics_label[torch.logical_not(mask_nodata), 0] = pc[idx_closest_pt, -1].float()
    semantics_label[torch.logical_not(mask_nodata), 1] = closest_dist.float()
semantics_label = semantics_label.reshape(h, w, 2).cpu().numpy()
return semantics_label
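# Output layout: channel 0 holds the class label of the nearest point-cloud
# point, channel 1 the distance to it; pixels that were nodata in `sc` keep -1
# in both channels.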
def check_mem(sc_cdist_len: int, pc_cdist_len: int, secure_mem: bool) -> bool:
"""
    check whether the cdist operation will run out of GPU memory
:param sc_cdist_len: number of pixels in the split image patch
:param pc_cdist_len: number of point in the query scene
:param secure_mem: flag to use a more conservative and safer GPU memory checking policy
:return: bool
"""
nvmlInit()
h = nvmlDeviceGetHandleByIndex(0)
info = nvmlDeviceGetMemoryInfo(h)
max_mem_gb = info.free / 1024 ** 3
max_mem_gb = max_mem_gb - 1.5 if secure_mem else max_mem_gb - 0.75
if secure_mem:
flag_memory_okay = ((sc_cdist_len * pc_cdist_len) / 1e9) <= (max_mem_gb * 2.5 / 15)
else:
flag_memory_okay = ((sc_cdist_len * pc_cdist_len) / 1e9) <= (max_mem_gb * 3.25 / 15)
return flag_memory_okay
def find_opt_split(_sc: np.ndarray, _big_pc_label_tensor: torch.Tensor,
block_h: int, block_w: int, secure_mem: bool = False, float16: bool = False) -> (int, int):
"""
    find the optimal strategy to split the input image while fully utilising the GPU
:param block_w: default split block width
:param block_h: default split block height
:param _sc: input image [480, 720, 3]
:param _big_pc_label_tensor: entire point cloud w/ label
:param secure_mem: flag to use a more conservative and safer GPU memory checking policy
:param float16: flag to use float16 accuracy
:return block_h, block_w: optimal split of the image in [block_h, block_w]
"""
sc_cdist_len, pc_cdist_len = block_h * block_w, _big_pc_label_tensor.shape[0]
pattern_idx = 0
optional_list = [(240, 180), (120, 180), (120, 90), (60, 90), (60, 72), (60, 45), (48, 45),
(48, 36), (24, 24), (24, 12), (12, 12), (6, 6), (1, 1)]
selected_pc_ls = []
_sc_split = None
while not check_mem(sc_cdist_len, pc_cdist_len, secure_mem):
block_h, block_w = optional_list[pattern_idx]
sc_cdist_len = block_h * block_w
_sc_split = split_scene_coord(_sc, block_h, block_w) # [rows, cols, b_h, b_w, 3]
flag_tensor, _sc_split = convert_to_tensor(_sc_split, cuda=True, retain_tensor=True, float16=float16)
assert flag_tensor
# selected_pc_len_max
pc_cdist_len = 0
selected_pc_ls = []
for row in range(_sc_split.shape[0]):
selected_pc_row_ls = []
for col in range(_sc_split.shape[1]):
selected_pc = pc_local_search(_big_pc_label_tensor, _sc_split[row, col].reshape(-1, 3),
nodata_value=-1) # [X, 4]
pc_cdist_len = max(pc_cdist_len, selected_pc.shape[0])
selected_pc_row_ls.append(selected_pc)
selected_pc_ls.append(selected_pc_row_ls)
pattern_idx += 1
# release the GPU memory
torch.cuda.empty_cache()
return block_h, block_w, _sc_split, selected_pc_ls
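# find_opt_split walks through progressively finer block sizes in optional_list
# until check_mem accepts the worst-case (pixels x points) cdist for the split,
# and returns the chosen block size together with the split tensor and the
# pre-selected local point clouds so the caller does not redo the search.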
def all_path(dirname: str, filter_list: list) -> [list, list]:
"""
    extract all the paths of .npy files from the main dir
:param filter_list: file format to extract
:param dirname: main dir name
:return: file name list with detailed path
"""
file_path_list = []
folder_path_list = []
for main_dir, subdir, file_name_list in os.walk(dirname):
# # current main dir
# print('main dir:', maindir)
# # current sub dir
# print('sub dir:', subdir)
# # all file under current main dir
# print('file name list:', file_name_list)
for this_dir in subdir:
folder_path_list.append(main_dir + '/' + this_dir)
for filename in file_name_list:
if 'poses' in os.path.splitext(filename)[0].split('_'):
continue
if os.path.splitext(filename)[1] in filter_list:
path_detail = os.path.join(main_dir, '_'.join(filename.split('_')[:-1]))
file_path_list.append(path_detail)
file_path_list = np.unique(file_path_list).tolist()
file_path_list.sort()
folder_path_list = np.unique(folder_path_list).tolist()
return file_path_list, folder_path_list
def mkdir(output_path: str, folder_ls: list) -> None:
"""
    create folders mirroring the structure of the input path
:param output_path:
:param folder_ls:
:return:
"""
os.makedirs(output_path, exist_ok=True)
for folder in folder_ls:
output_folder = os.path.exists(os.path.join(output_path, folder))
if not output_folder:
os.makedirs(os.path.join(output_path, folder))
def remove_extreme_points(sc: np.ndarray, threshold: float, nodata_value: float = -1.0):
"""
Pick the extremely remote points w.r.t. the median center.
@param sc: [H, W, 3] scene coordinate.
@param threshold: Threshold for the extremely remote points.
@param nodata_value: Float number for nodata value.
    return: sc with extremely remote points reset to nodata_value
"""
sc_shape = sc.shape
sc = sc.reshape(-1, 3) # [H*W, 3]
mask_has_data = sc[:, 0] != nodata_value # [X]
sc_valid = sc[mask_has_data] # [X, 3]
center_median = np.median(sc_valid, axis=0) # [3]
# make sure the thresholding is robust, we purge at most 1 % of the scene coordinates
dist_to_center = np.linalg.norm(sc_valid - center_median, axis=1) # [X]
dist_max_quantile = np.quantile(dist_to_center, 0.99, axis=0, interpolation='nearest') # scalar
threshold_robust = np.max([dist_max_quantile, threshold])
# reset the possible outliers
mask_outlier = dist_to_center > threshold_robust # [X]
sc_valid[mask_outlier] = nodata_value
sc[mask_has_data] = sc_valid # [H*W, 3]
print("Actual threshold: {:.1f} m, number of points to remove: {:d} ({:.2f}%)".format(
threshold_robust, np.sum(mask_outlier), np.sum(mask_outlier) / len(sc) * 100.0))
return sc.reshape(sc_shape)
def main():
args = config_parser()
print(args)
downsample_rate = args.downsample_rate
start_idx = args.start_idx
end_idx = args.end_idx
# set the input and output path
las_file_path = os.path.abspath(args.las_path)
input_path = os.path.abspath(args.input_path)
output_path_semantics = os.path.abspath(args.output_path_semantics)
output_path_distance = os.path.abspath(args.output_path_distance)
file_ls, folder_ls = all_path(input_path, filter_list=['.npy'])
# load raw las and turn into 3D point array
_big_pc_label = []
las_ls = [las for las in os.listdir(las_file_path) if las.endswith('.las')]
for idx, las_name in enumerate(las_ls):
las = laspy.read(os.path.join(las_file_path, las_name))
las = np.stack([las.x, las.y, las.z, np.array(las.classification)], axis=1)
_big_pc_label.extend(las)
_big_pc_label = np.array(_big_pc_label) # [N, 4]
_big_pc_label = np.ascontiguousarray(_big_pc_label) # [N, 4]
# read point cloud with semantic label data from .npy file
bound_xyz_min = _big_pc_label[:, :3].min(axis=0) # [3]
bound_xyz_max = _big_pc_label[:, :3].max(axis=0) # [3]
offset_center = (bound_xyz_max + bound_xyz_min) / 2 # [3]
interval_xyz = bound_xyz_max - bound_xyz_min # [3]
if args.float16:
scale = np.array(interval_xyz / 1.e5, np.float64) # [3]
else:
scale = 1.0
# print('Offset origin XYZ: {}, {}, {}, scale: {}'.format(*offset_center, scale))
_big_pc_label = apply_offset(_big_pc_label, offset_center, scale, nodata_value=None) # [X, 4]
flag_tensor, _big_pc_label_tensor = convert_to_tensor(_big_pc_label, cuda=True, retain_tensor=True,
float16=args.float16)
assert flag_tensor, "Cannot build tensor for the original data (w/ offset)!"
# create output folder structure
input_path_len = len(input_path.split('/'))
folder_ls = ['/'.join(folder.split('/')[input_path_len:]) for folder in folder_ls]
folder_ls =
|
np.unique(folder_ls)
|
numpy.unique
|
import numpy as np
import free_energy_clustering.GMM as GMM
import scipy.optimize as opt
class LandscapeStacker(object):
def __init__(self, data, list_of_validation_data, list_of_models, n_splits=1, convergence_tol=5e-3, n_iterations=1,
model_weights=None):
"""
Class for weighting density estimators with EM, based on how well they describe the validation dataset.
:param data: [n_samples x n_dimensions]
        :param list_of_models: List of density models (estimators) to be weighted.
:param n_splits: Number of folds in K-fold cross-validation
:param convergence_tol:
"""
self.GMM_list_ = list_of_models
self.val_data_list_ = list_of_validation_data
self.data_ = data
self.convergence_tol_ = convergence_tol
self.n_models_ = int(len(list_of_models)/n_splits)
self.n_splits_ = n_splits
self.n_iterations_ = n_iterations
self.n_components_list_ = []
        # Initialize weights
if model_weights is None:
if self.n_models_ > 0:
self.model_weights_ = 1.0 / self.n_models_ * np.ones(self.n_models_)
else:
self.model_weights_ = model_weights
self._sparisify_model()
print('Model weights: ' + str(self.model_weights_))
print('GMM list: '+str(self.GMM_list_))
self._set_n_component_list()
print('# Components in models: '+str(self.n_components_list_))
return
def objective_function(self,W):
# -log(likelihood)
W /= W.sum()
return -self.loglikelihood(self.val_data_list_, list_of_validation_data=True, weights=W)
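    # The objective is the negative log-likelihood of the validation data under
    # the weighted mixture of models, with W renormalised onto the simplex; it
    # is only used by the fmin_cg branch of fit() when do_EM is False.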
def fit(self):
do_EM = True
print('Training density model weights.')
if do_EM:
loglikelihood = -np.inf
prev_loglikelihood = 0
while (np.abs(prev_loglikelihood - loglikelihood) > self.convergence_tol_):
beta = self._expectation()
self._maximization(beta)
prev_loglikelihood = loglikelihood
loglikelihood = self.loglikelihood(self.val_data_list_, list_of_validation_data=True)
else:
self.model_weights_ = opt.fmin_cg(self.objective_function, self.model_weights_)
# Keep only models with nonzero weight
self._sparisify_model()
self._set_n_component_list()
# Train each density model on the full dataset.
print('Training each model on the full dataset.')
for i_model in range(self.n_models_):
n_components = self.GMM_list_[i_model].n_components_
print(' - Training model with '+str(n_components)+' components')
best_loglikelihood = -np.inf
for i_iter in range(self.n_iterations_):
density_model = GMM.GaussianMixture(n_components=n_components,
convergence_tol=self.convergence_tol_)
density_model.fit(self.data_)
loglikelihood = density_model.loglikelihood(self.data_)
if loglikelihood > best_loglikelihood:
best_loglikelihood = loglikelihood
self.GMM_list_[i_model] = density_model
self.n_components_list_ =
|
np.asarray(self.n_components_list_)
|
numpy.asarray
|
import sys
sys.path.append("../")
from Utils import Obj
from sklearn.neighbors import NearestNeighbors
from extension.arap.cuda.arap import Arap
from extension.arap_closed.cuda.arap import ClosedArap as ArapSolveRhs
from extension.grad_arap.cuda.arap import ArapGrad
from extension.bending.cuda.arap import Bending
import numpy as np
import torch
import header
import os
import time
import trimesh
def find_nearest(array, value):
array = np.asarray(array)
idx = np.mean(np.abs(array - np.expand_dims(value,0)),1).argmin()
return idx
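# find_nearest returns the index of the row in `array` whose mean absolute
# difference to `value` is smallest, i.e. an L1-style nearest-row lookup.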
hi_arap = Arap()
hi_arap_solve_rhs = ArapSolveRhs()
bending = Bending()
clf = NearestNeighbors(n_neighbors=1,p=1,n_jobs=15)
'''
def norm_points(vertices,ref_verts):
bmin = np.min(ref_verts,axis=0)
bmax = np.max(ref_verts,axis=0)
diagsq = np.sum(np.power(bmax-bmin,2))
ref_diag = np.sqrt(diagsq)
bmin = np.min(vertices,axis=0)
bmax = np.max(vertices,axis=0)
diagsq = np.sum(np.power(bmax-bmin,2))
diag = np.sqrt(diagsq)
s = np.eye(3)
s *= (ref_diag/diag)
vertices = np.dot(vertices,s)
bmin = np.min(ref_verts,axis=0)
bmax = np.max(ref_verts,axis=0)
ref_bcenter = (bmin+bmax)/2.0
bmin = np.min(vertices,axis=0)
bmax = np.max(vertices,axis=0)
bcenter = (bmin+bmax)/2.0
#vertices = vertices + (ref_bcenter-bcenter)
return vertices
'''
def norm_points(vertices):
bmin = np.min(vertices,axis=0)
bmax = np.max(vertices,axis=0)
diagsq = np.sum(np.power(bmax-bmin,2))
diag = np.sqrt(diagsq)
s = np.eye(3)
s *= (1.0/diag)
vertices = np.dot(vertices,s)
bmin = np.min(vertices,axis=0)
bmax =
|
np.max(vertices,axis=0)
|
numpy.max
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 25 13:51:19 2021
@author: sopmathieu
This script is an example of how to use the package to study the sunspot
numbers.
"""
import pickle
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 14
import pkg_resources as pkg
from uncertainty import errors as err
### load data (loaded automatically with package)
data_path = pkg.resource_filename(pkg.Requirement.parse("uncertainty"), 'data')
with open(data_path + '/data_21_1947', 'rb') as file: #subset of 21 stations
#with open(data_path + '/data_1981', 'rb') as file: #all stations
my_depickler = pickle.Unpickler(file)
Ns = my_depickler.load() #number of spots
Ng = my_depickler.load() #number of sunspot groups
Nc = my_depickler.load() #Ns+10Ng
station_names = my_depickler.load() #index of the stations
time = my_depickler.load() #time (fraction of years)
Ns_rescaled = err.rescaling(Ns, 8)
####################################
### Solar signal
mus_Ns = err.median_transformed(Ns, period_rescaling=8)
mus_Ng = err.median_transformed(Ng, period_rescaling=14)
mus_Nc = err.median_transformed(Nc, period_rescaling=10)
### histograms
plt.rcParams['figure.figsize'] = (10.0, 6.0)
plt.figure(1)
plt.hist(mus_Ns[~np.isnan(mus_Ns)], range=[0,150], bins='auto', density=True, facecolor='b')
plt.title("Solar signal (Ns)")
plt.text(60, 0.04, 'mean: ' '%4f' %np.nanmean(mus_Ns))
plt.text(60, 0.03, 'std: ' '%4f' %np.nanstd(mus_Ns))
plt.axis([0,150, 0, 0.08])
plt.grid(True)
plt.show()
plt.figure(2)
plt.hist(mus_Ng[~np.isnan(mus_Ng)], range=[0,20], bins=20, density=True, facecolor='b')
plt.title("Solar signal (Ng)")
plt.text(15, 0.2, 'mean: ' '%4f' %np.nanmean(mus_Ng))
plt.text(15, 0.15, 'std: ' '%4f' %np.nanstd(mus_Ng))
plt.axis([0, 20, 0, 0.25])
plt.grid(True)
plt.show()
plt.figure(3)
plt.hist(mus_Nc[~np.isnan(mus_Nc)], range=[0,300], bins='auto', density=True, facecolor='b')
plt.title("Solat signal (Nc)")
plt.text(150, 0.015, 'mean: ' '%4f' %np.nanmean(mus_Nc))
plt.text(150, 0.0125, 'std: ' '%4f' %np.nanstd(mus_Nc))
plt.axis([0, 300, 0, 0.03])
plt.grid(True)
plt.show()
####################################
### Long-term error
mu2_81 = err.long_term_error(Ns, period_rescaling=8, wdw=81)
mu2_1 = err.long_term_error(Ns, period_rescaling=8, wdw=365)
mu2_2 = err.long_term_error(Ns, period_rescaling=8, wdw=912)
#stability criterion
#mse_mu2, names = err.mse_criterion(mu2_1, station_names, ref=None)
mse_mu2, names = err.mse_criterion(mu2_1, station_names, ref=1)
start = np.where(time == 1960)[0][0]
stop = np.where(time == 2010)[0][0]
stat = 19
plt.plot(time[start:stop], mu2_81[start:stop, stat], ':', c='tab:green', label='$\hat \mu_2$ (81)')
plt.plot(time[start:stop], mu2_1[start:stop, stat], '--', c='tab:red', label='$\hat \mu_2$ (1)')
plt.plot(time[start:stop], mu2_2[start:stop, stat], lw=3, c='tab:blue', label='$\hat \mu_2$ (2)')
plt.plot([time[start], time[stop]], [1, 1], 'k-', lw=2)
plt.legend(loc='upper right')
#f4.set_ylim([-10,20]); f4.set_xlim([time[start-20], time[stop+20]])
if stop-start < 4000:
x_ticks = np.arange(np.round(time[start]), np.round(time[stop])+1, 1)
else :
x_ticks = np.arange(np.round(time[start]), np.round(time[stop])+1, 10)
plt.xticks(x_ticks)
plt.title("Long-term error for Ns in %s" %station_names[stat])
plt.ylabel('$\hat \mu_2$')
plt.xlabel('year')
plt.tick_params(axis='x', which='major')
plt.show()
####################################
### Error at minima
e3_Ns = err.error_at_minima(Ns, period_rescaling=8)
e3_Ng = err.error_at_minima(Ng, period_rescaling=14)
e3_Nc = err.error_at_minima(Nc, period_rescaling=10)
### histograms
binning = int(6/(3.5*np.nanstd(e3_Ns)*len(e3_Ns)**(-1/3))) #Scott's rule
plt.rcParams['figure.figsize'] = (10.0, 6.0)
plt.figure(1)
plt.hist(e3_Ns[~np.isnan(e3_Ns)], range=[0,5], bins=binning, density=True, facecolor='b')
plt.title("Error at minima (Ns)")
plt.text(2, 0.8, 'mean:' '%4f' %np.nanmean(e3_Ns))
plt.text(2, 0.6, 'std:' '%4f' %np.nanstd(e3_Ns))
plt.axis([0, 5, 0, 1])
plt.grid(True)
plt.show()
binning = int(6/(3.5*np.nanstd(e3_Ng)*len(e3_Ng)**(-1/3)))
plt.figure(2)
plt.hist(e3_Ng[~np.isnan(e3_Ng)], range=[0,5], bins=binning, density=True, facecolor='b')
plt.title("Error at minima (Ng)")
plt.text(2, 0.8, 'mean:' '%4f' %np.nanmean(e3_Ng))
plt.text(2, 0.6, 'std:' '%4f' %np.nanstd(e3_Ng))
plt.axis([0, 5, 0, 1])
plt.grid(True)
plt.show()
binning = int(6/(3.5*np.nanstd(e3_Nc)*len(e3_Nc)**(-1/3)))
plt.figure(3)
plt.hist(e3_Nc[~np.isnan(e3_Nc)], range=[0,30], bins=binning, density=True, facecolor='b')
plt.title("Error at minima (Nc)")
plt.text(10, 0.03, 'mean:' '%4f' %np.nanmean(e3_Nc))
plt.text(10, 0.02, 'std:' '%4f' %np.nanstd(e3_Nc))
plt.axis([0, 30, 0, 0.04])
plt.grid(True)
plt.show()
##################
### Short-term error
e1_Ns = err.short_term_error(Ns, period_rescaling=8)
e1_Ng = err.short_term_error(Ng, period_rescaling=14)
e1_Nc = err.short_term_error(Nc, period_rescaling=10)
#stability criterion
#mse_e1, names = err.mse_criterion(e1_Ns, station_names, ref=None)
mse_e1, names = err.mse_criterion(e1_Ns, station_names, ref=1)
###histograms
binning = int(6/0.0328) #Scott's rule for the binning
plt.rcParams['figure.figsize'] = (10.0, 6.0)
plt.figure(1)
plt.hist(e1_Ns[~np.isnan(e1_Ns)], range=[0,5], bins=binning, density=True, facecolor='b')
plt.title("Short-term error (Ns)")
plt.text(2, 1, 'mean:' '%4f' %np.nanmean(e1_Ns))
plt.text(2, 0.6, 'std:' '%4f' %np.nanstd(e1_Ns))
plt.axis([0, 5, 0, 3.5])
plt.grid(True)
plt.show()
binning = int(6/0.0328)
plt.figure(2)
plt.hist(e1_Ng[~np.isnan(e1_Ng)], range=[0,5], bins=binning, density=True, facecolor='b')
plt.title("Short-term error (Ng)")
plt.text(2, 1, 'mean:' '%4f' %np.nanmean(e1_Ng))
plt.text(2, 0.6, 'std:' '%4f' %np.nanstd(e1_Ng))
plt.axis([0, 5, 0, 3.5])
plt.grid(True)
plt.show()
binning = int(6/0.0433)
plt.figure(3)
plt.hist(e1_Nc[~np.isnan(e1_Nc)], range=[0,5], bins=binning, density=True, facecolor='b')
plt.title("Short-term error (Nc)")
plt.text(2, 1, 'mean:' '%4f' %np.nanmean(e1_Nc))
plt.text(2, 0.6, 'std:' '%4f' %np.nanstd(e1_Nc))
plt.axis([0, 5, 0, 3.5])
plt.grid(True)
plt.show()
#====================================================================
#stability criterion combining short and long-term
#====================================================================
### Long-term error without levels
mu2_81 = err.long_term_error(Ns, period_rescaling=8, wdw=81, level=True, wdw_level=4000)
mu2_1 = err.long_term_error(Ns, period_rescaling=8, wdw=365, level=True, wdw_level=4000)
mu2_2 = err.long_term_error(Ns, period_rescaling=8, wdw=912, level=True, wdw_level=4000)
mse_add = err.mse_criterion(e1_Ns, station_names, ref=1)[0] + \
err.mse_criterion(mu2_1, station_names, ref=0)[0]
ind_order = np.argsort(mse_add)
names_add = [station_names[i] for i in ind_order]
#should be the same if the errors were perfectly independent
mse_comb, names_comb = err.mse_criterion(e1_Ns+mu2_1, station_names, ref=1)
#====================================================================
#error bars
#====================================================================
add, ref = err.error_add(Ns, period_rescaling=8)
start = np.where(time == 2005)[0][0]
stop = np.where(np.round(time,1) == 2005.5)[0][0]
stat = 20
plt.stem(time[start:stop], ref[start:stop]+add[start:stop, stat], label='ref+errors', markerfmt='C0.', basefmt='C0-')
plt.plot(time[start:stop], ref[start:stop], c='tab:purple', label='ref', lw=3)
plt.legend(loc='upper right')
if stop-start < 4000:
x_ticks = np.arange(np.round(time[start],1), np.round(time[stop],1), 0.1)
else :
x_ticks = np.arange(np.round(time[start]), np.round(time[stop])+1, 5)
plt.xticks(x_ticks)
plt.title("Additive errors in %s" %station_names[stat])
plt.ylabel('$Y_i(t)$')
plt.xlabel('year')
plt.tick_params(axis='x', which='major')
plt.show()
plt.stem(time[start:stop], Ns[start:stop, stat], label='real values', markerfmt='C0.', basefmt='C0-')
plt.plot(time[start:stop], ref[start:stop], c='tab:purple', label='ref', lw=3)
plt.legend(loc='upper right')
if stop-start < 4000:
x_ticks = np.arange(np.round(time[start],1), np.round(time[stop],1), 0.1)
else :
x_ticks = np.arange(np.round(time[start]), np.round(time[stop])+1, 5)
plt.xticks(x_ticks)
plt.title("Additive errors in %s" %station_names[stat])
plt.ylabel('$Y_i(t)$')
plt.xlabel('year')
plt.tick_params(axis='x', which='major')
plt.show()
plt.stem(time[start:stop], add[start:stop, stat], label='errors', markerfmt='C0.', basefmt='C0-')
plt.plot(time[start:stop], ref[start:stop], c='tab:purple', label='ref', lw=3)
plt.legend(loc='upper right')
if stop-start < 4000:
x_ticks = np.arange(np.round(time[start],1), np.round(time[stop],1), 0.1)
else :
x_ticks = np.arange(np.round(time[start]), np.round(time[stop])+1, 5)
plt.xticks(x_ticks)
plt.title("Additive errors in %s" %station_names[stat])
plt.ylabel('$Y_i(t)$')
plt.xlabel('year')
plt.tick_params(axis='x', which='major')
plt.show()
#####################################
bars_m, bars_p, ref = err.error_bars(Ns, period_rescaling=8)
stat = 20
start = np.where(time == 2000)[0][0]
stop = np.where(np.round(time,1) == 2000.3)[0][0]
y = ref[start:stop]
bars = np.array((bars_m[start:stop,stat], bars_p[start:stop,stat]))
plt.vlines(time[start:stop], y + bars[0,:], y + bars[1,:], lw=0.8)
plt.scatter(time[start:stop], y + bars[0,:], marker='_', c='orange')
plt.scatter(time[start:stop], y + bars[1,:], marker='_', c='orange')
plt.plot(time[start:stop], y, c='tab:purple', label='ref', lw=3)
if stop-start < 4000:
x_ticks = np.arange(np.round(time[start],1),
|
np.round(time[stop],1)
|
numpy.round
|
import logging
import numpy as np
import pandas as pd
from tqdm import tqdm
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def init_parameters(input_size, hidden_sizes, output_size, init_method='Xavier'):
scales = []
layers = [input_size] + hidden_sizes
if 'Xavier' in init_method:
for i in range(1, len(layers) + 1):
scales.append(np.sqrt(2. / layers[i - 1]))
if 'Xavier' in init_method:
first_layer_scale = scales[0]
elif 'Random' in init_method:
first_layer_scale = 0.01
else:
first_layer_scale = 0.0
parameters = dict()
parameters['W1'] = np.random.randn(hidden_sizes[0], input_size) * first_layer_scale
parameters['b1'] = np.zeros((hidden_sizes[0], 1))
for l in range(len(hidden_sizes)):
if len(hidden_sizes) - 1 == l: # last layer
parameters['W' + str(l + 2)] = np.random.randn(output_size, hidden_sizes[l]) * scales[l + 1]
parameters['b' + str(l + 2)] = np.zeros((output_size, 1))
else:
parameters['W' + str(l + 2)] = np.random.randn(hidden_sizes[l + 1], hidden_sizes[l]) * scales[
l + 1]
parameters['b' + str(l + 2)] = np.zeros((hidden_sizes[l + 1], 1))
return parameters
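# A minimal usage sketch (assumed layer sizes, not from the original): a network with two
# hidden layers of 8 units gets weights W1 (8, 4), W2 (8, 8), W3 (1, 8) plus matching biases.
#   params = init_parameters(input_size=4, hidden_sizes=[8, 8], output_size=1)
#   params['W1'].shape, params['W3'].shape  # -> ((8, 4), (1, 8))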
def apply_non_linearity(Z, activation_func):
if 'tanh' in activation_func:
return tanh_forward(Z)
elif 'relu' in activation_func:
return relu_forward(Z)
else:
raise AssertionError
def forward_propagation(X, parameters, activation_func):
num_layers = int(len(parameters) / 2)
cache = dict()
cache['A0'] = X
for l in range(1, num_layers):
W = parameters['W' + str(l)]
b = parameters['b' + str(l)]
Z = linear_forward(cache['A' + str(l - 1)], W, b)
cache['A' + str(l)] = apply_non_linearity(Z, activation_func)
Z = linear_forward(Z, parameters['W' + str(num_layers)], parameters['b' + str(num_layers)])
cache['A' + str(num_layers)] = sigmoid_forward(Z)
return cache
def gradient_descent(parameters, derivatives, learning_rate):
num_layers = int(len(parameters) / 2)
for l in range(1, num_layers + 1):
parameters['W' + str(l)] = parameters['W' + str(l)] - learning_rate * derivatives[
'dW' + str(l)]
parameters['b' + str(l)] = parameters['b' + str(l)] - learning_rate * derivatives[
'db' + str(l)]
return parameters
def compute_l2_reg_backprop(parameters, lambda_reg, batch_size):
l2_regs = {name: (lambda_reg / batch_size) * w for name, w in parameters.items()
if 'W' in name}
return l2_regs
def backward_activation(Z, activation_func):
if 'tanh' in activation_func:
return tanh_backward(Z)
elif 'relu' in activation_func:
return relu_backward(Z)
else:
raise AssertionError
def backward_propagation(X, parameters, cache, Y_true, activation_func, num_layers=3,
lambda_reg=0.0):
derivatives = dict()
batch_size = Y_true.shape[1]
if lambda_reg != 0.0:
l2_regs = compute_l2_reg_backprop(parameters, lambda_reg, batch_size)
else:
l2_regs = {name: 0.0 for name, w in parameters.items()
if 'W' in name}
# sigmoid layer
dZ_out = cache['A' + str(num_layers + 1)] - Y_true # 1 x m
dW_out = 1. / batch_size * np.dot(dZ_out, cache['A' + str(num_layers)].T) + l2_regs[
'W' + str(num_layers + 1)] # A1 h x m
db_out = 1. / batch_size * np.sum(dZ_out, axis=1, keepdims=True) # (1,1)
derivatives['dA' + str(num_layers + 1)] = dZ_out
derivatives['dW' + str(num_layers + 1)] = dW_out
derivatives['db' + str(num_layers + 1)] = db_out
# relu layer
for i in reversed(range(num_layers)):
# derivatives['dA' + str(i + 1)] = np.dot(parameters['W' + str(i + 2)].T, derivatives['dA' + str(i + 2)]) * relu_backward(cache['A' + str(i + 1)]) (h x 1) x (1 x m) x (h x m) = (h x m)
derivatives['dA' + str(i + 1)] = np.dot(parameters['W' + str(i + 2)].T,
derivatives['dA' + str(i + 2)])
# derivatives['dA' + str(i + 1)] = derivatives['dA' + str(i + 1)] * tanh_backward(cache['A' + str(i + 1)])
derivatives['dA' + str(i + 1)] = derivatives['dA' + str(i + 1)] * backward_activation(
cache['A' + str(i + 1)],
activation_func)
derivatives['dW' + str(i + 1)] = 1. / batch_size * np.dot(derivatives['dA' + str(i + 1)],
cache['A' + str(i)].T) + l2_regs[
'W' + str(i + 1)] # (h x m) x (m x n_x)
derivatives['db' + str(i + 1)] = 1. / batch_size * np.sum(derivatives['dA' + str(i + 1)],
axis=1,
keepdims=True) # (h,1)
return derivatives
def linear_forward(X, W, b):
return np.dot(W, X) + b
def relu_forward(Z):
return np.maximum(Z, 0)
def relu_backward(Z):
return 1. * (Z > 0)
def tanh_forward(Z):
return np.tanh(Z)
def tanh_backward(Z):
return 1 - np.power(np.tanh(Z), 2)
def sigmoid_forward(Z):
return 1 / (1 +
|
np.exp(-Z)
|
numpy.exp
|
from util import *
from parameters import *
import numpy as np
# repeatable test runs, seed before importing tensorflow
|
np.random.seed(np_random_seed)
|
numpy.random.seed
|
#!/usr/bin/env python
"""Tests for `ramachandran` package."""
import unittest
import numpy as np
import pandas as pd
from ramachandran.ramachandran import Ramachandran
class TestRamachandran(unittest.TestCase):
"""Tests for `ramachandran` package."""
def setUp(self):
"""Set up test fixtures, if any."""
pass
def tearDown(self):
"""Tear down test fixtures, if any."""
pass
def test_001_vector_norm_dihedral(self):
"""Test something."""
P = np.array([1, -2, 0])
Q = np.array([3, 1, 4])
R = np.array([0, -1, 2])
S = np.array([0, 22, 2])
x = Ramachandran.vector_norm(self, P, Q, R)
y = Ramachandran.vector_norm(self, Q, R, S)
z = Ramachandran.dihedral(self, x, y)
# test calculation of vector normal to the plane containing the points P, Q & R
PQ = np.array([Q[0]-P[0], Q[1]-P[1], Q[2]-P[2]])
PR = np.array([R[0]-P[0],R[1]-P[1], R[2]-P[2]])
normal_1 = np.array([PQ[1]*PR[2]-PQ[2]*PR[1], PQ[2]*PR[0]-PQ[0]*PR[2], PQ[0]*PR[1]-PQ[1]*PR[0]])
# test calculation of vector normal to the plane containing the points Q, R & S
QR = np.array([R[0]-Q[0], R[1]-Q[1], R[2]-Q[2]])
QS = np.array([S[0]-Q[0],S[1]-Q[1], S[2]-Q[2]])
normal_2 = np.array([QR[1]*QS[2]-QR[2]*QS[1], QR[2]*QS[0]-QR[0]*QS[2], QR[0]*QS[1]-QR[1]*QS[0]])
# calculate the dihedral angle between the two planes by calculating the angle between the two normal vectors
cos_test_numerator = np.absolute((normal_1[0]*normal_2[0]) + (normal_1[1]*normal_2[1]) + (normal_1[2]*normal_2[2]))
cos_test_denominator =
|
np.sqrt(normal_1[0]**2 + normal_1[1]**2 + normal_1[2]**2)
|
numpy.sqrt
|
import os
import argparse
import cv2
import os.path as osp
import numpy as np
from tqdm import tqdm
from pycocotools.coco import COCO
def scoring(item):
return item['score']
parser = argparse.ArgumentParser(description='Failure Cases Analysis')
parser.add_argument('-i', '--img-dir', action='store', dest='img_dir', help='image directory')
parser.add_argument('-b', '--bbox-dir', action='store', dest='bbox_dir', help='bounding box directory')
parser.add_argument('-s', '--save-dir', action='store', dest='save_dir', help='save directory')
parser.add_argument('-n', '--num', action='store', dest='num_of_skus', help='number of SKUs in an image')
args = parser.parse_args()
img_dir = args.img_dir
bbox_dir = args.bbox_dir
save_dir = args.save_dir
num_of_skus = int(args.num_of_skus)
# For example:
# img_dir = '../testing_images'
# bbox_dir = '../sku-box-test-101-200-v1'
# save_dir = '../1_Products'
if not osp.exists(save_dir):
os.mkdir(save_dir)
if not osp.exists(osp.join(save_dir, 'None_Detections')):
os.mkdir(osp.join(save_dir, 'None_Detections'))
if not osp.exists(osp.join(save_dir, '{}_Product_fix'.format(str(num_of_skus)))):
os.mkdir(osp.join(save_dir, '{}_Product_fix'.format(str(num_of_skus))))
# load annotations
anno_path = osp.join(img_dir, 'annotations_train.json')
# DO NOT USE
bbox_path = osp.join(bbox_dir, 'bbox_fix_done.json')
coco = COCO(anno_path)
cocoDt = coco.loadRes(bbox_path)
print ('--- --- End Loading Annotations and Predictions --- ---')
# read img
imgs = coco.imgs
cats = coco.cats
anns = coco.imgToAnns
dets = cocoDt.imgToAnns
gt_color = (0, 255, 255)
det_color = (0, 0, 255)
thick = 2
inconsistency_flag = False
pbar = tqdm(total=len(imgs))
for key, val in imgs.items():
key = int(key)
# print (key, val)
file_path = osp.join(img_dir, val['file_name'])
gt_im = cv2.imread(file_path)
det_im = cv2.imread(file_path)
h = val['height']
num_bbox = len(anns[key])
print(num_bbox)
if num_bbox != num_of_skus:
continue
gt_cls_all = []
det_cls_all = []
inconsistency_flag = False
for idx in range(0, num_bbox):
gt_bbox = anns[key][idx]['bbox']
gt_bbox = [int(p) for p in gt_bbox]
gt_catId = anns[key][idx]['category_id']
gt_cls = str(cats[gt_catId]['name'])
gt_cls_all.append(gt_cls)
gt_score = 1
gt_title = 'GT: %s:%.2f' % (gt_cls, gt_score)
print("--- gt_box: ", gt_bbox)
cv2.rectangle(gt_im,
(gt_bbox[0], gt_bbox[1]), (gt_bbox[2], gt_bbox[3]),
gt_color, thick)
cv2.putText(gt_im, gt_title, (gt_bbox[0], gt_bbox[3] + 8),
0, 8e-4 * h, gt_color, thick // 3)
# cv2.imwrite(osp.join(save_dir, '{}_Product_fix'.format(str(num_of_skus)), val['file_name']), gt_im)
if len(dets[key]) == 0:
print('{} None detections'.format(key))
im = np.hstack((det_im, gt_im))
cv2.imwrite(osp.join(save_dir, 'None_Detections', val['file_name']), im)
continue
dets_sorted = sorted(dets[key], key=scoring, reverse=True)
num_det_bbox = len(dets_sorted)
# not drawing all the detected bounding boxes
if num_det_bbox > num_bbox:
num_det_bbox = num_bbox
for idx in range(0, num_det_bbox):
det_bbox = dets_sorted[idx]['bbox']
det_catId = dets_sorted[idx]['category_id']
det_cls = str(cats[det_catId]['name'])
det_cls_all.append(det_cls)
det_score = dets_sorted[idx]['score']
det_title = 'DET: %s:%.2f' % (det_cls, det_score)
# print ("--- det_box: ", det_bbox)
cv2.rectangle(det_im,
(int(det_bbox[0]), int(det_bbox[1])), (int(det_bbox[2]), int(det_bbox[3])),
det_color, thick)
cv2.putText(det_im, det_title, (int(det_bbox[0]), int(det_bbox[1]) - 8),
0, 8e-4 * h, det_color, thick // 3)
im =
|
np.hstack((det_im, gt_im))
|
numpy.hstack
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from davg.lanefinding.ImgMgr import ImgMgr
from davg.lanefinding.BirdsEyeTransform import BirdsEyeTransform
from davg.lanefinding.Thresholds import Thresholds
from davg.lanefinding.LaneMath import LaneMath
from davg.lanefinding.Prediction import Prediction
from davg.lanefinding.Line import Line
from davg.lanefinding.DiagnosticScreen import DiagnosticScreen
class Pipeline():
def __init__(self):
self.img_mgr = ImgMgr()
self.birdseye = BirdsEyeTransform()
pass
def get_birdseye_binary_warped(self, img, undistort=True):
''' Convenience method.
Undistorts an image (using previously determined globally accessible
calibration data), warps it to the birds-eye view, converts it to a uint8
after warping, then applies the combined threshold.
Optionally: skip the undistort step.
'''
if (undistort):
undistorted = self.img_mgr.undistort(img)
else:
undistorted = img
# Warp to birds-eye view
masked = self.birdseye.apply_cropping_mask(undistorted)
warped = self.birdseye.warp(masked)
# Apply the thresholds
return Thresholds.davg_thresh(warped)
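# A hedged usage sketch (the image path is hypothetical, not from the original repo):
#   pipeline = Pipeline()
#   img = mpimg.imread('test_images/test1.jpg')
#   binary_warped = pipeline.get_birdseye_binary_warped(img)
# binary_warped can then be passed to find_lane_lines_using_windows below.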
def find_lane_lines_using_windows(self, binary_warped):
# This code was taken from the Udacity 'Finding the Lines' section of the
# Advanced Lane Finding lesson
# https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/2b62a1c3-e151-4a0e-b6b6-e424fa46ceab/lessons/40ec78ee-fb7c-4b53-94a8-028c5c60b858/concepts/c41a4b6b-9e57-44e6-9df9-7e4e74a1a49a
# Assuming you have created a warped binary image called "binary_warped"
# Take a histogram of the bottom quarter of the image (rows from 75% of the height down)
histogram = np.sum(binary_warped[int(binary_warped.shape[0]*0.75):,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base =
|
np.argmax(histogram[:midpoint])
|
numpy.argmax
|
from warnings import WarningMessage
import numpy as np
from numpy.lib.npyio import save
from cellpose import models
import torch
import cv2
import os,sys,time
import skimage
default_cellpose_kwargs = {
'anisotropy': 1,
'diameter': 40,
'min_size': 500,
'stitch_threshold': 0.1,
}
default_pixel_sizes = [250,108,108]
class Cellpose_Segmentation_Psedu3D:
""""""
def __init__(self, _im, data_type='DAPI',
save_filename=None, verbose=True):
""""""
# inherit from superclass
super().__init__()
# save images
self.raw_im = _im
self.data_type = data_type
self.allowed_types = ['DAPI', 'polyT']
if self.data_type not in self.allowed_types:
raise ValueError(f"input datatype {self.data_type} not in {self.allowed_types}")
# save
self.save_filename = save_filename
self.verbose = verbose
def run(self):
""""""
_lys, _sel_ids = Cellpose_Segmentation_Psedu3D.pick_Z_stacks(self.raw_im)
_mask = Cellpose_Segmentation_Psedu3D.run_segmentation(_lys)
_clean_mask = Cellpose_Segmentation_Psedu3D.merge_3d_masks(_mask)
_z = Cellpose_Segmentation_Psedu3D.convert_layer_list_to_um(_sel_ids)
_full_mask = Cellpose_Segmentation_Psedu3D.interploate_z_masks(_clean_mask, _z)
return _full_mask
@staticmethod
def pick_Z_stacks(im:np.ndarray,
num_layer_project:int=5,
num_layer_overlap:int=1,
projection_method:'function'=np.mean,
verbose=True,
):
_im = im.copy()
# projection on z
_sel_layers = []
for _i, _ly in enumerate(_im):
if _i < num_layer_project-1:
continue
if len(_sel_layers) == 0 or min(_sel_layers[-1][-1*num_layer_overlap-1:]) + num_layer_project <= _i:
_sel_layers.append(np.arange(_i-num_layer_project+1, _i+1))
# generate max projections
_max_proj_layers = np.array([projection_method(_im[np.array(_lys)],axis=0) for _lys in _sel_layers])
if verbose:
print(f"- {len(_max_proj_layers)} layers selected with {projection_method} projection.")
return _max_proj_layers, _sel_layers
@staticmethod
def run_segmentation(_projected_im,
model_type='nuclei',
use_gpu=True,
diameter=60, min_size=10,
cellprob_threshold=0.5, stitch_threshold=0.2,
flow_threshold=1.0,
verbose=True,
):
from cellpose import models
# segmentation
seg_model = models.Cellpose(gpu=use_gpu, model_type=model_type)
masks, _, _, _ = seg_model.eval(
np.array([_projected_im,_projected_im]),
z_axis=1,
channel_axis=0,
diameter=diameter,
channels=[0,0],
min_size=min_size,
cellprob_threshold=cellprob_threshold, # -6 to 6, positively correlate with number of masks
stitch_threshold=stitch_threshold,
flow_threshold=flow_threshold,
do_3D=False)
# clear ram
del(seg_model)
return masks
@staticmethod
def merge_3d_masks(masks, overlap_th=0.9, verbose=True):
import time
# copy masks
_masks = np.array(masks).copy()
all_mask_ids = np.unique(_masks)
all_mask_ids = all_mask_ids[all_mask_ids>0]
xy_projections = [(_masks==_i).any(0) for _i in all_mask_ids]
kept_masks = np.zeros(np.shape(_masks), dtype=np.uint16)
kept_ids = []
# initialize
if verbose:
print(f"- start merging 3d masks")
_start_time = time.time()
unprocessed_ids = list(all_mask_ids)
while len(unprocessed_ids) > 0:
# default: kept this cell
_kept_flag = True
# extract i
_i = unprocessed_ids.pop(0)
_i_msk = xy_projections[list(all_mask_ids).index(_i)]
# calculate j percentage to see whether merge this into _j
for _j in unprocessed_ids:
# extract j
_j_msk = xy_projections[list(all_mask_ids).index(_j)]
# compare these two masks
_i_percent = np.sum(_i_msk*_j_msk) / np.sum(_i_msk)
_j_percent = np.sum(_i_msk*_j_msk) / np.sum(_j_msk)
if _i_percent > 0 or _j_percent > 0:
if verbose:
print(f"-- overlap found for cell:{_i} to {_j}", _i_percent, _j_percent)
# remove i, merge into j
if _i_percent > overlap_th:
_kept_flag = False
# update mask, i already removed by continue
_masks[_masks==_i] = _j
xy_projections[list(all_mask_ids).index(_j)] = (_masks==_j).any(0)
if verbose:
print(f"--- skip {_i}")
break
# remove j, merge into i
elif _j_percent > overlap_th:
_kept_flag = False
# remove j
unprocessed_ids.pop(unprocessed_ids.index(_j))
# update mask
_masks[_masks==_j] = _i
xy_projections[list(all_mask_ids).index(_i)] = (_masks==_i).any(0)
# redo i
unprocessed_ids = [_i] + unprocessed_ids
if verbose:
print(f"--- redo {_i}")
break
# save this mask if there's no overlap
if _kept_flag:
kept_masks[_masks==_i] = np.max(np.unique(kept_masks))+1
kept_ids.append(_i)
if verbose:
print(f"- {np.max(kept_masks)} labels kept.")
print(f"- finish in {time.time()-_start_time:.2f}s. ")
return kept_masks
@staticmethod
def convert_layer_list_to_um(layer_lists:list,
step_sizes:float=0.2,
select_method:'function'=np.median):
return step_sizes * np.array([select_method(_lys) for _lys in layer_lists])
@staticmethod
def interploate_z_masks(z_masks,
z_coords,
target_z_coords=np.round(np.arange(0,12,0.2),2),
mode='nearest',
verbose=True,
):
# target z
_final_mask = []
_final_coords = np.round(target_z_coords, 3)
for _fz in _final_coords:
if _fz in z_coords:
_final_mask.append(z_masks[np.where(z_coords==_fz)[0][0]])
else:
if mode == 'nearest':
_final_mask.append(z_masks[np.argmin(np.abs(z_coords-_fz))])
continue
# find nearest neighbors
if np.sum(z_coords > _fz) > 0:
_upper_z = np.min(z_coords[z_coords > _fz])
else:
_upper_z = np.max(z_coords)
if np.sum(z_coords < _fz) > 0:
_lower_z = np.max(z_coords[z_coords < _fz])
else:
_lower_z = np.min(z_coords)
if _upper_z == _lower_z:
# copy the closest mask to extrapolate
_final_mask.append(z_masks[np.where(z_coords==_upper_z)[0][0]])
else:
# interpolate linearly between the two nearest layers and keep the result
_upper_mask = z_masks[np.where(z_coords==_upper_z)[0][0]].astype(np.float32)
_lower_mask = z_masks[np.where(z_coords==_lower_z)[0][0]].astype(np.float32)
_inter_mask = (_upper_z-_fz)/(_upper_z-_lower_z) * _lower_mask \
+ (_fz-_lower_z)/(_upper_z-_lower_z) * _upper_mask
_final_mask.append(_inter_mask)
if verbose:
print(f"- reconstruct {len(_final_mask)} layers")
return np.array(_final_mask)
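# A hedged usage sketch of the pseudo-3D pipeline (the array below is synthetic, not from the original):
#   im = np.random.randint(0, 2**16, size=(40, 512, 512), dtype=np.uint16)  # z, y, x DAPI stack
#   seg = Cellpose_Segmentation_Psedu3D(im, data_type='DAPI')
#   full_mask = seg.run()  # projects z-stacks, segments, merges and interpolates the masks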
class Cellpose_Segmentation_3D():
"""Do 3D cellpose segmentation to DAPI image, and additionally watershed on polyT iamge given the DAPI seeds
Minimal usage:
seg_cls = Cellpose_Segmentation_3D(dapi_im, polyt_im, pixel_size, save_filename=filename) # create class
labels = seg_cls.run() # run segmentation
seg_cls.save() # save segmentation results
"""
def __init__(self, dapi_im, polyt_im=None,
pixel_sizes=default_pixel_sizes,
cellpose_kwargs={},
watershed_beta=1,
save_filename=None,
load_from_savefile=True,
verbose=True):
"""Create CellposeSegmentation3D class"""
# inherit from superclass
#super().__init__()
# save images
self.dapi_im = dapi_im
self.polyt_im = polyt_im
# parameters
self.pixel_sizes = pixel_sizes
self.cellpose_kwargs = {_k:_v for _k,_v in default_cellpose_kwargs.items()}
self.cellpose_kwargs.update(cellpose_kwargs)
self.watershed_beta = watershed_beta
# save info
self.save_filename = save_filename
self.verbose = verbose
# load
if load_from_savefile and save_filename is not None and os.path.exists(save_filename):
self.load()
def run(self, model_type='nuclei', use_gpu=True, overwrite=False,):
"""Composite segmentation with reshaped image"""
if hasattr(self, 'segmentation_masks') and not overwrite:
if self.verbose:
print(f"-- segmentation_masks already exist, skip.")
return self.segmentation_masks
else:
#
_resized_shape = self.generate_resize_shape(self.dapi_im.shape, self.pixel_sizes)
#
_resized_dapi_im = self.reshape_raw_images(self.dapi_im, _resized_shape)
if hasattr(self, 'polyt_im') and getattr(self, 'polyt_im') is not None:
_resized_polyt_im = self.reshape_raw_images(self.polyt_im, _resized_shape)
else:
_resized_polyt_im = None
#
_resized_masks = self.run_segmentation(_resized_dapi_im, _resized_polyt_im,
model_type=model_type,
use_gpu=use_gpu,
cellpose_kwargs=self.cellpose_kwargs)
# revert mask size
_masks = self.reshape_masks(_resized_masks, np.shape(self.dapi_im))
# watershed
if hasattr(self, 'polyt_im') and getattr(self, 'polyt_im') is not None:
_extended_masks = self.watershed_with_mask(self.polyt_im, _masks, self.watershed_beta)
else:
_extended_masks = self.watershed_with_mask(self.dapi_im, _masks, self.watershed_beta)
# add to attributes
setattr(self, 'segmentation_masks', _extended_masks)
return _extended_masks
def save(self, save_filename=None, overwrite=False):
# decide save_filename
if save_filename is None and self.save_filename is None:
WarningMessage(f"save_filename not given.")
elif save_filename is not None:
_save_filename = save_filename
else:
_save_filename = self.save_filename
# save
if not os.path.exists(_save_filename) or overwrite:
# save
if self.verbose:
print(f"-- saving mask into file: {_save_filename}")
np.save(_save_filename.split(os.path.extsep+_save_filename.split(os.path.extsep)[-1])[0],
self.segmentation_masks)
else:
if self.verbose:
print(f"-- save_file:{_save_filename} already exists, skip. ")
def load(self, save_filename=None, overwrite=False):
# decide save_filename
if save_filename is None and self.save_filename is None:
WarningMessage(f"save_filename not given.")
elif save_filename is not None:
_save_filename = save_filename
else:
_save_filename = self.save_filename
# load
if not hasattr(self, 'segmentation_masks') or overwrite:
if os.path.exists(_save_filename):
if self.verbose:
print(f"-- loading mask from file: {_save_filename}")
self.segmentation_masks = np.load(_save_filename)
else:
if self.verbose:
print(f"-- file: {_save_filename} doesn't exist, skip. ")
else:
if self.verbose:
print(f"-- segmentation_masks already exists, skip. ")
def clear(self):
if self.verbose:
print(f"-- removing segmentation_masks from class")
if hasattr(self, 'segmentation_masks'):
delattr(self, 'segmentation_masks')
@staticmethod
def generate_resize_shape(image_shape, pixel_sizes):
resize_shape = np.floor(np.array(image_shape)[1:] * np.array(pixel_sizes)[1:] \
/ np.array(pixel_sizes)[0]).astype(np.int32)
return resize_shape
@staticmethod
def reshape_raw_images(raw_im,
resize_shape,
):
"""Reshape raw image into smaller image to fit-in GPU"""
_reshaped_im = np.array([cv2.resize(_lr, tuple(resize_shape[-2:]),
interpolation=cv2.INTER_AREA) for _lr in raw_im])
return _reshaped_im
@staticmethod
def reshape_masks(masks,
resize_shape,
):
"""Reshape raw image into smaller image to fit-in GPU"""
_reshaped_masks = np.array([cv2.resize(_lr, tuple(resize_shape[-2:]),
interpolation=cv2.INTER_NEAREST) for _lr in masks])
return _reshaped_masks
@staticmethod
def run_segmentation(small_dapi_im,
small_polyt_im=None,
model_type='nuclei',
use_gpu=True,
cellpose_kwargs={},
verbose=True,
):
# check inputs
_start_time = time.time()
if small_polyt_im is None:
small_polyt_im = np.zeros(np.shape(small_dapi_im), dtype=small_dapi_im.dtype)
# create model
seg_model = models.Cellpose(gpu=use_gpu, model_type=model_type)
# parameters
_cellpose_kwargs = {_k:_v for _k,_v in default_cellpose_kwargs.items()}
_cellpose_kwargs.update(cellpose_kwargs)
# run segmentation
masks, _, _, _ = seg_model.eval(
|
np.stack([small_polyt_im, small_dapi_im], axis=3)
|
numpy.stack
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from utils import *
class GraphMaker:
foreground = 1
background = 0
seeds = 0
segmented = 1
def __init__(self):
self.image = None
self.graph = None
self.overlay = None
self.seed_overlay = None
self.segment_overlay = None
self.mask = None
self.load_image('images/cup1.jpg')
self.background_seeds = []
self.foreground_seeds = []
self.background_average = np.array(3)
self.foreground_average = np.array(3)
self.nodes = []
self.edges = []
self.current_overlay = self.seeds
def load_image(self, filename):
self.image = cv2.imread(filename)
self.height, self.width, _ = np.shape(self.image)
print(self.height, self.width)
self.graph = np.zeros_like(self.image)
self.seed_overlay = np.zeros_like(self.image)
self.segment_overlay = np.zeros_like(self.image)
self.mask = None
def add_seed(self, x, y, type):
if self.image is None:
print('Please load an image before adding seeds.')
if type == self.background:
if not self.background_seeds.__contains__((x, y)):
self.background_seeds.append((x, y))
# draw a small rectangle marker at the seed position on the overlay
cv2.rectangle(self.seed_overlay, (x-1, y-1), (x+1, y+1), (0, 0, 255), -1)
elif type == self.foreground:
if not self.foreground_seeds.__contains__((x, y)):
self.foreground_seeds.append((x, y))
cv2.rectangle(self.seed_overlay, (x-1, y-1), (x+1, y+1), (0, 255, 0), -1)
def clear_seeds(self):
self.background_seeds = []
self.foreground_seeds = []
self.seed_overlay = np.zeros_like(self.seed_overlay)
def get_overlay(self):
if self.current_overlay == self.seeds:
return self.seed_overlay
else:
return self.segment_overlay
def get_image_with_overlay(self, overlayNumber):
if overlayNumber == self.seeds:
return cv2.addWeighted(self.image, 0.9, self.seed_overlay, 0.4, 0.1)
else:
return cv2.addWeighted(self.image, 0.9, self.segment_overlay, 0.9, 0.1)
def create_graph(self):
if len(self.background_seeds) == 0 or len(self.foreground_seeds) == 0:
print("Please enter at least one foreground and background seed.")
return
print("Making graph")
print("Finding foreground and background averages")
self.find_averages()
print("Cutting graph")
self.cut_graph()
def find_averages(self):
self.background_average = np.zeros(3)
self.foreground_average = np.zeros(3)
for coordinate in self.background_seeds:
print(self.image[coordinate[1], coordinate[0]])
self.background_average += self.image[coordinate[1], coordinate[0]]
self.background_average /= len(self.background_seeds)
for coordinate in self.foreground_seeds:
self.foreground_average += self.image[coordinate[1], coordinate[0]]
self.foreground_average /= len(self.foreground_seeds)
def cut_graph(self):
self.foreground_plane = np.zeros((self.height, self.width, 3))
self.background_plane = np.zeros((self.height, self.width, 3))
self.foreground_plane[:, :, 0] = self.foreground_average[0]
self.foreground_plane[:, :, 1] = self.foreground_average[1]
self.foreground_plane[:, :, 2] = self.foreground_average[2]
self.background_plane[:, :, 0] = self.background_average[0]
self.background_plane[:, :, 1] = self.background_average[1]
self.background_plane[:, :, 2] = self.background_average[2]
d1 = self.image - self.foreground_plane
f1 = np.sqrt(d1[:, :, 0] ** 2 + d1[:, :, 1] ** 2 + d1[:, :, 2] ** 2)
d2 = self.image - self.background_plane
f2 = np.sqrt(d2[:, :, 0] ** 2 + d2[:, :, 1] ** 2 + d2[:, :, 2] ** 2)
f = f1 - f2
tau = 0.1
sigma = 0.05
mu = 1
imgray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
u = primal_dual(imgray, sigma, tau, mu, f, iters=500)
u[u > 0.5] = 1
u[u <= 0.5] = 0
for coordinate in self.background_seeds:
u[coordinate[1], coordinate[0]] = 0
for coordinate in self.foreground_seeds:
u[coordinate[1], coordinate[0]] = 1
u = np.array(u, dtype=np.int32)
self.segment_overlay = np.zeros_like(self.segment_overlay)
self.mask = np.zeros_like(self.image, dtype=bool)
indices =
|
np.where(u == 1)
|
numpy.where
|
"""
routines for automatic calibration.
- peakdet (useful to find maxima in an array without taking derivative)
- get_most_prominent_peaks (find by looking for spikes in spectrum derivative)
- match_peaks (identify peaks based on ratios between known gamma energies)
- calibrate_tl208 (main routine -- fits multiple peaks w/ Radford peak shape)
- get_calibration_energies (a good place to put pk energies)
"""
import sys
import numpy as np
from pygama.analysis.peak_fitting import *
from pygama.analysis.histograms import get_bin_centers, get_gaussian_guess
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
from scipy.signal import argrelextrema, medfilt, find_peaks_cwt
from scipy.ndimage.filters import gaussian_filter1d
from scipy.stats import norm
import scipy.optimize as op
def peakdet(v, delta, x):
"""
Converted from MATLAB script at: http://billauer.co.il/peakdet.html
Returns two arrays: [maxtab, mintab] = peakdet(v, delta, x)
An updated (vectorized) version is in pygama.dsp.transforms.peakdet
"""
maxtab, mintab = [], []
# sanity checks
x, v = np.asarray(x), np.asarray(v)
if len(v) != len(x): exit("Input vectors v and x must have same length")
if not np.isscalar(delta): exit("Input argument delta must be a scalar")
if delta <= 0: exit("Input argument delta must be positive")
maxes, mins = [], []
min, max = np.inf, -np.inf
find_max = True
for i in range(len(x)):
# for i=0, all 4 of these get set
if v[i] > max:
max, imax = v[i], x[i]
if v[i] < min:
min, imin = v[i], x[i]
if find_max:
# if the sample is less than the current max,
# declare the previous one a maximum, then set this as the new "min"
if v[i] < max - delta:
maxes.append((imax, max))
min, imin = v[i], x[i]
find_max = False
else:
# if the sample is more than the current min,
# declare the previous one a minimum, then set this as the new "max"
if v[i] > min + delta:
mins.append((imin, min))
max, imax = v[i], x[i]
find_max = True
return np.array(maxes), np.array(mins)
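# A hedged usage sketch of peakdet (synthetic signal, not from the original module):
#   t = np.linspace(0, 4 * np.pi, 400)
#   maxes, mins = peakdet(np.sin(t), delta=0.5, x=t)
#   # maxes holds (x, value) pairs near t = pi/2 and 5*pi/2; mins near 3*pi/2 and 7*pi/2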
def get_most_prominent_peaks(energySeries, xlo, xhi, xpb,
max_num_peaks=np.inf, test=False):
"""
find the most prominent peaks in a spectrum by looking for spikes in derivative of spectrum
energySeries: array of measured energies
max_num_peaks = maximum number of most prominent peaks to find
return a histogram around the most prominent peak in a spectrum of a given percentage of width
"""
nb = int((xhi-xlo)/xpb)
hist, bin_edges = np.histogram(energySeries, range=(xlo, xhi), bins=nb)
bin_centers = get_bin_centers(bin_edges)
# median filter along the spectrum, do this as a "baseline subtraction"
hist_med = medfilt(hist, 21)
hist = hist - hist_med
# identify peaks with a scipy function (could be improved ...)
peak_idxs = find_peaks_cwt(hist, np.arange(1, 10, 0.1), min_snr=5) #changed range from (5,10,0.1)
peak_idxs = peak_idxs.astype('int32') # was having trouble with int64 dtype
peak_energies = bin_centers[peak_idxs]
# pick the num_peaks most prominent peaks
if max_num_peaks < len(peak_energies):
peak_vals = hist[peak_idxs]
sort_idxs = np.argsort(peak_vals)
peak_idxs_max = peak_idxs[sort_idxs[-max_num_peaks:]]
peak_energies = np.sort(bin_centers[peak_idxs_max])
if test:
plt.plot(bin_centers, hist, ds='steps', lw=1, c='b')
for e in peak_energies:
plt.axvline(e, color="r", lw=1, alpha=0.6)
plt.xlabel("Energy [ADC]", ha='right', x=1)
plt.ylabel("Filtered Spectrum", ha='right', y=1)
plt.tight_layout()
plt.show()
#exit()
return peak_energies
def match_peaks(data_pks, cal_pks, plotFigure=None):
"""
Match uncalibrated peaks with literature energy values.
"""
from itertools import combinations
from scipy.stats import linregress
n_pks = len(cal_pks) if len(cal_pks) < len(data_pks) else len(data_pks)
cal_sets = combinations(range(len(cal_pks)), n_pks)
data_sets = list(combinations(range(len(data_pks)), n_pks)) # materialise so the inner loop can be re-run for every cal_set
best_err, best_m, best_b = np.inf, None, None
for i,cal_set in enumerate(cal_sets):
cal = cal_pks[list(cal_set)] # lit energies for this set
for data_set in data_sets:
data = data_pks[list(data_set)] # uncal energies for this set
m, b, _, _, _ = linregress(data, y=cal)
err =
|
np.sum((cal - (m * data + b))**2)
|
numpy.sum
|
from __future__ import division
from scipy import signal, stats, ndimage
from datetime import datetime
import os
import re
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from matplotlib.patches import Rectangle
import matplotlib.transforms as transforms
from matplotlib.collections import LineCollection
from mpl_toolkits.axes_grid1 import make_axes_locatable
import warnings
from ephysiopy.dacq2py import axonaIO
from ephysiopy.dacq2py.tetrode_dict import TetrodeDict
from ephysiopy.common import binning
from ephysiopy.common.ephys_generic import FieldCalcs
from ephysiopy.dacq2py.spikecalcs import SpikeCalcs
from ephysiopy.common.eegcalcs import EEGCalcs
from ephysiopy.dacq2py.cluster import Kluster
from ephysiopy.dacq2py import tintcolours as tcols
from ephysiopy.common.gridcell import SAC
from itertools import combinations
from mpl_toolkits.axes_grid1 import ImageGrid
import skimage, skimage.morphology
from skimage import feature
from collections import OrderedDict
warnings.filterwarnings("ignore",
message="divide by zero encountered in int_scalars")
warnings.filterwarnings("ignore",
message="divide by zero encountered in divide")
warnings.filterwarnings("ignore",
message="invalid value encountered in divide")
warnings.filterwarnings("ignore",
message="Casting complex values to real discards the imaginary part")
class Trial(axonaIO.IO, SAC, dict):
"""
Provides methods to plot electrophysiology data acquired using the Axona DACQ recording system
and methods to extract some measures from that data
The actual loading of the data is done lazily i.e. only when you ask for
position data (say plotting the path the animal took in the trial) is the
position data actually loaded. The class also uses as attributes several
instances of subpackages (binning.Ratemap for example) so that the code
could be made more modular.
Parameters
----------
filename_root : str
Absolute location on the filesystem of the set of files without a suffix
Attributes
----------
filename_root : str
Absolute location on the filesystem of the set of files without a suffix
basename : str
Basename of the set of files without a suffix (everything after the last trailing slash)
EEG : dacq2py.axonaIO.EEG
Containing data from .eeg file
EGF : dacq2py.axonaIO.EEG
Containing data from .egf file
STM : dacq2py.axonaIO.Stim
Contains stimulation data (timestamps mostly) and header, plus some additional processing done below
POS : dacq2py.axonaIO.Pos
Contains raw and post-processed position data (xy, dir, speed etc) & header
TETRODE : extension of Pythons dict
Each value is an instance of dacq2py.axonaIO.Tetrode. Contains methods to get cluster spike times, cluster indices etc
posFilter : dict
Keys are things like 'speed', 'time'; values are n x 2 arrays of range of values *to keep*
setheader : dict
Corresponds to the .set file for the file set. Keys/ values are all strings
_available_files : list
All files matching the filename_root + any valid suffix
metadata : collections.OrderedDict
Some basic info if the file is an "rh" one (see _parseMetaData)
ratemap : dacq2py.ephys_generic.binning.Ratemap class instance
See Also
--------
ephysiopy.common.binning : Basic binning of data, calculation of bin sizes etc
ephysiopy.common.eegcalcs : Contains filters, eeg power spectra methods
ephysiopy.common.spikecalcs : Temporal measures of spike trains and extracting parameters from waveforms and clusters
ephysiopy.common.fieldcalcs : Methods for extracting information from 2D ratemaps
Examples
--------
>>> from dacq2py.dacq2py_util import Trial
>>> T = Trial(r'/media/robin/data/Dropbox/Science/Recordings/M851/M851_140908t1rh')
"""
def __init__(self, filename_root, **kwargs):
# try and intelligently get full filename from just the root
filename_root = self.getFullFile(filename_root)
self.basename = os.path.basename(filename_root)
self.filename_root = filename_root
self._EEG = None
self._EGF = None
self._STM = None
self._POS = None
if 'volts' in kwargs:
useVolts = kwargs['volts']
self.TETRODE = TetrodeDict(filename_root, volts=useVolts) # see TETRODE class above
else:
self.TETRODE = TetrodeDict(filename_root)
self._posFilter = None # a dict used to filter pos
self._setheader = None
self.ratemap = None #becomes binning.RateMap instance - see POS getter property below
self.spikecalcs = SpikeCalcs()
self.fieldcalcs = FieldCalcs()
self._isinteractive = 1
self._figNum = 1
self._min_spks = 1
self._available_files = None
self._getAvailableFiles()
self.metadata = OrderedDict()
self.tetrodes = None
self.clusters = None
self.pos_weights = None
if 'cm' in kwargs:
self.useCm = kwargs['cm']
else:
self.useCm = False
try:
self._parseMetaData()
except:
self.metadata = {'Contents': 'Not an rhayman file'}
try:
self.getTsAndCs()
except:
pass
self.eeg_file = 1
def __repr__(self):
return '{self.__class__.__name__}({self.filename_root})'.format(self=self)
def hasFiles(self):
"""
Checks for some automated yaml processing
"""
for i in self.axona_files:
if os.path.isfile(self.filename_root + i):
self['has_' + i[1:]] = True
else:
self['has_' + i[1:]] = False
def getFullFile(self, filename):
"""
Used to construct filename_root in __init__
Parameters
----------
filename : str
The absolute path the files being analysed here without any suffix
"""
if os.path.isdir(r'/home/robin/Dropbox/Science/Recordings'):
pname, _ = os.path.split(filename)
if len(pname) == 0:
defaultDir = r'/home/robin/Dropbox/Science/Recordings'
animal = filename.split('_')[0]
filename = os.path.join(defaultDir, animal, filename)
return filename
@property
def setheader(self):
"""
Returns
----------
dict : setheader
Matches contents of .set file with keys and values all mapped as strings
"""
if self._setheader is None:
try:
self._setheader = self.getHeader(self.filename_root + '.set')
except IOError:
self._setheader = None
return self._setheader
@setheader.setter
def setheader(self, value):
self._setheader = value
@property
def ppm(self):
return self.__ppm
@ppm.setter
def ppm(self, value):
self.__ppm = value
# Update POS
self.POS.ppm = value
# Update Ratemap
self.ratemap = binning.RateMap(self.POS.xy, self.POS.dir, self.POS.speed, self.pos_weights, self.POS.ppm, self.useCm)
@property
def POS(self):
"""
Returns
-------
ephysiopy.dacq2py.axonaIO.POS:
Contains raw and post-processed position data
"""
if self._POS is None:
try:
self._POS = axonaIO.Pos(self.filename_root, cm=self.useCm)
self._POS.postprocesspos()
self._xlims = (int(self.POS.xy[0,:].min()),
int(self.POS.xy[0,:].max()))
self._ylims = (int(self.POS.xy[1,:].min()),
int(self.POS.xy[1,:].max()))
self.pos_weights = np.ravel(np.ones((1, self.POS.npos), dtype=np.float) / self.POS.pos_sample_rate)
self.ratemap = binning.RateMap(self.POS.xy, self.POS.dir, self.POS.speed, self.pos_weights, self.POS.ppm, self.useCm)
except IOError:
self._POS = None
return self._POS
@POS.setter
def POS(self, value):
self._POS = value
@property
def EEG(self):
"""
Returns
-------
ephysiopy.dacq2py.axonaIO.EEG:
eeg data and header
"""
if self._EEG is None:
try:
self._EEG = axonaIO.EEG(self.filename_root, eeg_file=self.eeg_file)
self.pos2eegScale = int(self.EEG.sample_rate /
self.POS.pos_sample_rate)
except IOError:
self._EEG = None
return self._EEG
@EEG.setter
def EEG(self, value):
self._EEG = value
@property
def EGF(self):
"""
Returns
-------
ephysiopy.dacq2py.axonaIO.EGF:
eeg data and header from .egf file
"""
if self._EGF is None:
try:
self._EGF = axonaIO.EEG(self.filename_root, eeg_file=self.eeg_file, egf=1)
self.pos2egfScale = int(self.EGF.sample_rate /
self.POS.pos_sample_rate)
except IOError:
self._EGF = None
return self._EGF
@EGF.setter
def EGF(self, value):
self._EGF = value
@property
def STM(self):
"""
Returns
-------
ephysiopy.dacq2py.axonaIO.Stim:
Stimulation data and header + some extras parsed from pos, eeg and set files
"""
if self._STM is None:
try:
self._STM = axonaIO.Stim(self.filename_root)
"""
update the STM dict with some relevant values from the .set file and the headers
of the eeg and pos files
"""
posHdr = self.getHeader(self.filename_root + '.pos')
eegHdr = self.getHeader(self.filename_root + '.eeg')
self._STM['posSampRate'] = self.getHeaderVal(posHdr, 'sample_rate')
self._STM['eegSampRate'] = self.getHeaderVal(eegHdr, 'sample_rate')
try:
egfHdr = self.getHeader(self.filename_root + '.egf')
self._STM['egfSampRate'] = self.getHeaderVal(egfHdr, 'sample_rate')
except:
pass
stim_pwidth = int(self.setheader['stim_pwidth']) / int(1000) # get into ms
self._STM['off'] = self._STM['on'] + int(stim_pwidth)
"""
There are a set of key / value pairs in the set file that
correspond to the patterns/ protocols specified in the
Stimulator menu in DACQ. Extract those items now...
There are five possible "patterns" that can be used in a trial. Those patterns
consist of either "Pause (no stimulation)" or some user-defined stimulation pattern.
Whether or not one of the five was used is specified in "stim_patternmask_n" where n
is 1-5. Confusingly in dacqUSB these 5 things are called "Protocols" accessed from
the menu Stimulator/Protocols... within that window they are actually called "Phase 1",
"Phase 2" etc. To keep everything in order it's best to iterate through using a for loop
as a dict is not guaranteed to be ordered and I cba to use an OrderedDict.
In dacqUSB nomenclature the pattern is actually the stimulation you
want to apply i.e. 10ms pulse every 150ms or whatever. The "pattern" is what is applied
within every Phase.
"""
# phase_info : a dict for each phase that is active
phase_info = {'startTime': None, 'duration': None, 'name': None, 'pulseWidth': None, 'pulsePause': None}
stim_dict = {}
stim_patt_dict = {}
for k,v in self.setheader.items():
if k.startswith("stim_patternmask_"):
if (int(v) == 1):
# get the number of the phase
phase_num = k[-1]
stim_dict['Phase_' + phase_num] = phase_info.copy()
if k.startswith("stim_patt_"):
stim_patt_dict[k] = v
self.patt_dict = stim_patt_dict
for k,v in stim_dict.items():
phase_num = k[-1]
stim_dict[k]['duration'] = int(self.setheader['stim_patterntimes_' + phase_num])
phase_name = self.setheader['stim_patternnames_' + phase_num]
stim_dict[k]['name'] = phase_name
if not (phase_name.startswith("Pause")):
# find the matching string in the stim_patt_dict
for kk,vv in stim_patt_dict.items():
split_str = vv.split('"')
patt_name = split_str[1]
if (patt_name == phase_name):
ss = split_str[2].split()
stim_dict[k]['pulseWidth'] = int(ss[0])
stim_dict[k]['pulsePause'] = int(ss[2])
# make the dict ordered by Phase number
self.STM['stim_params'] = OrderedDict(sorted(stim_dict.items()))
except IOError:
self._STM = None
return self._STM
@STM.setter
def STM(self, value):
self._STM = value
@property
def posFilter(self):
"""
self.posFilter : dict
Keys are strings such as 'speed', 'time' etc. Values are n x 2 arrays of values *to keep*
"""
return self._posFilter
@posFilter.setter
def posFilter(self, value):
"""Filters data depending on the filter specified in the dictionary value
Parameters
----------
value : dict
Filter dict. Legal keys include: 'time', 'dir', 'speed', 'xrange',
'yrange'. If key is 'time', values must be a n x 2 numpy array that
specifies the times to keep in SECONDS. If key is 'dir' values must
be a two element list/ array that specifies the directions to keep
in DEGREES NB the values can be singular strings of either 'w',
'e', 'n' or 's' which filters for a +/-45 degree range around that
cardinal direction. If key is 'speed' values are a 2 element list/
array to keep specified in m/s. If key is 'xrange' or 'yrange'
values are a two element list/ array that specify the x or y values
to keep in PIXELS.
Returns
-------
dacq2py_util.Trial : object
The Trial object is modified in place and all the relevant
variables are filtered and changed to numpy masked arrays
Examples
--------
>>> import numpy as np
>>> T = dacq2py_util.Trial(r'D:\M851\M851_140908t1rh')
>>> T.posFilter = {'time': np.array([600,1200])}
"""
# If masked, remove all masks on all aspects of data
if np.ma.is_masked(self.POS.speed):
self.POS.speed.mask = np.ma.nomask
if np.ma.is_masked(self.POS.dir):
self.POS.dir.mask = np.ma.nomask
if np.ma.is_masked(self.POS.xy):
self.POS.xy.mask = np.ma.nomask
if np.ma.is_masked(self.EEG.eeg):
self.EEG.eeg.mask = np.ma.nomask
if np.ma.is_masked(self.EGF.eeg):
self.EGF.eeg.mask = np.ma.nomask
if np.any(self.EEG.EEGphase):
if np.ma.is_masked(self.EEG.EEGphase):
self.EEG.EEGphase.mask = np.ma.nomask
if self.TETRODE:#true if TETRODE dict has entries
for tet in self.TETRODE.keys():
if np.ma.is_masked(self.TETRODE[tet].waveforms):
self.TETRODE[tet].waveforms.mask = np.ma.nomask
self.TETRODE[tet].spk_ts.mask = np.ma.nomask
if value is None:
return
idx = self.POS.filterPos(value)
if self.TETRODE:
for tet in self.TETRODE.keys():
posSamps = self.TETRODE[tet].getPosSamples()
common = np.in1d(posSamps, np.nonzero(idx)[1])
# Mask timestamps first as this is a vector, then expand
# out the mask array (common)
self.TETRODE[tet].spk_ts = np.ma.masked_where(common, self.TETRODE[tet].spk_ts)
common = common[:, None, None]
common = np.repeat(np.repeat(common, 4, axis=1), 50, axis=-1)
self.TETRODE[tet].waveforms = np.ma.masked_where(common, self.TETRODE[tet].waveforms)
self.POS.speed = np.squeeze(np.ma.masked_where(idx, np.expand_dims(self.POS.speed,0)))
self.POS.dir = np.squeeze(np.ma.masked_where(idx, np.expand_dims(self.POS.dir,0)))
posMask = np.squeeze(idx)
posMask = np.vstack((posMask, posMask))
self.POS.xy = np.ma.masked_where(posMask, self.POS.xy)
self.EEG.eeg = np.ma.masked_where(np.repeat(np.squeeze(idx), self.pos2eegScale), self.EEG.eeg)
if self.EGF:
self.EGF.eeg = np.ma.masked_where(np.repeat(np.squeeze(idx), self.pos2egfScale), self.EGF.eeg)
if np.any(self.EEG.EEGphase):
self.EEG.EEGphase = np.ma.masked_where(np.repeat(np.squeeze(idx), self.pos2eegScale), self.EEG.EEGphase)
self._posFilter = value
def print_stim_dict(self):
"""
Prints out keys/ values of STM dict
"""
for k,v in self.STM.items():
print(k, v)
def _filterForStm(self, laser=None):
"""
Cycles through the STM dict and filters for laser on / off periods and
applies the filter to the pos and eeg data. NB tetrode data is not dealt with
yet
Parameters
----------
laser : bool
Whether to filter for laser stimulation events
"""
if laser is not None:
times = [0]
phaseType = []
for k, d in self.STM['stim_params'].items():
for kk, v in d.items():
if 'duration' in kk:
times.append(v)
if 'name' in kk:
phaseType.append(v)
periods = np.cumsum(times)
period_bounds = dict.fromkeys(set(phaseType), [])
for pk in period_bounds.keys():
bounds = []
for k, d in self.STM['stim_params'].items():
if pk == d['name']:
idx = int(k.split('_')[1])
bounds.append(periods[idx-1:idx+1])
period_bounds[pk] = bounds
for k, v in period_bounds.items():
if laser == 0:
if 'Pause' in k:
self.posFilter = {'time': np.array(v)}
elif laser == 1:
if 'Pause' not in k:
self.posFilter = {'time': np.array(v)}
def _getAvailableFiles(self):
self._available_files = glob(self.filename_root + '*')
def _getMap(self, tetrode=None, cluster=None, var2bin='pos', binsize=3,
smooth_sz=5, smooth=True, **kwargs):
"""
Returns the ratemap (smoothed or unsmoothed) for a given tetrode and
cluster
Parameters
----------
tetrode : int
the tetrode you want to look at
cluster : int, 1xn array/ list
a single number or list (or 1xn array) of the clusters to plot
binsize : int, optional
size of bins. Defaults to 3
smooth_sz : int
the width of the smoothing kernel (see **kwargs for more)
var2bin : str
(Optional) Defaults to 'pos'. Which variable to bin. Can be either
'pos', 'dir' or 'speed'. Works with masked arrays
smooth : bool, optional.
Defaults to true. Whether to smooth the data or not
**kwargs : extra arguments include:
'gaussian' - the smoothing kernel used is gaussian in shape
not the default boxcar
'after' - smoothing of the pos and spike maps is done after
spikes are divided by pos
'shuffle' - the time in ms by how much to shift the spikes
by. Used for generated distributions for null hypothesis
testing
Returns
-------
rmap : np.array
The data binned up as requested
"""
if 'pos' in var2bin:
varType = 'xy'
else:
varType = var2bin
if tetrode is None:
idx = np.arange(0, self.POS.npos)
mapType = 'pos'
else:
idx = self.TETRODE[tetrode].getClustIdx(cluster)
mapType = 'rate'
spk_weights = np.bincount(idx, minlength=self.POS.npos)
if 'shuffle' in kwargs.keys():
spk_weights = np.roll(spk_weights, int(kwargs['shuffle']) * 50) # * 50 to go from seconds into pos_samples
if np.ma.is_masked(self.POS.xy):
mask = ~np.ma.getmask(self.POS.xy[0])
pos_weights = mask.astype(np.int)
self.ratemap.pos_weights = pos_weights
spk_weights[~mask] = 0
# Update the ratemap instance with arguments fed into this method
self.ratemap.binsize = binsize
self.ratemap.smooth_sz = smooth_sz
if 'cmsPerBin' in kwargs:
self.ratemap.cmsPerBin = kwargs['cmsPerBin']
if 'ppm' in kwargs:
self.ratemap.ppm = kwargs['ppm']
rmap = self.ratemap.getMap(spk_weights, varType, mapType, smooth)
return rmap
def _getPath(self):
"""
Returns
-------
self.POS.xy : np.array
The smoothed xy positions filtered appropriately
"""
if np.ma.is_masked(self.POS.xy):
return self.POS.xy[:, ~self.POS.xy.mask[0, :]]
return self.POS.xy
def _getDir(self):
"""
Returns
------------
self.POS.dir : np.array
The smoothed directional data filtered appropriately
"""
if np.ma.is_masked(self.POS.dir):
return self.POS.dir[:, ~self.POS.dir.mask[0, :]]
return self.POS.dir
def _getFieldLims(self, tetrode, cluster, binsize=3):
"""
Returns a labelled matrix of the ratemap for a given cluster on a given
tetrode. Binsize can be fractional for smaller bins. Uses anything greater
than 20% of the peak rate to select the field. Data is heavily smoothed
Parameters
----------
tetrode : int
The tetrode to examine
cluster : int
The cluster identity
Returns
-------
labelled ratemap and the x and y edges of the binned data as a 3-tuple
"""
rmap, (ye, xe) = self._getMap(tetrode, cluster, binsize=binsize)
rmap[np.isnan(rmap)] = 0.0
h = int(np.max(rmap.shape) / 2)
sm_rmap = self.ratemap.blurImage(rmap, h, ftype='gaussian')
thresh = np.max(sm_rmap.ravel()) * 0.2 # select area > 20% of peak
# do some image processing magic to get region to keep as field
distance = ndimage.distance_transform_edt(sm_rmap > thresh)
mask = feature.peak_local_max(distance, indices=False,
exclude_border=False,
labels=sm_rmap > thresh)
label = ndimage.label(mask)[0]
w = skimage.morphology.watershed(-distance, label,
mask=sm_rmap > thresh)
label = ndimage.label(w)[0]
return label, xe, ye
def _getClusterPhaseVals(self, tetrode, cluster):
"""
Returns the phases of the LFP theta a given cluster fired at
Parameters
-----------
tetrode : int
The tetrode to examine
cluster : int
The cluster identity
Returns
-------
EEGphase : np.array
The phase of theta a cluster fired at
"""
ts = self.TETRODE[tetrode].getSpkTS()
ts = ts / (self.TETRODE[tetrode].timebase / self.EEG.sample_rate)
ts_idx = np.floor(ts[self.TETRODE[tetrode].cut == cluster]).astype(np.int)
self.EEG.thetaAmpPhase()
EEGphase = self.EEG.EEGphase[ts_idx]
return EEGphase
def _getThetaCycles(self):
"""
Return a tuple of indices into the EEG record that denotes the peaks
and troughs of theta cycles
"""
sm_eeg = self.EEG.eegfilter()
df_eeg = np.diff(sm_eeg)
pts = np.diff((df_eeg > 0).astype(int), 2)
pts = ((pts == 1).nonzero()[0]).astype(int)
peaks = pts[sm_eeg[pts] > 0] + 1
troughs = pts[sm_eeg[pts] < 0] + 2
return peaks, troughs
def _getSpikeInCycle(self, peakIdx, spkIdx, whichSpk='first'):
"""
given an array of spike indices into eeg and indices of peaks in the
smoothed, theta-filtered eeg signal this returns the first spike in the
cycle
whichSpk can be 'first' or 'last'
"""
if 'first' in whichSpk:
side = 'left'
elif 'last' in whichSpk:
side = 'right'
peaks, _ = self._getThetaCycles()
spk2eeg_idx = (spkIdx / (self.TETRODE[self.tetrode].timebase /
self.EEG.sample_rate)).astype(np.int)
idx = np.searchsorted(peaks, spk2eeg_idx, side=side)
_, unique_indices = np.unique(idx, return_index=True)
return spk2eeg_idx[unique_indices]
def _parseMetaData(self):
"""
Parses the filename (mine has a standard format) to populate some of
the objects properties (self.animal_id, self.trial_num etc)
"""
pname, fname = os.path.split(self.filename_root)
self.metadata['Filename'] = fname
self.metadata['Path'] = pname
if 'R' in fname[0]:
self.metadata['Animal'] = 'Rat'
else:
self.metadata['Animal'] = 'Mouse'
self.metadata['Experimenter'] = fname[-2:]
self.metadata['Animal_id'] = fname.rsplit('_')[0]
trial_date = self.setheader['trial_date'] + ':' + self.setheader['trial_time']
self.metadata['Trial_date'] = datetime.strptime(trial_date,
'%A, %d %b %Y:%H:%M:%S')
self.metadata['Trial_num'] = int(fname.rsplit('t')[1][0:-2])
def _set_figure_title(self, fig, tet, clust):
fig.canvas.set_window_title('Tetrode: {0} Cluster: {1}'.format(tet, clust))
def _set_ax_title(self, ax, tet, clust):
ax.set_title('Tetrode: {0}\nCluster: {1}'.format(tet, clust))
def klustakwik(self, d):
"""
Calls two methods below (kluster and getPC) to run klustakwik on
a given tetrode with nFet number of features (for the PCA)
Parameters
----------
d : dict
Specifies the vector of features to be used in clustering.
Each key is the identity of a tetrode (i.e. 1, 2 etc)
and the values are the features used to do the clustering for that tetrode (i.e.
'PC1', 'PC2', 'Amp' (amplitude) etc
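Examples
--------
A minimal sketch with a hypothetical feature dictionary (the tetrode
numbers and feature choices are assumptions; T is a Trial instance):
>>> T.klustakwik({1: ['PC1', 'PC2', 'Amp'], 2: ['PC1', 'PC2', 'PC3']})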
"""
legal_values = ['PC1', 'PC2', 'PC3', 'PC4', 'Amp',
'Vt', 'P', 'T', 'tP', 'tT', 'En', 'Ar']
reg = re.compile(".*(PC).*") # check for number of principal comps
# check for any input errors in whole dictionary first
for i_tetrode in d.keys():
for v in d[i_tetrode]:
if v not in legal_values:
raise ValueError('Could not find %s in %s' % (v, legal_values))
# iterate through features and see what the max principal component is
for i_tetrode in d.keys():
pcs = [m.group(0) for l in d[i_tetrode] for m in [reg.search(l)] if m]
waves = self.TETRODE[i_tetrode].waveforms
princomp = None
if pcs:
max_pc = []
for pc in pcs:
max_pc.append(int(pc[2]))
num_pcs = np.max(max_pc) # get max number of prin comps
princomp = self.TETRODE[i_tetrode].getParam(waves,
param='PCA', fet=num_pcs)
# Rearrange the output from PCA calc to match the
# number of requested principal components
inds2keep = []
for m in max_pc:
inds2keep.append(np.arange((m-1)*4, (m)*4))
inds2keep = np.hstack(inds2keep)
princomp = np.take(princomp, inds2keep, axis=1)
out = []
for value in d[i_tetrode]:
if 'PC' not in value:
out.append(self.TETRODE[i_tetrode].getParam(waves, param=value))
if princomp is not None:
out.append(princomp)
out = np.hstack(out)
c = Kluster(self.filename_root, i_tetrode, out)
c.make_fet()
mask = c.get_mask()
c.make_fmask(mask)
c.kluster()
def getcoherence(self, tetrode, cluster, binsize=3, **kwargs):
"""
Wrapper for fieldcalcs.coherence - see docs there
"""
smthd = self._getMap(tetrode=tetrode, cluster=cluster, var2bin='pos',
binsize=binsize, smooth_sz=5,
smooth=True, **kwargs)
unsmthd = self._getMap(tetrode=tetrode, cluster=cluster, var2bin='pos',
binsize=binsize, smooth_sz=5,
smooth=False, **kwargs)
return self.fieldcalcs.coherence(smthd[0], unsmthd[0])
def getkldiv(self, tetrode, cluster, binsize=3, **kwargs):
"""
Wrapper for fieldcalcs.kldiv - see there for explanation
"""
polarMap = self._getMap(tetrode=tetrode, cluster=cluster, var2bin='dir',
binsize=binsize, smooth_sz=5,
smooth=True, **kwargs)
return self.fieldcalcs.kldiv_dir(polarMap[0])
def getmrv(self, tetrode, cluster, **kwargs):
"""
Calculate the mean resultant vector length and direction for a given
cluster/ cell
A wrapper for statscalcs.Statscalcs.mean_resultant_vector (see
statscalcs.py)
Parameters
----------
tetrode : int
The tetrode to examine
cluster : int
The cluster to examine
Returns
----------
r : float
the mean resultant vector length (range = 0-1)
th : float
the mean resultant vector direction (in radians)
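Examples
--------
A minimal usage sketch (hypothetical tetrode/ cluster numbers; T is a
Trial instance as in the plotMap example):
>>> r, th = T.getmrv(1, 1)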
"""
idx = self.TETRODE[tetrode].getClustIdx(cluster)
angsInRads = np.deg2rad(self.POS.dir[idx])
from ephysiopy.common.statscalcs import StatsCalcs
S = StatsCalcs()
r, th = S.mean_resultant_vector(angsInRads)
return r, th
def getcircR(self, tetrode, cluster, **kwargs):
"""
Calculate the mean resultant vector length of circular data
Unlike getmrv (above) this only returns the vector length. This is
calculated differently (using complex numbers) but is a) faster, b)
works with binned data and, c) plays nicer/ easier with shuffles of
the spike train
Parameters
----------
tetrode : int
The tetrode to examine
cluster : int
The cluster to examine
**kwargs:
Legal values of interest:
shuffle: int
the number of seconds to shift the spike train
Returns
----------
r : float
the mean resultant vector length (range = 0-1)
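Examples
--------
A minimal usage sketch, including a hypothetical 30 second shuffle of
the spike train (tetrode/ cluster numbers are assumptions):
>>> R = T.getcircR(1, 1)
>>> R_shuffled = T.getcircR(1, 1, shuffle=30)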
"""
idx = self.TETRODE[tetrode].getClustIdx(cluster)
spk_weights = np.bincount(idx, minlength=self.POS.npos)
if 'shuffle' in kwargs.keys():
spk_weights = np.roll(spk_weights, int(kwargs['shuffle'] * 50))
inc = (np.pi*2) / 120.0
h = self.ratemap._RateMap__binData(np.deg2rad(self.POS.dir), np.arange(0, np.pi*2+inc, inc), spk_weights)
from statscalcs import StatsCalcs
S = StatsCalcs()
R = S.circ_r(h[1][0][0:-1], h[0])
return R
def getskaggsInfo(self, tetrode, cluster, binsize=3, **kwargs):
"""
Gets the Skagss information theory measure for the given cluster.
Parameters
----------
tetrode : int
The tetrode to examine
cluster : int
The cluster to examine
binsize : int
Size of bins in cms
Returns
-------
bits per spike : float
Notes
-----
binning could be over any single spatial variable (e.g. location, direction, speed).
See Also
--------
Wrapper for ephysiopy.common.fieldcalcs.skaggsInfo
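Examples
--------
A minimal usage sketch (hypothetical tetrode/ cluster numbers):
>>> bits_per_spike = T.getskaggsInfo(1, 1, binsize=3)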
"""
ratemap = self._getMap(tetrode, cluster, binsize=binsize, **kwargs)[0]
dwelltimes = self._getMap(binsize=binsize, **kwargs)[0]
ratemap, _, dwelltimes = self.ratemap._RateMap__adaptiveMap(ratemap, dwelltimes)
return self.fieldcalcs.skaggsInfo(ratemap, dwelltimes)
def getTsAndCs(self, verbose=False):
"""
Prints out the available tetrodes and clusters
"""
cut_files = [(f) for f in glob(self.filename_root + '*') if 'cut' in f]
m = re.compile('(.*)_(.*).cut', re.M|re.I)
tAndCdict = {}
if cut_files:
for f in cut_files:
tet = int(m.match(f).group(2))
try:
data = self.getCut(tet)
clusters = list(np.unique(data))
if clusters[0]==0:
clusters.pop(0)
if clusters:
tAndCdict[tet] = clusters
if verbose:
print('\nTetrode {0} contains clusters: {1}'.format(tet, clusters))
except:
if verbose:
print('\nTetrode{0} has no cut'.format(tet))
else:
pass
if tAndCdict:
tets = []
clusts = []
for t,c in tAndCdict.items():
for cc in c:
tets.append(str(t))
clusts.append(str(cc))
"""
The two fucking stupid lines below are so yaml can
serialize the object correctly
"""
self.tetrodes = map(int,tets)
self.clusters = map(int,clusts)
return tAndCdict
def plotMap(self, tetrode, clusters, ax=None, var2bin='pos', *args, **kwargs):
"""
Plots a ratemap for a given tetrode and cluster
Parameters
----------
tetrode : int
The tetrode you want to look at
cluster : int or array_like
The cluster(s) to plot
ax : matplotlib.Axes, optional
Defaults to None. Which axis to add the plot to; if None then a new figure window is produced
Keyword arguments
-----------------
'bar' : boolean
For use with directional data to produce a polar histogram plot
'add_peak_rate' : boolean
Adds the peak rate (to 2 decimal places) to the figure
binsize : int, optional
size of bins. Defaults to 3.
smooth_sz : the width of the smoothing kernel (see keyword args for more)
var2bin: optional, defaults to 'pos'. Which variable to bin.
Can be either 'pos', 'dir' or 'speed'. Works with masked arrays
smooth : bool, optional
Defaults to True. Whether to smooth the data
Returns
-------
ratemap : numpy.ndarray
Dimensionality depends on if a directional (1d) or positional (2d) map was requested
See Also
--------
Wrapper for _plotMap() so multiple clusters can be plotted
Examples
--------
>>> T = dacq2py_util.Trial('M845_141003t1rh')
>>> # Plot the ratemap for cluster 1 on tetrode 1
>>> T.plotMap(1,1)
>>> # Add the peak rate to the figure window
>>> T.plotMap(1,1,add_peak_rate=True)
>>> # Plot the polar map for same cluster
>>> T.plotMap(1,1,var2bin='dir')
>>> # Plot the unsmoothed dwell map for the trial
>>> T.plotMap(None,None,smooth=False)
"""
for key in ('var2bin', 'ax', 'binsize','smooth_sz', 'smooth'):
if key in kwargs:
setattr(self, key, kwargs[key])
if isinstance(clusters, int):
setattr(self, 'clusters', [clusters])
elif isinstance(clusters, list):
setattr(self, 'clusters', clusters)
elif isinstance(clusters, str):
if 'all' in clusters:
tetDict = self.getTsAndCs()
setattr(self, 'clusters', tetDict[tetrode])
clusters = getattr(self, 'clusters', None)
# var2bin = getattr(self, 'var2bin', 'pos')
ax = getattr(self, 'ax', None)
binsize = getattr(self, 'binsize', 3)
smooth_sz = getattr(self.ratemap, 'smooth_sz', 5)
smooth = getattr(self, 'smooth', True)
if clusters is None or len(clusters) == 1:
ncols = 1
nrows = 1
elif np.logical_and(len(clusters) > 1, len(clusters) < 6):
ncols = len(clusters)
nrows = 1
else:
ncols = 5
nrows = int(np.floor(len(clusters) / 5) + 1)
if ax is None:
fig = plt.figure()
if 'dir' in var2bin:
ax = fig.add_subplot(nrows, ncols, 1, projection='polar')
else:
ax = fig.add_subplot(nrows, ncols, 1)
axes_out = []
if clusters is None:
axes = fig.add_subplot(1, 1, 1)
ax, ratemap = self._plotMap(None, None, var2bin=var2bin, ax=ax,
binsize=binsize, smooth_sz=smooth_sz, smooth=smooth, *args, **kwargs)
self._set_ax_title(axes, tetrode, clusters)
axes_out.append(ax)
elif len(clusters) == 1:
cluster = clusters[0]
ax, ratemap = self._plotMap(tetrode=tetrode, cluster=cluster, var2bin=var2bin, ax=ax,
binsize=binsize, smooth_sz=smooth_sz, smooth=smooth, *args, **kwargs)
axes = ax
# # check kwargs to see if we want to add peak rate to axes
if "add_peak_rate" in kwargs:
if kwargs['add_peak_rate']:
ax.annotate('{:.2f}'.format(np.max(ratemap)), (0.9,0.15), \
xycoords='figure fraction', textcoords='figure fraction', color='k', size=30, weight='bold', ha='center', va='center')
self._set_ax_title(axes, tetrode, cluster)
axes_out.append(ax)
else:
fig.set_facecolor('w')
fig.set_frameon(False)
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
for iax, cluster in enumerate(clusters):
inax = fig.add_subplot(nrows, ncols, iax+1)
ax, ratemap = self._plotMap(tetrode=tetrode, cluster=cluster, var2bin=var2bin,
binsize=binsize, smooth_sz=smooth_sz, smooth=smooth,
ax=inax)
self._set_ax_title(inax, tetrode, cluster)
axes_out.append(ax)
return axes_out
def _plotMap(self, tetrode=None, cluster=None, ax=None, var2bin='pos',
binsize=3, smooth_sz=5, smooth=True, **kwargs):
"""
Plots a ratemap for a given tetrode and cluster
Parameters
----------
tetrode : int
the tetrode you want to look at
cluster : int, 1xn array/ list
a single number or list (or 1xn array) of the clusters to plot
binsize : int, optional
size of bins. Defaults to 3
smooth_sz : int
the width of the smoothing kernel (see **kwargs for more)
var2bin : optional, defaults to 'pos'. Which variable to bin.
Can be either 'pos', 'dir' or 'speed'. Works with masked arrays
smooth : bool
Defaults to true. Whether to smooth the data or not
ax : matplotlib.axes
Defaults to None. Which axis to add the plot to; if None
then a new figure window is produced
**kwargs : various
'bar' - for use with directional data to produce a polar
histogram plot
Returns
-------
ratemap: ndarray (1d or 2d)
depending on whether a directional (1d) or positional (2d) map was
asked for an ndarray is returned
"""
rmap = self._getMap(tetrode=tetrode, cluster=cluster, var2bin=var2bin,
binsize=binsize, smooth_sz=smooth_sz,
smooth=smooth, **kwargs)
if rmap[0].ndim == 1:
# polar plot
if ax is None:
fig = plt.figure()
self._set_figure_title(fig, tetrode, cluster)
ax = fig.add_subplot(111, projection='polar')
theta = np.deg2rad(rmap[1][0][1:])
ax.clear()
ax.plot(theta, rmap[0])
ax.set_aspect('equal')
ax.tick_params(axis='both', which='both', bottom='off', left='off', right='off', top='off', labelbottom='off', labelleft='off', labeltop='off', labelright='off')
ax.set_rticks([])
# deal with vmin/ vmax in kwargs
if 'vmax' in kwargs.keys():
ax.set_rmax(kwargs['vmax'])
# See if we should add the mean resultant vector (mrv)
if 'add_mrv' in kwargs.keys():
from statscalcs import StatsCalcs
S = StatsCalcs()
idx = self.TETRODE[tetrode].getClustIdx(cluster)
angles = self.POS.dir[idx]
print('len angles: {}'.format(len(angles)))
r, th = S.mean_resultant_vector(np.deg2rad(angles))
ax.hold(True)
print('r: {}\nth: {}'.format(r,th))
ax.plot([th, th],[0, r*np.max(rmap[0])],'r')
ax.set_thetagrids([0, 90, 180, 270])
ratemap = rmap[0]
elif rmap[0].ndim == 2:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
self._set_figure_title(fig, tetrode, cluster)
# mask the ratemap where NaNs occur for plotting purposes
ratemap = np.ma.MaskedArray(rmap[0], np.isnan(rmap[0]), copy=True)
x, y = np.meshgrid(rmap[1][1][0:-1], rmap[1][0][0:-1][::-1])
# deal with vmin/ vmax in kwargs
if 'vmax' in kwargs.keys():
vmax = kwargs['vmax']
else:
vmax = np.max(np.ravel(ratemap))
ax.pcolormesh(x, y, ratemap, cmap=cm.jet, edgecolors='face', vmax=vmax)
ax.axis([x.min(), x.max(), y.min(), y.max()])
ax.set_aspect('equal')
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
return ax, ratemap
def plotPath(self, ax=None, clamp=False, label=False, applyStm=False, **kwargs):
"""
Plots the animals path during a trial. Default is to limit plot range
to the min/ max of x/y extent of path
Parameters
----------
ax : matplotlib.Axes
The axes to plot into. If none a new figure window is created
clamp : bool
whether the axes are clamped to self._xlims and self._ylims or not
applyStm : bool
Whether to overlay red crosses on the path where the laser events occurred
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = plt.gcf()
fig.set_facecolor('w')
xy = self._getPath()
ax.plot(xy[0], xy[1], color=[0.8627, 0.8627, 0.8627],**kwargs)
ax.invert_yaxis()
if applyStm:
stmTS = self.STM.getPosTS()
stmXY = xy[:, stmTS.astype(int)]
ax.plot(stmXY[0], stmXY[1], 'rx', ms=2)
if clamp:
ax.set_xlim(self._xlims)
ax.set_ylim(self._ylims)
ax.set_aspect('equal')
if not label:
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
def plotSpikesOnPath(self, tetrode, clusters, ax=None, clamp=False, **kwargs):
"""
Plots the spikes on the path during a trial for a particular tetrode/
cluster(s)
Parameters
----------
tetrode: int
the tetrode you want to look at
cluster : int, 1xn array/ list
a single number or list (or 1xn array) of the clusters to plot
clamp : bool, optional
whether to restrict the plot to the self._xlims and self_ylims
property
ax : matplotlib.Axes
defaults to None. Which axis to add the plot to.
If None a new figure window is produced
"""
if not isinstance(clusters, (np.ndarray, list)):
if isinstance(clusters, str):
clusters = self.availableClusters
else:
clusters = [clusters]
xy = self.POS.xy
for i, clust in enumerate(clusters):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xy[0], xy[1], c=tcols.colours[0], zorder=1)
idx = self.TETRODE[tetrode].getClustIdx(clust)
# useful to override default colour scheme for publication figures
if 'mec' in kwargs.keys():
mec = kwargs.pop('mec')
else:
mec = tcols.colours[clust]
ax.plot(xy[0, idx], xy[1, idx], 's', c=mec, mec=mec, **kwargs)
if clamp:
ax.set_xlim(self._xlims)
ax.set_ylim(self._ylims)
ax.set_aspect('equal')
ax.invert_yaxis()
plt.tick_params(axis='both', which='both', left='off', right='off',
bottom='off', top='off')
plt.setp(ax.get_xticklabels() + ax.get_yticklabels(),
visible=False)
return ax
def plotRaster(self, tetrode, clusters, ax=None, dt=(-50, 100), prc_max = 0.5, ms_per_bin=1, histtype='count', hist=True, **kwargs):
"""
Wrapper for _plotRaster allowing multiple clusters to be plotted in
separate figure windows
Parameters
----------
tetrode : int
cluster : int
dt : 2-tuple
the window of time in ms to examine zeroed on the event of interest
i.e. the first value will probably be negative as in the default example
prc_max : float
the proportion of firing the cell has to 'lose' to count as
silent; a float between 0 and 1
ax - matplotlib.Axes
the axes to plot into. If not provided a new figure is created
ms_per_bin : int
The number of milliseconds in each bin of the raster plot
histtype : str
either 'count' or 'rate' - the resulting histogram plotted above the raster plot will
consist of either the counts of spikes in ms_per_bin or the mean rate
in ms_per_bin
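Examples
--------
A minimal usage sketch (hypothetical tetrode/ cluster numbers; the
stimulation ratio is returned):
>>> ratio = T.plotRaster(1, 1, dt=(-50, 100), ms_per_bin=1)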
"""
if isinstance(clusters, int):
clusters = [clusters]
elif isinstance(clusters, str):
if 'all' in clusters:
tetDict = self.getTsAndCs()
clusters = tetDict[tetrode]
for cluster in clusters:
# Calculate the stimulation ratio
stim_histo = self.getRasterHist(tetrode, cluster, dt=dt, hist=hist)
mean_stim_spikes = np.sum(stim_histo, 1)
pre_stim_spks = np.mean(mean_stim_spikes[0:50])
post_stim_spks = np.mean(mean_stim_spikes[50:60])
ratio = (post_stim_spks-pre_stim_spks) / (post_stim_spks+pre_stim_spks)
print("Stimulation ratio = {}".format(ratio))
self._plotRaster(tetrode=tetrode, cluster=cluster, dt=dt,prc_max=prc_max, ax=ax, ms_per_bin=ms_per_bin,histtype=histtype, **kwargs)
return ratio
def _plotRaster(self, tetrode, cluster, dt=(-50, 100), prc_max=0.5, ax=None, ms_per_bin=1, histtype='count', **kwargs):
"""
Plots a raster plot for a specified tetrode/ cluster
Parameters
----------
tetrode : int
cluster : int
dt : 2-tuple
the window of time in ms to examine zeroed on the event of interest
i.e. the first value will probably be negative as in the default example
prc_max : float
the proportion of firing the cell has to 'lose' to count as
silent; a float between 0 and 1
ax - matplotlib.Axes
the axes to plot into. If not provided a new figure is created
ms_per_bin : int
The number of milliseconds in each bin of the raster plot
histtype : str
either 'count' or 'rate' - the resulting histogram plotted above the raster plot will
consist of either the counts of spikes in ms_per_bin or the mean rate
in ms_per_bin
"""
if 'x1' in kwargs.keys():
x1 = kwargs.pop('x1')
else:
x1 = self.TETRODE[tetrode].getClustTS(cluster)
x1 = x1 / int(self.TETRODE[tetrode].timebase / 1000.) #in ms
x1.sort()
on_good = self.STM.getTS()
dt = np.array(dt)
irange = on_good[:, np.newaxis] + dt[np.newaxis, :]
dts = np.searchsorted(x1, irange)
y = []
x = []
for i, t in enumerate(dts):
tmp = x1[t[0]:t[1]] - on_good[i]
x.extend(tmp)
y.extend(np.repeat(i, len(tmp)))
if ax is None:
fig = plt.figure(figsize=(4.0, 7.0))
self._set_figure_title(fig, tetrode, cluster)
axScatter = fig.add_subplot(111)
else:
axScatter = ax
axScatter.scatter(x, y, marker='.', s=2, rasterized=False, **kwargs)
divider = make_axes_locatable(axScatter)
axScatter.set_xticks((dt[0], 0, dt[1]))
axScatter.set_xticklabels((str(dt[0]), '0', str(dt[1])))
axHistx = divider.append_axes("top", 0.95, pad=0.2, sharex=axScatter,
transform=axScatter.transAxes)
scattTrans = transforms.blended_transform_factory(axScatter.transData,
axScatter.transAxes)
stim_pwidth = int(self.setheader['stim_pwidth'])
axScatter.add_patch(Rectangle((0, 0), width=stim_pwidth/1000., height=1,
transform=scattTrans,
color=[0, 0, 1], alpha=0.5))
histTrans = transforms.blended_transform_factory(axHistx.transData,
axHistx.transAxes)
axHistx.add_patch(Rectangle((0, 0), width=stim_pwidth/1000., height=1,
transform=histTrans,
color=[0, 0, 1], alpha=0.5))
axScatter.set_ylabel('Laser stimulation events', labelpad=-18.5)
axScatter.set_xlabel('Time to stimulus onset(ms)')
nStms = int(self.STM['num_stm_samples'])
axScatter.set_ylim(0, nStms)
# Label only the min and max of the y-axis
ylabels = axScatter.get_yticklabels()
for i in range(1, len(ylabels)-1):
ylabels[i].set_visible(False)
yticks = axScatter.get_yticklines()
for i in range(1, len(yticks)-1):
yticks[i].set_visible(False)
histColor = [192/255.0,192/255.0,192/255.0]
histX = axHistx.hist(x, bins=np.arange(dt[0], dt[1] + ms_per_bin, ms_per_bin),
color=histColor, alpha=0.6, range=dt, rasterized=True, histtype='stepfilled')
vals = histX[0]
bins = histX[1]
if 'rate' in histtype:
axHistx.set_ylabel('Rate')
mn_rate_pre_stim = np.mean(vals[bins[1:] < 0])
idx = np.logical_and(bins[1:] > 0, bins[1:] < 10).nonzero()[0]
mn_rate_post_stim = np.mean(vals[idx])
above_half_idx = idx[(vals[idx] < mn_rate_pre_stim * prc_max).nonzero()[0]]
half_pre_rate_ms = bins[above_half_idx[0]]
print('\ntime to {0}% of pre-stimulus rate = {1}ms'.format(*(prc_max * 100, half_pre_rate_ms)))
print('mean pre-laser rate = {0}Hz'.format(mn_rate_pre_stim))
print('mean 10ms post-laser rate = {0}'.format(mn_rate_post_stim))
else:
axHistx.set_ylabel('Spike count', labelpad=-2.5)
plt.setp(axHistx.get_xticklabels(),
visible=False)
# Label only the min and max of the y-axis
ylabels = axHistx.get_yticklabels()
for i in range(1, len(ylabels)-1):
ylabels[i].set_visible(False)
yticks = axHistx.get_yticklines()
for i in range(1, len(yticks)-1):
yticks[i].set_visible(False)
axHistx.set_xlim(dt)
axScatter.set_xlim(dt)
return x,y
def getRasterHist(self, tetrode, cluster, dt=(-50, 100), hist=True):
"""
Calculates the histogram of the raster of spikes during a series of events
Parameters
----------
tetrode : int
cluster : int
dt : tuple
the window of time in ms to examine zeroed on the event of interest
i.e. the first value will probably be negative as in the default example
hist : bool
If True return a 2D histogram binned by time within the window and
by stimulation event; if False return a 1D histogram of spike times
pooled across all events
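Examples
--------
A minimal usage sketch (hypothetical tetrode/ cluster numbers):
>>> H = T.getRasterHist(1, 1, dt=(-50, 100), hist=True)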
"""
x1 = self.TETRODE[tetrode].getClustTS(cluster)
x1 = x1 / int(self.TETRODE[tetrode].timebase / 1000.) #in ms
x1.sort()
on_good = self.STM.getTS()
dt = np.array(dt)
irange = on_good[:, np.newaxis] + dt[np.newaxis, :]
dts = np.searchsorted(x1, irange)
y = []
x = []
for i, t in enumerate(dts):
tmp = x1[t[0]:t[1]] - on_good[i]
x.extend(tmp)
y.extend(np.repeat(i, len(tmp)))
if hist:
nEvents = int(self.STM["num_stm_samples"])
return np.histogram2d(x, y, bins=[np.arange(dt[0],dt[1]+1,1), np.arange(0,nEvents+1, 1)])[0]
else:
return np.histogram(x, bins=np.arange(dt[0],dt[1]+1,1), range=dt)[0]
def plot_event_EEG(self, eeg_type='egf', dt=(-50, 100), plot=True, ax=None,
evenOnsets=True, **kwargs):
"""
Plots out the eeg record following an 'on' event in the log file
Parameters
----------
eeg_type : str
either 'eeg' or 'egf'
dt : tuple
time to look before and after an onset event
plot : bool
whether to plot the stimulus-triggered-eeg
ax : matplotlib.axis
will plot into this axis if supplied
(a new figure is produced if plot is True and ax is None)
evenOnsets: bool
if True assume there is supposed to be an even
difference between the events in the .stm file. If events are
found that have an uneven difference they are thrown out.
NB The difference is calculated from information gleaned from
the trial.STM field. If False this is ignored.
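Examples
--------
A minimal usage sketch; with plot=False the stimulus-triggered eeg and
the indices used to extract it are returned:
>>> result, idx = T.plot_event_EEG(eeg_type='egf', dt=(-50, 100), plot=False)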
"""
on_good = self.STM.getTS()#timestamps in ms
"""
Check for inter-stimulus time differences to make sure that the large
majority (99%) of on pulses are regularly spaced - otherwise issue a warning
"""
df = np.diff(np.diff(on_good))
if np.count_nonzero(df) / float(len(on_good)) * 100 > 1:
warnings.warn('More than 1% of on events differ in size', UserWarning)
#check for abnormally large number of stim events and abort
if len(on_good) > 100000:
raise Exception('Very large number of stimulation events. Aborting plot_event_EEG')
#get the eeg data and indices to use
if 'egf' in eeg_type:
eeg = self.EGF.eeg
on_idx = self.STM.getEGFIdx()
eeg_samps_per_ms = self.EGF.sample_rate / 1000.0
elif 'eeg' in eeg_type:
eeg = self.EEG.eeg
on_idx = self.STM.getEEGIdx()
eeg_samps_per_ms = self.EEG.sample_rate / 1000.0
"""
NB the following conditional assumes there is only one phase of the
stimulation that actually contains stim events. If there is more than
one then the last one will be the one used
"""
df = np.diff(on_good)
"""
keep pulsePause here as used lower down to plot multiple Rectangle
patches in case the dt tuple specifies a range of values higher than
the pause between stimulation events
"""
pulsePause = 0
if evenOnsets:
for k, v in self.STM.items():
if isinstance(v, OrderedDict):
for kk, vv in v.items():
for kkk, vvv in vv.items():
if 'Pause' in kkk:
if vvv is not None:
pulsePause = vvv
pulsePause_ms = pulsePause / 1000  # the desired inter-stimulus pause in ms
unequalPausesIdx = np.nonzero(df!=pulsePause_ms)[0]
on_good = np.delete(on_good, unequalPausesIdx)
on_idx = np.delete(on_idx, unequalPausesIdx)
eeg = eeg - np.ma.mean(eeg)
dt_eeg = eeg_samps_per_ms * np.array(dt)
rng = np.arange(dt_eeg[0], dt_eeg[1], 1)
idx = (on_idx[np.newaxis, :] + rng[:, np.newaxis]).astype(int)
result = np.zeros((len(rng), len(on_good)))
result = eeg[idx]
if not plot:
return result, idx
else:
mn = np.mean(result, 1)
se = np.std(result, 1) / np.sqrt(len(on_good))
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = ax
ax.errorbar(np.linspace(dt[0], dt[1], len(mn)), mn * 1e6,
yerr=se*1e6, rasterized=False)
ax.set_xlim(dt)
axTrans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
stim_pwidth = int(self.setheader['stim_pwidth'])
if pulsePause > 0:
a = np.arange(0, dt[1], pulsePause_ms)
b = np.arange(0, dt[0], -pulsePause_ms)
patchStarts = np.unique(np.concatenate((a, b)))
for p in patchStarts:
ax.add_patch(Rectangle((p, 0), width=stim_pwidth/1000., height=1,
transform=axTrans,
color=[1, 1, 0], alpha=0.5))
ax.set_ylabel('LFP ($\mu$V)')
ax.set_xlabel('Time(ms)')
return result
def plotEventEEGRange(self, eeg_type='egf', stimTrials=[0,1], ax=None, **kwargs):
"""
Calls plot_event_eeg with defaults and no plotting and then plots out
a time period in seconds from x1 to x2 and overlays the correct time in
seconds on the x-axis - meant for manual inspection of the effect of
stimulation events on the eeg
Parameters
------------
eeg_type : str
either 'egf' or 'eeg', although there is probably no point
using 'eeg' as its sample rate is too low
stimTrials : list
the stimulation 'trial' to plot, starting at 0
NB stimulating every 150ms for 10ms for 20 minutes gets
you 8000 trials
ax : matplotlib.axis
the axis to plot into. A new figure is
produced if this is None
"""
result, idx = self.plot_event_EEG(eeg_type=eeg_type, plot=False)
eeg_samp_rate = self.STM[eeg_type + 'SampRate']
time_ms = idx / float(eeg_samp_rate / 1000.)
eeg_blocks = []
time_blocks = []
for t in stimTrials:
eeg_blocks.append(result[:, t])
time_blocks.append(time_ms[:, t])
speed_idx = (idx / (eeg_samp_rate / self.POS.pos_sample_rate)).astype(int)
speed = self.POS.speed[0, np.ravel(speed_idx, 'F')]
max_speed = np.max(speed)
speed = np.reshape(speed, idx.shape, 'F')
# filter the eeg data in the theta and gamma bands
E = EEGCalcs(self.filename_root)
eeg = self.EGF.eeg
eeg = eeg - np.ma.mean(eeg)
sampRate = self.EGF.sample_rate
theta_eeg = E.filterWithButter(eeg, 4, 8, sampRate, 2)
gamma_eeg = E.filterWithButter(eeg, 30, 80, sampRate, 2)
theta = theta_eeg[np.ravel(idx, 'F')]
theta = np.reshape(theta, idx.shape, 'F')
gamma = gamma_eeg[np.ravel(idx, 'F')]
gamma = np.reshape(gamma, idx.shape, 'F')
#dt is (-50, 150)
rectStart = int((eeg_samp_rate / 1000.) * 50)
rectEnd = int((eeg_samp_rate / 1000.) * 60)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = ax
ax1 = ax.twinx()
for block in zip(time_blocks, eeg_blocks, stimTrials):
ax.plot(block[0], block[1], color=[0.8627, 0.8627, 0.8627])
ax.hold(True)
ax.plot(block[0], theta[:, block[2]], 'r')
ax.plot(block[0], gamma[:, block[2]], 'g')
ax1.plot(block[0], speed[:, block[2]], 'y')
ax1.set_ylim(0, np.max(max_speed) * 4)
axTrans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
i = block[0][rectStart]
j = block[0][rectEnd] - block[0][rectStart]
ax.add_patch(Rectangle((i,0), width=j, height=1,
transform=axTrans,
color=[41./256, 161./256, 230./256], alpha=0.5))
ax.set_xlim(time_blocks[0][0], time_blocks[-1][-1])
ylabels = ax1.yaxis.get_majorticklabels()
for i,xxx in enumerate(ylabels):
if i > 1:
xxx.set_visible(False)
else:
xxx.set_color('k')
yticks = ax1.yaxis.get_major_ticks()
for i,xxx in enumerate(yticks):
if i > 1:
xxx.set_visible(False)
def adjust_median_speed(self, min_speed=5, plot=True):
"""
Parameters
----------
min_speed : float
plot : bool
"""
grandMedian = stats.nanmedian(self.POS.speed, 1)
sortedSpIdx = np.argsort(self.POS.speed)
sortedSp = np.sort(self.POS.speed)
indMedian = np.nonzero(sortedSp >= grandMedian)[1][0]
indFirstOverThresh = np.nonzero(sortedSp >= min_speed)[1][0]
indLastNotNan = np.nonzero(~np.isnan(sortedSp))[1][-1]
halfWidth = np.min([indMedian-indFirstOverThresh, indLastNotNan-indMedian])
if plot:
fig = plt.figure()
ax = fig.add_subplot(111)
maxSp = sortedSp[0, indLastNotNan]
L = sortedSp.shape[1]
rect = Rectangle(xy=(0, indMedian-halfWidth), width=maxSp, height=indMedian+halfWidth/2, color='b', alpha=0.5)
ax.add_patch(rect)
ax.plot(sortedSp[0, 0:indLastNotNan], np.arange(indLastNotNan), 'k', lw=2)
ax.set_xlabel('Speed (cm/s)')
ax.set_ylabel('Cumulative number of samples')
if indLastNotNan != L:
ax.plot((0, maxSp), (indLastNotNan+1, indLastNotNan+1), 'r-')
ax.plot((0, maxSp), (L, L), 'r-')
ax.set_xlim(0, maxSp)
ax.set_ylim(0, L)
ax.plot((0, maxSp), (indMedian, indMedian), 'b', lw=1)
ax.plot((grandMedian, grandMedian), (0, indMedian), 'b-')
ax.plot(grandMedian, indMedian, 'bo', ms=12)
ax.plot((0, maxSp), (indFirstOverThresh, indFirstOverThresh), 'b', lw=1)
ax.plot((min_speed, min_speed), (0, indFirstOverThresh), 'b--')
ax.plot(min_speed, indFirstOverThresh, 'bo', ms=12)
return sortedSpIdx[indMedian-halfWidth:indMedian+halfWidth]
def plotRateVSpeed(self, tetrode, cluster, minSpeed=0.0, maxSpeed = 40.0,
sigma=3.0, shuffle=False, nShuffles=100, plot=False, ax=None,
verbose=False, getShuffledData=False, getData=False, **kwargs):
"""
Plots the instantaneous firing rate of a cell against running speed
Also outputs a couple of measures as with Kropff et al., 2015; the
Pearsons correlation and the depth of modulation (dom) - see below for
details
Parameters
-------------------
tetrode : int
the tetrode to use
cluster : int
the cluster to use
minSpeed : float
speeds below this value are masked and not used
maxSpeed : float
speeds above this value are masked and not used
sigma : float
the standard deviation of the gaussian used to smooth the spike
train
shuffle : bool, default False
Whether to calculate the significance of the speed score or not
This is done by calculating the correlation between speed and
the shuffled spike train for nShuffles where the shuffles are only allowed within the
window (trial_start + minTime) : (trial_end - minTime). Default is
30 seconds as with Kropff et al., 2015. Default False
nShuffles : int
How many times to perform the shuffle. Defaults to 100 as with
Kropff et al., 2015
plot : bool
Whether to plot output or not. Defaults to False
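Examples
--------
A minimal usage sketch (hypothetical tetrode/ cluster numbers; the
Pearson correlation is returned by default):
>>> r = T.plotRateVSpeed(1, 1, minSpeed=2.0, plot=True)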
"""
speed = self.POS.speed.ravel()
# Calculate histogram to see how much is accounted for in each bin
if np.nanmax(speed) < maxSpeed:
maxSpeed = np.nanmax(speed)
if verbose:
print('Capping speed to max in data: {:.2f}'.format(maxSpeed))
spd_bins = np.arange(minSpeed, maxSpeed, 1.0)
# Construct the mask
speed_filt = np.ma.MaskedArray(speed)
speed_filt = np.ma.masked_where(speed_filt < minSpeed, speed_filt)
speed_filt = np.ma.masked_where(speed_filt > maxSpeed, speed_filt)
spk_sm = self._getTimeSmoothedSpikes(tetrode, cluster, sigma)
spk_sm = np.ma.MaskedArray(spk_sm, mask=np.ma.getmask(speed_filt))
# res is the basic correlation between running speed and instantaneous
# firing rate
res = stats.mstats.pearsonr(spk_sm, speed_filt)
if shuffle:
duration = self.POS.npos / self.POS.pos_sample_rate
shuffles = np.linspace(30, duration-30, nShuffles)
shuffled_rs = []
for time in shuffles:
shuffled_spks = self._getTimeSmoothedSpikes(tetrode, cluster, sigma, time)
shuffled_rs.append(stats.mstats.pearsonr(shuffled_spks, speed_filt)[0])
prob = np.array([.90, .95, .99])
qtiles = stats.mstats.mquantiles(shuffled_rs, prob)
if verbose:
print("Running speed vs firing rate correlation (PPMC): {0}".format(res[0]))
print("The {0} percentiles are {1}".format(prob*100, qtiles))
spd_dig = np.digitize(speed_filt, spd_bins, right=True)
mn_rate = np.array([np.ma.mean(spk_sm[spd_dig==i]) for i in range(0,len(spd_bins))])
if plot:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(spd_bins, mn_rate * self.POS.pos_sample_rate, 'k')
ax.set_xlim(spd_bins[0], spd_bins[-1])
ax.set_ylabel("Firing rate(Hz)")
ax.set_xlabel("Speed(cm/s)")
ylabels = ax.get_yticklabels()
for i in range(1, len(ylabels)-1):
ylabels[i].set_visible(False)
yticks = ax.get_yticklines()
for i in range(1, len(yticks)-1):
yticks[i].set_visible(False)
xlabels = ax.get_xticklabels()
for i in range(1, len(xlabels)-1):
xlabels[i].set_visible(False)
xticks = ax.get_xticklines()
for i in range(1, len(xticks)-1):
xticks[i].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
if "add_peak_rate" in kwargs:
if kwargs['add_peak_rate']:
ax.annotate('{:.2f}'.format(np.max(res[0])), (0.15,0.9), \
xycoords='axes fraction', textcoords='axes fraction', color='k', size=30, weight='bold', ha='center', va='center')
if getData:
return res[0], spd_bins, mn_rate * self.POS.pos_sample_rate
if getShuffledData:
return res[0], shuffled_rs
else:
return res[0]
def plotRollingCorrRateVSpeed(self, tetrode, cluster, minSpeed=2.0,
sigma=3.0, **kwargs):
"""
Plots the rolling correlation of instantaneous firing rate of a given
cell against running speed
Parameters
----------
tetrode : int
cluster : int
minSpeed : float
sigma : float
The width of the smoothing kernel applied to the spike train to smooth it
"""
speed_filt = self.POS.speed.ravel()
#filter for low speeds
lowSpeedIdx = speed_filt < minSpeed
spk_sm = self._getTimeSmoothedSpikes(tetrode, cluster, sigma)
windowSize = 50
runningCorr = np.ones_like(spk_sm)
for i in range(len(spk_sm)):
runningCorr[i] = stats.pearsonr(spk_sm[i:i+windowSize],
speed_filt[i:i+windowSize])[0]
speed_filt = np.ma.MaskedArray(speed_filt, lowSpeedIdx)
spk_sm = np.ma.MaskedArray(spk_sm, lowSpeedIdx)
# mask the running correlation where there is no rate (ie the cell fails
# to fire)
new_mask = np.ma.mask_or(lowSpeedIdx, spk_sm==0)
runningCorr = np.ma.MaskedArray(runningCorr, new_mask)
fig, ax = plt.subplots()
fig.subplots_adjust(right=0.75)
ax2 = ax.twinx()
ax3 = ax.twinx()
ax2.spines["right"].set_position(("axes", 1.2))
ax3.set_frame_on(True)
ax3.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
ax3.spines["right"].set_visible(True)
p1, = ax.plot(speed_filt, 'b')
p2, = ax2.plot(spk_sm, 'r')
p3, = ax3.plot(runningCorr, 'k')
ax.set_xlim(0, len(speed_filt))
ax.set_ylim(0, np.max(speed_filt))
ax2.set_ylim(0, np.max(spk_sm))
ax3.set_ylim(-1, 1)
ax.set_ylabel('Speed(cm/s)')
ax2.set_ylabel('Instantaneous firing rate(Hz)')
ax3.set_ylabel('Running correlation')
ax.yaxis.label.set_color(p1.get_color())
ax2.yaxis.label.set_color(p2.get_color())
ax3.yaxis.label.set_color(p3.get_color())
tkw = dict(size=4, width=1.5)
ax.tick_params(axis='y', colors=p1.get_color(), **tkw)
ax2.tick_params(axis='y', colors=p2.get_color(), **tkw)
ax3.tick_params(axis='y', colors=p3.get_color(), **tkw)
ax.tick_params(axis='x', **tkw)
def _getTimeSmoothedSpikes(self, tetrode, cluster, sigma=3.0, shuffle=None):
"""
Returns a spike train the same length as the number of position samples
that has been smoothed in time with a gaussian kernel whose standard
deviation equals sigma
Parameters
--------------
tetrode : int
the tetrode to use
cluster : int
the cluster to use
sigma : float
the standard deviation of the gaussian used to smooth the spike
train
"""
x1 = self.TETRODE[tetrode].getClustIdx(cluster)
spk_sm = self.spikecalcs.smoothSpikePosCount(x1, self.POS.npos, sigma, shuffle)
return spk_sm
def plotFreqVSpeed(self, minSp=5, maxSp=50, spStep=5, ax=None, laserFilter=None, **kwargs):
"""
Plots running speed vs eeg frequencies and does linear regression. Also adds position sample histogram
TODO: filter out negative frequencies - do this as default in EEG class
Parameters
----------
minSp : int
speeds below this are ignored
maxSp : int
speeds above this are ignored
spStep : int
the bin width for speed
ax : matplotlib.axes
the axes in which to plot
laserFilter : int or None
whether to filter for laser on/ off events
None means no filtering at all; 1 means laser is on and data is filtered for on periods
0 means filter for laser off periods
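Examples
--------
A minimal usage sketch; the axis plus the intercept and slope of the
regression are returned:
>>> ax, intercept, slope = T.plotFreqVSpeed(minSp=5, maxSp=50, spStep=5)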
"""
sp = np.ma.compressed(self.POS.speed)
if laserFilter:
eeg = self.EEG.eeg
EE = EEGCalcs(self.filename_root, thetaRange=[6,12])
if 'dip' in kwargs:
d = kwargs['dip']
else:
d = 15.0
if 'width' in kwargs:
w = kwargs['width']
else:
w = 0.125
if 'stimFreq' in kwargs:
sf = kwargs['stimFreq']
else:
sf = 6.66
fx = EE.filterForLaser(E=eeg, width=w, dip=d, stimFreq=sf)#filters out laser stimulation artifact
fxx = self.EEG.eegfilter(fx)
self.EEG.thetaAmpPhase(fxx)#filters for theta
freq = self.EEG.EEGinstfreq
else:
try:
freq = self.EEG.EEGinstfreq
except:
self.EEG.thetaAmpPhase()
freq = self.EEG.EEGinstfreq
freq[freq<0] = np.nan
sp_bins = np.arange(minSp, maxSp, spStep)
sp_dig = np.digitize(sp, sp_bins)
freq = np.reshape(freq, (self.POS.npos, int(self.EEG.sample_rate / self.POS.pos_sample_rate)))
if np.ma.is_masked(self.POS.speed):
mask = np.ma.getmask(self.POS.speed)
mask = np.tile(mask.T, int(self.EEG.sample_rate / self.POS.pos_sample_rate))
freq = np.ma.MaskedArray(freq, mask=mask)
mn_freq = np.nanmean(freq, 1)
mn_freq = np.ma.compressed(mn_freq)
X = [mn_freq[sp_dig==i] for i in range(len(sp_bins))]
# remove any nans which will screw plt.boxplots ability to calculate means
# and do the boxplot correctly
for i,x in enumerate(X):
idx = ~np.isfinite(x)
X[i] = np.delete(x,np.nonzero(idx))
if ax is None:
fig = plt.figure()
fig.set_facecolor('w')
ax = plt.gca()
else:
fig = plt.gcf()
fig.set_facecolor('w')
# set up some properties for the elements in the box plot
bprops = {'c': [0.8627, 0.8627, 0.8627]}
wprops = {'c': [0.8627, 0.8627, 0.8627]}
ax.boxplot(X, positions=sp_bins, boxprops=bprops, whiskerprops=wprops)
medians = np.array([stats.nanmedian(x) for x in X])
nan_idx = np.isnan(medians)
slope, intercept, r_value, p_value, std_err = stats.linregress(sp_bins[~nan_idx], medians[~nan_idx])
minFreq = np.min(medians[~nan_idx]) - 1.0
maxFreq = np.max(medians[~nan_idx]) + 1.0
ax.set_ylim(minFreq, maxFreq)
# ax.set_xlim(0, sp_bins[-1])
# ylims = np.array(ax.get_ylim())
xlims = np.array(ax.get_xlim())
res = stats.theilslopes(medians[~nan_idx], sp_bins[~nan_idx], 0.90)
ax.plot([0,xlims[1]], (res[1], res[1] + (res[0] * sp_bins[-1])), 'r-')
ax.plot([0,xlims[1]], (res[1], res[1] + (res[2] * sp_bins[-1])), 'r--')
ax.plot([0,xlims[1]], (res[1], res[1] + (res[3] * sp_bins[-1])), 'r--')
# ax.plot([0,xlims[1]], (intercept, intercept + (sp_bins[-1] * slope)), 'k--', lw=2)
ax.set_ylabel('Frequency(Hz)')
ax.set_xlabel('Speed (cm/s)')
ax.set_title('Intercept: {0:.3f} Slope: {1:.5f}'.format(intercept, slope))
# add the right-hand y-axis and format
ax1 = ax.twinx()
# get a histogram of speed to be plotted against the right-hand y-axis
h,e = np.histogram(np.ma.compressed(sp), bins=len(sp_bins)*10, range=(0, sp_bins[-1]))
ax1.bar(e[0:-1], h, color=[0.6667, 0.6667, 0], linewidth=0, align='edge')
ax1.set_ylim(0, np.max(h) * 4) # reduce the 'height' of the secondary plot
# ax1.set_xlim(0, sp_bins[-1]+spStep)
ax1.set_ylabel('Position samples', color=[0.6667, 0.6667, 0])
ax1.yaxis.set_label_coords(1.1,.15)
ylabels = ax1.yaxis.get_majorticklabels()
for i,xxx in enumerate(ylabels):
if i > 1:
xxx.set_visible(False)
else:
xxx.set_color([0.6667, 0.6667, 0])
yticks = ax1.yaxis.get_major_ticks()
for i,xxx in enumerate(yticks):
if i > 1:
xxx.set_visible(False)
return ax, intercept, slope
def plotPhaseOfFiring(self, tetrode, cluster, ax=None, **kwargs):
"""
Plots the phase of firing of a given cluster as a histogram
Parameters
----------
tetrode : int
cluster : int
ax : matplotlib.Axes
"""
phase = self._getClusterPhaseVals(tetrode, cluster)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
# make the plot like the Somogyi figures!
fig.set_facecolor('#203C8A')
phase = np.hstack((phase, phase + (2*np.pi)))
ax2.hist(phase, bins=120, range=(-np.pi, 3*np.pi), color='w', histtype='stepfilled')
t = np.arange(-np.pi, 3 * np.pi, 0.1)
ax.plot(t, np.sin(t), 'w')
ax.annotate('180', xy=(-np.pi-0.2, 0), xycoords='data', ha='right', va='center',
color='w', fontsize=20)
ax.set_axis_bgcolor('#203C8A')
ax.set_ylim(-1.1, 1.1)
ax.axis('off')
ax2.set_axis_bgcolor('#203C8A')
plt.axis('off')
def plotPhaseInField(self, tetrode, cluster, ax=None, **kwargs):
"""
Plots theta phase of spikes in a place field (found using _getFieldLims)
as individual colours for each run through the field
TODO: broken
Parameters
----------
tetrode : int
cluster : int
ax : matplotlib.Axes
"""
if not self.EEG:
self.EEG = EEG(self.filename_root)
self.EEG.thetaAmpPhase()
self.EEG.EEGphase = np.rad2deg(self.EEG.EEGphase)
runs_to_keep, spk_in_run, run_duration = self.getFieldRuns(tetrode, cluster)
if ax is None:
ax = plt.gca()
else:
ax = ax
for spks in spk_in_run:
ax.plot(self.POS.xy[0,spks], self.EEG.EEGphase[spks * self.pos2eegScale]+180,'.')
ax.set_title(self.filename_root.split('\\')[-1] + ' cluster ' + str(cluster) + ' on tetrode ' + str(tetrode))
plt.show()
def plotSpectrogram(self, eegType='eeg', ymin=0, ymax=50, ax=None, secsPerBin=2,
laser=False, width=0.125, dip=15.0):
"""
Plots a spectrogram of the LFP of the whole trial
Parameters
--------------
eegType : str
Whether to do use .eeg file or .egf file. Defaults to eeg
ymin / ymax : int
Minimum/ maximum frequency (y-axis) to plot
ax : matplotlib.pyplot.axis
Which axis to add the plot to. If None a new figure window is produced
secsPerBin : int
Size of the x-axis bins
laser : bool
Whether to filter the eeg for laser stimulation events
width/ dip : float
Parameters for Kaisser filter in eegcalcs.EEGCalcs - see there
for definition
Returns
------------
Plots the spectrogram
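Examples
--------
A minimal usage sketch using the lower sample-rate .eeg file:
>>> freqs, times, Sxx, im = T.plotSpectrogram(eegType='eeg', ymin=0, ymax=50)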
"""
if 'eeg' in eegType:
E = self.EEG.eeg
if np.ma.is_masked(E):
E = E.compressed()
Fs = self.EEG.sample_rate
elif 'egf' in eegType:
E = self.EGF.eeg
if np.ma.is_masked(E):
E = E.compressed()
Fs = self.EGF.sample_rate
EE = EEGCalcs(self.filename_root,thetaRange=[6,12])
if laser:
"""
Split the eeg into the parts where the laser is on and off
and then reassemble for the spectrogram
NB this assumes the laser comes on at 600s for 20 minutes
and then goes off
"""
mask = np.ones_like(E).astype(bool)
mask[600*int(Fs):1800*int(Fs)] = False
# filter
# import pdb
# pdb.set_trace()
fx = EE.filterForLaser(E=E[~mask], width=width, dip=dip)
# reassemble
Etmp = np.zeros_like(E)
Etmp[~mask] = fx
Etmp[mask] = E[mask]
fx = Etmp
else:
fx = E
nperseg = int(Fs * secsPerBin)
freqs, times, Sxx = signal.spectrogram(fx, Fs, nperseg=nperseg)
# Sxx_sm = self.ratemap.blurImage(Sxx, (secsPerBin*2)+1)
Sxx_sm = Sxx
x, y = np.meshgrid(times, freqs)
if ax is None:
plt.figure()
ax = plt.gca()
im = ax.pcolormesh(x, y, Sxx_sm, edgecolors='face', norm=colors.LogNorm())
ax.set_xlim(times[0], times[-1])
ax.set_ylim(ymin, ymax)
ax.set_xlabel('Time(s)')
ax.set_ylabel('Frequency(Hz)')
if laser:
ax.vlines(600,ymin,ymax)
ax.vlines(1800,ymin,ymax)
ax.set_xticks((0, 600, 1800, 2400))
ax.set_xticklabels((str(0), str(600), str(1800), str(2400)))
return freqs, times, Sxx, im
def plotEEGPower(self, E=None, eegType='eeg', smthKernelSigma=0.1875,
freqBand=(6,12), outsideBand=(3,125), s2nWdth=2, xmax=125,
ymax=None, plot=True, ax=None, **kwargs):
"""
Plots the eeg power spectrum. Annotates graph around theta frequency band.
Parameters
-------------
E : numpy.array
(Optional) numEEGSamples sized numpy array of raw eeg signal amplitude.
eegType : str
(Optional) Either 'eeg' or 'egf'. The .eeg file type to use. Defaults to 'eeg'
smthKernelSigma : float
(Optional) number of points in the output window for gaussian filtering of eeg. This
value is multiplied by the binsPerHz which comes from the length of the fft (derived from nextpow2 for speed).
freqBand : two-tuple
(Optional) the theta-band to examine.
outsideBand : two-tuple
(Optional): frequencies outside these values are ignored. NOT IMPLEMENTED.
s2nWdth : int
(Optional) Determines the width of the window to calculate the signal-to-noise ratio.
xmax : int
(Optional) Maximum x-value (frequency) to plot to. Defaults to 125
ymax : int
(Optional) Maximum y-value to plot to. Defaults to None so plots full range
plot : bool
(Optional) Whether to produce a plot
ax : matplotlib.pyplot.axis instance
(Optional) The axis to plot in to.
Returns
-------------
ax : matplotlib.pyplot.axis instance
The axis containing the plot.
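Examples
--------
A minimal usage sketch restricting the plotted frequency range to 0-25 Hz:
>>> ax = T.plotEEGPower(eegType='eeg', xmax=25)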
"""
if E is None:
if 'eeg' in eegType:
E = self.EEG.eeg
freqBand = (self.EEG.x1, self.EEG.x2)
if np.ma.is_masked(E):
E = E.compressed()
sample_rate = self.EEG.sample_rate
elif 'egf' in eegType:
E = self.EGF.eeg
freqBand = (self.EEG.x1, self.EEG.x2)
if np.ma.is_masked(E):
E = E.compressed()
sample_rate = self.EGF.sample_rate
else:
if np.ma.is_masked(E):
E = E.compressed()
sample_rate = kwargs['sample_rate']
nqLim = sample_rate / 2
origLength = len(E)
fftLength = 2 ** self.EEG.nextpow2(origLength).astype(int)
freqs, power = signal.periodogram(E, fs=sample_rate, return_onesided=True, nfft=fftLength)
fftHalfLength = fftLength / 2+1
# calculate the number of points in the gaussian window - gleaned from gaussian_filter1d
# which lives in scipy/ndimage/filters.py
binsPerHz = (fftHalfLength-1) / nqLim
kernelSigma = smthKernelSigma * binsPerHz
smthKernelWidth = 2 * int(4.0 * kernelSigma + 0.5) + 1
gaussWin = signal.gaussian(smthKernelWidth, kernelSigma)
# smooth the power
sm_power = signal.fftconvolve(power, gaussWin, 'same')
# normalize the smoothed power by the length of the fft
sm_power = sm_power / np.sqrt(len(sm_power))
# calculate some metrics
spectrumMaskBand = np.logical_and(freqs>freqBand[0], freqs<freqBand[1])
bandMaxPower = np.max(sm_power[spectrumMaskBand])
maxBinInBand = np.argmax(sm_power[spectrumMaskBand])
bandFreqs = freqs[spectrumMaskBand]
freqAtBandMaxPower = bandFreqs[maxBinInBand]
# find power in windows around peak, divide by power in rest of spectrum
# to get SNR
spectrumMaskPeak = np.logical_and(freqs>freqAtBandMaxPower-s2nWdth/2, freqs < freqAtBandMaxPower + s2nWdth/2)
snr = np.nanmean(sm_power[spectrumMaskPeak]) / np.nanmean(sm_power[~spectrumMaskPeak])
# collect all the following keywords into a dict for output
dictKeys = ('sm_power','freqs', 'spectrumMaskPeak', 'power','freqBand',
'freqAtBandMaxPower', 'bandMaxPower', 'xmax', 'ymax', 'snr', 'kernelSigma', 'binsPerHz')
outDict = dict.fromkeys(dictKeys,np.nan)
for thiskey in outDict.keys():
outDict[thiskey] = locals()[thiskey]# neat trick: locals is a dict that holds all locally scoped variables
if plot:
if ax is None:
plt.figure()
ax = plt.gca()
ax.plot(freqs, power, alpha=0.5, color=[0.8627, 0.8627, 0.8627])
# ax.hold(1)
ax.plot(freqs, sm_power)
r = Rectangle((freqBand[0],0), width=np.diff(freqBand)[0], height=np.diff(ax.get_ylim())[0], alpha=0.25, color='r', ec='none')
ax.add_patch(r)
ax.set_xlim(0,xmax)
ax.set_ylim(0, bandMaxPower / 0.8)
ax.set_xlabel('Frequency')
ax.set_ylabel('Power')
ax.text(x = freqBand[1] / 0.9, y = bandMaxPower, s = str(freqAtBandMaxPower)[0:4], fontsize=20)
return ax
def plotClusterSpace(self, tetrode, clusters=None, ax=None, bins=256,**kwargs):
"""
Plots the cluster space for the given tetrode
Parameters
----------
tetrode : int
the tetrode cluster space to plot
clusters : int or list or np.array
the clusters to colour in
ax : matplotlib.pyplot.axis
the axis to plot into
bins : int
the number of bins to use in the histogram
**kwargs :
can include a param keyword for the parameter to construct the
histogram from - this defaults to amplitude ('Amp') but can be any
valid key in the getParam method of the Tetrode class
Returns
-------
fig: handle to figure window
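Examples
--------
A minimal usage sketch (hypothetical tetrode/ cluster numbers):
>>> fig = T.plotClusterSpace(1, clusters=[1, 2], param='Amp')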
"""
if clusters is not None and not isinstance(clusters, (np.ndarray, list)):
clusters = [clusters] # ie needs to be iterable
waves = self.TETRODE[tetrode].waveforms
if self.TETRODE[tetrode].volts:
waves = (waves * 128) / self.TETRODE[tetrode].scaling[:, np.newaxis]
waves = waves.astype(int)
cutfile = self.TETRODE[tetrode].cut
if cutfile is not None:
cutfile = np.array(cutfile)
if 'param' in kwargs.keys():
param = kwargs['param']
else:
param = 'Amp'
amps = self.TETRODE[tetrode].getParam(waves, param=param)
bad_electrodes = np.setdiff1d(np.array(range(4)),np.array(np.sum(amps,0).nonzero())[0])
cmap = np.tile(tcols.colours[0],(bins,1))
cmap[0] = (1,1,1)
cmap = colors.ListedColormap(cmap)
cmap._init()
alpha_vals = np.ones(cmap.N+3)
alpha_vals[0] = 0
cmap._lut[:,-1] = alpha_vals
cmb = combinations(range(4),2)
if 'figure' in kwargs.keys():
fig = kwargs.pop('figure')
else:
fig = plt.figure()
if ax is None:
ax = fig.add_subplot(111)
else:
ax = ax
ax.axis('off')
# fig = plt.gcf()
rect = ax.get_position().bounds
grid = ImageGrid(fig, rect, nrows_ncols= (2,3), axes_pad=0.1)
if 'Amp' in param:
myRange = [[0,256],[0,256]]
else:
myRange = None
for i, c in enumerate(cmb):
if c not in bad_electrodes:
H = np.histogram2d(amps[:,c[0]], amps[:,c[1]], range = myRange, bins=bins)
grid[i].imshow(H[0], cmap=cmap, interpolation='nearest')
if clusters is not None:
for thisclust in clusters:
if 'clustColour' in kwargs.keys():
clustColour = kwargs['clustColour']
else:
clustColour = tcols.colours[thisclust]
clustidx = (cutfile==thisclust).nonzero()[0]
H = np.histogram2d(amps[clustidx,c[0]],amps[clustidx,c[1]], range=myRange, bins=bins)
H = H[0]
H = signal.convolve2d(H, np.ones((3, 3)), mode='same')
clustCMap = np.tile(clustColour,(bins,1))
clustCMap[0] = (1,1,1)
clustCMap = colors.ListedColormap(clustCMap)
clustCMap._init()
clustCMap._lut[:,-1] = alpha_vals
grid[i].imshow(H, cmap=clustCMap, interpolation='nearest')
s = str(c[0]+1) + ' v ' + str(c[1]+1)
grid[i].text(0.05,0.95, s, va='top', ha='left', size='small', color='k', transform=grid[i].transAxes)
grid[i].set_xlim([0,bins])
grid[i].set_ylim([0,bins])
grid[i].tick_params(axis='both', which='both', left='off', right='off',
bottom='off', top='off')
plt.setp([a.get_xticklabels() for a in grid], visible=False)
plt.setp([a.get_yticklabels() for a in grid], visible=False)
return fig
def plotXCorr(self, tetrode, clusters, ax=None, Trange=(-500,500), bins=None, annotate=True, **kwargs):
"""
Plots the temporal autocorrelogram (defaults to +/- 500ms)
TODO: needs to be able to take in two tetrodes & make sure Trange in ms
Parameters
----------
tetrode : int
clusters : int or list
ax : matplotlib.Axes
The axes to plot into. If None a new figure window is created
Trange : two-tuple
The range over which to examine the events. Zero time is the occurrence of the event
bins : int
The number of bins to assign the data to
annotate : bool
Whether to add the cluster identities to the figure axis
**kwargs
if 'add_peak_rate' is in the kwargs then that is also added to the axes
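Examples
--------
A minimal sketch (hypothetical tetrode/ cluster numbers); a single
cluster gives the autocorrelogram, two clusters the cross-correlogram:
>>> ax, h = T.plotXCorr(1, 1)
>>> ax, h = T.plotXCorr(1, [1, 2])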
"""
if not isinstance(clusters, (np.ndarray, list)):
clusters = [clusters]
if not isinstance(tetrode, (np.ndarray, list)):
tetrode = [tetrode]
duration = np.diff(Trange)
if bins is None:
bins = 201
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if len(clusters) == 1:
cluster_a = cluster_b = clusters[0]
elif len(clusters) == 2:
cluster_a = clusters[0]
cluster_b = clusters[1]
if len(tetrode) == 1:
tetrode_a = tetrode[0]
tetrode_b = None
elif len(tetrode) == 2:
tetrode_a = tetrode[0]
tetrode_b = tetrode[1]
Trange = np.array(Trange)
timebase = self.TETRODE[tetrode_a].timebase
x1 = self.TETRODE[tetrode_a].getClustTS(cluster_a) / (timebase/1000)
if tetrode_b is None:
if cluster_b is None:
x2 = x1
cluster_b = cluster_a
else:
x2 = self.TETRODE[tetrode_a].getClustTS(cluster_b) / (timebase/1000)
else:
x2 = self.TETRODE[tetrode_b].getClustTS(cluster_b) / (timebase/1000)
if self.posFilter:
idx = np.nonzero(~self.POS.xy.mask[0])[0] # indices to keep
x1PosSamp = (x1 / (1000 / self.POS.pos_sample_rate)).astype(int)
x1 = x1[np.in1d(x1PosSamp, idx)]
if cluster_b is not None:
x2PosSamp = (x2 / (1000 / self.POS.pos_sample_rate)).astype(int)
x2 = x2[np.in1d(x2PosSamp, idx)]
y = self.spikecalcs.xcorr(x1, x2, Trange=Trange)
h = ax.hist(y[y != 0], bins=bins, range=Trange, color='k', histtype='stepfilled')
ax.set_xlim(Trange)
if annotate:
if cluster_b is None:
cond_rate = np.count_nonzero(y == 0) / np.float(duration)
ax.text(0.55, .9, "{0:.4}".format(str(cond_rate)), ha='center', va='center',
transform=ax.transAxes)
else:
if np.logical_or((tetrode_a == tetrode_b), tetrode_b is None):
if (cluster_a == cluster_b):
#autocorr being done so get theta modulation
modIdx = self.spikecalcs.thetaModIdx(x1)
ax.set_title('Cluster {0} vs Cluster {1}\ntheta modulation: {2:.4f}'.format(cluster_a, cluster_b, modIdx))
if "add_peak_rate" in kwargs:
if kwargs['add_peak_rate']:
ax.annotate('{:.2f}'.format(np.max(modIdx)), (0.15,0.9), \
xycoords='axes fraction', textcoords='axes fraction', color='k', size=30, weight='bold', ha='center', va='center')
# ax.set_title('Cluster ' + str(cluster_a) + ' vs Cluster ' + str(cluster_b) +'\ntheta modulation=' + str(modIdx))
else:
ax.set_title('Cluster ' + str(cluster_a) + ' vs Cluster ' + str(cluster_b))
ax.set_xlabel('Time(ms)')
ax.set_xticks((Trange[0], 0, Trange[1]))
ax.set_xticklabels((str(Trange[0]), '0', str(Trange[1])))
ax.tick_params(axis='both', which='both', left='off', right='off',
bottom='off', top='off')
ax.set_yticklabels('')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
return ax, h
def getThetaModIdx(self, tetrode, cluster):
"""
Calculates the theta modulation index of a clusters autocorrelogram
as the difference between the first trough and second peak of the
autocorrelogram (actually the difference over their sum)
Parameters
--------------
tetrode : int
The tetrode the cluster is on
cluster : int
The cluster identity
Returns
-------------
thetaModulation : float
The depth of theta modulation
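Examples
--------
A minimal usage sketch (hypothetical tetrode/ cluster numbers):
>>> theta_mod = T.getThetaModIdx(1, 1)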
"""
x1 = self.TETRODE[tetrode].getClustTS(cluster) / float(self.TETRODE[tetrode].timebase) * 1000
if self.posFilter:
idx = np.nonzero(~self.POS.xy.mask[0])[0] # indices to keep
x1PosSamp = (x1 / (1000 / self.POS.pos_sample_rate)).astype(int)
x1 = x1[
|
np.in1d(x1PosSamp, idx)
|
numpy.in1d
|
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the Projected Gradient Descent attack `ProjectedGradientDescent` as an iterative method in which,
after each iteration, the perturbation is projected on an lp-ball of specified radius (in addition to clipping the
values of the adversarial sample so that it lies in the permitted data range). This is the attack proposed by Madry et
al. for adversarial training.
| Paper link: https://arxiv.org/abs/1706.06083
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Optional, Union, TYPE_CHECKING
import numpy as np
from tqdm.auto import tqdm
from art.config import ART_NUMPY_DTYPE
from art.estimators.estimator import BaseEstimator, LossGradientsMixin
from art.estimators.classification.classifier import ClassifierMixin
from art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_numpy import (
ProjectedGradientDescentCommon,
)
from art.utils import compute_success, random_sphere, compute_success_array
if TYPE_CHECKING:
import torch
from art.estimators.classification.pytorch import PyTorchClassifier
logger = logging.getLogger(__name__)
class ProjectedGradientDescentPyTorch(ProjectedGradientDescentCommon):
"""
The Projected Gradient Descent attack is an iterative method in which, after each iteration, the perturbation is
projected on an lp-ball of specified radius (in addition to clipping the values of the adversarial sample so that it
lies in the permitted data range). This is the attack proposed by Madry et al. for adversarial training.
| Paper link: https://arxiv.org/abs/1706.06083
"""
_estimator_requirements = (BaseEstimator, LossGradientsMixin, ClassifierMixin)
def __init__(
self,
estimator: Union["PyTorchClassifier"],
norm: Union[int, float, str] = np.inf,
eps: Union[int, float, np.ndarray] = 0.3,
eps_step: Union[int, float, np.ndarray] = 0.1,
max_iter: int = 100,
targeted: bool = False,
num_random_init: int = 0,
batch_size: int = 32,
random_eps: bool = False,
verbose: bool = True,
):
"""
Create a :class:`.ProjectedGradientDescentPyTorch` instance.
        :param estimator: A trained estimator.
:param norm: The norm of the adversarial perturbation. Possible values: "inf", np.inf, 1 or 2.
:param eps: Maximum perturbation that the attacker can introduce.
:param eps_step: Attack step size (input variation) at each iteration.
:param random_eps: When True, epsilon is drawn randomly from truncated normal distribution. The literature
suggests this for FGSM based training to generalize across different epsilons. eps_step is
modified to preserve the ratio of eps / eps_step. The effectiveness of this method with PGD
is untested (https://arxiv.org/pdf/1611.01236.pdf).
:param max_iter: The maximum number of iterations.
:param targeted: Indicates whether the attack is targeted (True) or untargeted (False).
        :param num_random_init: Number of random initialisations within the epsilon ball. For num_random_init=0 the
                                attack starts at the original input.
:param batch_size: Size of the batch on which adversarial samples are generated.
:param verbose: Show progress bars.
"""
if not estimator.all_framework_preprocessing:
raise NotImplementedError(
"The framework-specific implementation only supports framework-specific preprocessing."
)
super(ProjectedGradientDescentPyTorch, self).__init__(
estimator=estimator,
norm=norm,
eps=eps,
eps_step=eps_step,
max_iter=max_iter,
targeted=targeted,
num_random_init=num_random_init,
batch_size=batch_size,
random_eps=random_eps,
verbose=verbose,
)
def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs.
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
(nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect
(explained in this paper: https://arxiv.org/abs/1611.01236). Default is `None`.
:param mask: An array with a mask broadcastable to input `x` defining where to apply adversarial perturbations.
Shape needs to be broadcastable to the shape of x and can also be of the same shape as `x`. Any
features for which the mask is zero will not be adversarially perturbed.
:type mask: `np.ndarray`
:return: An array holding the adversarial examples.
"""
import torch # lgtm [py/repeated-import]
mask = self._get_mask(x, **kwargs)
# Ensure eps is broadcastable
self._check_compatibility_input_and_eps(x=x)
# Check whether random eps is enabled
self._random_eps()
# Set up targets
targets = self._set_targets(x, y)
# Create dataset
if mask is not None:
# Here we need to make a distinction: if the masks are different for each input, we need to index
# those for the current batch. Otherwise (i.e. mask is meant to be broadcasted), keep it as it is.
if len(mask.shape) == len(x.shape):
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x.astype(ART_NUMPY_DTYPE)),
torch.from_numpy(targets.astype(ART_NUMPY_DTYPE)),
torch.from_numpy(mask.astype(ART_NUMPY_DTYPE)),
)
else:
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x.astype(ART_NUMPY_DTYPE)),
torch.from_numpy(targets.astype(ART_NUMPY_DTYPE)),
torch.from_numpy(np.array([mask.astype(ART_NUMPY_DTYPE)] * x.shape[0])),
)
else:
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x.astype(ART_NUMPY_DTYPE)), torch.from_numpy(targets.astype(ART_NUMPY_DTYPE)),
)
data_loader = torch.utils.data.DataLoader(
dataset=dataset, batch_size=self.batch_size, shuffle=False, drop_last=False
)
# Start to compute adversarial examples
adv_x = x.astype(ART_NUMPY_DTYPE)
# Compute perturbation with batching
for (batch_id, batch_all) in enumerate(
tqdm(data_loader, desc="PGD - Batches", leave=False, disable=not self.verbose)
):
if mask is not None:
(batch, batch_labels, mask_batch) = batch_all[0], batch_all[1], batch_all[2]
else:
(batch, batch_labels, mask_batch) = batch_all[0], batch_all[1], None
batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size
# Compute batch_eps and batch_eps_step
if isinstance(self.eps, np.ndarray):
if len(self.eps.shape) == len(x.shape) and self.eps.shape[0] == x.shape[0]:
batch_eps = self.eps[batch_index_1:batch_index_2]
batch_eps_step = self.eps_step[batch_index_1:batch_index_2]
else:
batch_eps = self.eps
batch_eps_step = self.eps_step
else:
batch_eps = self.eps
batch_eps_step = self.eps_step
for rand_init_num in range(max(1, self.num_random_init)):
if rand_init_num == 0:
# first iteration: use the adversarial examples as they are the only ones we have now
adv_x[batch_index_1:batch_index_2] = self._generate_batch(
x=batch, targets=batch_labels, mask=mask_batch, eps=batch_eps, eps_step=batch_eps_step
)
else:
adversarial_batch = self._generate_batch(
x=batch, targets=batch_labels, mask=mask_batch, eps=batch_eps, eps_step=batch_eps_step
)
# return the successful adversarial examples
attack_success = compute_success_array(
self.estimator,
batch,
batch_labels,
adversarial_batch,
self.targeted,
batch_size=self.batch_size,
)
adv_x[batch_index_1:batch_index_2][attack_success] = adversarial_batch[attack_success]
logger.info(
"Success rate of attack: %.2f%%",
100 * compute_success(self.estimator, x, targets, adv_x, self.targeted, batch_size=self.batch_size),
)
return adv_x
def _generate_batch(
self,
x: "torch.Tensor",
targets: "torch.Tensor",
mask: "torch.Tensor",
eps: Union[int, float, np.ndarray],
eps_step: Union[int, float, np.ndarray],
) -> np.ndarray:
"""
Generate a batch of adversarial samples and return them in an array.
:param x: An array with the original inputs.
:param targets: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)`.
:param mask: An array with a mask to be applied to the adversarial perturbations. Shape needs to be
broadcastable to the shape of x. Any features for which the mask is zero will not be adversarially
perturbed.
:param eps: Maximum perturbation that the attacker can introduce.
:param eps_step: Attack step size (input variation) at each iteration.
:return: Adversarial examples.
"""
import torch # lgtm [py/repeated-import]
inputs = x.to(self.estimator.device)
targets = targets.to(self.estimator.device)
adv_x = torch.clone(inputs)
if mask is not None:
mask = mask.to(self.estimator.device)
for i_max_iter in range(self.max_iter):
adv_x = self._compute_torch(
adv_x, inputs, targets, mask, eps, eps_step, self.num_random_init > 0 and i_max_iter == 0,
)
return adv_x.cpu().detach().numpy()
def _compute_perturbation(
self, x: "torch.Tensor", y: "torch.Tensor", mask: Optional["torch.Tensor"]
) -> "torch.Tensor":
"""
Compute perturbations.
:param x: Current adversarial examples.
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
(nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect
(explained in this paper: https://arxiv.org/abs/1611.01236). Default is `None`.
:param mask: An array with a mask broadcastable to input `x` defining where to apply adversarial perturbations.
Shape needs to be broadcastable to the shape of x and can also be of the same shape as `x`. Any
features for which the mask is zero will not be adversarially perturbed.
:return: Perturbations.
"""
import torch # lgtm [py/repeated-import]
# Pick a small scalar to avoid division by 0
tol = 10e-8
# Get gradient wrt loss; invert it if attack is targeted
grad = self.estimator.loss_gradient(x=x, y=y) * (1 - 2 * int(self.targeted))
# Apply mask
if mask is not None:
            grad = torch.where(mask == 0.0, torch.tensor(0.0, device=grad.device), grad)
# Apply norm bound
if self.norm in ["inf", np.inf]:
grad = grad.sign()
elif self.norm == 1:
ind = tuple(range(1, len(x.shape)))
grad = grad / (torch.sum(grad.abs(), dim=ind, keepdims=True) + tol) # type: ignore
elif self.norm == 2:
ind = tuple(range(1, len(x.shape)))
grad = grad / (torch.sqrt(torch.sum(grad * grad, axis=ind, keepdims=True)) + tol) # type: ignore
assert x.shape == grad.shape
return grad
def _apply_perturbation(
self, x: "torch.Tensor", perturbation: "torch.Tensor", eps_step: Union[int, float, np.ndarray]
) -> "torch.Tensor":
"""
Apply perturbation on examples.
:param x: Current adversarial examples.
:param perturbation: Current perturbations.
:param eps_step: Attack step size (input variation) at each iteration.
:return: Adversarial examples.
"""
import torch # lgtm [py/repeated-import]
eps_step = np.array(eps_step, dtype=ART_NUMPY_DTYPE)
x = x + torch.tensor(eps_step).to(self.estimator.device) * perturbation
if self.estimator.clip_values is not None:
clip_min, clip_max = self.estimator.clip_values
x = torch.max(
torch.min(x, torch.tensor(clip_max).to(self.estimator.device)),
torch.tensor(clip_min).to(self.estimator.device),
)
return x
def _compute_torch(
self,
x: "torch.Tensor",
x_init: "torch.Tensor",
y: "torch.Tensor",
mask: "torch.Tensor",
eps: Union[int, float, np.ndarray],
eps_step: Union[int, float, np.ndarray],
random_init: bool,
) -> "torch.Tensor":
"""
Compute adversarial examples for one iteration.
:param x: Current adversarial examples.
:param x_init: An array with the original inputs.
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
(nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect
(explained in this paper: https://arxiv.org/abs/1611.01236).
:param mask: An array with a mask broadcastable to input `x` defining where to apply adversarial perturbations.
Shape needs to be broadcastable to the shape of x and can also be of the same shape as `x`. Any
features for which the mask is zero will not be adversarially perturbed.
:param eps: Maximum perturbation that the attacker can introduce.
:param eps_step: Attack step size (input variation) at each iteration.
        :param random_init: Random initialisation within the epsilon ball. For random_init=False the attack starts at
                            the original input.
:return: Adversarial examples.
"""
import torch # lgtm [py/repeated-import]
if random_init:
n = x.shape[0]
m =
|
np.prod(x.shape[1:])
|
numpy.prod
|
import os
import numpy as np
import sys
from skeletons.utilities import dataIO
template ='''#!/bin/bash
#
# add all other SBATCH directives here
#
#SBATCH -p {PARTITION} # use the COX partition
#SBATCH -n 1 # Number of cores
#SBATCH -N 1 # Ensure that all cores are on one machine
#SBATCH --mem={MEMORY} # CPU memory in MBs
#SBATCH -t 0-{HOURS}:00 # time in dd-hh:mm to run the code for
#SBATCH --mail-type=NONE # send all email types (start, end, error, etc.)
#SBATCH --mail-user=<EMAIL> # email address to send to
#SBATCH -o {OUTPUT_PATH}/{JOBNAME}.out # where to write the log files
#SBATCH -e {ERROR_PATH}/{JOBNAME}.err # where to write the error files
#SBATCH -J thinning_{JOBNAME} # jobname given to job
module load Anaconda3/5.0.1-fasrc02
source activate fillholes
export PYTHONPATH=$PYTHONPATH:{RUNCODEDIRECTORY}
cd {RUNCODEDIRECTORY}skeletons/examples/
python scripts/{COMMAND}
echo "DONE"
'''
def makeFolder(folder_path):
if os.path.exists(folder_path):
raise ValueError("Folderpath " + folder_path + " already exists!")
else:
os.mkdir(folder_path)
def writeFile(filename, data):
if os.path.exists(filename):
raise ValueError("File " + filename + " already exists!")
else:
with open(filename, 'w') as fd:
fd.write(data)
if(len(sys.argv))!=5:
raise ValueError(" Scripts needs 4 cluster partitions as input, put 0 if not less desired")
else:
n_part = 0
partitions = ["0","0","0","0"]
if sys.argv[1]!="0":
partitions[0] = sys.argv[1]
n_part +=1
if sys.argv[2]!="0":
partitions[1] = sys.argv[2]
n_part +=1
if sys.argv[3]!="0":
partitions[2] = sys.argv[3]
n_part +=1
if sys.argv[4]!="0":
partitions[3] = sys.argv[4]
n_part +=1
files_written = 0
prefix = "Zebrafinch"
ID_max = 410
refinement_chunksize = 50
error_path = dataIO.OutputDirectory(prefix) + "error_files/"
output_path = dataIO.OutputDirectory(prefix) + "output_files/"
slurm_path = dataIO.OutputDirectory(prefix)+"slurm_files/"
code_run_path = dataIO.CodeDirectory(prefix)
block_size = dataIO.Blocksize(prefix)
start_blocks = dataIO.StartBlocks(prefix)
n_blocks = dataIO.NBlocks(prefix)
block_volume = block_size[0]*block_size[1]*block_size[2]
memory = str(int(block_volume*3*8*3/1000/1000))
memory_step4 = str(50000)
run_hours = str(int(block_volume/(1024*1024*1024)*2))
template = template.replace('{RUNCODEDIRECTORY}', code_run_path)
template = template.replace('{HOURS}', run_hours)
SLURM_OUTPUT_FOLDER = slurm_path
step01folderpath = SLURM_OUTPUT_FOLDER+"step01/"
step02folderpath = SLURM_OUTPUT_FOLDER+"step02/"
step03folderpath = SLURM_OUTPUT_FOLDER+"step03/"
step04folderpath = SLURM_OUTPUT_FOLDER+"step04/"
makeFolder(step01folderpath)
makeFolder(step02folderpath)
makeFolder(step03folderpath)
makeFolder(step04folderpath)
# write slurm for step one
for bz in range(start_blocks[0], start_blocks[0] + n_blocks[0]):
command = "step1.py" + " " + str(bz)
jobname = "S1"+"_" +"z"+str(bz).zfill(2)
t = template
t = t.replace('{JOBNAME}', jobname)
t = t.replace('{COMMAND}', command)
t = t.replace('{ERROR_PATH}', error_path)
t = t.replace('{OUTPUT_PATH}', output_path)
t = t.replace('{MEMORY}', memory)
t = t.replace('{PARTITION}', partitions[np.random.randint(0,n_part)])
filename = step01folderpath + jobname + ".slurm"
writeFile(filename, t)
files_written += 1
# write slurm for step two
for bz in range(start_blocks[0], start_blocks[0] + n_blocks[0]):
command = "step2.py" + " " + str(bz)
jobname = "S2"+"_" +"z"+str(bz).zfill(2)
t = template
t = t.replace('{JOBNAME}', jobname)
t = t.replace('{COMMAND}', command)
t = t.replace('{ERROR_PATH}', error_path)
t = t.replace('{OUTPUT_PATH}', output_path)
t = t.replace('{MEMORY}', memory)
t = t.replace('{PARTITION}', partitions[np.random.randint(0,n_part)])
filename = step02folderpath + jobname + ".slurm"
writeFile(filename, t)
files_written += 1
# write slurm for step three
for bz in range(start_blocks[0], start_blocks[0] + n_blocks[0]):
for by in range(start_blocks[1], start_blocks[1] + n_blocks[1]):
for bx in range(start_blocks[2], start_blocks[2] + n_blocks[2]):
command = "step3.py" + " " + str(bz) + " " + str(by) + " " + str(bx)
jobname = "S3"+"_"+ "z"+str(bz).zfill(2)+"y"+str(by).zfill(2)+"x"+str(bx).zfill(2)
t = template
t = t.replace('{JOBNAME}', jobname)
t = t.replace('{COMMAND}', command)
t = t.replace('{ERROR_PATH}', error_path)
t = t.replace('{OUTPUT_PATH}', output_path)
t = t.replace('{MEMORY}', memory)
t = t.replace('{PARTITION}', partitions[np.random.randint(0,n_part)])
filename = step03folderpath + jobname + ".slurm"
writeFile(filename, t)
files_written += 1
ID_range = np.arange(1,ID_max)
for ID_start in ID_range[::refinement_chunksize]:
ID_end = ID_start+refinement_chunksize-1
command = "step4.py" + " " + str(ID_start) + " " + str(ID_end)
jobname = "S4" + "_" + str(ID_start) + "_" + str(ID_end)
t = template
t = t.replace('{JOBNAME}', jobname)
t = t.replace('{COMMAND}', command)
t = t.replace('{ERROR_PATH}', error_path)
t = t.replace('{OUTPUT_PATH}', output_path)
t = t.replace('{MEMORY}', str(memory_step4))
t = t.replace('{PARTITION}', partitions[
|
np.random.randint(0,n_part)
|
numpy.random.randint
|
import sys, numpy as np
import proxmin
from functools import partial
import logging
logging.basicConfig()
logger = logging.getLogger("proxmin")
logger.setLevel(logging.INFO)
# location of true minimum of f
dX = np.array([1, 0.5])
def f(X):
"""Shifted parabola"""
return np.sum((X - dX) ** 2, axis=-1)
def grad_f(X):
return 2 * (X - dX)
def step_f(X, it=0):
L = 2
slowdown = 0.1 # to see behavior better
return slowdown * 1 / L
def prox_circle(X, step):
"""Projection onto circle"""
center = np.array([0, 0])
dX = X - center
radius = 0.5
phi = np.arctan2(dX[1], dX[0])
return center + radius * np.array([np.cos(phi), np.sin(phi)])
def prox_xline(x, step):
"""Projection onto line in x"""
if not np.isscalar(x):
x = x[0]
if x > 0.5:
return np.array([0.5])
else:
return np.array([x])
def prox_yline(y, step):
"""Projection onto line in y"""
if not np.isscalar(y):
y = y[0]
if y > -0.75:
return np.array([-0.75])
else:
return np.array([y])
def prox_line(X, step):
"""2D projection onto 2 lines"""
return np.concatenate((prox_xline(X[0], step), prox_yline(X[1], step)))
def prox_lim(X, step, boundary=None):
"""Proximal projection operator"""
if boundary == "circle":
return prox_circle(X, step)
if boundary == "line":
return prox_line(X, step)
# default: do nothing
return X
def prox_gradf(X, step):
"""Gradient step"""
return X - step * grad_f(X)
def prox_gradf_lim(X, step, boundary=None):
"""Forward-backward step: gradient, followed by projection"""
return prox_lim(prox_gradf(X, step), step, boundary=boundary)
def plotResults(trace, label="", boundary=None):
import matplotlib.pyplot as plt
import matplotlib.patches as patches
lims = -2, 2
X = np.dstack(
np.meshgrid(
|
np.linspace(lims[0], lims[1], 1000)
|
numpy.linspace
|
"""
This module contains the `PostProcessor` class.
It contains all advanced postprocessing functionalities that require Python 3.x packages like NumPy and Matplotlib.
"""
from __future__ import absolute_import
import os
from .PostProcessor import PostProcessor as Post
from ..generic.general_methods import aedt_exception_handler
import time
import math
import warnings
try:
import numpy as np
except ImportError:
warnings.warn("The NumPy module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install numpy\n\nRequires CPython.")
try:
import pyvista as pv
pyvista_available = True
except ImportError:
warnings.warn("The PyVista module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install pyvista\n\nRequires CPython.")
try:
from IPython.display import Image, display
ipython_available = True
except ImportError:
warnings.warn("The Ipython module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install ipython\n\nRequires CPython.")
try:
import matplotlib.pyplot as plt
except ImportError:
warnings.warn("The Matplotlib module is required to run some functionalities of PostProcess.\n"
"Install with \n\npip install matplotlib\n\nRequires CPython.")
def is_float(istring):
"""Convert a string to a float.
Parameters
----------
istring : str
String to convert to a float.
Returns
-------
float
        Converted float when successful, ``0`` when conversion fails.
"""
try:
return float(istring.strip())
except Exception:
return 0
class PostProcessor(Post):
"""Contains advanced postprocessing functionalities that require Python 3.x packages like NumPy and Matplotlib.
Parameters
----------
parent :
Inherited parent object.
"""
def __init__(self, parent):
Post.__init__(self, parent)
@aedt_exception_handler
def nb_display(self, show_axis=True, show_grid=True, show_ruler=True):
"""Show the Jupyter Notebook display.
.. note::
Jupyter Notebook is not supported by IronPython.
Parameters
----------
show_axis : bool, optional
Whether to show the axes. The default is ``True``.
show_grid : bool, optional
Whether to show the grid. The default is ``True``.
show_ruler : bool, optional
Whether to show the ruler. The default is ``True``.
Returns
-------
:class:`IPython.core.display.Image`
Jupyter notebook image.
"""
file_name = self.export_model_picture(show_axis=show_axis, show_grid=show_grid, show_ruler=show_ruler)
return Image(file_name, width=500)
@aedt_exception_handler
def get_efields_data(self, setup_sweep_name='', ff_setup="Infinite Sphere1", freq='All'):
"""Compute Etheta and EPhi.
.. warning::
This method requires NumPy to be installed on your machine.
Parameters
----------
setup_sweep_name : str, optional
Name of the setup for computing the report. The default is ``""``, in
which case the nominal adaptive is applied.
ff_setup : str, optional
Far field setup. The default is ``"Infinite Sphere1"``.
freq : str, optional
The default is ``"All"``.
Returns
-------
        dict
            Dictionary with one entry per excitation source; each value is
            ``[theta_range, phi_range, Etheta, Ephi]``.
"""
if not setup_sweep_name:
setup_sweep_name = self._parent.nominal_adaptive
results_dict = {}
all_sources = self.post_osolution.GetAllSources()
# assuming only 1 mode
all_sources_with_modes = [s + ':1' for s in all_sources]
for n, source in enumerate(all_sources_with_modes):
edit_sources_ctxt = [["IncludePortPostProcessing:=", False, "SpecifySystemPower:=", False]]
for m, each in enumerate(all_sources_with_modes):
if n == m: # set only 1 source to 1W, all the rest to 0
mag = 1
else:
mag = 0
phase = 0
edit_sources_ctxt.append(
["Name:=", "{}".format(each), "Magnitude:=", "{}W".format(mag), "Phase:=", "{}deg".format(phase)])
self.post_osolution.EditSources(edit_sources_ctxt)
ctxt = ['Context:=', ff_setup]
sweeps = ['Theta:=', ['All'], 'Phi:=', ['All'], 'Freq:=', [freq]]
trace_name = "rETheta"
solnData = self.get_far_field_data(setup_sweep_name=setup_sweep_name, domain=ff_setup,
expression=trace_name)
data = solnData.nominal_variation
theta_vals = np.degrees(np.array(data.GetSweepValues('Theta')))
phi_vals = np.degrees(np.array(data.GetSweepValues('Phi')))
# phi is outer loop
theta_unique = np.unique(theta_vals)
phi_unique = np.unique(phi_vals)
theta_range = np.linspace(np.min(theta_vals), np.max(theta_vals), np.size(theta_unique))
phi_range = np.linspace(np.min(phi_vals), np.max(phi_vals), np.size(phi_unique))
real_theta = np.array(data.GetRealDataValues(trace_name))
imag_theta = np.array(data.GetImagDataValues(trace_name))
trace_name = "rEPhi"
solnData = self.get_far_field_data(setup_sweep_name=setup_sweep_name, domain=ff_setup,
expression=trace_name)
data = solnData.nominal_variation
real_phi = np.array(data.GetRealDataValues(trace_name))
imag_phi = np.array(data.GetImagDataValues(trace_name))
Etheta = np.vectorize(complex)(real_theta, imag_theta)
Ephi = np.vectorize(complex)(real_phi, imag_phi)
source_name_without_mode = source.replace(':1', '')
results_dict[source_name_without_mode] = [theta_range, phi_range, Etheta, Ephi]
return results_dict
@aedt_exception_handler
def ff_sum_with_delta_phase(self, ff_data, xphase=0, yphase=0):
"""Generate a far field sum with a delta phase.
Parameters
----------
ff_data :
xphase : float, optional
Phase in the X-axis direction. The default is ``0``.
yphase : float, optional
Phase in the Y-axis direction. The default is ``0``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
array_size = [4, 4]
loc_offset = 2
rETheta = ff_data[2]
rEPhi = ff_data[3]
        weight = np.zeros((array_size[0], array_size[1]), dtype=complex)
        mag = np.ones((array_size[0], array_size[1]))
for m in range(array_size[0]):
for n in range(array_size[1]):
                mag_val = mag[m][n]
                ang = np.radians(xphase * m) + np.radians(yphase * n)
                weight[m][n] = np.sqrt(mag_val) * np.exp(1j * ang)
return True
@aedt_exception_handler
def _triangle_vertex(self, elements_nodes, num_nodes_per_element, take_all_nodes=True):
"""
Parameters
----------
elements_nodes :
num_nodes_per_element :
take_all_nodes : bool, optional
The default is ``True``.
Returns
-------
"""
trg_vertex = []
if num_nodes_per_element == 10 and take_all_nodes:
for e in elements_nodes:
trg_vertex.append([e[0], e[1], e[3]])
trg_vertex.append([e[1], e[2], e[4]])
trg_vertex.append([e[1], e[4], e[3]])
trg_vertex.append([e[3], e[4], e[5]])
trg_vertex.append([e[9], e[6], e[8]])
trg_vertex.append([e[6], e[0], e[3]])
trg_vertex.append([e[6], e[3], e[8]])
trg_vertex.append([e[8], e[3], e[5]])
trg_vertex.append([e[9], e[7], e[8]])
trg_vertex.append([e[7], e[2], e[4]])
trg_vertex.append([e[7], e[4], e[8]])
trg_vertex.append([e[8], e[4], e[5]])
trg_vertex.append([e[9], e[7], e[6]])
trg_vertex.append([e[7], e[2], e[1]])
trg_vertex.append([e[7], e[1], e[6]])
trg_vertex.append([e[6], e[1], e[0]])
elif num_nodes_per_element == 10 and not take_all_nodes:
for e in elements_nodes:
trg_vertex.append([e[0], e[2], e[5]])
trg_vertex.append([e[9], e[0], e[5]])
trg_vertex.append([e[9], e[2], e[0]])
trg_vertex.append([e[9], e[2], e[5]])
elif num_nodes_per_element == 6 and not take_all_nodes:
for e in elements_nodes:
trg_vertex.append([e[0], e[2], e[5]])
elif num_nodes_per_element == 6 and take_all_nodes:
for e in elements_nodes:
trg_vertex.append([e[0], e[1], e[3]])
trg_vertex.append([e[1], e[2], e[4]])
trg_vertex.append([e[1], e[4], e[3]])
trg_vertex.append([e[3], e[4], e[5]])
elif num_nodes_per_element == 4 and take_all_nodes:
for e in elements_nodes:
trg_vertex.append([e[0], e[1], e[3]])
trg_vertex.append([e[1], e[2], e[3]])
trg_vertex.append([e[0], e[1], e[2]])
trg_vertex.append([e[0], e[2], e[3]])
elif num_nodes_per_element == 3:
trg_vertex = elements_nodes
return trg_vertex
@aedt_exception_handler
def _plot_from_aedtplt(self, aedtplt_files=None, imageformat="jpg", view="iso", plot_type="Full",
plot_label="Temperature", model_color="#8faf8f", show_model_edge=False, off_screen=False):
"""Export the 3D field solver mesh, fields, or both mesh and fields as images using Python Plotly.
.. note::
This method is currently supported only on Windows using CPython.
Parameters
----------
aedtplt_files : str or list, optional
Names of the one or more AEDTPLT files generated by AEDT. The default is ``None``.
imageformat : str, optional
Format of the image file. Options are ``"jpg"``, ``"png"``, ``"svg"``, and
``"webp"``. The default is ``"jpg"``.
view : str, optional
View to export. Options are ``"iso"``, ``"x"`` , ``"y"``, ``"z"``, and ``"all"``.
The default is ``"iso"``. The ``"all"`` option exports all views.
plot_type : str, optional
Type of the plot. The default is ``"Full"``.
plot_label : str, optional
Label for the plot. The default is ``"Temperature"``.
model_color : str, optional
            Color scheme for the 3D model. The default is ``"#8faf8f"``, a light gray-green.
        show_model_edge : bool, optional
            Whether to show the edges of the 3D model. The default
            is ``False``.
off_screen : bool, optional
The default is ``False``.
Returns
-------
list
List of exported files.
"""
start = time.time()
if type(aedtplt_files) is str:
aedtplt_files = [aedtplt_files]
plot = pv.Plotter(off_screen=off_screen)
if not off_screen:
plot.enable_anti_aliasing()
plot.enable_fly_to_right_click()
lines = []
for file in aedtplt_files:
if ".aedtplt" in file:
with open(file, "r") as f:
drawing_found = False
for line in f:
if "$begin Drawing" in line:
drawing_found = True
l_tmp = []
continue
if "$end Drawing" in line:
lines.append(l_tmp)
drawing_found = False
continue
if drawing_found:
l_tmp.append(line)
continue
if "Number of drawing:" in line:
n_drawings = int(line[18:])
continue
elif ".obj" in file:
mesh = pv.read(file)
def create_object_mesh(opacity):
"""Create the mesh.
Parameters
----------
opacity :
Returns
-------
"""
try:
plot.remove_actor("Volumes")
except:
pass
plot.add_mesh(mesh, show_scalar_bar=False, opacity=opacity, cmap=[model_color], name="3D Model",
show_edges=show_model_edge, edge_color=model_color)
plot.add_slider_widget(create_object_mesh, [0, 1], style='modern', value=0.75, pointa=[0.81, 0.98], pointb=[0.95, 0.98], title="Opacity")
filename = os.path.splitext(aedtplt_files[0])[0]
print(filename)
for drawing_lines in lines:
bounding = []
elements = []
nodes_list = []
solution = []
for l in drawing_lines:
if "BoundingBox(" in l:
bounding = l[l.find("(") + 1:-2].split(",")
bounding = [i.strip() for i in bounding]
if "Elements(" in l:
elements = l[l.find("(") + 1:-2].split(",")
elements = [int(i.strip()) for i in elements]
if "Nodes(" in l:
nodes_list = l[l.find("(") + 1:-2].split(",")
nodes_list = [float(i.strip()) for i in nodes_list]
if "ElemSolution(" in l:
# convert list of strings to list of floats
sols = l[l.find("(") + 1:-2].split(",")
sols = [is_float(value) for value in sols]
# sols = [float(i.strip()) for i in sols]
num_solution_per_element = int(sols[2])
sols = sols[3:]
sols = [sols[i:i + num_solution_per_element] for i in range(0, len(sols), num_solution_per_element)]
solution = [sum(i) / num_solution_per_element for i in sols]
nodes = [[nodes_list[i], nodes_list[i + 1], nodes_list[i + 2]] for i in range(0, len(nodes_list), 3)]
num_nodes = elements[0]
num_elements = elements[1]
elements = elements[2:]
element_type = elements[0]
num_nodes_per_element = elements[4]
hl = 5 # header length
elements_nodes = []
for i in range(0, len(elements), num_nodes_per_element + hl):
elements_nodes.append([elements[i + hl + n] for n in range(num_nodes_per_element)])
if solution:
take_all_nodes = True # solution case
else:
take_all_nodes = False # mesh case
trg_vertex = self._triangle_vertex(elements_nodes, num_nodes_per_element, take_all_nodes)
# remove duplicates
nodup_list = [list(i) for i in list(set([frozenset(t) for t in trg_vertex]))]
sols_vertex = []
if solution:
sv = {}
for els, s in zip(elements_nodes, solution):
for el in els:
if el in sv:
sv[el] = (sv[el] + s) / 2
else:
sv[el] = s
sols_vertex = [sv[v] for v in sorted(sv.keys())]
array = [[3] + [j - 1 for j in i] for i in nodup_list]
faces = np.hstack(array)
vertices = np.array(nodes)
surf = pv.PolyData(vertices, faces)
if sols_vertex:
temps = np.array(sols_vertex)
mean = np.mean(temps)
std = np.std(temps)
if np.min(temps) > 0:
log = True
else:
log = False
surf.point_arrays[plot_label] = temps
sargs = dict(title_font_size=10, label_font_size=10, shadow=True, n_labels=9, italic=True, fmt="%.1f",
font_family="arial")
if plot_type == "Clip":
plot.add_text("Full Plot", font_size=15)
if solution:
class MyCustomRoutine():
""" """
def __init__(self, mesh):
self.output = mesh # Expected PyVista mesh type
# default parameters
self.kwargs = {
'min_val': 0.5,
'max_val': 30,
}
def __call__(self, param, value):
self.kwargs[param] = value
self.update()
def update(self):
""" """
# This is where you call your simulation
try:
plot.remove_actor("FieldPlot")
except:
pass
plot.add_mesh(surf, scalars=plot_label, log_scale=log, scalar_bar_args=sargs, cmap='rainbow',
show_edges=False, clim=[self.kwargs['min_val'], self.kwargs['max_val']],
pickable=True, smooth_shading=True, name="FieldPlot")
return
engine = MyCustomRoutine(surf)
plot.add_box_widget(surf, show_edges=False, scalars=plot_label, log_scale=log, scalar_bar_args=sargs,
cmap='rainbow', pickable=True, smooth_shading=True, name="FieldPlot")
plot.add_slider_widget(callback=lambda value: engine('min_val', value),
rng=[np.min(temps), np.max(temps)], title='Lower', style='modern',
value=np.min(temps), pointa=(.5, .98), pointb=(.65, .98))
plot.add_slider_widget(callback=lambda value: engine('max_val', value),
rng=[np.min(temps), np.max(temps)], title='Upper', style='modern',
value=np.max(temps), pointa=(.66, .98), pointb=(.8, .98))
else:
plot.add_box_widget(surf, show_edges=True, line_width=0.1, color="grey", pickable=True, smooth_shading=True)
else:
plot.add_text("Full Plot", font_size=15)
if solution:
class MyCustomRoutine():
""" """
def __init__(self, mesh):
self.output = mesh # Expected PyVista mesh type
# default parameters
self.kwargs = {
'min_val': 0.5,
'max_val': 30,
}
def __call__(self, param, value):
self.kwargs[param] = value
self.update()
def update(self):
""" """
# This is where you call your simulation
try:
plot.remove_actor("FieldPlot")
except:
pass
plot.add_mesh(surf, scalars=plot_label, log_scale=log, scalar_bar_args=sargs, cmap='rainbow',
show_edges=False, clim=[self.kwargs['min_val'], self.kwargs['max_val']],
pickable=True, smooth_shading=True, name="FieldPlot")
return
engine = MyCustomRoutine(surf)
plot.add_mesh(surf, show_edges=False, scalars=plot_label, log_scale=log, scalar_bar_args=sargs,
cmap='rainbow', pickable=True, smooth_shading=True, name="FieldPlot")
plot.add_slider_widget(callback=lambda value: engine('min_val', value),
rng=[np.min(temps), np.max(temps)], title='Lower', style='modern',
value=np.min(temps), pointa=(.5, .98), pointb=(.65, .98))
plot.add_slider_widget(callback=lambda value: engine('max_val', value),
rng=[np.min(temps), np.max(temps)], title='Upper', style='modern',
value=np.max(temps), pointa=(.66, .98), pointb=(.8, .98))
else:
plot.add_mesh(surf, show_edges=True, line_width=0.1, color="grey", pickable=True, smooth_shading=True)
plot.show_axes()
plot.show_grid()
if view == "iso":
plot.view_isometric()
elif view == "x":
plot.view_yz()
elif view == "y":
plot.view_xz()
elif view == "z":
plot.view_xy()
files_list = []
if plot:
end = time.time() - start
self._messenger.add_info_message("PyVista plot generation took {} seconds.".format(end))
if off_screen:
if imageformat:
plot.show(screenshot=filename + "." + imageformat)
files_list.append(filename + "." + imageformat)
else:
plot.show()
else:
def show(screen=None, interactive=True):
"""
Parameters
----------
screen : optional
The default is ``None``.
interactive : bool, optional
The default is ``True``.
Returns
-------
"""
if screen:
plot.show(screenshot=screen, interactive=interactive, full_screen=True)
else:
plot.show(interactive=interactive)
if imageformat:
show(filename + "." + imageformat, True)
files_list.append(filename + "." + imageformat)
else:
show(filename + "." + imageformat, False)
for f in aedtplt_files:
os.remove(os.path.join(f))
return files_list
@aedt_exception_handler
def _animation_from_aedtflt(self, aedtplt_files=None, variation_var="Time", variation_list=[],
plot_label="Temperature", model_color="#8faf8f", export_gif=False, off_screen=False):
"""Export the 3D field solver mesh, fields, or both mesh and fields as images using Python Plotly.
.. note::
This method is currently supported only on Windows using CPython.
Parameters
----------
aedtplt_files : str or list, optional
Names of the one or more AEDTPLT files generated by AEDT. The default is ``None``.
variation_var : str, optional
Variable to vary. The default is ``"Time"``.
variation_list : list, optional
List of variation values. The default is ``[]``.
plot_label : str, optional
Label for the plot. The default is ``"Temperature"``.
model_color : str, optional
            Color scheme for the 3D model. The default is ``"#8faf8f"``, a light gray-green.
export_gif : bool, optional
Whether to export the animation as a GIF file. The default is ``False``.
off_screen : bool, optional
The default is ``False``.
Returns
-------
str
Name of the GIF file.
"""
frame_per_seconds = 0.5
start = time.time()
if type(aedtplt_files) is str:
aedtplt_files = [aedtplt_files]
plot = pv.Plotter(notebook=False, off_screen=off_screen)
if not off_screen:
plot.enable_anti_aliasing()
plot.enable_fly_to_right_click()
lines = []
for file in aedtplt_files:
if ".aedtplt" in file:
with open(file, "r") as f:
drawing_found = False
for line in f:
if "$begin Drawing" in line:
drawing_found = True
l_tmp = []
continue
if "$end Drawing" in line:
lines.append(l_tmp)
drawing_found = False
continue
if drawing_found:
l_tmp.append(line)
continue
if "Number of drawing:" in line:
n_drawings = int(line[18:])
continue
elif ".obj" in file:
mesh = pv.read(file)
plot.add_mesh(mesh, show_scalar_bar=False, opacity=0.75, cmap=[model_color], name="3D Model",
show_edges=False, edge_color=model_color)
# def create_object_mesh(opacity):
# try:
# p.remove_actor("Volumes")
# except:
# pass
# p.add_mesh(mesh, show_scalar_bar=False, opacity=opacity, cmap=[model_color], name="3D Model",
# show_edges=False, edge_color=model_color)
# p.add_slider_widget(create_object_mesh, [0,1], style='modern', value=0.75,pointa=[0.81,0.98], pointb=[0.95,0.98],title="Opacity")
filename = os.path.splitext(aedtplt_files[0])[0]
print(filename)
surfs=[]
mins=1e12
maxs=-1e12
log = True
for drawing_lines in lines:
bounding = []
elements = []
nodes_list = []
solution = []
for l in drawing_lines:
if "BoundingBox(" in l:
bounding = l[l.find("(") + 1:-2].split(",")
bounding = [i.strip() for i in bounding]
if "Elements(" in l:
elements = l[l.find("(") + 1:-2].split(",")
elements = [int(i.strip()) for i in elements]
if "Nodes(" in l:
nodes_list = l[l.find("(") + 1:-2].split(",")
nodes_list = [float(i.strip()) for i in nodes_list]
if "ElemSolution(" in l:
# convert list of strings to list of floats
sols = l[l.find("(") + 1:-2].split(",")
sols = [is_float(value) for value in sols]
num_solution_per_element = int(sols[2])
sols = sols[3:]
sols = [sols[i:i + num_solution_per_element] for i in range(0, len(sols), num_solution_per_element)]
solution = [sum(i) / num_solution_per_element for i in sols]
nodes = [[nodes_list[i], nodes_list[i + 1], nodes_list[i + 2]] for i in range(0, len(nodes_list), 3)]
num_nodes = elements[0]
num_elements = elements[1]
elements = elements[2:]
element_type = elements[0]
num_nodes_per_element = elements[4]
hl = 5 # header length
elements_nodes = []
for i in range(0, len(elements), num_nodes_per_element + hl):
elements_nodes.append([elements[i + hl + n] for n in range(num_nodes_per_element)])
if solution:
take_all_nodes = True # solution case
else:
take_all_nodes = False # mesh case
trg_vertex = self._triangle_vertex(elements_nodes, num_nodes_per_element, take_all_nodes)
# remove duplicates
nodup_list = [list(i) for i in list(set([frozenset(t) for t in trg_vertex]))]
sols_vertex = []
if solution:
sv = {}
for els, s in zip(elements_nodes, solution):
for el in els:
if el in sv:
sv[el] = (sv[el] + s) / 2
else:
sv[el] = s
sols_vertex = [sv[v] for v in sorted(sv.keys())]
array = [[3] + [j - 1 for j in i] for i in nodup_list]
faces = np.hstack(array)
vertices = np.array(nodes)
surf = pv.PolyData(vertices, faces)
if sols_vertex:
temps = np.array(sols_vertex)
mean = np.mean(temps)
std = np.std(temps)
if np.min(temps) <=0:
log = False
surf.point_arrays[plot_label] = temps
if solution:
surfs.append(surf)
if np.min(temps)<mins:
mins=np.min(temps)
if np.max(temps) > maxs:
maxs = np.max(temps)
self._animating = True
gifname=None
if export_gif:
gifname= os.path.splitext(aedtplt_files[0])[0]+".gif"
plot.open_gif(gifname)
def q_callback():
"""exit when user wants to leave"""
self._animating = False
self._pause=False
def p_callback():
"""exit when user wants to leave"""
self._pause = not self._pause
plot.add_text('Press p for Play/Pause, Press q to exit ', font_size=8, position='upper_left')
plot.add_text(' ', font_size=10, position=[0, 0])
plot.add_key_event("q", q_callback)
plot.add_key_event("p", p_callback)
# run until q is pressed
plot.show_axes()
plot.show_grid()
cpos = plot.show(interactive=False, auto_close=False,
interactive_update=not off_screen)
sargs = dict(title_font_size=10, label_font_size=10,
shadow=True, n_labels=9, italic=True, fmt="%.1f",
font_family="arial")
plot.add_mesh(surfs[0], scalars=plot_label, log_scale=log,
scalar_bar_args=sargs, cmap='rainbow',
clim=[mins, maxs], show_edges=False,
pickable=True, smooth_shading=True,
name="FieldPlot")
plot.isometric_view()
start = time.time()
plot.update(1, force_redraw=True)
first_loop = True
if export_gif:
first_loop = True
plot.write_frame()
else:
first_loop = False
i=1
while self._animating:
if self._pause:
time.sleep(1)
plot.update(1, force_redraw=True)
continue
#p.remove_actor("FieldPlot")
if i >= len(surfs):
if off_screen:
break
i=0
first_loop = False
scalars = surfs[i].point_arrays[plot_label]
plot.update_scalars(scalars, render=False)
# p.add_mesh(surfs[i], scalars=plot_label, log_scale=log, scalar_bar_args=sargs, cmap='rainbow',
# show_edges=False, pickable=True, smooth_shading=True, name="FieldPlot")
plot.textActor.SetInput(variation_var + " = " + variation_list[i])
if not hasattr(plot, 'ren_win'):
break
#p.update(1, force_redraw=True)
time.sleep(max(0, frame_per_seconds - (time.time() - start)))
start = time.time()
if off_screen:
plot.render()
else:
plot.update(1, force_redraw=True)
if first_loop:
plot.write_frame()
time.sleep(0.2)
i+=1
plot.close()
for el in aedtplt_files:
os.remove(el)
return gifname
@aedt_exception_handler
def export_model_obj(self):
"""Export the model."""
assert self._parent._aedt_version >= "2021.2", self._messenger.add_error_message("Object is supported from AEDT 2021 R2.")
project_path = self._parent.project_path
obj_list = self._parent.modeler.primitives.object_names
obj_list = [i for i in obj_list if not self._parent.modeler.primitives.objects[
self._parent.modeler.primitives.get_obj_id(i)].is3d or (
self._parent.modeler.primitives.objects[
self._parent.modeler.primitives.get_obj_id(i)].material_name.lower() != "vacuum" and
self._parent.modeler.primitives.objects[
self._parent.modeler.primitives.get_obj_id(i)].material_name.lower() != "air")]
self._parent.modeler.oeditor.ExportModelMeshToFile(os.path.join(project_path, "Model.obj"),
obj_list)
return os.path.join(project_path, "Model.obj")
@aedt_exception_handler
def export_mesh_obj(self, setup_name=None, intrinsic_dict={}):
"""Export the mesh.
Parameters
----------
setup_name : str, optional
Name of the setup. The default is ``None``.
        intrinsic_dict : dict, optional
Intrinsic dictionary that is needed for the export.
The default is ``{}``.
Returns
-------
"""
project_path = self._parent.project_path
if not setup_name:
setup_name = self._parent.nominal_adaptive
face_lists = []
obj_list = self._parent.modeler.primitives.object_names
for el in obj_list:
obj_id = self._parent.modeler.primitives.get_obj_id(el)
if not self._parent.modeler.primitives.objects[obj_id].is3d or (
self._parent.modeler.primitives.objects[obj_id].material_name != "vacuum" and
self._parent.modeler.primitives.objects[obj_id].material_name != "air"):
face_lists += self._parent.modeler.primitives.get_object_faces(obj_id)
plot = self.create_fieldplot_surface(face_lists, "Mesh", setup_name, intrinsic_dict)
if plot:
file_to_add = self.export_field_plot(plot.name, project_path)
plot.delete()
return file_to_add
return None
@aedt_exception_handler
def plot_model_obj(self, export_afterplot=True, jupyter=False):
"""Plot the model.
Parameters
----------
export_afterplot : bool, optional
Whether to export the plot after it is generated. The
default is ``True``.
jupyter : bool, optional
Whether to generate the plot using Jupyter Notebook. The default is ``False``.
Returns
-------
list
List of plot files.
"""
assert self._parent._aedt_version >= "2021.2", self._messenger.add_error_message("Object is supported from AEDT 2021 R2.")
files = [self.export_model_obj()]
if export_afterplot:
imageformat='jpg'
else:
imageformat=None
        file_list = self._plot_from_aedtplt(files, imageformat=imageformat, plot_label="3D Model", model_color="#8faf8f", show_model_edge=False)
return file_list
@aedt_exception_handler
def plot_field_from_fieldplot(self, plotname, project_path="", meshplot=False, setup_name=None,
intrinsic_dict={}, imageformat="jpg", view="iso", plot_label="Temperature", plot_folder=None, off_screen=False):
"""Export a field plot to an image file (JPG or PNG) using Python Plotly.
.. note::
The Plotly module rebuilds the mesh and the overlap fields on the mesh.
Parameters
----------
plotname : str
Name of the field plot to export.
project_path : str, optional
Path for saving the image file. The default is ``""``.
meshplot : bool, optional
Whether to create and plot the mesh over the fields. The
default is ``False``.
setup_name : str, optional
Name of the setup or sweep to use for the export. The default is ``None``.
intrinsic_dict : dict, optional
Intrinsic dictionary that is needed for the export when ``meshplot="True"``.
The default is ``{}``.
imageformat : str, optional
Format of the image file. Options are ``"jpg"``,
``"png"``, ``"svg"``, and ``"webp"``. The default is
``"jpg"``.
view : str, optional
View to export. Options are ``"iso"``, ``"x"`` , ``"y"``,
``"z"``, and ``"all"``. The default is ``"iso"``. If
``"all"``, all views are exported.
plot_label : str, optional
Type of the plot. The default is ``"Temperature"``.
plot_folder : str, optional
Plot folder to update before exporting the
field. The default is ``None``, in which case all plot
folders are updated.
Returns
-------
type
List of exported files.
"""
if not plot_folder:
self.ofieldsreporter.UpdateAllFieldsPlots()
else:
self.ofieldsreporter.UpdateQuantityFieldsPlots(plot_folder)
start = time.time()
files_to_add = []
if not project_path:
project_path = self._parent.project_path
file_to_add = self.export_field_plot(plotname, project_path)
file_list=None
if not file_to_add:
return False
else:
files_to_add.append(file_to_add)
if meshplot:
if self._parent._aedt_version >= "2021.2":
files_to_add.append(self.export_model_obj())
else:
file_to_add = self.export_mesh_obj(setup_name, intrinsic_dict)
if file_to_add:
files_to_add.append(file_to_add)
file_list = self._plot_from_aedtplt(files_to_add, imageformat=imageformat, view=view,
plot_label=plot_label, off_screen=off_screen)
endt = time.time() - start
print("Field Generation, export and plot time: ", endt)
return file_list
@aedt_exception_handler
def animate_fields_from_aedtplt(self, plotname, plot_folder=None, meshplot=False, setup_name=None,
intrinsic_dict={}, variation_variable="Phi", variation_list=['0deg'],
project_path="", export_gif=False, off_screen=False):
"""Generate a field plot to an image file (JPG or PNG) using PyVista.
.. note::
The PyVista module rebuilds the mesh and the overlap fields on the mesh.
Parameters
----------
plotname : str
Name of the plot or the name of the object.
plot_folder : str, optional
Name of the folder in which the plot resides. The default
is ``None``.
setup_name : str, optional
Name of the setup (sweep) to use for the export. The
default is ``None``.
intrinsic_dict : dict, optional
Intrinsic dictionary that is needed for the export. The default
is ``{}``.
variation_variable : str, optional
Variable to vary. The default is ``"Phi"``.
variation_list : list, optional
List of variation values with units. The default is
``["0deg"]``.
project_path : str, optional
Path for the export. The default is ``""``.
meshplot : bool, optional
The default is ``False``.
export_gif : bool, optional
The default is ``False``.
off_screen : bool, optional
Generate the animation without showing an interactive plot. The default is ``False``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
if not plot_folder:
self.ofieldsreporter.UpdateAllFieldsPlots()
else:
self.ofieldsreporter.UpdateQuantityFieldsPlots(plot_folder)
files_to_add = []
if meshplot:
if self._parent._aedt_version >= "2021.2":
files_to_add.append(self.export_model_obj())
else:
file_to_add = self.export_mesh_obj(setup_name, intrinsic_dict)
if file_to_add:
files_to_add.append(file_to_add)
for el in variation_list:
self._parent.odesign.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:FieldsPostProcessorTab",
[
"NAME:PropServers",
"FieldsReporter:"+plotname
],
[
"NAME:ChangedProps",
[
"NAME:"+variation_variable,
"Value:=" , el
]
]
]
])
files_to_add.append(self.export_field_plot(plotname, project_path,plotname+variation_variable+str(el)))
self._animation_from_aedtflt(files_to_add, variation_variable, variation_list, export_gif=export_gif, off_screen=off_screen)
return True
@aedt_exception_handler
def animate_fields_from_aedtplt_2(self, quantityname, object_list, plottype, meshplot=False, setup_name=None,
intrinsic_dict={}, variation_variable="Phi", variation_list=['0deg'],
project_path="", export_gif=False, off_screen=False):
"""Generate a field plot to an image file (JPG or PNG) using PyVista.
.. note::
The PyVista module rebuilds the mesh and the overlap fields on the mesh.
This method creates the plot and exports it. It is an alternative to the method :func:`animate_fields_from_aedtplt`,
which uses an existing plot.
Parameters
----------
quantityname : str
Name of the plot or the name of the object.
        object_list : list
            List of objects, faces, or planes (depending on ``plottype``) to which the field plot is applied.
plottype : str
Type of the plot. Options are ``"Surface"``, ``"Volume"``, and
``"CutPlane"``.
meshplot : bool, optional
The default is ``False``.
setup_name : str, optional
Name of the setup (sweep) to use for the export. The default is
``None``.
intrinsic_dict : dict, optional
Intrinsic dictionary that is needed for the export.
The default is ``{}``.
variation_variable : str, optional
Variable to vary. The default is ``"Phi"``.
        variation_list : list, optional
List of variation values with units. The default is
``["0deg"]``.
project_path : str, optional
Path for the export. The default is ``""``.
export_gif : bool, optional
Whether to export to a GIF file. The default is ``False``,
in which case the plot is exported to a JPG file.
off_screen : bool, optional
The default is ``False``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
if not project_path:
project_path = self._parent.project_path
files_to_add = []
if meshplot:
if self._parent._aedt_version >= "2021.2":
files_to_add.append(self.export_model_obj())
else:
file_to_add = self.export_mesh_obj(setup_name, intrinsic_dict)
if file_to_add:
files_to_add.append(file_to_add)
v = 0
for el in variation_list:
intrinsic_dict[variation_variable] = el
if plottype == "Surface":
plotf = self.create_fieldplot_surface(object_list, quantityname, setup_name, intrinsic_dict)
elif plottype == "Volume":
plotf = self.create_fieldplot_volume(object_list, quantityname, setup_name, intrinsic_dict)
else:
plotf = self.create_fieldplot_cutplane(object_list, quantityname, setup_name, intrinsic_dict)
if plotf:
file_to_add = self.export_field_plot(plotf.name, project_path, plotf.name + str(v))
if file_to_add:
files_to_add.append(file_to_add)
plotf.delete()
v += 1
return self._animation_from_aedtflt(files_to_add, variation_variable, variation_list, export_gif=export_gif, off_screen=off_screen)
@aedt_exception_handler
def far_field_plot(self, ff_data, x=0, y=0, qty='rETotal', dB=True, array_size=[4, 4]):
"""Generate a far field plot.
Parameters
----------
ff_data :
x : float, optional
The default is ``0``.
y : float, optional
The default is ``0``.
qty : str, optional
The default is ``"rETotal"``.
dB : bool, optional
The default is ``True``.
array_size : list
List for the array size. The default is ``[4, 4]``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
loc_offset = 2 # if array index is not starting at [1,1]
xphase = float(y)
yphase = float(x)
array_shape = (array_size[0], array_size[1])
weight = np.zeros(array_shape, dtype=complex)
mag = np.ones(array_shape, dtype='object')
port_names_arranged = np.chararray(array_shape)
all_ports = ff_data.keys()
w_dict = {}
# calculate weights based off of progressive phase shift
port_name = []
for m in range(array_shape[0]):
for n in range(array_shape[1]):
mag_val = mag[m][n]
ang = np.radians(xphase * m) + np.radians(yphase * n)
weight[m][n] = np.sqrt(mag_val) * np.exp(1j * ang)
current_index_str = '[' + str(m + 1 + loc_offset) + ',' + str(n + 1 + loc_offset) + ']'
port_name = [y for y in all_ports if current_index_str in y]
w_dict[port_name[0]] = weight[m][n]
length_of_ff_data = len(ff_data[port_name[0]][2])
array_shape = (len(w_dict), length_of_ff_data)
rEtheta_fields = np.zeros(array_shape, dtype=complex)
rEphi_fields =
|
np.zeros(array_shape, dtype=complex)
|
numpy.zeros
|
"""
The LT box contains an assortment of plotting, histogramming and fitting routines
used for data analysis. The plotting routines have been imported from the :ref:`plotting`
module and the fitting routines have been imported from the :ref:`LT_Fit`. For detailed
information about these functions check the :ref:`plotting` and the :ref:`LT_Fit` documentation.
Normally all you should need to do is import the `box`_ and you have a *box of tools*.
Example::
>>> import LT.box as B
-------------------------------------------
Imported functions:
From LT.plotting:
* :meth:`~LT.plotting.plot_exp`: plot experimental data points with or without error bar
* :meth:`~LT.plotting.plot_line`: plot a line through a set of data points
* :meth:`~LT.plotting.plot_spline`: plot a spline curve through a set of data points
* dplot_exp: (:meth:`~LT.plotting.datafile_plot_exp`) is like plot_exp but accesses the datafile variables directly via their names
* dplot_line: (:meth:`~LT.plotting.datafile_plot_theory`) like plot_line for datafile variables
* dplot_spline: (:meth:`~LT.plotting.datafile_spline_plot_theory`) like plot_spline for datafile variables
From LT_Fit:
* :class:`~LT_Fit.linear_fit.linefit`: fit a straight line through a set of data points
* :class:`~LT_Fit.linear_fit.polyfit`: fit a polynomial
* :class:`~LT_Fit.linear_fit.gen_linfit`: general linear fit
* :class:`~LT_Fit.gen_fit.genfit`: general, non-linear fit
-------------------------------------------
"""
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.colors import LogNorm
from matplotlib import ticker
import copy
import LT
# include the version with parameters
from .pdatafile import pdfile
from .plotting import plot_exp
from .plotting import plot_line
from .plotting import plot_spline
from .plotting import datafile_plot_exp as dplot_exp
from .plotting import datafile_plot_theory as dplot_line
from .plotting import datafile_spline_plot_theory as dplot_spline
from LT_Fit.linear_fit import linefit
from LT_Fit.linear_fit import polyfit
from LT_Fit.linear_fit import gen_linfit
from LT_Fit.parameters import *
# general fitting
from LT_Fit.gen_fit import genfit
# this is done for general linear fitting
# for MCA spectra
from . import MCA as mcsp
def get_file(file, **kwargs):
"""
Assume that B is the name of LT.box.
Open and read the file::
>>> md = B.get_file( 'file.data' )
"""
return pdfile(file, **kwargs)
def get_data(D, var):
"""
Assume that B is the name of LT.box.
Get all the values of variable 'my_var' as a :func:`numpy.array`::
>>> mv = B.get_data(md, 'my_var')
"""
return np.array(D.get_data(var))
# window cuts
def in_between(xmin, xmax, val):
"""
return a selection of the values in val
that satisfy:
xmin <= val <= xmax
Returns
-------
numpy array of True and False
"""
return (xmin <= val) & (val <= xmax)
# select data within a window
def select_data(xmin, a, xmax):
"""
Assume that B is the name of LT.box.
Find which values of an array A lie between 0.8 and 1.2
>>> iw = B.select_data( 0.8, A, 1.2)
iw now is an array of indices, A[iw] is an array with values
between 0.8 and 1.2
in general: B.select_data(xmin, a, xmax)
"""
return np.where( in_between(xmin, xmax, a) )
# Determine if a point is inside a given polygon or not
# Polygon is a list of (x,y) pairs. This function
# returns True or False. The algorithm is called
# the "Ray Casting Method".
#
# here the rays are cast horizontally to the right (toward positive x)
def point_in_poly(x,y,poly):
"""
x, y coordinates of a point to test if it lies inside a polygon
given by a list of (xv, yv) vertices
returns True if inside
"""
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n] # cycles to all possible values
# is y larger than any of the two min. y-values ?
if y > min(p1y,p2y):
# if so is y smaller than any of the two max. y-values
if y <= max(p1y,p2y):
# there is a potential that the point is inside
if x <= max(p1x,p2x):
if p1y != p2y:
# point not on a vertex calculate intersection with a horizontal line at y
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
# print ' i, p1x, p1y, p2x, p2y, x, y ', i, p1x, p1y, p2x, p2y, x, y, inside
p1x,p1y = p2x,p2y
return inside
def array_point_in_poly(r,poly):
"""
    r     array of points with shape (N,2), where N is the number of points
poly an array of vertices for the polygon with the shape (M,2), M number of polygon vertices
return a list of logical values indicating if the point is inside or outside the poly.
"""
inside = []
for rr in r:
inside.append( point_in_poly(rr[0], rr[1], poly) )
return np.array(inside)
## Test
# polygon = [(-5,5),(5,5),(5,-5),(-5,-5)]
# point_x = -4
# point_y = -3
## Call the function with the points and the polygon
# print(point_in_poly(point_x, point_y, polygon))   # -> True
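# For many points at once use array_point_in_poly (a sketch; the point array is an assumed example):
# points = np.array([[-4., -3.], [6., 0.]])
# print(array_point_in_poly(points, polygon))   # -> [ True False]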
# histogram class exception
class histoError(Exception):
def __init__(self, comment, value):
self.comment = comment
self.value = value
def __str__(self):
return self.comment + repr(self.value)
# 1d -histogram class
#
# fitting of a gaussian on a quadratic back ground is built in
#
class histo:
"""
    Define a histogram based on the :func:`numpy.histogram` function.
The various ways of defining one are:
* If *a* is a 1D ( :func:`numpy.array`) containing the data to be histogrammed
>>> h = histo( a )
* If *his* is the output of the :func:`numpy.histogram` function
>>> h = histo(histogram = his)
* If ``bc`` is a 1D array with bin center values, and ``bcont``
contains bin content values then:
>>> h = histo(bin_center = bc, bin_content = bcont)
* A filename for a stored histogram is given
      >>> h = histo(file = filename), where filename contains the pdatafile
Usually the result of a histo.save operation
Important keywords:
============ =====================================================
Keyword Meaning
============ =====================================================
values Array of values to be histogrammed (:func:`numpy.array`)
range Lower and upper limits of binning ( e.g. ``range = (10.,20.)`` )
bins Number of bins
histogram Result of :func:`numpy.histogram` function
bin_error Array of errors for each bin content (:func:`numpy.array`)
bin_center Array of bin-center values (:func:`numpy.array`)
bin_content Array of bin-content values (:func:`numpy.array`)
file Load data from file
window Set a window (a zoom window)
title Set the title
xlabel Set the x-label
ylabel Set the y-label
============ =====================================================
Additional keyword arguments are passed to the :func:`numpy.histogram` function
"""
def __init__(self,\
values = None, \
range = None, \
bins = None, \
histogram = None, \
bin_error = None, \
bin_center = None, \
bin_content = None, \
file = None, \
window = None, \
title = 'my histogram', \
xlabel = 'x-bin', \
ylabel = 'content', \
**kwargs):
self.res = None
self.fit_dict = {}
# initialize fitting
self.b0 = Parameter(0., 'b0')
self.b1 = Parameter(0., 'b1')
self.b2 = Parameter(0., 'b2')
self.mean = Parameter(1.,'mean')
self.sigma = Parameter(1., 'sigma')
self.A = Parameter(1., 'A')
        # create a dictionary for variable fitting
self.fit_par = {
"b0" : self.b0, \
"b1" : self.b1, \
"b2" : self.b2, \
"mean" : self.mean, \
"sigma": self.sigma, \
"A" : self.A}
# setup fitting list
self.set_fit_list()
self.window_set = False
if values is not None:
# values have been given for filling
if (range is None) and (bins is None):
self.fill(values, **kwargs)
elif (range is not None) and (bins is None):
self.fill(values, range = range, **kwargs)
elif (range is None) and (bins is not None):
self.fill(values, bins = bins, **kwargs)
else:
self.fill(values, bins = bins, range = range, **kwargs)
elif file is not None:
# create from file
self.load(file)
return
elif histogram is not None:
# the output of the numpy histogram function has been given
self.res = histogram
elif (bin_center is not None) and (bin_content is not None):
            # the histogram content is given directly
self.bin_center = np.copy(bin_center)
self.bin_content = np.copy(bin_content)
self.bin_width = np.diff(self.bin_center)[0]
self.__get_histogram()
self.title = title
self.xlabel = xlabel
self.ylabel = ylabel
self.__setup_bins(error = bin_error)
self.nbins = self.bin_center.shape[0]
if window is None:
self.clear_window()
else:
self.set_window( xmin = window[0], xmax = window[1])
def save_index(self, i):
# make sure i is always within the allowed range
return min(len(self.bin_content)-1, i)
def fill(self, y, add = False, **kwargs):
"""
Fill the histogram with the values stored in the :func:`numpy.array` y.
============ =====================================================
Keyword Meaning
============ =====================================================
add if True add the results to the existing content
============ =====================================================
Additional keyword arguments are passed to the :func:`numpy.histogram` function
"""
if not add:
# a new filling
try:
self.res = np.histogram(y, new = None, **kwargs)
except:
self.res = np.histogram(y, **kwargs)
self.__setup_bins(error = None)
else:
# the bins have already been defined continue
# to accumulate statistics
if self.res is None:
print("no binning information: try fill with add = False ")
return
try:
res = np.histogram(y, new = True, bins = self.res[1], **kwargs)
except:
res = np.histogram(y, bins = self.res[1], **kwargs)
# add the new bin content to the old one
self.res = (self.res[0] + res[0], self.res[1])
# update the histogram information
self.__setup_bins(error = None)
# end of fill
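    # Example of accumulating statistics (a sketch; `values` and `more_values`
    # are assumed data arrays):
    #
    #   >>> h = histo(values, bins = 50, range = (0., 10.))
    #   >>> h.fill(more_values, add = True)   # keep the binning, add the new counts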
def clear(self):
"""
Set the content and errors to 0.
"""
self.bin_content = np.zeros_like(self.bin_content)
self.bin_error = np.zeros_like(self.bin_content)
self.res = (np.zeros_like(self.res[0]), self.res[1])
self.__prepare_histo_plot()
def sum(self, xmin = None, xmax = None):
"""
Return the sum of all bins. If the limits are given, calculate the sum of all bins between the bins that contain
the values xmin and xmax.
Example::
>>> s0 = h.sum() # add all bins
>>> s1 = h.sum(0.5, 1.1) # add the bins between 0.5 and 1.1
>>> s2 = h.sum(xmin = 0.5, xmax = 1.1) # add the bins between 0.5 and 1.1
============ =====================================================
Keyword Meaning
============ =====================================================
xmin lower limit of sum of bin content
xmax upper limit of sum
============ =====================================================
The errors are also calculated.
"""
if (xmin is None) and (xmax is None):
sum = self.bin_content.sum()
sum_err = np.sqrt( (self.bin_error**2).sum())
elif (xmin is None):
sel = (self.bin_center <= xmax)
sum = self.bin_content[sel].sum()
sum_err = np.sqrt( (self.bin_error[sel]**2).sum())
elif (xmax is None):
sel = (xmin <= self.bin_center)
sum = (self.bin_content[sel]).sum()
sum_err = np.sqrt( (self.bin_error[sel]**2).sum())
else:
sel = (xmin <= self.bin_center) & (self.bin_center <= xmax)
sum = (self.bin_content[sel]).sum()
sum_err = np.sqrt( (self.bin_error[sel]**2).sum())
return (sum, sum_err)
def copy(self):
"""
Create a copy of the histogram::
        >>> hc = h.copy()
        Only the histogram values are copied, no labels, titles etc.
"""
res = (np.copy(self.res[0]), np.copy(self.res[1]) )
err = np.copy( self.bin_error )
return histo(histogram = res, bin_error = err)
def rebin(self, n, scale = False, use_mean = False, replace = False):
"""
rebin the histogram by a factor n::
        >>> hc = h.rebin(2)
============ =====================================================
Keyword Meaning
============ =====================================================
        scale        True: if the original number of bins is not a multiple of n,
                     the content of the (shorter) last bin is scaled accordingly
use_mean True: the new bin content is the mean of the bin_content
replace True: replace the current histogram with the
rebinned version
============ =====================================================
"""
n_bins = self.bin_center.shape[0]
divisible = (np.mod(n_bins, n) != 0)
# change bin content
bco_sl, bco_mean_sl, sl, fact_sl = self._rebin_array(self.bin_content, n)
be2_sl, mean_sl, sl, fact_sl = self._rebin_array(self.bin_error**2, n)
sum_sl, bc_sl, sl, fact_sl = self._rebin_array(self.bin_center, n)
# adjust the bin center of last bin if necessary
if not divisible:
bc_sl[-1] = bc_sl[-2]+ np.diff(bc_sl)[0]
if scale:
bco_sl *= fact_sl
# store new histogram parameters and update histogram
if not use_mean:
bin_content = bco_sl
bin_error = np.sqrt(be2_sl)
else:
bin_content = bco_mean_sl
s_i, n_p = self._sl_indices(sl, n_bins) # n_p number of points per slice
b_e = np.sqrt(be2_sl)/np.array(n_p)
bin_error = b_e
bin_center = bc_sl
if replace:
self.bin_content = np.copy(bin_content)
self.bin_error = np.copy(bin_error)
self.bin_center = np.copy(bin_center)
self.__get_histogram()
# prepare for plotting
self.__prepare_histo_plot()
else:
# return a new histogram
hn = self.__new_histogram(bin_content, bin_center, bin_error)
hn.xlabel = self.xlabel
hn.ylabel = self.ylabel
hn.title = self.title
return hn
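    # Example (a sketch; `h` is assumed to be an existing histo instance):
    #
    #   >>> h4 = h.rebin(4)                    # new histogram with 4x wider bins
    #   >>> h4m = h.rebin(4, use_mean = True)  # bin content = mean of the merged bins
    #   >>> h.rebin(4, replace = True)         # rebin h in place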
    def plot(self, filled = True, ymin = 0., axes = None, ignore_zeros = False, **kwargs):
"""
Plot the histogram content:
============ =====================================================
Keyword Meaning
============ =====================================================
filled if True draw a filled histogram
ymin lower limit where fill starts (horizontal line)
ignore_zeros do not plot channels with bin content 0 (default = False)
============ =====================================================
"""
if axes is None:
axes = pl.gca()
if ymin is None:
ymin = self.cont_min
# prepare histo plot if axes have changed
self.__prepare_histo_plot()
if filled :
xx = self.xpl
yy = self.ypl
if ignore_zeros:
sel = yy != 0.
else:
sel = np.ones_like(yy).astype('bool')
axes.fill_between( xx[sel], yy[sel], y2=ymin, **kwargs)
else:
xx = np.concatenate([self.xpl[:1], self.xpl, self.xpl[-1:]])
yy = np.concatenate([np.array([ymin]), self.ypl, np.array([ymin])])
if ignore_zeros:
sel = yy != 0.
else:
sel = np.ones_like(yy).astype('bool')
axes.plot(xx[sel],yy[sel], **kwargs)
if self.window_set:
axes.set_xlim( (self.win_min, self.win_max) )
# prepare y scale
sel = (self.win_min <= self.xpl) & (self.xpl <= self.win_max)
ymin = self.ypl[sel].min()
ymax = self.ypl[sel].max()
axes.set_ylim( (ymin, ymax) )
axes.set_xlabel(self.xlabel)
axes.set_ylabel(self.ylabel)
axes.set_title(self.title)
def set_window(self, xmin = None, xmax = None):
"""
Define a window into the histogram. This is similar to a zoom or a
region of interest (ROI)
============ =====================================================
Keyword Meaning
============ =====================================================
xmin lower limit for plotting or fitting
xmax upper limit
============ =====================================================
"""
        # a call to __setup_bins MUST precede usage of this call
self.window_set = True
if xmin is None:
self.win_min = self.xpl.min()
else:
self.win_min = xmin
if xmax is None:
self.win_max = self.xpl.max()
else:
self.win_max = xmax
return
def set_window_view(self):
"""
        Like set_window but uses the current display limits. This is only
useful if the histogram has been plotted.
"""
xmin,xmax = pl.xlim()
self.set_window(xmin,xmax)
def clear_window(self):
"""
Reset (Clear) the defined window
"""
        # a call to __setup_bins MUST precede usage of this call
self.window_set = False
self.win_min = self.xpl.min()
self.win_max = self.xpl.max()
def plot_exp(self, ignore_zeros = False, **kwargs):
"""
Plot histogram content and errors like experimental data.
============ =====================================================
Keyword Meaning
============ =====================================================
ignore_zeros do not plot channels with bin content 0 (default = False)
============ =====================================================
"""
xx = self.bin_center
yy = self.bin_content
dyy = self.bin_error
if ignore_zeros:
sel = yy != 0.
else:
sel = np.ones_like(yy).astype('bool')
plot_exp(xx[sel], yy[sel], dyy[sel],\
x_label = self.xlabel, \
y_label = self.ylabel, \
plot_title = self.title, \
**kwargs)
def save(self, filename = 'histo.data'):
"""
Save the histogram in :mod:`~LT.pdatafile` format
"""
of = open(filename, 'w')
        of.write('#\\ title = %s\n'%(self.title))
        of.write('#\\ xlabel = %s\n'%(self.xlabel))
        of.write('#\\ ylabel = %s\n'%(self.ylabel))
# now write the current fit parameters
for key in self.fit_par:
name = key + ' = %r'
err_name = '; d_'+name
            fmt = '#\\ '+name+err_name+'\n'
l = fmt%( self.fit_par[key].value, self.fit_par[key].err)
of.write(l)
of.write('# \n')
of.write('#! bin_center[f, 0]/ bin_content[f,1]/ bin_error[f, 2]/ \n')
for i,bc in enumerate(self.bin_center):
of.write ("%r %r %r \n"%( bc, self.bin_content[i], self.bin_error[i]) )
of.close()
def load(self, file='histo.data'):
"""
        Read the histogram data from :mod:`~LT.pdatafile`.
        If the file is not the result of a save operation, make sure that
        all the necessary data are present.
"""
data = get_file(file)
# first the data
self.bin_center = np.array(data.get_data('bin_center') )
self.bin_content = np.array(data.get_data('bin_content') )
self.bin_error = np.array(data.get_data('bin_error') )
# now the parameters
self.title = data.par.get_value('title', str)
self.xlabel = data.par.get_value('xlabel', str)
self.ylabel = data.par.get_value('ylabel', str)
# now the fit parameters
for key in self.fit_par:
name = key
dname = 'd_'+key
self.fit_par[key].set(data.par.get_value(name, float), \
err = data.par.get_value(dname, float))
self.bin_width = np.diff(self.bin_center)[0]
self.__get_histogram()
self.bins = self.res[1]
self.__prepare_histo_plot()
# plot the fit
x = np.linspace(self.bins[0], self.bins[-1:][0], 100)
self.fit_dict['xpl'] = x
self.fit_dict['ypl'] = self.fit_func(x)
def find_bin(self, x):
"""
Find the bin that would contain the value x
"""
        # self.bins contains the bin edges
if (x < self.bins[0]):
print('searched value {0} < lowest bin = {1} '.format(x, self.bins[0]))
return 0
elif (x > self.bins[-1:][0]):
print('searched value {0} > highest bin = {1} '.format(x, self.bins[-1:][0]))
return len(self.bins) - 1
elif (x == self.bins[0]):
return 0
else:
return (np.searchsorted(self.bins, x) - 1 )
def set_fit_list(self, fit = [ 'A', 'mean', 'sigma'] ):
"""
Define which parameters are to be fitted.
The default list is ::
fit = [ 'A', 'mean', 'sigma']
to use all parameters::
h.set_fit_list( fit = [ 'A', 'mean', 'sigma', 'b0', 'b1', 'b2'])
"""
if fit==[]:
# empty list show possibilities
print('possible fit parameters:')
print(list(self.fit_par.keys()))
return
self.fit_names = fit
self.fit_list = []
for key in self.fit_names:
try:
curr_par_name = self.fit_par[key]
except:
print('cannot use parameter :', key, ' (does not exist ?!)')
continue
self.fit_list.append(curr_par_name)
# end of fitting list
def show_fit_list(self):
"""
Show the current fit list
Returns
-------
None.
"""
print("\nCurrent fit list : ", [k.name for k in self.fit_list])
print("\nAvailable parameters: [ 'A', 'mean', 'sigma', 'b0', 'b1', 'b2']")
def fit(self, xmin = None, xmax = None, init = True, ignore_zeros = True, **kwargs):
"""
Fit a gaussian on a quadratic background. You can also just
fit a background or just a gaussian. All this is controlled by which
parameters you want to fit. Another important part of non-linear
fitting is that you need to provide reasonable guesses for the fit
parameters. The parameters in :class:`~LT.box.histo` are not just
numbers but objects with their own properties and functions (see
:class:`~LT_Fit.parameters.Parameter` ). The full fit function is as
follows:
        :math:`$ f(x) = b_0 + b_1 x + b_2 x^2 + A \exp(-(x - \mu)^2/(2\sigma^2))$`
The (:class:`LT.box.histo`) parameters are:
=================== ================================================
Parameter Histo Class Member
=================== ================================================
        :math:`$b_0 $`              b0
:math:`$b_1 $` b1
:math:`$b_2 $` b2
:math:`$A $` A
:math:`$\mu $` mean
:math:`$\sigma $` sigma
=================== ================================================
Which parameters are fitted is defined in :meth:`~LT.box.histo.set_fit_list`
Keyword arguments are:
============ =====================================================
Keyword Meaning
============ =====================================================
xmin lower fit limit
xmax upper fit limit
init True/False (default = True) estimate initial fit parameters automatically
ignore_zeros True/False (default = True) ignore channels with bin content zero
kwargs additional keywords are passed to gen_fit (use only if you know what you are doing!)
============ =====================================================
"""
# is there a range given, or is a window set
sel_all = np.ones_like(self.bin_center, dtype = 'bool')
if init:
self.init_gauss(xmin, xmax)
if (xmin is None) and (xmax is None):
# check if a window is set
if self.window_set:
# if so use the set window limits
sel = (self.win_min <= self.bin_center) & (self.bin_center <= self.win_max)
self.fit_indx, = np.where ( sel )
else:
# if not use all data
self.fit_indx, = np.where(sel_all)
elif (xmin is None):
sel = (self.bin_center <= xmax)
if self.window_set:
# if so check which is smaller
sel_w = (self.bin_center <= self.win_max) & sel
self.fit_indx, = np.where(sel_w)
else:
self.fit_indx, = np.where(sel)
elif (xmax is None):
sel = (xmin <= self.bin_center)
if self.window_set:
# if so check which is larger
sel_w = (self.win_min <= self.bin_center) & sel
self.fit_indx, = np.where(sel_w)
else:
self.fit_indx, = np.where(sel)
else:
sel = (xmin <= self.bin_center) & ( self.bin_center <= xmax)
if self.window_set:
# if so check the set window limits
sel_w = (self.win_min <= self.bin_center) & ( self.bin_center <= self.win_max) & sel
# use the tighter limits
self.fit_indx, = np.where(sel_w)
else:
self.fit_indx, = np.where(sel)
# set minimal error to 1
is_zero = np.where(self.bin_error == 0.)
self.bin_error[is_zero] = 1.
# do the fit
if ignore_zeros:
# ignore bins with content of 0
sel = self.bin_content[self.fit_indx] != 0.
bin_content = self.bin_content[self.fit_indx][sel]
bin_center = self.bin_center[self.fit_indx][sel]
bin_error = self.bin_error[self.fit_indx][sel]
else:
bin_content = self.bin_content[self.fit_indx]
bin_center = self.bin_center[self.fit_indx]
bin_error = self.bin_error[self.fit_indx]
# do the fit using the new version
self.F = genfit( self.fit_func, self.fit_list, \
x = bin_center, \
y = bin_content, \
y_err = bin_error, \
full_output=1, \
ftol = 0.001, \
print_results = False, **kwargs)
self.fit_dict = self.F.stat
self.fit_dict['xpl'] = self.F.xpl
self.fit_dict['ypl'] = self.F.ypl
# get the covariance matrix
if self.fit_dict == {}:
print("Problem with fit: no result, check parameters !")
return
self.cov = self.F.covar
self.chi2_red = self.F.chi2_red
self.chi2 = self.F.chi2
self.CL = self.F.CL
# print the result
print('----------------------------------------------------------------------')
print('Fit results:')
for key in self.fit_names:
print(key, ' = ', self.fit_par[key].value,' +/- ', self.fit_par[key].err)
print('Chi square = ', self.F.chi2)
print('Chi sq./DoF = ', self.F.chi2_red)
print('----------------------------------------------------------------------')
self.calc_fit_plot()
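    # Typical fit workflow (a sketch; `h` is assumed to be a filled histo instance
    # and the fit limits are made-up numbers):
    #
    #   >>> h.set_fit_list(['A', 'mean', 'sigma', 'b0'])  # gaussian on a constant background
    #   >>> h.fit(xmin = 2.5, xmax = 4.5)                 # fit only this range
    #   >>> h.plot(); h.plot_fit()
    #   >>> h.mean.value, h.mean.err                      # fitted peak position and its error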
def fit_view(self, init = True):
"""
Fit histogram using the current display limits as fit range. This is only
useful if the histogram has been plotted.
"""
xmin,xmax = pl.xlim()
self.fit(xmin,xmax, init = init)
def init_parameters(self):
"""
Reset fit parameters to their default values
Returns
-------
None.
"""
self.b0.set(0.)
self.b1.set(0.)
self.b2.set(0.)
self.mean.set(0.)
self.sigma.set(1.)
self.A.set(1.)
def init_gauss(self, xmin = None, xmax = None):
"""
        Calculate the initial parameter guess for a gaussian. These parameters
        can then be used in the call to :meth:`~LT.box.histo.fit`
"""
# is there a range given, or is a window set
sel_all = np.ones_like(self.bin_center, dtype = 'bool')
if (xmin is None) and (xmax is None):
# check if a window is set
if self.window_set:
# if so use the set window limits
sel = (self.win_min <= self.bin_center) & (self.bin_center <= self.win_max)
self.fit_indx, = np.where ( sel )
else:
# if not use all data
self.fit_indx, = np.where(sel_all)
elif (xmin is None):
sel = (self.bin_center <= xmax)
if self.window_set:
# if so check which is smaller
sel_w = (self.bin_center <= self.win_max) & sel
self.fit_indx, = np.where(sel_w)
else:
self.fit_indx, = np.where(sel)
elif (xmax is None):
sel = (xmin <= self.bin_center)
if self.window_set:
# if so check which is larger
sel_w = (self.win_min <= self.bin_center) & sel
self.fit_indx, = np.where(sel_w)
else:
self.fit_indx, = np.where(sel)
else:
sel = (xmin <= self.bin_center) & ( self.bin_center <= xmax)
if self.window_set:
# if so check the set window limits
sel_w = (self.win_min <= self.bin_center) & ( self.bin_center <= self.win_max) & sel
# use the tighter limits
self.fit_indx, = np.where(sel_w)
else:
self.fit_indx, = np.where(sel)
# set minimal error to 1
is_zero = np.where(self.bin_error == 0.)
self.bin_error[is_zero] = 1.
# do the fit
bin_content = self.bin_content[self.fit_indx]
bin_center = self.bin_center[self.fit_indx]
# bin_error = self.bin_error[self.fit_indx]
# calculate initial parameters
mean = np.sum(bin_center*bin_content)/bin_content.sum()
sigma = np.sqrt( np.sum(bin_content*(bin_center - mean)**2)/bin_content.sum())
A = bin_content.max()
# store the parameters
self.A.set(A)
self.mean.set(mean)
self.sigma.set(sigma)
def calc_fit_plot(self):
# plot the fit
imax = min(len(self.bins)-1, self.fit_indx[-1:][0] + 1)
xmin = self.bins[self.fit_indx][0]
xmax = self.bins[imax]
x = np.linspace(xmin, xmax, 100)
self.fit_dict['xpl'] = x
self.fit_dict['ypl'] = self.fit_func(x)
def plot_fit(self, color = 'r', axes = None, **kwargs):
"""
Plot the fitted curve
============ =====================================================
Keyword Meaning
============ =====================================================
color color of the fitted line
============ =====================================================
"""
if axes is None:
axes = pl.gca()
if self.fit_dict == {}:
print('no fit, nothing to plot !')
else:
plot_line(self.fit_dict['xpl'], self.fit_dict['ypl'], color = color, axes = axes, **kwargs)
if self.window_set:
axes.set_xlim( (self.win_min, self.win_max) )
def fit_func(self, x):
"""
The function fitted to the histogram data
"""
fit_val = (self.b2()*x + self.b1())*x + self.b0() + \
self.A()*np.exp(-0.5*((x-self.mean())/self.sigma() )**2)
return fit_val
def apply_calibration(self, cal):
"""
apply x-axis calibration, new axis values are cal(xaxis)
"""
self.bin_center = cal(self.bin_center)
self.bin_width = np.diff(self.bin_center)[0]
self.bins = cal(self.bins)
# prepare histo plot if axes have changed
self.__prepare_histo_plot()
# private functions
def __setup_bins(self, error = None ):
self.bin_width = np.diff(self.res[1])[0]
self.bin_center = self.res[1][:-1] + self.bin_width/2.
self.bin_content = self.res[0]
self.bins = self.res[1]
if error is None:
self.bin_error = np.sqrt(self.bin_content)
else:
self.bin_error = error
self.__prepare_histo_plot()
def __get_histogram(self):
# create the histogram arrays from bin_width, bin_center, bin_content and bin_error
res1 = np.concatenate( [self.bin_center - self.bin_width/2., self.bin_center[-1:] + self.bin_width/2.])
res0 = self.bin_content
self.res = ([res0,res1])
def __new_histogram(self, b_content, b_center, b_error):
        # set up new np histogram parameters using content, center and error arrays
# same as get_histogram
# return a new histogram with these parameters
b_width = np.diff(b_center)[0]
res1 = np.concatenate( [b_center - b_width/2., b_center[-1:] + b_width/2.])
res0 = b_content
res = ([res0,res1])
return histo(histogram = res, bin_error = b_error, window = (self.win_min, self.win_max))
def __prepare_histo_plot(self):
# prepare data for plotting as steps
self.cont_min = self.res[0].min()
iv = self.bin_width / 2.
self.xpl = np.array(list(zip( self.bin_center - iv, self.bin_center + iv))).ravel()
self.ypl = np.array(list(zip( self.bin_content, self.bin_content))).ravel()
def _sl_indices(self, sla, Ni):
        # sla: array of slices
# Ni length of array that the slices are applied to
# get indices for slices and number of elements per slice
        si = [sll.indices(Ni) for sll in sla]    # list of index tuples: start, stop, step
        n_p = [x[1] - x[0] for x in si]          # number of corresponding elements per slice
        # (a local name other than np is used to avoid shadowing the numpy alias)
        return si, n_p
def _rebin_array(self, x, n):
# rebin 1d array, useful for histograms
# start array for slices
i_s = np.arange(0, x.shape[0]+n, n, dtype=int)
# end array for slices
# i_e = np.roll(i_s, -1)
i_e = np.arange(n, x.shape[0]+2*n, n, dtype=int)
# create the slices
slices = [ slice(s, e) for s, e in zip(i_s,i_e)]
# sum over the slices
sum_sl = np.array([np.sum(x[sl]) for sl in slices[:-1]])
# mean value of the slices
mean_sl = np.array([np.mean(x[sl]) for sl in slices[:-1]])
# factor to correct the sum for slices that are shorter than n
        fact = np.array([float(n)/len(x[sl]) for sl in slices[:-1]])
# return the values
return sum_sl, mean_sl, slices[:-1], fact
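    # Illustration of the slice-based rebinning (assumed toy numbers):
    # for x = np.arange(10.) and n = 4 the slices are [0:4], [4:8], [8:12];
    # the last slice only covers 2 values, so its correction factor is 4/2 = 2,
    # which is what rebin(..., scale = True) applies to the (shorter) last bin.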
def __add__(self, v):
if np.isscalar(v):
res0 = self.bin_content + v
err = self.bin_error
res1 = np.copy(self.res[1])
res = ([res0, res1 ])
return histo(histogram = res, bin_error = err, window = (self.win_min, self.win_max))
# add 2 histograms and take care of the errors
elif np.array_equal(self.bin_center.shape, v.bin_center.shape):
# this is the content
res0 = self.res[0] + v.res[0]
err = np.sqrt( self.bin_error**2 + v.bin_error**2)
res1 = np.copy(v.res[1])
res = ([res0, res1 ])
return histo(histogram = res, bin_error = err, window = (self.win_min, self.win_max))
else:
print('bin centers do not match-> cannot add, sorry !')
return None
def __sub__(self, v):
# subtract 2 histograms and take care of the errors
if np.isscalar(v):
res0 = self.bin_content - v
err = self.bin_error
res1 = np.copy(self.res[1])
res = ([res0, res1 ])
return histo(histogram = res, bin_error = err, window = (self.win_min, self.win_max))
elif np.array_equal(self.bin_center.shape, v.bin_center.shape):
res0 = self.res[0] - v.res[0]
err = np.sqrt( self.bin_error**2 + v.bin_error**2)
res1 = np.copy(v.res[1])
res = ([res0, res1 ])
return histo(histogram = res, bin_error = err, window = (self.win_min, self.win_max))
else:
print('bin centers do not match-> cannot subtract, sorry !')
return None
def __mul__(self, v):
# scale a histogram multiply from left
if np.isscalar(v):
res0 = v*self.bin_content
err = v*self.bin_error
res1 = np.copy(self.res[1])
res = ([res0, res1 ])
return histo(histogram = res, bin_error = err, window = (self.win_min, self.win_max))
elif np.array_equal(self.bin_center.shape, v.bin_center.shape):
res0 = self.res[0] * v.res[0]
            # error propagation for a product of two histograms
            err = np.sqrt( (v.res[0]*self.bin_error)**2 + (self.res[0]*v.bin_error)**2 )
res1 = np.copy(self.res[1])
res = ([res0, res1 ])
return histo(histogram = res, bin_error = err, window = (self.win_min, self.win_max))
else:
print('bin centers do not match-> cannot multiply, sorry !')
return None
def __truediv__(self, v):
# divide 2 histograms and take care of the errors
# check the shapes
if np.isscalar(v):
res0 = self.res[0]/v
err = self.bin_error/v
res1 = np.copy(self.res[1])
res = ([res0, res1])
return histo(histogram = res, bin_error = err, window = (self.win_min, self.win_max))
elif np.array_equal(self.bin_content.shape, v.bin_content.shape):
# this is the content
res0 = self.bin_content / v.bin_content
f1 = 1./v.bin_content
f2 = res0/v.bin_content
err = np.sqrt( (f1*self.bin_error)**2 + (f2*v.bin_error)**2)
res1 = np.copy(v.res[1])
res = ([res0, res1])
return histo(histogram = res, bin_error = err, window = (self.win_min, self.win_max))
else:
print('bin centers do not match-> cannot divide, sorry !')
return None
def __rmul__(self, c):
# scale a histogram multiply from right
res0 = c*self.res[0]
err = c*self.bin_error
res1 = np.copy(self.res[1])
res = ([res0, res1 ])
return histo(histogram = res, bin_error = err, window = (self.win_min, self.win_max))
# end of 1d-histo class
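# Histogram arithmetic examples (a sketch; h1 and h2 are assumed to be histo
# instances with identical binning):
#
#   >>> hsum = h1 + h2           # bin contents added, errors combined in quadrature
#   >>> hdiff = h1 - h2
#   >>> hnorm = h1/h1.sum()[0]   # divide by a scalar, e.g. normalize to the total content
#   >>> hs = 2.*h1               # scalar multiplication from the left uses __rmul__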
# 2d histogram class
class histo2d:
"""
    Define a 2d histogram based on the :func:`numpy.histogram2d` function.
The various ways of defining one are:
* If *xv* is a 1D ( :func:`numpy.array`) containing the x-value data to be histogrammed
      and *yv* is a 1D array containing the y-value data :
>>> h2 = histo2d( xv, yv )
* If *his2* is the output of the :func:`numpy.histogram2d` function
>>> h2 = histo2d(histogram = his2)
* If ``xbc`` is an array with x-bin center values, ``ybc`` is an array with y-bin center values
and ``bcont`` contains bin content values then use:
>>> h2 = histo2d(x_bin_center = xbc, y_bin_center = ybc, bin_content = bcont)
* A filename for a stored histogram is given
      >>> h2 = histo2d(file = filename), where filename contains the pdatafile
Usually the result of a histo2d.save operation
Important keywords:
============ =====================================================
Keyword Meaning
============ =====================================================
    x_values     Array of x-values to be histogrammed (1d-:func:`numpy.array`)
    y_values     Array of y-values to be histogrammed (1d-:func:`numpy.array`)
range Lower and upper limits of binning ( e.g. ``range = (10.,20.)`` )
bins Number of bins, or [binsx, binsy]
histogram Result of :func:`numpy.histogram2d` function
x_bin_center 1d-Array of x - bin-center values (:func:`numpy.array`)
y_bin_center 1d-Array of y - bin-center values (:func:`numpy.array`)
bin_content 2d-Array of bin-content values (:func:`numpy.array`)
bin_error 2d-Array of errors for each bin content (:func:`numpy.array`)
file Load data from file
title Set the title
xlabel Set the x-label
ylabel Set the y-label
zlabel Set the z-label
colorbar if True, plot a colorbar
    bad_color    Set the color used for bins below zmin (default: 'w')
logz if True plot content on log scale
============ =====================================================
Additional keyword arguments are passed to the :func:`numpy.histogram2d` function
"""
def __init__(self,\
x_values = None, \
y_values = None, \
range = None, \
bins = None, \
histogram = None, \
bin_error = None, \
bin_content = None, \
x_bin_center = None, \
y_bin_center = None, \
file = None, \
title = 'my histogram', \
xlabel = 'x-bin', \
ylabel = 'y-bin', \
zlabel = 'content',\
bad_color = 'w',\
colorbar = True, \
logz = False,\
**kwargs):
self.bad_color = bad_color # color for bad pixels
self.colorbar = colorbar
self.logz = logz
# initialize fitting
if (x_values is not None) and (y_values is not None):
# values have been given for filling
if (range is None) and (bins is None):
self.fill(x_values, y_values, **kwargs)
elif (range is not None) and (bins is None):
self.fill(x_values, y_values, range = range, **kwargs)
elif (range is None) and (bins is not None):
self.fill(x_values, y_values, bins = bins, **kwargs)
else:
self.fill(x_values, y_values, bins = bins, range = range, **kwargs)
elif file is not None:
# create from file
self.load(file)
return
elif histogram is not None:
# the output of the numpy histogram function has been given
self.res = histogram
elif (x_bin_center is not None) and (y_bin_center is not None) and (bin_content is not None):
            # the histogram content is given directly
self.x_bin_center = np.copy(x_bin_center)
self.y_bin_center = np.copy(y_bin_center)
self.bin_content = np.copy(bin_content)
if(bin_error is not None):
self.bin_error = bin_error
self.__setup_histogram()
self.title = title
self.xlabel = xlabel
self.ylabel = ylabel
self.zlabel = zlabel
self.__setup_bins(error = bin_error)
self.nbins_x = self.x_bin_center.shape[0]
self.nbins_y = self.y_bin_center.shape[0]
def set_nans(self, value = 0., err_value = 1.):
"""
replace nans by specified values
============ =====================================================
Keyword Meaning
============ =====================================================
value replaces the nan in bin_content (default 0)
err_value replaces the corresponding bin_error (default 1)
============ =====================================================
Returns
-------
None.
"""
sel = np.isnan(self.bin_content)
self.bin_content[sel] = value
self.bin_error[sel] = err_value
def fill(self, x, y, add = False, **kwargs):
"""
        Fill the 2d histogram with the value pairs stored in the :func:`numpy.array` objects x and y.
============ =====================================================
Keyword Meaning
============ =====================================================
add if True add the results to the existing content
============ =====================================================
        Additional keyword arguments are passed to the :func:`numpy.histogram2d` function
"""
if not add:
# a new filling
try:
self.res = np.histogram2d(x, y, new = None, **kwargs)
except:
self.res = np.histogram2d(x, y, **kwargs)
self.__setup_bins(error = None)
else:
# the bins have already been defined continue
# to accumulate statistics
if self.res is None:
print("no binning information: try fill with add = False ")
return
res = np.histogram2d(x, y, bins = [self.x_bins,self.y_bins], **kwargs)
# add the new bin content to the old one
self.res = (self.res[0] + res[0], self.res[1], self.res[2])
# update the histogram information
self.__setup_bins(error = None)
# end of fill
def clear(self):
"""
Set the content and errors to 0.
"""
self.bin_content = np.zeros_like(self.bin_content)
self.bin_error = np.zeros_like(self.bin_content)
self.res = (np.zeros_like(self.res[0]), self.res[1], self.res[2])
self.x_bin_width = np.zeros_like(self.x_bin_width)
self.y_bin_width =
|
np.zeros_like(self.y_bin_width)
|
numpy.zeros_like
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVR
from functools import reduce
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
import requests
import datetime as dt
from datetime import timedelta
import calendar
import time
from scipy.special import binom
import urllib.request, json
import os
import http.client
import tensorflow as tf
class DataGeneratorSeq(object):
def __init__(self,prices,batch_size,num_unroll):
self._prices = prices
self._prices_length = len(self._prices) - num_unroll
self._batch_size = batch_size
self._num_unroll = num_unroll
self._segments = self._prices_length // self._batch_size
self._cursor = [offset * self._segments for offset in range(self._batch_size)]
def next_batch(self):
batch_data = np.zeros((self._batch_size),dtype=np.float32)
batch_labels = np.zeros((self._batch_size),dtype=np.float32)
for b in range(self._batch_size):
if self._cursor[b]+1>=self._prices_length:
self._cursor[b] = np.random.randint(0,(b+1)*self._segments)
batch_data[b] = self._prices[self._cursor[b]]
batch_labels[b]= self._prices[self._cursor[b]+np.random.randint(0,5)]
self._cursor[b] = (self._cursor[b]+1)%self._prices_length
return batch_data,batch_labels
def unroll_batches(self):
unroll_data,unroll_labels = [],[]
init_data, init_label = None,None
for ui in range(self._num_unroll):
data, labels = self.next_batch()
unroll_data.append(data)
unroll_labels.append(labels)
return unroll_data, unroll_labels
def reset_indices(self):
for b in range(self._batch_size):
self._cursor[b] = np.random.randint(0,min((b+1)*self._segments,self._prices_length-1))
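# Usage sketch for DataGeneratorSeq (the price array and the parameters are assumed
# example values):
#
#   prices = np.sin(np.arange(1000)/50.)       # any 1d price sequence
#   dg = DataGeneratorSeq(prices, batch_size=5, num_unroll=3)
#   u_data, u_labels = dg.unroll_batches()     # lists of length num_unroll,
#                                              # each entry an array of shape (batch_size,)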
class NeuralNetworkModel():
def read_csv_file(self):
self.myDataFromCSV = pd.read_csv('minutes.csv')
print(self.myDataFromCSV)
self.timestamps_c = []
self.prices_c = []
        self.timestamps_c = self.myDataFromCSV.loc[:,'time'].to_numpy()
        self.prices_c = self.myDataFromCSV.loc[:,'last'].to_numpy()
self.prices_c = self.prices_c[-12000:]
self.timestamps_c = self.timestamps_c[-12000:]
self.dates_c = []
for j in self.timestamps_c:
self.dates_c.append(time.ctime(j))
self.MSE_errors = []
self.df_comp = {'dates': self.dates_c, 'prices': self.prices_c}
self.df_c = pd.DataFrame(data=self.df_comp)
self.df_c = self.df_c.sort_values('dates')
self.df_c = self.df_c.reset_index(drop=True)
self.df_c.head()
self.vizialize_initial_data()
def vizialize_initial_data(self):
plt.figure(figsize = (18,9))
plt.plot(range(self.df_c.shape[0]),(self.df_c['prices']))
plt.xticks(range(0,self.df_c.shape[0],500),self.df_c['dates'].loc[::500],rotation=45)
plt.xlabel('Date',fontsize=18)
plt.ylabel('Price',fontsize=18)
plt.show()
self.divide_by_train_test_data()
def divide_by_train_test_data(self):
        self.prices = self.df_c.loc[:,'prices'].to_numpy()
self.train_data = self.prices[:11000]
self.test_data = self.prices[11000:]
self.scale_data()
def scale_data(self):
self.scaler = MinMaxScaler()
self.train_data = self.train_data.reshape(-1,1)
self.test_data = self.test_data.reshape(-1,1)
self.train_scaler()
def train_scaler(self):
self.smoothing_window_size = 2500
for di in range(0,10000,self.smoothing_window_size):
self.scaler.fit(self.train_data[di:di+self.smoothing_window_size,:])
self.train_data[di:di+self.smoothing_window_size,:] = self.scaler.transform(self.train_data[di:di+self.smoothing_window_size,:])
# You normalize the last bit of remaining data
self.scaler.fit(self.train_data[di+self.smoothing_window_size:,:])
self.train_data[di+self.smoothing_window_size:,:] = self.scaler.transform(self.train_data[di+self.smoothing_window_size:,:])
# Reshape both train and test data
self.train_data = self.train_data.reshape(-1)
# Normalize test data
self.test_data = self.scaler.transform(self.test_data).reshape(-1)
self.perform_exponential_moving_average()
def perform_exponential_moving_average(self):
self.EMA = 0.0
self.gamma = 0.1
for ti in range(11000):
self.EMA = self.gamma*self.train_data[ti] + (1-self.gamma)*self.EMA
self.train_data[ti] = self.EMA
self.all_mid_data = np.concatenate([self.train_data,self.test_data],axis=0)
self.standart_average_calc()
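    # The loop above implements the exponential moving average recurrence
    #   EMA_t = gamma * x_t + (1 - gamma) * EMA_(t-1)
    # and overwrites the training data with the smoothed values before the
    # averaging baselines below are computed.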
def standart_average_calc(self):
self.window_size = 100
self.N = self.train_data.size
self.std_avg_predictions = []
self.std_avg_x = []
self.mse_errors = []
        for pred_idx in range(self.window_size,self.N):
            # pred_idx is always < self.N inside this loop, so the prediction date
            # can be taken directly from the dataframe
            self.date = self.df_c.loc[pred_idx,'dates']
self.std_avg_predictions.append(np.mean(self.train_data[pred_idx-self.window_size:pred_idx]))
self.mse_errors.append((self.std_avg_predictions[-1]-self.train_data[pred_idx])**2)
self.std_avg_x.append(self.date)
print('MSE error for standard averaging: %.5f'%(0.5*np.mean(self.mse_errors)))
self.standart_average_vizualize()
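    # The standard-averaging baseline above predicts every point as the mean of
    # the preceding window_size training values,
    #   x_hat(t) = mean( x(t - window_size), ..., x(t - 1) ),
    # and accumulates the squared errors that go into the printed MSE.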
def standart_average_vizualize(self):
plt.figure(figsize = (18,9))
plt.plot(range(self.df_c.shape[0]),self.all_mid_data,color='b',label='True')
plt.plot(range(self.window_size,self.N),self.std_avg_predictions,color='orange',label='Prediction')
plt.xlabel('Date')
plt.ylabel('Price')
plt.legend(fontsize=18)
plt.show()
self.exponential_moving_average_calc()
def exponential_moving_average_calc(self):
self.window_size = 100
self.N = self.train_data.size
self.run_avg_predictions = []
self.run_avg_x = []
self.mse_errors = []
self.running_mean = 0.0
self.run_avg_predictions.append(self.running_mean)
self.decay = 0.5
for pred_idx in range(1,self.N):
self.running_mean = self.running_mean*self.decay + (1.0-self.decay)*self.train_data[pred_idx-1]
self.run_avg_predictions.append(self.running_mean)
self.mse_errors.append((self.run_avg_predictions[-1]-self.train_data[pred_idx])**2)
self.run_avg_x.append(self.date)
print('MSE error for EMA averaging: %.5f'%(0.5*
|
np.mean(self.mse_errors)
|
numpy.mean
|
import numpy as np
# Helper function to calculate the sigmoid
def sigmoid(z):
sig = 1 / (1 +
|
np.exp(-z)
|
numpy.exp
|
import centrosome.filter
import numpy
import six.moves
from cellprofiler_core.constants.measurement import (
GROUP_NUMBER,
GROUP_INDEX,
R_FIRST_IMAGE_NUMBER,
R_SECOND_IMAGE_NUMBER,
R_FIRST_OBJECT_NUMBER,
R_SECOND_OBJECT_NUMBER,
C_COUNT,
MCA_AVAILABLE_POST_GROUP,
M_LOCATION_CENTER_X,
M_LOCATION_CENTER_Y,
)
from cellprofiler_core.image import ImageSetList
import cellprofiler_core.measurement
from cellprofiler_core.object import ObjectSet, Objects
import cellprofiler.modules.trackobjects
import tests.modules
from cellprofiler_core.pipeline import Pipeline, LoadException, RunException
from cellprofiler_core.workspace import Workspace
OBJECT_NAME = "objects"
def test_load_v3():
file = tests.modules.get_test_resources_directory("trackobjects/v3.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = Pipeline()
def callback(caller, event):
assert not isinstance(event, LoadException)
pipeline.add_listener(callback)
pipeline.load(six.moves.StringIO(data))
module = pipeline.modules()[0]
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
assert module.tracking_method == "LAP"
assert module.object_name.value == "Nuclei"
assert module.pixel_radius.value == 80
assert module.display_type.value == "Color and Number"
assert not module.wants_image
assert module.measurement == "AreaShape_Area"
assert module.image_name == "TrackedCells"
assert module.wants_second_phase
assert module.split_cost == 41
assert module.merge_cost == 42
assert module.max_gap_score == 53
assert module.max_split_score == 54
assert module.max_merge_score == 55
assert module.max_frame_distance == 6
def test_load_v4():
file = tests.modules.get_test_resources_directory("trackobjects/v4.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = Pipeline()
def callback(caller, event):
assert not isinstance(event, LoadException)
pipeline.add_listener(callback)
pipeline.load(six.moves.StringIO(data))
assert len(pipeline.modules()) == 3
for module, tracking_method, model, save_img, phase2, meas, dop in zip(
pipeline.modules(),
("Measurements", "Overlap", "Distance"),
(
cellprofiler.modules.trackobjects.M_BOTH,
cellprofiler.modules.trackobjects.M_RANDOM,
cellprofiler.modules.trackobjects.M_VELOCITY,
),
(True, False, True),
(True, False, True),
("Slothfulness", "Prescience", "Trepidation"),
(
cellprofiler.modules.trackobjects.DT_COLOR_AND_NUMBER,
cellprofiler.modules.trackobjects.DT_COLOR_ONLY,
cellprofiler.modules.trackobjects.DT_COLOR_AND_NUMBER,
),
):
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
assert module.tracking_method == tracking_method
assert module.model == model
assert module.wants_image.value == save_img
assert module.wants_second_phase.value == phase2
assert module.measurement == meas
assert module.pixel_radius == 50
assert module.display_type == dop
assert module.image_name == "TrackByLAP"
assert module.radius_std == 3
assert module.radius_limit.min == 3.0
assert module.radius_limit.max == 10.0
assert module.gap_cost == 40
assert module.split_cost == 1
assert module.merge_cost == 1
assert module.max_gap_score == 51
assert module.max_split_score == 52
assert module.max_merge_score == 53
assert module.max_frame_distance == 4
def test_load_v5():
file = tests.modules.get_test_resources_directory("trackobjects/v5.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = Pipeline()
def callback(caller, event):
assert not isinstance(event, LoadException)
pipeline.add_listener(callback)
pipeline.load(six.moves.StringIO(data))
assert len(pipeline.modules()) == 1
m = pipeline.modules()[0]
assert isinstance(m, cellprofiler.modules.trackobjects.TrackObjects)
assert m.tracking_method == "LAP"
assert m.object_name == "Turtles"
assert m.measurement == "Steadiness"
assert m.pixel_radius == 44
assert m.display_type == cellprofiler.modules.trackobjects.DT_COLOR_AND_NUMBER
assert not m.wants_image
assert m.image_name == "TrackedTurtles"
assert m.model == cellprofiler.modules.trackobjects.M_BOTH
assert m.radius_std == 3
assert m.radius_limit.min == 3
assert m.radius_limit.max == 11
assert m.wants_second_phase
assert m.gap_cost == 39
assert m.split_cost == 41
assert m.merge_cost == 42
assert m.max_frame_distance == 8
assert m.wants_minimum_lifetime
assert m.min_lifetime == 2
assert not m.wants_maximum_lifetime
assert m.max_lifetime == 1000
def test_load_v6():
file = tests.modules.get_test_resources_directory("trackobjects/v6.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = Pipeline()
def callback(caller, event):
assert not isinstance(event, LoadException)
pipeline.add_listener(callback)
pipeline.load(six.moves.StringIO(data))
assert len(pipeline.modules()) == 1
m = pipeline.modules()[0]
assert isinstance(m, cellprofiler.modules.trackobjects.TrackObjects)
assert m.tracking_method == "LAP"
assert m.object_name == "Turtles"
assert m.measurement == "Steadiness"
assert m.pixel_radius == 44
assert m.display_type == cellprofiler.modules.trackobjects.DT_COLOR_AND_NUMBER
assert not m.wants_image
assert m.image_name == "TrackedTurtles"
assert m.model == cellprofiler.modules.trackobjects.M_BOTH
assert m.radius_std == 3
assert m.radius_limit.min == 3
assert m.radius_limit.max == 11
assert m.wants_second_phase
assert m.gap_cost == 39
assert m.split_cost == 41
assert m.merge_cost == 42
assert m.max_frame_distance == 8
assert m.wants_minimum_lifetime
assert m.min_lifetime == 2
assert not m.wants_maximum_lifetime
assert m.max_lifetime == 1000
assert m.mitosis_cost == 79
assert m.mitosis_max_distance == 41
def runTrackObjects(labels_list, fn=None, measurement=None):
"""Run two cycles of TrackObjects
labels1 - the labels matrix for the first cycle
labels2 - the labels matrix for the second cycle
fn - a callback function called with the module and workspace. It has
the signature, fn(module, workspace, n) where n is 0 when
called prior to prepare_run, 1 prior to first iteration
and 2 prior to second iteration.
returns the measurements
"""
module = cellprofiler.modules.trackobjects.TrackObjects()
module.set_module_num(1)
module.object_name.value = OBJECT_NAME
module.pixel_radius.value = 50
module.measurement.value = "measurement"
measurements = cellprofiler_core.measurement.Measurements()
measurements.add_all_measurements(
"Image", GROUP_NUMBER, [1] * len(labels_list),
)
measurements.add_all_measurements(
"Image", GROUP_INDEX, list(range(1, len(labels_list) + 1)),
)
pipeline = Pipeline()
pipeline.add_module(module)
image_set_list = ImageSetList()
if fn:
fn(module, None, 0)
module.prepare_run(
Workspace(pipeline, module, None, None, measurements, image_set_list)
)
first = True
for labels, index in zip(labels_list, list(range(len(labels_list)))):
object_set = ObjectSet()
objects = Objects()
objects.segmented = labels
object_set.add_objects(objects, OBJECT_NAME)
image_set = image_set_list.get_image_set(index)
if first:
first = False
else:
measurements.next_image_set()
if measurement is not None:
measurements.add_measurement(
OBJECT_NAME, "measurement", numpy.array(measurement[index])
)
workspace = Workspace(
pipeline, module, image_set, object_set, measurements, image_set_list
)
if fn:
fn(module, workspace, index + 1)
module.run(workspace)
return measurements
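# Example of the `fn` callback used by the tests below (a sketch; the radius and
# method values are arbitrary):
#
#   def fn(module, workspace, idx):
#       if idx == 0:                              # called once before prepare_run
#           module.pixel_radius.value = 3
#           module.tracking_method.value = "Distance"
#
#   measurements = runTrackObjects((labels1, labels2), fn)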
def test_track_nothing():
"""Run TrackObjects on an empty labels matrix"""
columns = []
def fn(module, workspace, index, columns=columns):
if workspace is not None and index == 0:
columns += module.get_measurement_columns(workspace.pipeline)
measurements = runTrackObjects(
(numpy.zeros((10, 10), int), numpy.zeros((10, 10), int)), fn
)
features = [
feature
for feature in measurements.get_feature_names(OBJECT_NAME)
if feature.startswith(cellprofiler.modules.trackobjects.F_PREFIX)
]
assert all(
[column[1] in features for column in columns if column[0] == OBJECT_NAME]
)
for feature in cellprofiler.modules.trackobjects.F_ALL:
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "50"))
assert name in features
value = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(value) == 0
features = [
feature
for feature in measurements.get_feature_names("Image")
if feature.startswith(cellprofiler.modules.trackobjects.F_PREFIX)
]
assert all([column[1] in features for column in columns if column[0] == "Image"])
for feature in cellprofiler.modules.trackobjects.F_IMAGE_ALL:
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, OBJECT_NAME, "50")
)
assert name in features
value = measurements.get_current_image_measurement(name)
assert value == 0
def test_00_track_one_then_nothing():
"""Run track objects on an object that disappears
Regression test of IMG-1090
"""
labels = numpy.zeros((10, 10), int)
labels[3:6, 2:7] = 1
measurements = runTrackObjects((labels, numpy.zeros((10, 10), int)))
feature = "_".join(
(
cellprofiler.modules.trackobjects.F_PREFIX,
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT,
OBJECT_NAME,
"50",
)
)
value = measurements.get_current_image_measurement(feature)
assert value == 1
def test_track_one_distance():
"""Track an object that doesn't move using distance"""
labels = numpy.zeros((10, 10), int)
labels[3:6, 2:7] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 1
module.tracking_method.value = "Distance"
measurements = runTrackObjects((labels, labels), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "1"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert round(abs(m(cellprofiler.modules.trackobjects.F_TRAJECTORY_X) - 0), 7) == 0
assert round(abs(m(cellprofiler.modules.trackobjects.F_TRAJECTORY_Y) - 0), 7) == 0
assert (
round(abs(m(cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED) - 0), 7) == 0
)
assert (
round(abs(m(cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE) - 0), 7)
== 0
)
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 1
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 1
assert m(cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER) == 1
assert m(cellprofiler.modules.trackobjects.F_LIFETIME) == 2
def m(feature):
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, OBJECT_NAME, "1")
)
return measurements.get_current_image_measurement(name)
assert m(cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_SPLIT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_MERGE_COUNT) == 0
check_relationships(measurements, [1], [1], [2], [1])
def test_track_one_moving():
"""Track an object that moves"""
labels_list = []
distance = 0
last_i, last_j = (0, 0)
for i_off, j_off in ((0, 0), (2, 0), (2, 1), (0, 1)):
distance = i_off - last_i + j_off - last_j
last_i, last_j = (i_off, j_off)
labels = numpy.zeros((10, 10), int)
labels[4 + i_off : 7 + i_off, 4 + j_off : 7 + j_off] = 1
labels_list.append(labels)
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 3
module.tracking_method.value = "Distance"
measurements = runTrackObjects(labels_list, fn)
def m(feature, expected):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "3"))
value_set = measurements.get_all_measurements(OBJECT_NAME, name)
assert len(expected) == len(value_set)
for values, x in zip(value_set, expected):
assert len(values) == 1
assert round(abs(values[0] - x), 7) == 0
m(cellprofiler.modules.trackobjects.F_TRAJECTORY_X, [0, 0, 1, 0])
m(cellprofiler.modules.trackobjects.F_TRAJECTORY_Y, [0, 2, 0, -2])
m(cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED, [0, 2, 1, 2])
m(cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE, [0, 2, 3, 5])
m(cellprofiler.modules.trackobjects.F_LABEL, [1, 1, 1, 1])
m(cellprofiler.modules.trackobjects.F_LIFETIME, [1, 2, 3, 4])
m(
cellprofiler.modules.trackobjects.F_LINEARITY,
[1, 1, numpy.sqrt(5) / 3, 1.0 / 5.0],
)
def m(feature):
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, OBJECT_NAME, "3")
)
return measurements.get_current_image_measurement(name)
assert m(cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_SPLIT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_MERGE_COUNT) == 0
image_numbers = numpy.arange(1, len(labels_list) + 1)
object_numbers = numpy.ones(len(image_numbers))
check_relationships(
measurements,
image_numbers[:-1],
object_numbers[:-1],
image_numbers[1:],
object_numbers[1:],
)
def test_track_split():
"""Track an object that splits"""
labels1 = numpy.zeros((11, 9), int)
labels1[1:10, 1:8] = 1
labels2 = numpy.zeros((10, 10), int)
labels2[1:6, 1:8] = 1
labels2[6:10, 1:8] = 2
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 5
module.tracking_method.value = "Distance"
measurements = runTrackObjects((labels1, labels2, labels2), fn)
def m(feature, idx):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "5"))
values = measurements.get_measurement(OBJECT_NAME, name, idx + 1)
assert len(values) == 2
return values
labels = m(cellprofiler.modules.trackobjects.F_LABEL, 2)
assert len(labels) == 2
assert numpy.all(labels == 1)
parents = m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER, 1)
assert numpy.all(parents == 1)
assert numpy.all(m(cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER, 1) == 1)
parents = m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER, 2)
assert numpy.all(parents == numpy.array([1, 2]))
assert numpy.all(m(cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER, 2) == 2)
def m(feature):
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, OBJECT_NAME, "5")
)
return measurements.get_all_measurements("Image", name)[1]
assert m(cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_SPLIT_COUNT) == 1
assert m(cellprofiler.modules.trackobjects.F_MERGE_COUNT) == 0
check_relationships(
measurements, [1, 1, 2, 2], [1, 1, 1, 2], [2, 2, 3, 3], [1, 2, 1, 2]
)
def test_track_negative():
"""Track unrelated objects"""
labels1 = numpy.zeros((10, 10), int)
labels1[1:5, 1:5] = 1
labels2 = numpy.zeros((10, 10), int)
labels2[6:9, 6:9] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 1
module.tracking_method.value = "Distance"
measurements = runTrackObjects((labels1, labels2), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "1"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 0
def m(feature):
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, OBJECT_NAME, "1")
)
return measurements.get_current_image_measurement(name)
assert m(cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT) == 1
assert m(cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT) == 1
assert m(cellprofiler.modules.trackobjects.F_SPLIT_COUNT) == 0
assert m(cellprofiler.modules.trackobjects.F_MERGE_COUNT) == 0
def test_track_ambiguous():
"""Track disambiguation from among two possible parents"""
labels1 = numpy.zeros((20, 20), int)
labels1[1:4, 1:4] = 1
labels1[16:19, 16:19] = 2
labels2 = numpy.zeros((20, 20), int)
labels2[10:15, 10:15] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 20
module.tracking_method.value = "Distance"
measurements = runTrackObjects((labels1, labels2), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "20"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 2
def test_overlap_positive():
"""Track overlapping objects"""
labels1 = numpy.zeros((10, 10), int)
labels1[3:6, 4:7] = 1
labels2 = numpy.zeros((10, 10), int)
labels2[4:7, 5:9] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 2
module.tracking_method.value = "Overlap"
measurements = runTrackObjects((labels1, labels2), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "2"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 1
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 1
def test_overlap_negative():
"""Track objects that don't overlap"""
labels1 = numpy.zeros((20, 20), int)
labels1[3:6, 4:7] = 1
labels2 = numpy.zeros((20, 20), int)
labels2[14:17, 15:19] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 2
module.tracking_method.value = "Overlap"
measurements = runTrackObjects((labels1, labels2), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "2"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 0
def test_overlap_ambiguous():
"""Track an object that overlaps two parents"""
labels1 = numpy.zeros((20, 20), int)
labels1[1:5, 1:5] = 1
labels1[15:19, 15:19] = 2
labels2 = numpy.zeros((20, 20), int)
labels2[4:18, 4:18] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 2
module.tracking_method.value = "Overlap"
measurements = runTrackObjects((labels1, labels2), fn)
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "2"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 2
def test_measurement_positive():
"""Test tracking an object by measurement"""
labels1 = numpy.zeros((10, 10), int)
labels1[3:6, 4:7] = 1
labels2 = numpy.zeros((10, 10), int)
labels2[4:7, 5:9] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 2
module.tracking_method.value = "Measurements"
measurements = runTrackObjects((labels1, labels2), fn, [[1], [1]])
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "2"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 1
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 1
def test_measurement_negative():
"""Test tracking with too great a jump between successive images"""
labels1 = numpy.zeros((20, 20), int)
labels1[3:6, 4:7] = 1
labels2 = numpy.zeros((20, 20), int)
labels2[14:17, 15:19] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 2
module.tracking_method.value = "Measurements"
measurements = runTrackObjects((labels1, labels2), fn, [[1], [1]])
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "2"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 0
def test_ambiguous():
"""Test measurement with ambiguous parent choice"""
labels1 = numpy.zeros((20, 20), int)
labels1[1:5, 1:5] = 1
labels1[15:19, 15:19] = 2
labels2 = numpy.zeros((20, 20), int)
labels2[6:14, 6:14] = 1
def fn(module, workspace, idx):
if idx == 0:
module.pixel_radius.value = 4
module.tracking_method.value = "Measurements"
measurements = runTrackObjects((labels1, labels2), fn, [[1, 10], [9]])
def m(feature):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature, "4"))
values = measurements.get_current_measurement(OBJECT_NAME, name)
assert len(values) == 1
return values[0]
assert m(cellprofiler.modules.trackobjects.F_LABEL) == 2
assert m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER) == 2
def test_cross_numbered_objects():
"""Test labeling when object 1 in one image becomes object 2 in next"""
i, j = numpy.mgrid[0:10, 0:20]
labels = (i > 5) + (j > 10) * 2
pp = numpy.array(list(centrosome.filter.permutations([1, 2, 3, 4])))
def fn(module, workspace, idx):
if idx == 0:
module.tracking_method.value = "LAP"
measurements = runTrackObjects([numpy.array(p)[labels] for p in pp], fn)
def m(feature, i):
name = "_".join((cellprofiler.modules.trackobjects.F_PREFIX, feature))
values = measurements[OBJECT_NAME, name, i + 1]
assert len(values) == 4
return values
for i, p in enumerate(pp):
l = m(cellprofiler.modules.trackobjects.F_LABEL, i)
numpy.testing.assert_array_equal(numpy.arange(1, 5), p[l - 1])
if i > 0:
p_prev = pp[i - 1]
order = numpy.lexsort([p])
expected_po = p_prev[order]
po = m(cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER, i)
numpy.testing.assert_array_equal(po, expected_po)
pi = m(cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER, i)
numpy.testing.assert_array_equal(pi, i)
image_numbers, _ = numpy.mgrid[1 : (len(pp) + 1), 0:4]
check_relationships(
measurements,
image_numbers[:-1, :].flatten(),
pp[:-1, :].flatten(),
image_numbers[1:, :].flatten(),
pp[1:, :].flatten(),
)
def test_measurement_columns():
"""Test get_measurement_columns function"""
module = cellprofiler.modules.trackobjects.TrackObjects()
module.object_name.value = OBJECT_NAME
module.tracking_method.value = "Distance"
module.pixel_radius.value = 10
columns = module.get_measurement_columns(None)
assert len(columns) == len(cellprofiler.modules.trackobjects.F_ALL) + len(
cellprofiler.modules.trackobjects.F_IMAGE_ALL
)
for object_name, features in (
(OBJECT_NAME, cellprofiler.modules.trackobjects.F_ALL),
("Image", cellprofiler.modules.trackobjects.F_IMAGE_ALL,),
):
for feature in features:
if object_name == OBJECT_NAME:
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature, "10")
)
else:
name = "_".join(
(
cellprofiler.modules.trackobjects.F_PREFIX,
feature,
OBJECT_NAME,
"10",
)
)
index = [column[1] for column in columns].index(name)
assert index != -1
column = columns[index]
assert column[0] == object_name
def test_measurement_columns_lap():
"""Test get_measurement_columns function for LAP"""
module = cellprofiler.modules.trackobjects.TrackObjects()
module.object_name.value = OBJECT_NAME
module.tracking_method.value = "LAP"
module.model.value = cellprofiler.modules.trackobjects.M_BOTH
second_phase = [
cellprofiler.modules.trackobjects.F_LINKING_DISTANCE,
cellprofiler.modules.trackobjects.F_MOVEMENT_MODEL,
]
for wants in (True, False):
module.wants_second_phase.value = wants
columns = module.get_measurement_columns(None)
# 2, 2, 4 for the static model
# 4, 4, 16 for the velocity model
other_features = [
cellprofiler.modules.trackobjects.F_AREA,
cellprofiler.modules.trackobjects.F_LINKING_DISTANCE,
cellprofiler.modules.trackobjects.F_LINK_TYPE,
cellprofiler.modules.trackobjects.F_MOVEMENT_MODEL,
cellprofiler.modules.trackobjects.F_STANDARD_DEVIATION,
]
if wants:
other_features += [
cellprofiler.modules.trackobjects.F_GAP_LENGTH,
cellprofiler.modules.trackobjects.F_GAP_SCORE,
cellprofiler.modules.trackobjects.F_MERGE_SCORE,
cellprofiler.modules.trackobjects.F_SPLIT_SCORE,
cellprofiler.modules.trackobjects.F_MITOSIS_SCORE,
]
assert (
len(columns)
== len(cellprofiler.modules.trackobjects.F_ALL)
+ len(cellprofiler.modules.trackobjects.F_IMAGE_ALL)
+ len(other_features)
+ 2
+ 2
+ 4
+ 4
+ 4
+ 16
)
kalman_features = [
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_STATE,
cellprofiler.modules.trackobjects.F_VY,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_NOISE,
cellprofiler.modules.trackobjects.F_VY,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_Y,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_STATIC_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_X,
cellprofiler.modules.trackobjects.F_VY,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_Y,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_Y,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_Y,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_Y,
cellprofiler.modules.trackobjects.F_VY,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VX,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VX,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VX,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VX,
cellprofiler.modules.trackobjects.F_VY,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VY,
cellprofiler.modules.trackobjects.F_X,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VY,
cellprofiler.modules.trackobjects.F_Y,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VY,
cellprofiler.modules.trackobjects.F_VX,
),
cellprofiler.modules.trackobjects.kalman_feature(
cellprofiler.modules.trackobjects.F_VELOCITY_MODEL,
cellprofiler.modules.trackobjects.F_COV,
cellprofiler.modules.trackobjects.F_VY,
cellprofiler.modules.trackobjects.F_VY,
),
]
for object_name, features in (
(
OBJECT_NAME,
cellprofiler.modules.trackobjects.F_ALL
+ kalman_features
+ other_features,
),
("Image", cellprofiler.modules.trackobjects.F_IMAGE_ALL,),
):
for feature in features:
if object_name == OBJECT_NAME:
name = "_".join(
(cellprofiler.modules.trackobjects.F_PREFIX, feature)
)
else:
name = "_".join(
(
cellprofiler.modules.trackobjects.F_PREFIX,
feature,
OBJECT_NAME,
)
)
index = [column[1] for column in columns].index(name)
assert index != -1
column = columns[index]
assert column[0] == object_name
if wants or feature in second_phase:
assert len(column) == 4
assert MCA_AVAILABLE_POST_GROUP in column[3]
assert column[3][MCA_AVAILABLE_POST_GROUP]
else:
assert (
(len(column) == 3)
or (MCA_AVAILABLE_POST_GROUP not in column[3])
or (not column[3][MCA_AVAILABLE_POST_GROUP])
)
def test_measurements():
"""Test the different measurement pieces"""
module = cellprofiler.modules.trackobjects.TrackObjects()
module.object_name.value = OBJECT_NAME
module.image_name.value = "image"
module.pixel_radius.value = 10
categories = module.get_categories(None, "Foo")
assert len(categories) == 0
categories = module.get_categories(None, OBJECT_NAME)
assert len(categories) == 1
assert categories[0] == cellprofiler.modules.trackobjects.F_PREFIX
features = module.get_measurements(None, OBJECT_NAME, "Foo")
assert len(features) == 0
features = module.get_measurements(
None, OBJECT_NAME, cellprofiler.modules.trackobjects.F_PREFIX
)
assert len(features) == len(cellprofiler.modules.trackobjects.F_ALL)
assert all(
[feature in cellprofiler.modules.trackobjects.F_ALL for feature in features]
)
scales = module.get_measurement_scales(
None, OBJECT_NAME, cellprofiler.modules.trackobjects.F_PREFIX, "Foo", "image"
)
assert len(scales) == 0
for feature in cellprofiler.modules.trackobjects.F_ALL:
scales = module.get_measurement_scales(
None,
OBJECT_NAME,
cellprofiler.modules.trackobjects.F_PREFIX,
feature,
"image",
)
assert len(scales) == 1
assert int(scales[0]) == 10
def make_lap2_workspace(objs, nimages, group_numbers=None, group_indexes=None):
"""Make a workspace to test the second half of LAP
objs - a N x 7 array of "objects" composed of the
following pieces per object
objs[0] - image set # for object
objs[1] - label for object
objs[2] - parent image #
objs[3] - parent object #
objs[4] - x coordinate for object
objs[5] - y coordinate for object
objs[6] - area for object
nimages - # of image sets
group_numbers - group numbers for each image set, defaults to all 1
group_indexes - group indexes for each image set, defaults to range
"""
module = cellprofiler.modules.trackobjects.TrackObjects()
module.set_module_num(1)
module.object_name.value = OBJECT_NAME
module.tracking_method.value = "LAP"
module.wants_second_phase.value = True
module.wants_lifetime_filtering.value = False
module.wants_minimum_lifetime.value = False
module.min_lifetime.value = 1
module.wants_maximum_lifetime.value = False
module.max_lifetime.value = 100
module.pixel_radius.value = 50
pipeline = Pipeline()
def callback(caller, event):
assert not isinstance(event, RunException)
pipeline.add_listener(callback)
pipeline.add_module(module)
m = cellprofiler_core.measurement.Measurements()
if objs.shape[0] > 0:
nobjects = numpy.bincount(objs[:, 0].astype(int))
else:
nobjects = numpy.zeros(nimages, int)
for i in range(nimages):
m.next_image_set(i + 1)
for index, feature, dtype in (
(
1,
module.measurement_name(cellprofiler.modules.trackobjects.F_LABEL),
int,
),
(
2,
module.measurement_name(
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER
),
int,
),
(
3,
module.measurement_name(
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER
),
int,
),
(4, M_LOCATION_CENTER_X, float),
(5, M_LOCATION_CENTER_Y, float),
(
6,
module.measurement_name(cellprofiler.modules.trackobjects.F_AREA),
float,
),
):
values = objs[objs[:, 0] == i, index].astype(dtype)
m.add_measurement(OBJECT_NAME, feature, values, i + 1)
m.add_measurement("Image", "ImageNumber", i + 1)
m.add_measurement(
"Image",
GROUP_NUMBER,
1 if group_numbers is None else group_numbers[i],
image_set_number=i + 1,
)
m.add_measurement(
"Image",
GROUP_INDEX,
i if group_indexes is None else group_indexes[i],
image_set_number=i + 1,
)
#
# Add blanks of the right sizes for measurements that are recalculated
#
m.add_measurement(
"Image",
"_".join((C_COUNT, OBJECT_NAME)),
nobjects[i],
image_set_number=i + 1,
)
for feature in (
cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED,
cellprofiler.modules.trackobjects.F_DISPLACEMENT,
cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE,
cellprofiler.modules.trackobjects.F_TRAJECTORY_X,
cellprofiler.modules.trackobjects.F_TRAJECTORY_Y,
cellprofiler.modules.trackobjects.F_LINEARITY,
cellprofiler.modules.trackobjects.F_LIFETIME,
cellprofiler.modules.trackobjects.F_FINAL_AGE,
cellprofiler.modules.trackobjects.F_LINKING_DISTANCE,
cellprofiler.modules.trackobjects.F_LINK_TYPE,
cellprofiler.modules.trackobjects.F_MOVEMENT_MODEL,
cellprofiler.modules.trackobjects.F_STANDARD_DEVIATION,
):
dtype = (
int
if feature
in (
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER,
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER,
cellprofiler.modules.trackobjects.F_LIFETIME,
cellprofiler.modules.trackobjects.F_LINK_TYPE,
cellprofiler.modules.trackobjects.F_MOVEMENT_MODEL,
)
else float
)
m.add_measurement(
OBJECT_NAME,
module.measurement_name(feature),
numpy.NaN * numpy.ones(nobjects[i], dtype)
if feature == cellprofiler.modules.trackobjects.F_FINAL_AGE
else numpy.zeros(nobjects[i], dtype),
image_set_number=i + 1,
)
for feature in (
cellprofiler.modules.trackobjects.F_SPLIT_COUNT,
cellprofiler.modules.trackobjects.F_MERGE_COUNT,
):
m.add_measurement(
"Image",
module.image_measurement_name(feature),
0,
image_set_number=i + 1,
)
#
# Figure out how many new and lost objects per image set
#
label_sets = [set() for i in range(nimages)]
for row in objs:
label_sets[row[0]].add(row[1])
if group_numbers is None:
group_numbers = numpy.ones(nimages, int)
if group_indexes is None:
group_indexes = numpy.arange(nimages) + 1
#
# New objects are ones without matching labels in the previous set
#
for i in range(0, nimages):
if group_indexes[i] == 1:
new_objects = len(label_sets[i])
lost_objects = 0
else:
new_objects = sum(
[1 for label in label_sets[i] if label not in label_sets[i - 1]]
)
lost_objects = sum(
[1 for label in label_sets[i - 1] if label not in label_sets[i]]
)
m.add_measurement(
"Image",
module.image_measurement_name(
cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT
),
new_objects,
image_set_number=i + 1,
)
m.add_measurement(
"Image",
module.image_measurement_name(
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT
),
lost_objects,
image_set_number=i + 1,
)
m.image_set_number = nimages
image_set_list = ImageSetList()
for i in range(nimages):
image_set = image_set_list.get_image_set(i)
workspace = Workspace(pipeline, module, image_set, ObjectSet(), m, image_set_list,)
return workspace, module
def check_measurements(workspace, d):
"""Check measurements against expected values
workspace - workspace that was run
d - dictionary of feature name and list of expected measurement values
"""
m = workspace.measurements
assert isinstance(m,cellprofiler_core.measurement.Measurements)
module = workspace.module
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
for feature, expected in list(d.items()):
if numpy.isscalar(expected[0]):
mname = module.image_measurement_name(feature)
values = m.get_all_measurements("Image", mname)
assert len(expected) == len(values), (
"Expected # image sets (%d) != actual (%d) for %s"
% (len(expected), len(values), feature)
)
assert all([v == e for v, e in zip(values, expected)]), (
"Values don't match for " + feature
)
else:
mname = module.measurement_name(feature)
values = m.get_all_measurements(OBJECT_NAME, mname)
assert len(expected) == len(values), (
"Expected # image sets (%d) != actual (%d) for %s"
% (len(expected), len(values), feature)
)
for i, (e, v) in enumerate(zip(expected, values)):
assert len(e) == len(v), (
"Expected # of objects (%d) != actual (%d) for %s:%d"
% (len(e), len(v), feature, i)
)
numpy.testing.assert_almost_equal(v, e)
def check_relationships(
m,
expected_parent_image_numbers,
expected_parent_object_numbers,
expected_child_image_numbers,
expected_child_object_numbers,
):
"""Check the relationship measurements against expected"""
expected_parent_image_numbers = numpy.atleast_1d(expected_parent_image_numbers)
expected_child_image_numbers = numpy.atleast_1d(expected_child_image_numbers)
expected_parent_object_numbers = numpy.atleast_1d(expected_parent_object_numbers)
expected_child_object_numbers = numpy.atleast_1d(expected_child_object_numbers)
assert isinstance(m,cellprofiler_core.measurement.Measurements)
r = m.get_relationships(
1, cellprofiler.modules.trackobjects.R_PARENT, OBJECT_NAME, OBJECT_NAME
)
actual_parent_image_numbers = r[R_FIRST_IMAGE_NUMBER]
actual_parent_object_numbers = r[R_FIRST_OBJECT_NUMBER]
actual_child_image_numbers = r[R_SECOND_IMAGE_NUMBER]
actual_child_object_numbers = r[R_SECOND_OBJECT_NUMBER]
assert len(actual_parent_image_numbers) == len(expected_parent_image_numbers)
#
# Sort similarly
#
for i1, o1, i2, o2 in (
(
expected_parent_image_numbers,
expected_parent_object_numbers,
expected_child_image_numbers,
expected_child_object_numbers,
),
(
actual_parent_image_numbers,
actual_parent_object_numbers,
actual_child_image_numbers,
actual_child_object_numbers,
),
):
order = numpy.lexsort((i1, o1, i2, o2))
for x in (i1, o1, i2, o2):
x[:] = x[order]
for expected, actual in zip(
(
expected_parent_image_numbers,
expected_parent_object_numbers,
expected_child_image_numbers,
expected_child_object_numbers,
),
(
actual_parent_image_numbers,
actual_parent_object_numbers,
actual_child_image_numbers,
actual_child_object_numbers,
),
):
numpy.testing.assert_array_equal(expected, actual)
def test_lap_none():
"""Run the second part of LAP on one image of nothing"""
with MonkeyPatchedDelete():
workspace, module = make_lap2_workspace(numpy.zeros((0, 7)), 1)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
module.run_as_data_tool(workspace)
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [numpy.zeros(0, int)],
cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED: [numpy.zeros(0)],
cellprofiler.modules.trackobjects.F_DISPLACEMENT: [numpy.zeros(0)],
cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE: [
numpy.zeros(0)
],
cellprofiler.modules.trackobjects.F_TRAJECTORY_X: [numpy.zeros(0)],
cellprofiler.modules.trackobjects.F_TRAJECTORY_Y: [numpy.zeros(0)],
cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT: [0],
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT: [0],
cellprofiler.modules.trackobjects.F_MERGE_COUNT: [0],
cellprofiler.modules.trackobjects.F_SPLIT_COUNT: [0],
},
)
def test_lap_one():
"""Run the second part of LAP on one image of one object"""
with MonkeyPatchedDelete():
workspace, module = make_lap2_workspace(
numpy.array([[0, 1, 0, 0, 100, 100, 25]]), 1
)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
module.run_as_data_tool(workspace)
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [numpy.array([1])],
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER: [
numpy.array([0])
],
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER: [
numpy.array([0])
],
cellprofiler.modules.trackobjects.F_DISPLACEMENT: [numpy.zeros(1)],
cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE: [
numpy.zeros(1)
],
cellprofiler.modules.trackobjects.F_TRAJECTORY_X: [numpy.zeros(1)],
cellprofiler.modules.trackobjects.F_TRAJECTORY_Y: [numpy.zeros(1)],
cellprofiler.modules.trackobjects.F_NEW_OBJECT_COUNT: [1],
cellprofiler.modules.trackobjects.F_LOST_OBJECT_COUNT: [0],
cellprofiler.modules.trackobjects.F_MERGE_COUNT: [0],
cellprofiler.modules.trackobjects.F_SPLIT_COUNT: [0],
},
)
def test_bridge_gap():
"""Bridge a gap of zero frames between two objects"""
with MonkeyPatchedDelete():
workspace, module = make_lap2_workspace(
numpy.array([[0, 1, 0, 0, 1, 2, 25], [2, 2, 0, 0, 101, 102, 25]]), 3
)
assert isinstance(module, cellprofiler.modules.trackobjects.TrackObjects)
#
# The cost of bridging the gap should be 141. We set the alternative
# score to 142 so that bridging wins.
#
module.gap_cost.value = 142
module.max_gap_score.value = 142
module.run_as_data_tool(workspace)
distance = numpy.array([numpy.sqrt(2 * 100 * 100)])
check_measurements(
workspace,
{
cellprofiler.modules.trackobjects.F_LABEL: [
numpy.array([1]),
numpy.zeros(0),
numpy.array([1]),
],
cellprofiler.modules.trackobjects.F_PARENT_IMAGE_NUMBER: [
numpy.array([0]),
numpy.zeros(0, int),
numpy.array([1]),
],
cellprofiler.modules.trackobjects.F_PARENT_OBJECT_NUMBER: [
numpy.array([0]),
numpy.zeros(0, int),
numpy.array([1]),
],
cellprofiler.modules.trackobjects.F_DISTANCE_TRAVELED: [
numpy.zeros(1),
numpy.zeros(0),
distance,
],
cellprofiler.modules.trackobjects.F_INTEGRATED_DISTANCE: [
numpy.zeros(1),
numpy.zeros(0),
distance,
],
cellprofiler.modules.trackobjects.F_TRAJECTORY_X: [
numpy.zeros(1),
numpy.zeros(0),
numpy.array([100]),
],
cellprofiler.modules.trackobjects.F_TRAJECTORY_Y: [
numpy.zeros(1),
numpy.zeros(0),
numpy.array([100]),
],
cellprofiler.modules.trackobjects.F_LINEARITY: [
numpy.array([numpy.nan]),
numpy.zeros(0),
numpy.array([1]),
],
cellprofiler.modules.trackobjects.F_LIFETIME: [
numpy.ones(1),
numpy.zeros(0),
                    numpy.array([2]),
                ],
            },
        )
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU License.
"""
Long-term extreme response analysis of offshore structures by combining importance sampling with subset simulation Ying Min Low, Xiaoxu Huang
<NAME>. On the prediction of extreme wave crest heights. In: Proceedings of the 7th international workshop on wave hindcasting and forecasting, Meteorological Service of Canada. 2002.
<NAME>. and <NAME>. (2000): “Metocean Design Criteria for Kvitebjørn”, Statoil Report , C193-KVB-N-FD-0001, Rev. date: 2000-12-14, Stavanger, 2000.
"""
import numpy as np
import scipy.stats as stats
import uqra
from ._envbase import EnvBase
#### Hs distribution Class
##################################################
##################################################
class DistHs(object):
def __init__(self):
self.name = 'Lonowe'
self.mu_Hs = 0.77
self.sigma_Hs = 0.6565
self.Hs_shape = 1.503
self.Hs_scale = 2.691
self.h0 = 2.9
self.dist1 = stats.lognorm(s=self.sigma_Hs, scale=np.exp(self.mu_Hs))
self.dist2 = stats.weibull_min(c=self.Hs_shape, scale=self.Hs_scale)
def ppf(self, u):
"""
        Return Hs samples corresponding to the given ppf (CDF) values u
"""
assert np.logical_and(u >=0, u <=1).all(), 'CDF values should be in range [0,1]'
hs1 = self.dist1.ppf(u)
hs2 = self.dist2.ppf(u)
hs = np.where(hs1 < self.h0, hs1, hs2)
return hs
def cdf(self, hs):
"""
Return Hs cdf
"""
hs_cdf1 = self.dist1.cdf(hs)
hs_cdf2 = self.dist2.cdf(hs)
hs_cdf = np.where(hs < self.h0, hs_cdf1, hs_cdf2)
return hs_cdf
def rvs(self, size=1):
hs1 = self.dist1.rvs(size=size)
hs2 = self.dist2.rvs(size=size)
hs = np.where(hs1 < self.h0, hs1, hs2)
return hs
def pdf(self, hs):
hs_pdf1 = self.dist1.pdf(hs)
hs_pdf2 = self.dist2.pdf(hs)
hs_pdf = np.where(hs < self.h0, hs_pdf1, hs_pdf2)
return hs_pdf
def get_distribution(self, x, key='value'):
"""
        Return the Hs distribution corresponding to x.
        For Kvitebjorn, the Hs distribution is a piecewise distribution
        connected at Hs = h0, i.e. at ppf_Hs = ppf_h0 (the CDF of h0 under dist1).
        key = 'value' (x is a physical value of Hs):
            dist_Hs = dist1 if x <= h0
                    = dist2 if x >  h0
        key = 'ppf' (x is a percentile (CDF) value of Hs):
            dist_Hs = dist1 if x <= ppf_h0
                    = dist2 if x >  ppf_h0
"""
dist1 = stats.lognorm(s=self.sigma_Hs, scale=np.exp(self.mu_Hs))
dist2 = stats.weibull_min(c=self.Hs_shape, scale=self.Hs_scale)
ppf_h0 = dist1.cdf(self.h0)
        if key.lower() == 'value':
            dist = dist1 if x <= self.h0 else dist2
        elif key.lower() == 'ppf':
            dist = dist1 if x <= ppf_h0 else dist2
        else:
            raise ValueError('Key value: {} is not defined'.format(key))
        return dist
##################################################
##################################################
class DistTp(object):
def __init__(self, hs):
self.a1 = 1.134
self.a2 = 0.892
self.a3 = 0.225
self.b1 = 0.005
self.b2 = 0.120
self.b3 = 0.455
self.hs = hs
self.dist = stats.lognorm(s=1)
def rvs(self, size=1):
mu_tp = self.a1 + self.a2* self.hs**self.a3
sigma_tp = np.sqrt(self.b1 + self.b2*np.exp(-self.b3*self.hs))
tp = stats.lognorm.rvs(sigma_tp, loc=0, scale=np.exp(mu_tp), size=[size,self.hs.size])
tp = np.squeeze(tp)
assert self.hs.shape == tp.shape
return tp
def ppf(self, u):
"""
        Return Tp samples at ppf values u, conditioned on the Hs values given at construction
"""
mu_tp = self.a1 + self.a2* self.hs**self.a3
sigma_tp = np.sqrt(self.b1 + self.b2*np.exp(-self.b3*self.hs))
tp = stats.lognorm.ppf(u, sigma_tp, loc=0, scale=np.exp(mu_tp))
return tp
def cdf(self, tp):
mu_tp = self.a1 + self.a2* self.hs**self.a3
sigma_tp = np.sqrt(self.b1 + self.b2*np.exp(-self.b3*self.hs))
        tp_cdf = stats.lognorm.cdf(tp, sigma_tp, loc=0, scale=np.exp(mu_tp))
        return tp_cdf
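

# ----------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module). It shows how the two classes above are meant to be combined:
# sample Hs from its piecewise lognormal/Weibull marginal, then sample Tp
# from the conditional lognormal distribution whose parameters depend on Hs.
# ----------------------------------------------------------------------------
def _joint_hs_tp_sample_sketch(n=1000):
    dist_hs = DistHs()
    hs = dist_hs.rvs(size=n)   # marginal significant wave height samples
    dist_tp = DistTp(hs)       # conditional peak period distribution Tp | Hs
    tp = dist_tp.rvs(size=1)   # one Tp draw per Hs sample
    return hs, tp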
# -*- coding: utf-8 -*-
# File: parallel_map.py
import numpy as np
import ctypes
import copy
import threading
import multiprocessing as mp
from six.moves import queue
import zmq
from .base import DataFlow, ProxyDataFlow, DataFlowReentrantGuard
from .common import RepeatedData
from ..utils.concurrency import StoppableThread, enable_death_signal
from ..utils import logger
from ..utils.serialize import loads, dumps
from .parallel import (
_MultiProcessZMQDataFlow, _repeat_iter, _get_pipe_name,
_bind_guard, _zmq_catch_error)
__all__ = ['ThreadedMapData', 'MultiThreadMapData',
'MultiProcessMapData', 'MultiProcessMapDataZMQ']
class _ParallelMapData(ProxyDataFlow):
def __init__(self, ds, buffer_size, strict=False):
super(_ParallelMapData, self).__init__(ds)
assert buffer_size > 0, buffer_size
self._buffer_size = buffer_size
self._buffer_occupancy = 0 # actual #elements in buffer, only useful in strict mode
self._strict = strict
def reset_state(self):
super(_ParallelMapData, self).reset_state()
if not self._strict:
ds = RepeatedData(self.ds, -1)
else:
ds = self.ds
self._iter = ds.__iter__()
def _recv(self):
pass
def _send(self, dp):
pass
def _recv_filter_none(self):
ret = self._recv()
assert ret is not None, \
"[{}] Map function cannot return None when strict mode is used.".format(type(self).__name__)
return ret
def _fill_buffer(self, cnt=None):
if cnt is None:
cnt = self._buffer_size - self._buffer_occupancy
try:
for _ in range(cnt):
dp = next(self._iter)
self._send(dp)
except StopIteration:
logger.error(
"[{}] buffer_size cannot be larger than the size of the DataFlow when strict=True!".format(
type(self).__name__))
raise
self._buffer_occupancy += cnt
def get_data_non_strict(self):
for dp in self._iter:
self._send(dp)
ret = self._recv()
if ret is not None:
yield ret
def get_data_strict(self):
self._fill_buffer()
for dp in self._iter:
self._send(dp)
yield self._recv_filter_none()
self._iter = self.ds.__iter__() # refresh
# first clear the buffer, then fill
for k in range(self._buffer_size):
dp = self._recv_filter_none()
self._buffer_occupancy -= 1
if k == self._buffer_size - 1:
self._fill_buffer()
yield dp
def __iter__(self):
if self._strict:
for dp in self.get_data_strict():
yield dp
else:
for dp in self.get_data_non_strict():
yield dp
class MultiThreadMapData(_ParallelMapData):
"""
Same as :class:`MapData`, but start threads to run the mapping function.
This is useful when the mapping function is the bottleneck, but you don't
want to start processes for the entire dataflow pipeline.
Note:
1. There is tiny communication overhead with threads, but you
should avoid starting many threads in your main process to reduce GIL contention.
The threads will only start in the process which calls :meth:`reset_state()`.
Therefore you can use ``PrefetchDataZMQ(MultiThreadMapData(...), 1)``
to reduce GIL contention.
2. Threads run in parallel and can take different time to run the
mapping function. Therefore the order of datapoints won't be
preserved, and datapoints from one pass of `df.__iter__()` might get
mixed with datapoints from the next pass.
You can use **strict mode**, where `MultiThreadMapData.__iter__()`
is guaranteed to produce the exact set which `df.__iter__()`
produces. Although the order of data still isn't preserved.
The behavior of strict mode is undefined if the dataflow is infinite.
"""
class _Worker(StoppableThread):
def __init__(self, inq, outq, evt, map_func):
super(MultiThreadMapData._Worker, self).__init__(evt)
self.inq = inq
self.outq = outq
self.func = map_func
self.daemon = True
def run(self):
try:
while True:
dp = self.queue_get_stoppable(self.inq)
if self.stopped():
return
# cannot ignore None here. will lead to unsynced send/recv
obj = self.func(dp)
self.queue_put_stoppable(self.outq, obj)
except Exception:
if self.stopped():
pass # skip duplicated error messages
else:
raise
finally:
self.stop()
def __init__(self, ds, nr_thread, map_func, buffer_size=200, strict=False):
"""
Args:
ds (DataFlow): the dataflow to map
nr_thread (int): number of threads to use
map_func (callable): datapoint -> datapoint | None
buffer_size (int): number of datapoints in the buffer
strict (bool): use "strict mode", see notes above.
"""
super(MultiThreadMapData, self).__init__(ds, buffer_size, strict)
self._strict = strict
self.nr_thread = nr_thread
self.map_func = map_func
self._threads = []
self._evt = None
def reset_state(self):
super(MultiThreadMapData, self).reset_state()
if self._threads:
self._threads[0].stop()
for t in self._threads:
t.join()
self._in_queue = queue.Queue()
self._out_queue = queue.Queue()
self._evt = threading.Event()
self._threads = [MultiThreadMapData._Worker(
self._in_queue, self._out_queue, self._evt, self.map_func)
for _ in range(self.nr_thread)]
for t in self._threads:
t.start()
self._guard = DataFlowReentrantGuard()
# Call once at the beginning, to ensure inq+outq has a total of buffer_size elements
self._fill_buffer()
def _recv(self):
return self._out_queue.get()
def _send(self, dp):
self._in_queue.put(dp)
def __iter__(self):
with self._guard:
for dp in super(MultiThreadMapData, self).__iter__():
yield dp
def __del__(self):
if self._evt is not None:
self._evt.set()
for p in self._threads:
p.stop()
p.join(timeout=5.0)
# if p.is_alive():
# logger.warn("Cannot join thread {}.".format(p.name))
# TODO deprecated
ThreadedMapData = MultiThreadMapData
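

# ----------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module). `_ToyDataFlow` is a made-up dataflow used only to show the wiring:
# the mapping function runs in worker threads, and strict mode guarantees one
# full pass over the finite dataflow before iteration stops.
# ----------------------------------------------------------------------------
def _multithread_map_usage_sketch():
    class _ToyDataFlow(DataFlow):
        def __iter__(self):
            for i in range(100):
                yield [i]

        def __len__(self):
            return 100

    df = MultiThreadMapData(
        _ToyDataFlow(), nr_thread=4,
        map_func=lambda dp: [dp[0] * 2], buffer_size=20, strict=True)
    df.reset_state()
    return [dp for dp in df]   # 100 mapped datapoints, order not preserved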
class MultiProcessMapDataZMQ(_ParallelMapData, _MultiProcessZMQDataFlow):
"""
Same as :class:`MapData`, but start processes to run the mapping function,
and communicate with ZeroMQ pipe.
Note:
1. Processes run in parallel and can take different time to run the
mapping function. Therefore the order of datapoints won't be
preserved, and datapoints from one pass of `df.__iter__()` might get
mixed with datapoints from the next pass.
You can use **strict mode**, where `MultiProcessMapData.__iter__()`
is guaranteed to produce the exact set which `df.__iter__()`
produces. Although the order of data still isn't preserved.
The behavior of strict mode is undefined if the dataflow is infinite.
"""
class _Worker(mp.Process):
def __init__(self, identity, map_func, pipename, hwm):
super(MultiProcessMapDataZMQ._Worker, self).__init__()
self.identity = identity
self.map_func = map_func
self.pipename = pipename
self.hwm = hwm
def run(self):
enable_death_signal(_warn=self.identity == b'0')
ctx = zmq.Context()
socket = ctx.socket(zmq.REP)
socket.setsockopt(zmq.IDENTITY, self.identity)
socket.set_hwm(self.hwm)
socket.connect(self.pipename)
while True:
dp = loads(socket.recv(copy=False))
dp = self.map_func(dp)
socket.send(dumps(dp), copy=False)
def __init__(self, ds, nr_proc, map_func, buffer_size=200, strict=False):
"""
Args:
ds (DataFlow): the dataflow to map
            nr_proc (int): number of worker processes to use
map_func (callable): datapoint -> datapoint | None
buffer_size (int): number of datapoints in the buffer
strict (bool): use "strict mode", see notes above.
"""
_ParallelMapData.__init__(self, ds, buffer_size, strict)
_MultiProcessZMQDataFlow.__init__(self)
self.nr_proc = nr_proc
self.map_func = map_func
self._strict = strict
self._procs = []
self._guard = DataFlowReentrantGuard()
def reset_state(self):
_MultiProcessZMQDataFlow.reset_state(self)
_ParallelMapData.reset_state(self)
self.context = zmq.Context()
self.socket = self.context.socket(zmq.DEALER)
self.socket.set_hwm(self._buffer_size * 2)
pipename = _get_pipe_name('dataflow-map')
_bind_guard(self.socket, pipename)
self._proc_ids = [u'{}'.format(k).encode('utf-8') for k in range(self.nr_proc)]
worker_hwm = int(self._buffer_size * 2 // self.nr_proc)
self._procs = [MultiProcessMapDataZMQ._Worker(
self._proc_ids[k], self.map_func, pipename, worker_hwm)
for k in range(self.nr_proc)]
self._start_processes()
        self._fill_buffer()  # pre-fill the buffer
def _send(self, dp):
msg = [b"", dumps(dp)]
self.socket.send_multipart(msg, copy=False)
def _recv(self):
msg = self.socket.recv_multipart(copy=False)
dp = loads(msg[1])
return dp
def __iter__(self):
with self._guard, _zmq_catch_error('MultiProcessMapData'):
for dp in super(MultiProcessMapDataZMQ, self).__iter__():
yield dp
MultiProcessMapData = MultiProcessMapDataZMQ # alias
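# Usage mirrors MultiThreadMapData (see the sketch above), e.g. a hypothetical
#     df = MultiProcessMapData(my_dataflow, nr_proc=4, map_func=my_map_func)
#     df.reset_state()
# where `my_dataflow` and `my_map_func` are placeholders; datapoints are
# exchanged with the worker processes over a ZeroMQ DEALER/REP pipe instead of
# the in-process queues used by the thread-based version.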
def _pool_map(data):
global SHARED_ARR, WORKER_ID, MAP_FUNC
res = MAP_FUNC(data)
if res is None:
return None
    shared = np.reshape(SHARED_ARR, res.shape)
# -*- coding: utf-8 -*-
"""
Solve the inverse problem of the coupled ODE system
#dS/dt = - BetaI(t)*I/N * S
#S = N-E-I-J-D-H-R
dI/dt = BetaI(t)*I/N * S - gamma * I
dH/dt = (p*gamma) * I - (q*phiD) * H - ((1-q)*phiR) * H
dD/dt = (q*phiD) * H
dR/dt = ((1-p)*gamma) * I + ((1-q)*phiR) * H
dI_sum/dt = BetaI(t)*I/N * S
dH_sum/dt = (p*gamma) * I
Here the parameters BetaI, p, and q are inferred as time-dependent function
gamma, phiD, and phiR are given
"""
import sys
sys.path.insert(0, '../../Utilities/')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import pandas
import math
import tensorflow as tf
import numpy as np
from numpy import *
# from numpy import matlib as mb
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.io
from scipy.interpolate import griddata
import time
from itertools import product, combinations
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
#from plotting import newfig, savefig
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
import datetime
from pyDOE import lhs
# from scipy.special import gamma
from scipy.special import jacobi
start_time = time.time()
#np.random.seed(1234)
#tf.set_random_seed(1234)
# tf.random.set_seed(1234)
#%%
class PhysicsInformedNN:
#Initialize the class
def __init__(self, t_train, I_new_train, D_new_train, H_new_train,
I_sum_train, D_sum_train, H_sum_train, U0, t_f,
S_SIHDR, I_SIHDR, H_SIHDR, D_SIHDR, R_SIHDR,
lb, ub, N, layers, layers_kappa, layers_BetaI, layers_p, layers_q):
self.N = N
#Data for training
self.t_train = t_train
self.I_new_train = I_new_train
self.D_new_train = D_new_train
self.H_new_train = H_new_train
self.I_sum_train = I_sum_train
self.D_sum_train = D_sum_train
self.H_sum_train = H_sum_train
self.S_cur_train = S_SIHDR
self.I_cur_train = I_SIHDR
self.H_cur_train = H_SIHDR
self.D_cur_train = D_SIHDR
self.R_cur_train = R_SIHDR
self.I0_new = U0[0]
self.D0_new = U0[1]
self.H0_new = U0[2]
self.I0_sum = U0[3]
self.D0_sum = U0[4]
self.H0_sum = U0[5]
self.S0 = U0[6]
self.I0 = U0[7]
self.H0 = U0[8]
self.D0 = U0[9]
self.R0 = U0[10]
self.t_f = t_f
        #Time discretization
self.M = len(t_f)-1
self.tau = t_f[1]-t_f[0]
#Bounds
self.lb = lb
self.ub = ub
# initialize NN
self.weights, self.biases = self.initialize_NN(layers)
self.weights_kappa, self.biases_kappa = self.initialize_NN(layers_kappa)
self.weights_BetaI, self.biases_BetaI = self.initialize_NN(layers_BetaI)
self.weights_p, self.biases_p = self.initialize_NN(layers_p)
self.weights_q, self.biases_q = self.initialize_NN(layers_q)
# self.weights_kappa1, self.biases_kappa1 = self.initialize_NN(layers_kappa)
# self.weights_kappa2, self.biases_kappa2 = self.initialize_NN(layers_kappa)
# self.weights_kappa3, self.biases_kappa3 = self.initialize_NN(layers_kappa)
# self.weights_kappa4, self.biases_kappa4 = self.initialize_NN(layers_kappa)
# self.weights_kappa5, self.biases_kappa5 = self.initialize_NN(layers_kappa)
self.Kappa1_COEF = tf.Variable(tf.zeros([poly_order,1], dtype=tf.float64) , dtype=tf.float64, trainable=True)
self.Kappa2_COEF = tf.Variable(tf.zeros([poly_order,1], dtype=tf.float64) , dtype=tf.float64, trainable=True)
self.Kappa3_COEF = tf.Variable(tf.zeros([poly_order,1], dtype=tf.float64) , dtype=tf.float64, trainable=True)
self.Kappa4_COEF = tf.Variable(tf.zeros([poly_order,1], dtype=tf.float64) , dtype=tf.float64, trainable=True)
self.Kappa5_COEF = tf.Variable(tf.zeros([poly_order,1], dtype=tf.float64) , dtype=tf.float64, trainable=True)
#Fixed parameters
self.N = N
self.gamma = tf.Variable(1.0/6.0,dtype=tf.float64,trainable=False)
self.phiD = tf.Variable(1.0/15.0,dtype=tf.float64,trainable=False)
self.phiR = tf.Variable(1.0/7.5,dtype=tf.float64,trainable=False)
# self.phiD = tf.Variable(1.0/17.0,dtype=tf.float64,trainable=False)
# self.phiR = tf.Variable(1.0/4.0,dtype=tf.float64,trainable=False)
#Fitted parameters
# bound_p = [0, 1]
# bound_q = [0, 1]
# self.p = bound_p[0]+(bound_p[1]-bound_p[0])*tf.sigmoid(tf.Variable(0.0,dtype=tf.float64, trainable=True))
# self.q = bound_q[0]+(bound_q[1]-bound_q[0])*tf.sigmoid(tf.Variable(0.0,dtype=tf.float64, trainable=True))
#tf placeholders and graph
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
self.saver = tf.train.Saver()
# placeholders for inputs
self.t_u = tf.placeholder(tf.float64, shape=[None, self.t_train.shape[1]])
self.I_new_u = tf.placeholder(tf.float64, shape=[None, self.I_new_train.shape[1]])
self.D_new_u = tf.placeholder(tf.float64, shape=[None, self.D_new_train.shape[1]])
self.H_new_u = tf.placeholder(tf.float64, shape=[None, self.H_new_train.shape[1]])
self.I_sum_u = tf.placeholder(tf.float64, shape=[None, self.I_sum_train.shape[1]])
self.D_sum_u = tf.placeholder(tf.float64, shape=[None, self.D_sum_train.shape[1]])
self.H_sum_u = tf.placeholder(tf.float64, shape=[None, self.H_sum_train.shape[1]])
self.I0_new_u = tf.placeholder(tf.float64, shape=[None, self.I0_new.shape[1]])
self.D0_new_u = tf.placeholder(tf.float64, shape=[None, self.D0_new.shape[1]])
self.H0_new_u = tf.placeholder(tf.float64, shape=[None, self.H0_new.shape[1]])
self.I0_sum_u = tf.placeholder(tf.float64, shape=[None, self.I0_sum.shape[1]])
self.D0_sum_u = tf.placeholder(tf.float64, shape=[None, self.D0_sum.shape[1]])
self.H0_sum_u = tf.placeholder(tf.float64, shape=[None, self.H0_sum.shape[1]])
self.t_tf = tf.placeholder(tf.float64, shape=[None, self.t_f.shape[1]])
# physics informed neural networks
self.S_pred, self.I_pred, self.H_pred, self.D_pred, self.R_pred, self.I_sum_pred, self.H_sum_pred = self.net_u(self.t_u)
self.D_sum_pred = self.D_pred
# self.Kappa_pred1 = self.net_Kappa1(self.t_u)
# self.Kappa_pred2 = self.net_Kappa2(self.t_u)
# self.Kappa_pred3 = self.net_Kappa3(self.t_u)
# self.Kappa_pred4 = self.net_Kappa4(self.t_u)
# self.Kappa_pred5 = self.net_Kappa5(self.t_u)
self.Kappa_pred1 = self.net_Kappa1_plot()
self.Kappa_pred2 = self.net_Kappa2_plot()
self.Kappa_pred3 = self.net_Kappa3_plot()
self.Kappa_pred4 = self.net_Kappa4_plot()
self.Kappa_pred5 = self.net_Kappa5_plot()
self.BetaI_pred = self.net_BetaI(self.t_u)
self.p_pred = self.net_p (self.t_u)
self.q_pred = self.net_q (self.t_u)
self.I_new_pred = self.I_sum_pred[1:,:] - self.I_sum_pred[:-1,:]
self.D_new_pred = self.D_sum_pred[1:,:] - self.D_sum_pred[:-1,:]
self.H_new_pred = self.H_sum_pred[1:,:] - self.H_sum_pred[:-1,:]
self.I0_new_pred = self.I_new_pred[0]
self.D0_new_pred = self.D_new_pred[0]
self.H0_new_pred = self.H_new_pred[0]
self.I0_sum_pred = self.I_sum_pred[0]
self.D0_sum_pred = self.D_sum_pred[0]
self.H0_sum_pred = self.H_sum_pred[0]
self.S0_pred = self.S_pred[0]
self.I0_pred = self.I_pred[0]
self.D0_pred = self.D_pred[0]
self.H0_pred = self.H_pred[0]
self.R0_pred = self.R_pred[0]
self.S_f, self.I_f, self.H_f, self.D_f, self.R_f, self.I_sum_f, self.H_sum_f, self.R_con = self.net_f(self.t_tf)
# loss
self.lossU0 = 100*tf.reduce_mean(tf.square(self.I0_new_u - self.I0_new_pred)) + \
100*tf.reduce_mean(tf.square(self.D0_new_u - self.D0_new_pred)) + \
100*tf.reduce_mean(tf.square(self.H0_new_u - self.H0_new_pred)) + \
tf.reduce_mean(tf.square(self.I0_sum_u - self.I0_sum_pred)) + \
tf.reduce_mean(tf.square(self.D0_sum_u - self.D0_sum_pred)) + \
tf.reduce_mean(tf.square(self.H0_sum_u - self.H0_sum_pred)) + \
tf.reduce_mean(tf.square(self.S0 - self.S0_pred)) + \
tf.reduce_mean(tf.square(self.I0 - self.I0_pred)) + \
tf.reduce_mean(tf.square(self.D0 - self.D0_pred)) + \
tf.reduce_mean(tf.square(self.H0 - self.H0_pred)) + \
tf.reduce_mean(tf.square(self.R0 - self.R0_pred))
self.lossU = 300*tf.reduce_mean(tf.square(self.I_new_u[:-1,:] - self.I_new_pred)) + \
300*tf.reduce_mean(tf.square(self.D_new_u[:-1,:] - self.D_new_pred)) + \
300*tf.reduce_mean(tf.square(self.H_new_u[:-1,:] - self.H_new_pred)) + \
tf.reduce_mean(tf.square(self.I_sum_u - self.I_sum_pred)) + \
tf.reduce_mean(tf.square(self.D_sum_u - self.D_sum_pred)) + \
tf.reduce_mean(tf.square(self.H_sum_u - self.H_sum_pred)) + \
tf.reduce_mean(tf.square(self.I_pred - self.I_cur_train)) + \
tf.reduce_mean(tf.square(self.H_pred - self.H_cur_train))
# tf.reduce_mean(tf.square(self.S_pred - self.S_cur_train)) + \
# tf.reduce_mean(tf.square(self.D_pred - self.D_cur_train)) + \
# tf.reduce_mean(tf.square(self.R_pred - self.R_cur_train))
self.lossF =tf.reduce_mean(tf.square(self.S_f))\
+ tf.reduce_mean(tf.square(self.I_f))\
+ tf.reduce_mean(tf.square(self.H_f)) \
+ tf.reduce_mean(tf.square(self.D_f)) \
+ tf.reduce_mean(tf.square(self.R_f)) \
+ tf.reduce_mean(tf.square(self.I_sum_f)) \
+ tf.reduce_mean(tf.square(self.H_sum_f)) \
+ tf.reduce_mean(tf.square(self.R_con))
self.loss = 10*self.lossU0 + 150*self.lossU + self.lossF
#Optimizer
self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol' : 1.0 * np.finfo(float).eps})
self.optimizer_Adam = tf.train.AdamOptimizer()
self.train_op_Adam = self.optimizer_Adam.minimize(self.loss)
init = tf.global_variables_initializer()
self.sess.run(init)
    #Initialize the neural network
def initialize_NN(self, layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0,num_layers-1):
W = self.xavier_init(size=[layers[l], layers[l+1]]) #weights for the current layer
b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float64), dtype=tf.float64) #biases for the current layer
weights.append(W) #save the elements in W to weights (a row vector)
biases.append(b) #save the elements in b to biases (a 1Xsum(layers) row vector)
return weights, biases
#generating weights
def xavier_init(self, size):
in_dim = size[0]
out_dim = size[1]
xavier_stddev = np.sqrt(2/(in_dim + out_dim))
return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev, dtype=tf.float64), dtype=tf.float64)
#Architecture of the neural network
def neural_net(self, t, weights, biases):
num_layers = len(weights) + 1
H = 2.0*(t-self.lb)/(self.ub-self.lb) - 1.0
for l in range(0,num_layers-2):
W = weights[l]
b = biases[l]
H = tf.tanh(tf.add(tf.matmul(H, W), b))
W = weights[-1]
b = biases[-1]
Y = tf.add(tf.matmul(H, W), b)
return Y
def net_BetaI(self, t):
BetaI = self.neural_net(t, self.weights_BetaI, self.biases_BetaI)
bound_b = [tf.constant(0.0, dtype=tf.float64), tf.constant(1.0, dtype=tf.float64)]
return bound_b[0]+(bound_b[1]-bound_b[0])*tf.sigmoid(BetaI)
def net_p(self, t):
# p = self.neural_net(t, self.weights_p, self.biases_p)
# return tf.sigmoid(p)
p = 0.5*(1-tf.tanh(t-50.0))*(0.5-0.1)+0.1
return p
def net_q(self, t):
# q = self.neural_net(t, self.weights_q, self.biases_q)
# return tf.sigmoid(q)
q = 0.5*(1-tf.tanh(t-150.0))*(0.5-0.2)+0.2
return q
def net_u(self, t):
SIHDR = self.neural_net(t, self.weights, self.biases)
#SIHDR = SIHDR**2
S = SIHDR[:,0:1]
I = SIHDR[:,1:2]
H = SIHDR[:,2:3]
D = SIHDR[:,3:4]
R = SIHDR[:,4:5]
I_sum = SIHDR[:,5:6]
H_sum = SIHDR[:,6:7]
return S, I, H, D, R, I_sum, H_sum
#Temporal fractional order in (0,1)
# def net_Kappa(self,t):
# Kappa = self.neural_net(t, self.weights_kappa, self.biases_kappa)
# return 1e-5+tf.sigmoid(Kappa)*(1-1e-5)
# # return tf.exp(t*0.0)*0.99999
# def net_Kappa1(self,t):
# Kappa = self.neural_net(t, self.weights_kappa1, self.biases_kappa1)
# return 1e-5+tf.sigmoid(Kappa)*(1-1e-5)
# def net_Kappa2(self,t):
# Kappa = self.neural_net(t, self.weights_kappa2, self.biases_kappa2)
# return 1e-5+tf.sigmoid(Kappa)*(1-1e-5)
# def net_Kappa3(self,t):
# Kappa = self.neural_net(t, self.weights_kappa3, self.biases_kappa3)
# return 1e-5+tf.sigmoid(Kappa)*(1-1e-5)
# def net_Kappa4(self,t):
# Kappa = self.neural_net(t, self.weights_kappa4, self.biases_kappa4)
# return 1e-5+tf.sigmoid(Kappa)*(1-1e-5)
# def net_Kappa5(self,t):
# Kappa = self.neural_net(t, self.weights_kappa5, self.biases_kappa5)
# return 1e-5+tf.sigmoid(Kappa)*(1-1e-5)
def net_Kappa1(self):
polys = tf.constant(np.transpose(Jacobi_polys[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa1_COEF)
return tf.sigmoid(Kappa)
def net_Kappa2(self):
polys = tf.constant(np.transpose(Jacobi_polys[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa2_COEF)
return tf.sigmoid(Kappa)
def net_Kappa3(self):
polys = tf.constant(np.transpose(Jacobi_polys[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa3_COEF)
return tf.sigmoid(Kappa)
def net_Kappa4(self):
polys = tf.constant(np.transpose(Jacobi_polys[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa4_COEF)
return tf.sigmoid(Kappa)
def net_Kappa5(self):
polys = tf.constant(np.transpose(Jacobi_polys[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa5_COEF)
return tf.sigmoid(Kappa)
def net_Kappa1_plot(self):
polys = tf.constant(np.transpose(Jacobi_polys_plots[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa1_COEF)
return tf.sigmoid(Kappa)
def net_Kappa2_plot(self):
polys = tf.constant(np.transpose(Jacobi_polys_plots[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa2_COEF)
return tf.sigmoid(Kappa)
def net_Kappa3_plot(self):
polys = tf.constant(np.transpose(Jacobi_polys_plots[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa3_COEF)
return tf.sigmoid(Kappa)
def net_Kappa4_plot(self):
polys = tf.constant(np.transpose(Jacobi_polys_plots[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa4_COEF)
return tf.sigmoid(Kappa)
def net_Kappa5_plot(self):
polys = tf.constant(np.transpose(Jacobi_polys_plots[:poly_order,:]), dtype=tf.float64)
Kappa = tf.matmul(polys, self.Kappa5_COEF)
return tf.sigmoid(Kappa)
#fractional differential coefficients for the L1 approximation
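    # Reference (added note, not in the original code): the classical L1 scheme
    # approximates the Caputo derivative of order kappa in (0, 1) on a uniform
    # grid t_n = n*tau as
    #     D^kappa u(t_n) ~= tau^(-kappa) / Gamma(2 - kappa)
    #                       * sum_{j=0}^{n-1} b_j * (u(t_{n-j}) - u(t_{n-j-1})),
    #     with b_j = (j + 1)^(1 - kappa) - j^(1 - kappa).
    # FDM1 assembles the equivalent (m+1) x (m+1) coefficient matrix that acts
    # directly on the vector of grid values, so the fractional derivative at
    # all grid points is obtained with a single matmul (see net_f below).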
def FDM1(self, Kappa):
m = self.M #int
Tau = self.tau #array
        kappa_vec = tf.reshape(Kappa, [m+1,1]) #tensor
kappa_mat = tf.tile(kappa_vec, [1, m-1])
idx = np.tril_indices(m+1, k=-1)
Temp1 = np.zeros([m+1,m+1])
Temp1[idx] = idx[0]-idx[1]
Temp1 = np.tril(Temp1, k=-2) #(m+1,m+1) numpy array
Temp1 = tf.constant(Temp1, dtype = tf.float64) #(m+1,m+1) tensor
Temp2 = -np.eye(m+1)
Temp2[idx] = idx[0]-idx[1]-1
Temp2 = np.tril(Temp2, k=-2)
Temp2 = tf.constant(Temp2, dtype = tf.float64)
Temp3 = -2*np.eye(m+1)
Temp3[idx] = idx[0]-idx[1]-2
Temp3 = np.tril(Temp3, k=-2)
Temp3 = tf.constant(Temp3, dtype = tf.float64)
A = np.concatenate((np.zeros((1,m)), np.eye(m)), axis=0, out=None)
A = tf.constant(A[:,0:m-1], dtype = tf.float64)
Temp = tf.pow(Temp1[:,0:m-1],1.0-kappa_mat) -\
2*tf.pow(Temp2[:,0:m-1],1.0-kappa_mat) +\
tf.pow(Temp3[:,0:m-1],1.0-kappa_mat) + A
L_Temp1 = tf.constant(np.arange(m), dtype = tf.float64) #np.arange(m)
L_Temp1 = tf.pow(tf.reshape(L_Temp1, [m,1]), 1.0-kappa_vec[1:m+1, 0:1])
L_Temp2 = tf.constant(np.arange(m)+1, dtype = tf.float64) #np.arange(m) + 1
L_Temp2 = tf.pow(tf.reshape(L_Temp2, [m,1]), 1.0-kappa_vec[1:m+1, 0:1])
L_Temp = tf.concat((tf.zeros((1,1), dtype = tf.float64), L_Temp1-L_Temp2), axis=0)
R_Temp = tf.concat((tf.zeros((m,1), dtype = tf.float64), tf.ones((1,1), dtype = tf.float64)), axis=0)
coeff_mat = tf.concat((L_Temp, Temp, R_Temp), axis=1)
c = tf.tile(tf.math.divide(tf.pow(Tau, -kappa_vec), tf.exp(tf.lgamma(2-kappa_vec))), tf.constant([1, m+1], dtype = tf.int32))
coeff_mat = tf.multiply(c, coeff_mat)
return coeff_mat
def net_f(self, t):
#load fixed parameters
gamma = self.gamma
phiD = self.phiD
phiR = self.phiR
#load time-dependent parameters
betaI = 0.25 #self.net_BetaI(t)
p = 0.2 #self.net_p(t)
q = 0.3 #self.net_q(t)
#Obtain SIHDR from Neural network
S, I, H, D, R, I_sum, H_sum = self.net_u(t)
#Time derivatives
#Fractional differential matrix
Kappa1 = self.net_Kappa1()
Kappa2 = self.net_Kappa2()
Kappa3 = self.net_Kappa3()
Kappa4 = self.net_Kappa4()
Kappa5 = self.net_Kappa5()
DiffMat1 = self.FDM1(Kappa1)
DiffMat2 = self.FDM1(Kappa2)
DiffMat3 = self.FDM1(Kappa3)
DiffMat4 = self.FDM1(Kappa4)
DiffMat5 = self.FDM1(Kappa5)
#fractional time derivatives
# DM = self.DM
S_t = tf.matmul(DiffMat1, S)
I_t = tf.matmul(DiffMat2, I)
H_t = tf.matmul(DiffMat3, H)
D_t = tf.matmul(DiffMat4, D)
R_t = tf.matmul(DiffMat5, R)
I_sum_t = tf.matmul(DiffMat2, I_sum)
H_sum_t = tf.matmul(DiffMat3, H_sum)
T = tf.constant(7.0, dtype = tf.float64)
# T = tf.constant(len(self.t_train), dtype = tf.float64)
## fractional derivative
S_t = tf.pow(T, Kappa1-1)*S_t/tf.exp(tf.lgamma(1.0+Kappa1))
I_t = tf.pow(T, Kappa2-1)*I_t/tf.exp(tf.lgamma(1.0+Kappa2))
H_t = tf.pow(T, Kappa3-1)*H_t/tf.exp(tf.lgamma(1.0+Kappa3))
D_t = tf.pow(T, Kappa4-1)*D_t/tf.exp(tf.lgamma(1.0+Kappa4))
R_t = tf.pow(T, Kappa5-1)*R_t/tf.exp(tf.lgamma(1.0+Kappa5))
I_sum_t = tf.pow(T, Kappa2-1)*I_sum_t/tf.exp(tf.lgamma(1.0+Kappa2))
H_sum_t = tf.pow(T, Kappa3-1)*H_sum_t/tf.exp(tf.lgamma(1.0+Kappa3))
## integer order derivative
# S_t = tf.gradients(S, t, unconnected_gradients='zero')[0]
# I_t = tf.gradients(I, t, unconnected_gradients='zero')[0]
# H_t = tf.gradients(H, t, unconnected_gradients='zero')[0]
# D_t = tf.gradients(D, t, unconnected_gradients='zero')[0]
# R_t = tf.gradients(R, t, unconnected_gradients='zero')[0]
# I_sum_t = tf.gradients(I_sum, t, unconnected_gradients='zero')[0]
# H_sum_t = tf.gradients(H_sum, t, unconnected_gradients='zero')[0]
#Residuals
f_S = S_t + betaI*I/self.N * S
f_I = I_t - betaI*I/self.N * S + gamma * I
f_H = H_t - (p*gamma) * I + (q*phiD) * H + ((1-q)*phiR) * H
f_D = D_t - (q*phiD)*H
f_R = R_t - ((1-p)*gamma) * I - ((1-q)*phiR) * H
f_I_sum = I_sum_t - betaI*I/self.N * S
f_H_sum = H_sum_t - (p*gamma)*I
f_con = S + I + H + D + R - self.N
return f_S[1:,:], f_I[1:,:], f_H[1:,:], f_D[1:,:], f_R[1:,:], f_I_sum[1:,:], f_H_sum[1:,:], f_con[1:,:]
def callback(self, loss, lossU0, lossU, lossF):
total_records_LBFGS.append(np.array([loss, lossU0, lossU, lossF]))
print('Loss: %.3e, LossU0: %.3e, LossU: %.3e, LossF: %.3e' % (loss, lossU0, lossU, lossF))
def train(self, nIter):
tf_dict = {self.t_u: self.t_train, self.t_tf: self.t_f,
self.I_new_u: self.I_new_train, self.D_new_u: self.D_new_train, self.H_new_u: self.H_new_train,
self.I_sum_u: self.I_sum_train, self.H_sum_u: self.H_sum_train, self.D_sum_u: self.D_sum_train,
self.I0_new_u: self.I0_new, self.D0_new_u: self.D0_new, self.H0_new_u: self.H0_new,
self.I0_sum_u: self.I0_sum, self.D0_sum_u: self.D0_sum, self.H0_sum_u: self.H0_sum}
start_time = time.time()
for it in range(nIter+1):
self.sess.run(self.train_op_Adam, tf_dict)
# Print
if it % 100 == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.loss, tf_dict)
lossU0_value = self.sess.run(self.lossU0, tf_dict)
lossU_value = self.sess.run(self.lossU, tf_dict)
lossF_value = self.sess.run(self.lossF, tf_dict)
Kappa1_records.append(self.sess.run(self.Kappa_pred1))
Kappa2_records.append(self.sess.run(self.Kappa_pred2))
Kappa3_records.append(self.sess.run(self.Kappa_pred3))
Kappa4_records.append(self.sess.run(self.Kappa_pred4))
Kappa5_records.append(self.sess.run(self.Kappa_pred5))
total_records.append(np.array([it, loss_value, lossU0_value, lossU_value, lossF_value]))
print('It: %d, Loss: %.3e, LossU0: %.3e, LossU: %.3e, LossF: %.3e, Time: %.2f' %
(it, loss_value, lossU0_value, lossU_value, lossF_value, elapsed))
start_time = time.time()
if LBFGS:
self.optimizer.minimize(self.sess,
feed_dict = tf_dict, #Inputs of the minimize operator
fetches = [self.loss, self.lossU0, self.lossU, self.lossF],
loss_callback = self.callback) #Show the results of minimize operator
def predict(self, t_star):
tf_dict = {self.t_u: t_star, self.I0_new_u: self.I0_new, self.D0_new_u: self.D0_new, self.H0_new_u: self.H0_new}
S = self.sess.run(self.S_pred, tf_dict)
I = self.sess.run(self.I_pred, tf_dict)
H = self.sess.run(self.H_pred, tf_dict)
D = self.sess.run(self.D_pred, tf_dict)
R = self.sess.run(self.R_pred, tf_dict)
I_sum = self.sess.run(self.I_sum_pred, tf_dict)
H_sum = self.sess.run(self.H_sum_pred, tf_dict)
D_sum = self.sess.run(self.D_sum_pred, tf_dict)
I_new = self.sess.run(self.I_new_pred, tf_dict)
D_new = self.sess.run(self.D_new_pred, tf_dict)
H_new = self.sess.run(self.H_new_pred, tf_dict)
Kappa1 = self.sess.run(self.Kappa_pred1, tf_dict)
Kappa2 = self.sess.run(self.Kappa_pred2, tf_dict)
Kappa3 = self.sess.run(self.Kappa_pred3, tf_dict)
Kappa4 = self.sess.run(self.Kappa_pred4, tf_dict)
Kappa5 = self.sess.run(self.Kappa_pred5, tf_dict)
BetaI = self.sess.run(self.BetaI_pred, tf_dict)
p = self.sess.run(self.p_pred, tf_dict)
q = self.sess.run(self.q_pred, tf_dict)
return S, I, H, D, R, I_new, H_new, D_new, I_sum, H_sum, D_sum, \
Kappa1, Kappa2, Kappa3, Kappa4, Kappa5, BetaI, p, q
############################################################
#%%
if __name__=="__main__":
#Architecture of the NN
layers=[1] + 6*[30] + [7]
layers_kappa=[1] + 2*[15] + [1]
layers_BetaI=[1] + 2*[15] + [1]
layers_p=[1] + 2*[15] + [1]
layers_q=[1] + 2*[15] + [1]
current_directory = os.getcwd()
relative_path_loadSIR = '/../Model-SIHDR-DATA/'
read_results_to = current_directory + relative_path_loadSIR
S_SIHDR = np.loadtxt(read_results_to + 'S_pred_mean.txt')[:,None]
I_SIHDR = np.loadtxt(read_results_to + 'I_pred_mean.txt')[:,None]
H_SIHDR = np.loadtxt(read_results_to + 'H_pred_mean.txt')[:,None]
D_SIHDR = np.loadtxt(read_results_to + 'D_pred_mean.txt')[:,None]
R_SIHDR = np.loadtxt(read_results_to + 'R_pred_mean.txt')[:,None]
S_SIHDR = S_SIHDR/1e-4
I_SIHDR = I_SIHDR/1e-4
H_SIHDR = H_SIHDR/1e-4
D_SIHDR = D_SIHDR/1e-4
R_SIHDR = R_SIHDR/1e-4
t_star = np.arange(len(I_SIHDR))
t_star = t_star[:,None]
N = 8.399e6
# I0 = I_SIHDR[0:1, :]
# H0 = H_SIHDR[0:1, :]
# D0 = D_SIHDR[0:1, :]
# R0 = R_SIHDR[0:1, :]
# S0 = N - I0 - R0
# S0 = S0.reshape([1,1])
sf = 1e-6
N = N*sf
S_SIHDR = S_SIHDR * sf
I_SIHDR = I_SIHDR * sf
H_SIHDR = H_SIHDR * sf
D_SIHDR = D_SIHDR * sf
R_SIHDR = R_SIHDR * sf
# S0 = S0 *sf
# I0 = I0 * sf
# H0 = H0 * sf
# D0 = D0 * sf
# R0 = R0 * sf
#Load data
data_frame = pandas.read_csv('../Data/data-by-day.csv')
I_new_star = data_frame['CASE_COUNT'] #T x 1 array
H_new_star = data_frame['HOSPITALIZED_COUNT'] #T x 1 array
D_new_star = data_frame['DEATH_COUNT'] #T x 1 array
#7 days average
I_new_star = I_new_star.rolling(window=7).mean()
H_new_star = H_new_star.rolling(window=7).mean()
D_new_star = D_new_star.rolling(window=7).mean()
I_new_star = I_new_star.to_numpy(dtype=np.float64)
H_new_star = H_new_star.to_numpy(dtype=np.float64)
D_new_star = D_new_star.to_numpy(dtype=np.float64)
I_new_star = I_new_star[6:]
H_new_star = H_new_star[6:]
D_new_star = D_new_star[6:]
I_new_star = I_new_star.reshape([len(I_new_star), 1])
H_new_star = H_new_star.reshape([len(H_new_star), 1])
D_new_star = D_new_star.reshape([len(D_new_star), 1])
I_sum_star = np.cumsum(I_new_star)
H_sum_star = np.cumsum(H_new_star)
D_sum_star = np.cumsum(D_new_star)
I_sum_star = I_sum_star.reshape([len(I_sum_star), 1])
H_sum_star = H_sum_star.reshape([len(H_sum_star), 1])
D_sum_star = D_sum_star.reshape([len(D_sum_star), 1])
#t_star = np.arange(len(I_new_star))
#t_star = t_star.reshape([len(t_star),1])
#N = 8.399e6
#Scaling
#sf = 1e-5
#N = N * sf
I_new_star = I_new_star * sf
D_new_star = D_new_star * sf
H_new_star = H_new_star * sf
I_sum_star = I_sum_star * sf
H_sum_star = H_sum_star * sf
D_sum_star = D_sum_star * sf
#lower and upper bounds
lb = t_star.min(0)
ub = t_star.max(0)
#Initial conditions
I0_new = I_new_star[0:1,:]
D0_new = D_new_star[0:1,:]
H0_new = H_new_star[0:1,:]
I0_sum = I_sum_star[0:1,:]
D0_sum = D_sum_star[0:1,:]
H0_sum = H_sum_star[0:1,:]
#Initial conditions
I0 = I_sum_star[0:1,:]
D0 = D_sum_star[0:1,:]
H0 = np.array([[0.0]])
R0 = np.array([[0.0]])
S0 = N - I0 - D0 - H0 - R0
S0 = S0.reshape([1,1])
U0 = [I0_new, D0_new, H0_new, I0_sum, D0_sum, H0_sum, S0, I0, H0, D0, R0]
#Residual points
N_f = 3000 #5 * len(t_star)
t_f = np.linspace(lb, ub, num = N_f) #uniformed grid for evaluating fractional derivative
poly_order = 10
t_f_mapped = -1 + 2/(ub-lb) * (t_f - lb)
t_star_mapped = -1 + 2/(ub-lb) * (t_star - lb)
Jacobi_polys = np.asarray([ jacobi(n,0,0)(t_f_mapped.flatten()) for n in range(0, 15)])
Jacobi_polys_plots = np.asarray([ jacobi(n,0,0)(t_star_mapped.flatten()) for n in range(0, 15)])
#%%
######################################################################
######################## Training and Predicting ###############################
######################################################################
# t_train = (t_star-lb)/(ub-lb)
t_train = t_star
I_new_train = I_new_star
D_new_train = D_new_star
H_new_train = H_new_star
I_sum_train = I_sum_star
D_sum_train = D_sum_star
H_sum_train = H_sum_star
from datetime import datetime
now = datetime.now()
# dt_string = now.strftime("%m-%d-%H-%M")
dt_string = now.strftime("%m-%d")
#save results
current_directory = os.getcwd()
for j in range(10):
casenumber = 'set' + str(j+1)
relative_path_results = '/Model5-Dev/Train-Results-'+dt_string+'-'+casenumber+'/'
save_results_to = current_directory + relative_path_results
if not os.path.exists(save_results_to):
os.makedirs(save_results_to)
relative_path = '/Model5-Dev/Train-model-'+dt_string+'-'+casenumber+'/'
save_models_to = current_directory + relative_path
if not os.path.exists(save_models_to):
os.makedirs(save_models_to)
break
#Training
total_records = []
Kappa1_records = []
Kappa2_records = []
Kappa3_records = []
Kappa4_records = []
Kappa5_records = []
total_records_LBFGS = []
model = PhysicsInformedNN(t_train, I_new_train, D_new_train, H_new_train,
I_sum_train, D_sum_train, H_sum_train, U0, t_f,
S_SIHDR, I_SIHDR, H_SIHDR, D_SIHDR, R_SIHDR,
lb, ub, N, layers, layers_kappa, layers_BetaI, layers_p, layers_q)
LBFGS = True
# LBFGS = False
model.train(10000) #Training with n iterations
model.saver.save(model.sess, save_models_to+"model.ckpt")
#Predicting
S, I, H, D, R, I_new, H_new, D_new, I_sum, H_sum, D_sum, \
Kappa1, Kappa2, Kappa3, Kappa4, Kappa5, BetaI, p, q = model.predict(t_star)
import datetime
end_time = time.time()
print(datetime.timedelta(seconds=int(end_time-start_time)))
#Calculate RC
Rc = BetaI /(1.0/6.0)
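# Note added for clarity: this is the usual Rc = BetaI / gamma with the recovery rate taken as
# gamma = 1/6 (an assumed ~6-day infectious period). The 1/6 is inferred from the hard-coded
# 1.0/6.0 on the line above; it is not documented elsewhere in this script.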
#%%
#save data
np.savetxt(save_results_to + 'S.txt', S.reshape((-1,1)))
np.savetxt(save_results_to + 'I.txt', I.reshape((-1,1)))
np.savetxt(save_results_to + 'D.txt', D.reshape((-1,1)))
np.savetxt(save_results_to + 'H.txt', H.reshape((-1,1)))
np.savetxt(save_results_to + 'R.txt', R.reshape((-1,1)))
np.savetxt(save_results_to + 'I_new.txt', I_new.reshape((-1,1)))
np.savetxt(save_results_to + 'D_new.txt', D_new.reshape((-1,1)))
np.savetxt(save_results_to + 'H_new.txt', H_new.reshape((-1,1)))
np.savetxt(save_results_to + 'I_sum.txt', I_sum.reshape((-1,1)))
np.savetxt(save_results_to + 'H_sum.txt', H_sum.reshape((-1,1)))
np.savetxt(save_results_to + 'D_sum.txt', D_sum.reshape((-1,1)))
# #save BetaI, Rc, and sigma
np.savetxt(save_results_to + 't_star.txt', t_star.reshape((-1,1)))
np.savetxt(save_results_to + 'Rc.txt', Rc.reshape((-1,1)))
np.savetxt(save_results_to + 'Kappa1.txt', Kappa1.reshape((-1,1)))
np.savetxt(save_results_to + 'Kappa2.txt', Kappa2.reshape((-1,1)))
np.savetxt(save_results_to + 'Kappa3.txt', Kappa3.reshape((-1,1)))
np.savetxt(save_results_to + 'Kappa4.txt', Kappa4.reshape((-1,1)))
np.savetxt(save_results_to + 'Kappa5.txt', Kappa5.reshape((-1,1)))
np.savetxt(save_results_to + 'BetaI.txt', BetaI.reshape((-1,1)))
np.savetxt(save_results_to + 'p.txt', p.reshape((-1,1)))
np.savetxt(save_results_to + 'q.txt', q.reshape((-1,1)))
#%%
#records for Adam
N_Iter = len(total_records)
iteration = np.asarray(total_records)[:,0]
loss_his = np.asarray(total_records)[:,1]
loss_his_u0 = np.asarray(total_records)[:,2]
loss_his_u = np.asarray(total_records)[:,3]
loss_his_f = np.asarray(total_records)[:,4]
#records for LBFGS
if LBFGS:
N_Iter_LBFGS = len(total_records_LBFGS)
iteration_LBFGS =
|
np.arange(N_Iter_LBFGS)
|
numpy.arange
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 13:34:49 2020
@author: lukepinkel
"""
import numba
import numpy as np
import scipy as sp
import scipy.special
SQRT2 = np.sqrt(2)
ROOT2PI = np.sqrt(2.0 * np.pi)
def poisson_logp(x, mu, logp=True):
p = sp.special.xlogy(x, mu) - sp.special.gammaln(x + 1) - mu
if logp==False:
p = np.exp(p)
return p
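# Illustrative note (added; not part of the original module): poisson_logp evaluates the Poisson
# log-pmf log p(x; mu) = x*log(mu) - log(x!) - mu via xlogy/gammaln. For example,
#   poisson_logp(3, 2.0)              -> 3*np.log(2.0) - np.log(6.0) - 2.0  (approx. -1.712)
#   poisson_logp(3, 2.0, logp=False)  -> the corresponding probability, approx. 0.180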
def log1p(x):
return np.log(1+x)
def norm_cdf(x, mean=0.0, sd=1.0):
z = (x - mean) / sd
p = (sp.special.erf(z/SQRT2) + 1.0) / 2.0
return p
def norm_pdf(x, mean=0.0, sd=1.0):
z = (x - mean) / sd
p = np.exp(-z**2 / 2.0) / (ROOT2PI * sd)
return p
def get_part(arr, sol, size, step, maximum, res):
if step==size:
res.append(sol.copy())
else:
sol[step] = 1
while sol[step]<=maximum:
get_part(arr, sol, size, step+1, maximum, res)
sol[step] += 1
get_part(arr, sol, size, step+1, maximum+1, res)
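# Note added for clarity (inferred from the recursion; the source has no docstring here):
# get_part enumerates restricted-growth assignments -- each position receives a block label
# between 1 and (current maximum + 1) -- which is the standard way of listing every partition
# of a set; partition_set(n) below drives this recursion for a set of size n.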
def partition_set(n):
size = n
arr =
|
np.arange(1, size+1)
|
numpy.arange
|
import os.path
import numpy as np
import psi4
from pyresponse.ao2mo import AO2MO
from pyresponse.core import AO2MOTransformationType, Hamiltonian, Program, Spin
from pyresponse.cphf import CPHF
from pyresponse.data import REFDIR
from pyresponse.operators import Operator
from pyresponse.psi4 import molecules
from pyresponse.psi4.utils import (
mocoeffs_from_psi4wfn,
moenergies_from_psi4wfn,
occupations_from_psi4wfn,
)
from pyresponse.solvers import ExactInv
np.set_printoptions(precision=8, linewidth=200, suppress=True)
datadir = REFDIR / "psi4numpy" / "water"
def test_geometric_hessian_rhf_outside_solver_psi4numpy():
psi4.core.set_output_file("output.dat", False)
mol = psi4.geometry(
"""
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
"""
)
psi4.core.set_active_molecule(mol)
options = {
"BASIS": "STO-3G",
"SCF_TYPE": "PK",
"E_CONVERGENCE": 1e-10,
"D_CONVERGENCE": 1e-10,
}
psi4.set_options(options)
_, wfn = psi4.energy("SCF", return_wfn=True)
# Assuming C1 symmetry
occ = wfn.doccpi()[0]
nmo = wfn.nmo()
vir = nmo - occ
C = wfn.Ca_subset("AO", "ALL")
npC = np.asarray(C)
mints = psi4.core.MintsHelper(wfn.basisset())
H_ao = np.asarray(mints.ao_kinetic()) + np.asarray(mints.ao_potential())
# Update H, transform to MO basis
H = np.einsum("uj,vi,uv", npC, npC, H_ao)
# Integral generation from Psi4's MintsHelper
MO = np.asarray(mints.mo_eri(C, C, C, C))
# Physicist notation
MO = MO.swapaxes(1, 2)
F = H + 2.0 * np.einsum("pmqm->pq", MO[:, :occ, :, :occ])
F -= np.einsum("pmmq->pq", MO[:, :occ, :occ, :])
# Uncomment every `np.save` call to regenerate reference data.
# np.save(os.path.join(datadir, 'F.npy'), F)
F_ref = np.load(os.path.join(datadir, "F.npy"))
np.testing.assert_allclose(F, F_ref, rtol=0, atol=1.0e-10)
natoms = mol.natom()
cart = ["_X", "_Y", "_Z"]
oei_dict = {"S": "OVERLAP", "T": "KINETIC", "V": "POTENTIAL"}
deriv1_mat = {}
deriv1 = {}
# 1st Derivative of OEIs
for atom in range(natoms):
for key in oei_dict:
deriv1_mat[key + str(atom)] = mints.mo_oei_deriv1(oei_dict[key], atom, C, C)
for p in range(3):
map_key = key + str(atom) + cart[p]
deriv1[map_key] = np.asarray(deriv1_mat[key + str(atom)][p])
# np.save(os.path.join(datadir, f'{map_key}.npy'), deriv1[map_key])
deriv1_ref = np.load(os.path.join(datadir, f"{map_key}.npy"))
np.testing.assert_allclose(deriv1[map_key], deriv1_ref, rtol=0, atol=1.0e-10)
# 1st Derivative of TEIs
for atom in range(natoms):
string = "TEI" + str(atom)
deriv1_mat[string] = mints.mo_tei_deriv1(atom, C, C, C, C)
for p in range(3):
map_key = string + cart[p]
deriv1[map_key] = np.asarray(deriv1_mat[string][p])
# np.save(os.path.join(datadir, f'{map_key}.npy'), deriv1[map_key])
deriv1_ref = np.load(os.path.join(datadir, f"{map_key}.npy"))
np.testing.assert_allclose(deriv1[map_key], deriv1_ref, rtol=0, atol=1.0e-10)
Hes = {}
deriv2_mat = {}
deriv2 = {}
Hes["S"] = np.zeros((3 * natoms, 3 * natoms))
Hes["V"] =
|
np.zeros((3 * natoms, 3 * natoms))
|
numpy.zeros
|
import numpy as np
from numpy.testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_equal,
)
from pytest import raises
from menpo.math import as_matrix
from menpo.model import LinearVectorModel, PCAModel, PCAVectorModel
from menpo.shape import PointCloud
def test_linear_model_creation():
data = np.zeros((3, 120))
LinearVectorModel(data)
def test_linear_model_basics():
data = np.random.random((3, 120))
linear_model = LinearVectorModel(data)
assert linear_model.n_components == 3
assert linear_model.n_features == 120
def test_linear_model_project_vector():
data = np.zeros((3, 120))
data[0, 0] = 1
data[1, 1] = 1
data[2, 2] = 1
linear_model = LinearVectorModel(data)
sample = np.random.random(120)
weights = linear_model.project(sample)
assert_allclose(weights, sample[:3])
def test_linear_model_component():
data = np.random.random((3, 120))
linear_model = LinearVectorModel(data)
assert_equal(linear_model.component(2), data[2])
def test_linear_model_instance_vector():
data = np.zeros((3, 120))
data[0, 0] = 1
data[1, 1] = 1
data[2, 2] = 1
linear_model = LinearVectorModel(data)
weights = np.array([0.263, 7, 41.2])
projected = linear_model.instance(weights)
# only the first 3 features are non zero...
assert_allclose(projected[:3], weights)
# rest should be nil
assert_allclose(projected[3:], 0)
def test_pca_n_active_components():
samples = [np.random.randn(10) for _ in range(10)]
model = PCAVectorModel(samples)
# integer
model.n_active_components = 5
assert_equal(model.n_active_components, 5)
def test_pca_n_active_components_too_many():
samples = [np.random.randn(10) for _ in range(10)]
model = PCAVectorModel(samples)
# too many components
model.n_active_components = 100
assert_equal(model.n_active_components, 9)
# reset too smaller number of components
model.n_active_components = 5
assert_equal(model.n_active_components, 5)
# reset to too many components
model.n_active_components = 100
assert_equal(model.n_active_components, 9)
def test_pca_n_active_components_negative():
samples = [np.random.randn(10) for _ in range(10)]
model = PCAVectorModel(samples)
with raises(ValueError):
model.n_active_components = -5
def test_pca_trim():
samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
model = PCAModel(samples)
# trim components
model.trim_components(5)
# number of active components should be the same as number of components
assert_equal(model.n_active_components, model.n_components)
def test_pca_trim_variance_limit():
samples = [np.random.randn(10) for _ in range(10)]
model = PCAVectorModel(samples)
with raises(ValueError):
# impossible to keep more than 1.0 ratio variance
model.trim_components(2.5)
def test_pca_trim_negative_integers():
samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
model = PCAModel(samples)
with raises(ValueError):
# no negative number of components
model.trim_components(-2)
def test_pca_trim_negative_float():
samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
model = PCAModel(samples)
with raises(ValueError):
# no negative number of components
model.trim_components(-2)
def test_pca_variance():
samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
model = PCAModel(samples)
# kept variance must be equal to total variance
assert_equal(model.variance(), model.original_variance())
# kept variance ratio must be 1.0
assert_equal(model.variance_ratio(), 1.0)
# noise variance must be 0.0
assert_equal(model.noise_variance(), 0.0)
# noise variance ratio must be also 0.0
assert_equal(model.noise_variance_ratio(), 0.0)
def test_pca_inverse_noise_variance():
samples = [PointCloud(np.random.randn(10)) for _ in range(10)]
model = PCAModel(samples)
with raises(ValueError):
# inverse noise_variance it's not computable
model.inverse_noise_variance()
def test_pca_variance_after_change_n_active_components():
samples = [np.random.randn(10) for _ in range(10)]
model = PCAVectorModel(samples)
# set number of active components
model.n_active_components = 5
# kept variance must be smaller than total variance
assert model.variance() < model.original_variance()
# kept variance ratio must be smaller than 1.0
assert model.variance_ratio() < 1.0
# noise variance must be bigger than 0.0
assert model.noise_variance() > 0.0
# noise variance ratio must also be bigger than 0.0
assert model.noise_variance_ratio() > 0.0
# inverse noise variance is computable
assert model.inverse_noise_variance() == 1 / model.noise_variance()
def test_pca_variance_after_trim():
samples = [np.random.randn(10) for _ in range(10)]
model = PCAVectorModel(samples)
# set number of active components
model.trim_components(5)
# kept variance must be smaller than total variance
assert model.variance() < model.original_variance()
# kept variance ratio must be smaller than 1.0
assert model.variance_ratio() < 1.0
# noise variance must be bigger than 0.0
assert model.noise_variance() > 0.0
# noise variance ratio must also be bigger than 0.0
assert model.noise_variance_ratio() > 0.0
# inverse noise variance is computable
assert model.inverse_noise_variance() == 1 / model.noise_variance()
def test_pca_orthogonalize_against():
pca_samples = np.random.randn(10, 10)
pca_model = PCAVectorModel(pca_samples)
lm_samples = np.asarray([np.random.randn(10) for _ in range(4)])
lm_model = LinearVectorModel(np.asarray(lm_samples))
# orthogonalize
pca_model.orthonormalize_against_inplace(lm_model)
# number of active components must remain the same
assert_equal(pca_model.n_active_components, 6)
def test_pca_orthogonalize_against_with_less_active_components():
pca_samples = np.random.randn(10, 10)
pca_model = PCAVectorModel(pca_samples)
lm_samples = np.asarray([np.random.randn(10) for _ in range(4)])
lm_model = LinearVectorModel(np.asarray(lm_samples))
# set number of active components
pca_model.n_active_components = 5
# orthogonalize
pca_model.orthonormalize_against_inplace(lm_model)
# number of active components must remain the same
assert_equal(pca_model.n_active_components, 5)
def test_pca_increment_centred():
pca_samples = [PointCloud(np.random.randn(10, 2)) for _ in range(10)]
ipca_model = PCAModel(pca_samples[:3])
ipca_model.increment(pca_samples[3:6])
ipca_model.increment(pca_samples[6:])
bpca_model = PCAModel(pca_samples)
assert_almost_equal(np.abs(ipca_model.components), np.abs(bpca_model.components))
assert_almost_equal(ipca_model.eigenvalues, bpca_model.eigenvalues)
assert_almost_equal(ipca_model.mean().as_vector(), bpca_model.mean().as_vector())
def test_pca_increment_noncentred():
pca_samples = [np.random.randn(10) for _ in range(10)]
ipca_model = PCAVectorModel(pca_samples[:3], centre=False)
ipca_model.increment(pca_samples[3:6])
ipca_model.increment(pca_samples[6:])
bpca_model = PCAVectorModel(pca_samples, centre=False)
assert_almost_equal(np.abs(ipca_model.components), np.abs(bpca_model.components))
assert_almost_equal(ipca_model.eigenvalues, bpca_model.eigenvalues)
assert_almost_equal(ipca_model.mean(), bpca_model.mean())
def test_pca_vector_init_from_covariance():
n_samples = 30
n_features = 10
centre_values = [True, False]
for centre in centre_values:
# generate samples matrix and mean vector
samples = np.random.randn(n_samples, n_features)
mean = np.mean(samples, axis=0)
# compute covariance matrix
if centre:
X = samples - mean
C = np.dot(X.T, X) / (n_samples - 1)
else:
C = np.dot(samples.T, samples) / (n_samples - 1)
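# Note added for clarity: C is the sample covariance / second-moment matrix X^T X / (n - 1)
# (with or without mean-centring), from which init_from_covariance_matrix below should recover
# the same PCA basis as fitting PCAVectorModel on the raw samples.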
# create the 2 pca models
pca1 = PCAVectorModel.init_from_covariance_matrix(
C, mean, centred=centre, n_samples=n_samples
)
pca2 = PCAVectorModel(samples, centre=centre, inplace=False)
# compare them
assert_array_almost_equal(pca1.mean(), pca2.mean())
assert_array_almost_equal(
pca1.component(0, with_mean=False), pca2.component(0, with_mean=False)
)
assert_array_almost_equal(pca1.component(7), pca2.component(7))
assert_array_almost_equal(pca1.components, pca2.components)
|
assert_array_almost_equal(pca1.eigenvalues, pca2.eigenvalues)
|
numpy.testing.assert_array_almost_equal
|
# -*- coding: utf-8 -*-
"""
Module to perform core QuakeMigrate functions: detect() and locate().
"""
import warnings
import numpy as np
from obspy import UTCDateTime, Stream, Trace
from obspy.signal.invsim import cosine_taper
from obspy.signal.trigger import classic_sta_lta
import pandas as pd
from scipy.interpolate import Rbf
from scipy.optimize import curve_fit
from scipy.signal import butter, lfilter, fftconvolve
import QMigrate.core.model as qmod
import QMigrate.core.QMigratelib as ilib
import QMigrate.io.quakeio as qio
import QMigrate.plot.quakeplot as qplot
import QMigrate.util as util
# Filter warnings
warnings.filterwarnings("ignore", message=("Covariance of the parameters" +
" could not be estimated"))
warnings.filterwarnings("ignore", message=("File will be written with more" +
" than one different record" +
" lengths. This might have a" +
" negative influence on the" +
" compatibility with other" +
" programs."))
def sta_lta_centred(a, nsta, nlta):
"""
Calculates the ratio of the average signal in a short-term (signal) window
to a preceding long-term (noise) window. STA/LTA value is assigned to the
end of the LTA / start of the STA.
Parameters
----------
a : array-like
Signal array
nsta : int
Number of samples in short-term window
nlta : int
Number of samples in long-term window
Returns
-------
sta / lta : array-like
Ratio of the short-term average to the average in a preceding long-term
window. The STA/LTA value is assigned to the end of the LTA window /
start of the STA window -- "centred"
"""
nsta = int(round(nsta))
nlta = int(round(nlta))
# Cumulative sum to calculate moving average
sta = np.cumsum(a ** 2)
sta = np.require(sta, dtype=float)  # np.float is deprecated; builtin float keeps float64
lta = sta.copy()
# Compute the STA and the LTA
sta[nsta:] = sta[nsta:] - sta[:-nsta]
sta[nsta:-nsta] = sta[nsta*2:]
sta /= nsta
lta[nlta:] = lta[nlta:] - lta[:-nlta]
lta /= nlta
sta[:(nlta - 1)] = 0
sta[-nsta:] = 0
# Avoid division by zero by setting zero values to tiny float
dtiny = np.finfo(0.0).tiny
idx = lta < dtiny
lta[idx] = dtiny
return sta / lta
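# Illustrative usage sketch (added; not part of QuakeMigrate itself). Assuming a trace `a`
# sampled at the default 50 Hz with the default P onset windows [0.2, 1.0] s, the centred
# characteristic function would be computed as:
#   cf = sta_lta_centred(a, nsta=int(0.2 * 50), nlta=int(1.0 * 50))
# The STA/LTA value is aligned with the start of the STA window, which is what
# onset(..., centred=True) below relies on.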
def onset(sig, stw, ltw, centred=False):
"""
Calculate STA/LTA onset (characteristic) function from filtered seismic
data.
Parameters
----------
sig : array-like
Data signal used to generate an onset function
stw : int
Short term window length (# of samples)
ltw : int
Long term window length (# of samples)
centred : bool, optional
Compute centred STA/LTA (STA window is preceded by LTA window; value
is assigned to end of LTA window / start of STA window) or classic
STA/LTA (STA window is within LTA window; value is assigned to end of
STA & LTA windows).
Centred gives less phase-shifted (late) onset function, and is closer
to a Gaussian approximation, but is far more sensitive to data with
sharp offsets due to instrument failures. We recommend using classic
for detect() and centred for locate() if your data quality allows it.
This is the default behaviour; override by setting self.onset_centred.
Returns
-------
onset_raw : array-like
Raw STA/LTA ratio onset function generated from data
onset : array-like
log(1 + onset_raw) (natural log); values are clipped below at log(0.8), approx. -0.22.
"""
stw = int(round(stw))
ltw = int(round(ltw))
n_channels, n_samples = sig.shape
onset = np.copy(sig)
onset_raw = np.copy(sig)
for i in range(n_channels):
if np.sum(sig[i, :]) == 0.0:
onset[i, :] = 0.0
onset_raw[i, :] = onset[i, :]
else:
if centred is True:
onset[i, :] = sta_lta_centred(sig[i, :], stw, ltw)
else:
onset[i, :] = classic_sta_lta(sig[i, :], stw, ltw)
onset_raw[i, :] = onset[i, :]
np.clip(1 + onset[i, :], 0.8, np.inf, onset[i, :])
np.log(onset[i, :], onset[i, :])
return onset_raw, onset
def filter(sig, sampling_rate, lc, hc, order=2):
"""
Apply zero phase-shift Butterworth band-pass filter to seismic data.
Parameters
----------
sig : array-like
Data signal to be filtered
sampling_rate : int
Number of samples per second, in Hz
lc : float
Lowpass frequency of band-pass filter
hc : float
Highpass frequency of band-pass filter
order : int, optional
Number of corners. NOTE: two-pass filter effectively doubles the
number of corners.
Returns
-------
fsig : array-like
Filtered seismic data
"""
# Construct butterworth band-pass filter
b1, a1 = butter(order, [2.0 * lc / sampling_rate,
2.0 * hc / sampling_rate], btype="band")
nchan, nsamp = sig.shape
fsig = np.copy(sig)
# Apply cosine taper then apply band-pass filter in both directions
for ch in range(0, nchan):
fsig[ch, :] = fsig[ch, :] - fsig[ch, 0]
tap = cosine_taper(len(fsig[ch, :]), 0.1)
fsig[ch, :] = fsig[ch, :] * tap
fsig[ch, :] = lfilter(b1, a1, fsig[ch, ::-1])[::-1]
fsig[ch, :] = lfilter(b1, a1, fsig[ch, :])
return fsig
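# Note added for clarity: applying lfilter once over the time-reversed trace and once over the
# result is what gives the zero phase-shift response promised in the docstring, and is also why
# the `order` notes above say a two-pass filter effectively doubles the number of corners.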
class DefaultQuakeScan(object):
"""
Default parameter class for QuakeScan.
"""
def __init__(self):
"""
Initialise DefaultQuakeScan object.
Parameters (all optional)
----------
p_bp_filter : array-like, [float, float, int]
Butterworth bandpass filter specification
[lowpass, highpass, corners*]
*NOTE: two-pass filter effectively doubles the number of corners.
s_bp_filter : array-like, [float, float, int]
Butterworth bandpass filter specification
[lowpass, highpass, corners*]
*NOTE: two-pass filter effectively doubles the number of corners.
p_onset_win : array-like, floats
P onset window parameters
[STA, LTA]
s_onset_win : array-like, floats
S onset window parameters
[STA, LTA]
decimate : array-like, ints
Decimation factor in each grid axis to apply to the look-up table
[Dec_x, Dec_y, Dec_z]
time_step : float
Time length (in seconds) of time step used in detect(). Note: total
detect run duration should be divisible by time_step. Increasing
time_step will increase RAM usage during detect, but will slightly
speed up overall detect run.
sampling_rate : int
Desired sampling rate for input data; sampling rate that detect()
and locate() will be computed at.
onset_centred : bool, optional
Compute centred STA/LTA (STA window is preceded by LTA window;
value is assigned to end of LTA window / start of STA window) or
classic STA/LTA (STA window is within LTA window; value is assigned
to end of STA & LTA windows).
Centred gives less phase-shifted (late) onset function, and is
closer to a Gaussian approximation, but is far more sensitive to
data with sharp offsets due to instrument failures. We recommend
using classic for detect() and centred for locate() if your data
quality allows it. This is the default behaviour; override by
setting this variable.
pick_threshold : float (between 0 and 1)
For use with picking_mode = 'Gaussian'. Picks will only be made if
the onset function exceeds this percentile of the noise level
(average amplitude of onset function outside pick windows).
Recommended starting value: 1.0
picking_mode : str
Currently the only available picking mode is 'Gaussian'
fraction_tt : float
Defines width of time window around expected phase arrival time in
which to search for a phase pick as a function of the travel-time
from the event location to that station -- should be an estimate of
the uncertainty in the velocity model.
marginal_window : float
Time window (+/- marginal_window) about the maximum coalescence
time to marginalise the 4d coalescence grid compouted in locate()
to estimate the earthquake location and uncertainty. Should be an
estimate of the time uncertainty in the earthquake origin time -
a combination of the expected spatial error and the seismic
velocity in the region of the event
pre_pad : float, optional
Option to override the default pre-pad duration of data to read
before computing 4d coalescence in detect() and locate(). Default
value is calculated from the onset function durations.
n_cores : int
Number of cores to use on the executing host for detect() /locate()
continuous_scanmseed_write : bool
Option to continuously write the .scanmseed file outputted by
detect() at the end of every time step. Default behaviour is to
write at the end of the time period, or the end of each day.
plot_event_summary : bool, optional
Plot event summary figure - see QMigrate.plot.quakeplot for more
details.
plot_station_traces : bool, optional
Plot data and onset functions overlain by phase picks for each
station used in locate()
plot_coal_video : bool, optional
Plot coalescence video for each earthquake located in locate()
write_4d_coal_grid : bool, optional
Save the full 4d coalescence grid output by compute for each event
located by locate() -- NOTE these files are large.
write_cut_waveforms : bool, optional
Write raw cut waveforms for all data found in the archive for each
event located by locate() -- NOTE this data has not been processed
or quality-checked!
cut_waveform_format : str, optional
File format to write waveform data to. Options are all file formats
supported by obspy, including: "MSEED" (default), "SAC", "SEGY",
"GSE2"
pre_cut : float, optional
Specify how long before the event origin time to cut the waveform
data from
post_cut : float, optional
Specify how long after the event origin time to cut the waveform
data to
xy_files : list, string
List of file strings:
With columns ["File", "Color", "Linewidth", "Linestyle"]
Where File is the file path to the xy file to be plotted on the
map. File should contain two columns ["Longitude", "Latitude"].
** NOTE ** - do not include a header line in either file.
"""
# Filter parameters
self.p_bp_filter = [2.0, 16.0, 2]
self.s_bp_filter = [2.0, 12.0, 2]
# Onset window parameters
self.p_onset_win = [0.2, 1.0]
self.s_onset_win = [0.2, 1.0]
# Traveltime lookup table decimation factor
self.decimate = [1, 1, 1]
# Time step for continuous compute in detect
self.time_step = 120.
# Data sampling rate
self.sampling_rate = 50
# Centred onset function override -- None means it will be
# automatically set in detect() and locate()
self.onset_centred = None
# Pick related parameters
self.pick_threshold = 1.0
self.picking_mode = "Gaussian"
self.fraction_tt = 0.1
# Marginal window
self.marginal_window = 2.
# Default pre-pad for compute
self.pre_pad = None
# Number of cores to perform detect/locate on
self.n_cores = 1
# Toggle whether to incrementally write .scanmseed in detect()
self.continuous_scanmseed_write = False
# Plotting toggles
self.plot_event_summary = True
self.plot_station_traces = False
self.plot_coal_video = False
# Saving toggles
self.write_4d_coal_grid = False
self.write_cut_waveforms = False
self.cut_waveform_format = "MSEED"
self.pre_cut = None
self.post_cut = None
# xy files for plotting
self.xy_files = None
class QuakeScan(DefaultQuakeScan):
"""
QuakeMigrate scanning class
Forms the core of the QuakeMigrate method, providing wrapper functions for
the C-compiled migration methods.
Methods
-------
detect(start_time, end_time)
Core detection method -- compute decimated 3-D coalescence continuously
throughout entire time period; output as
.scanmseed (in mSEED format).
locate(start_time, end_time)
Core locate method -- compute 3-D coalescence over short time window
around candidate earthquake triggered from
coastream; output location & uncertainties
(.event file), phase picks (.picks file), plus
multiple optional plots / data for further
analysis and processing.
"""
raw_data = {}
filt_data = {}
onset_data = {}
DEFAULT_GAUSSIAN_FIT = {"popt": 0,
"xdata": 0,
"xdata_dt": 0,
"PickValue": -1}
EVENT_FILE_COLS = ["DT", "COA", "X", "Y", "Z",
"LocalGaussian_X", "LocalGaussian_Y", "LocalGaussian_Z",
"LocalGaussian_ErrX", "LocalGaussian_ErrY",
"LocalGaussian_ErrZ", "GlobalCovariance_X",
"GlobalCovariance_Y", "GlobalCovariance_Z",
"GlobalCovariance_ErrX", "GlobalCovariance_ErrY",
"GlobalCovariance_ErrZ"]
def __init__(self, data, lookup_table, output_path=None, run_name=None, log=False):
"""
Class initialisation method.
Parameters
----------
data : Archive object
Contains information on data archive structure and
read_waveform_data() method
lookup_table : str
Look-up table file path
output_path : str
Path of parent output directory: e.g. ./OUTPUT
run_name : str
Name of current run: all outputs will be saved in the directory
output_path/run_name
"""
DefaultQuakeScan.__init__(self)
self.data = data
lut = qmod.LUT()
lut.load(lookup_table)
self.lut = lut
if output_path is not None:
self.output = qio.QuakeIO(output_path, run_name, log)
else:
self.output = None
# Define post-pad as a function of the maximum travel-time between a
# station and a grid point plus the LTA (in case onset_centred is True)
# ---> applies to both detect() and locate()
ttmax = np.max(lut.fetch_map("TIME_S"))
lta_max = max(self.p_onset_win[1], self.s_onset_win[1])
self.post_pad = np.ceil(ttmax + 2 * lta_max)
self.log = log
msg = "=" * 120 + "\n"
msg += "=" * 120 + "\n"
msg += "\tQuakeMigrate - Coalescence Scanning - Path: {} - Name: {}\n"
msg += "=" * 120 + "\n"
msg += "=" * 120 + "\n"
msg = msg.format(self.output.path, self.output.name)
self.output.log(msg, self.log)
def __str__(self):
"""
Return short summary string of the QuakeScan object
It will provide information on all of the various parameters that the
user can/has set.
"""
out = "QuakeMigrate parameters"
out += "\n\tTime step\t\t:\t{}".format(self.time_step)
out += "\n\n\tData sampling rate\t:\t{}".format(self.sampling_rate)
out += "\n\n\tDecimation\t\t:\t[{}, {}, {}]".format(
self.decimate[0], self.decimate[1], self.decimate[2])
out += "\n\n\tBandpass filter P\t:\t[{}, {}, {}]".format(
self.p_bp_filter[0], self.p_bp_filter[1], self.p_bp_filter[2])
out += "\n\tBandpass filter S\t:\t[{}, {}, {}]".format(
self.s_bp_filter[0], self.s_bp_filter[1], self.s_bp_filter[2])
out += "\n\n\tOnset P [STA, LTA]\t:\t[{}, {}]".format(
self.p_onset_win[0], self.p_onset_win[1])
out += "\n\tOnset S [STA, LTA]\t:\t[{}, {}]".format(
self.s_onset_win[0], self.s_onset_win[1])
out += "\n\n\tPre-pad\t\t\t:\t{}".format(self.pre_pad)
out += "\n\tPost-pad\t\t:\t{}".format(self.post_pad)
out += "\n\n\tMarginal window\t\t:\t{}".format(self.marginal_window)
out += "\n\tPick threshold\t\t:\t{}".format(self.pick_threshold)
out += "\n\tPicking mode\t\t:\t{}".format(self.picking_mode)
out += "\n\tFraction ttime\t\t:\t{}".format(self.fraction_tt)
out += "\n\n\tCentred onset\t\t:\t{}".format(self.onset_centred)
out += "\n\n\tNumber of CPUs\t\t:\t{}".format(self.n_cores)
return out
def detect(self, start_time, end_time):
"""
Scans through continuous data calculating coalescence on a decimated
3D grid by back-migrating P and S onset (characteristic) functions.
Parameters
----------
start_time : str
Start time of continuous scan
end_time : str
End time of continuous scan (last sample returned will be that
which immediately precedes this time stamp)
log : bool, optional
Write output to a log file (default: False)
"""
# Convert times to UTCDateTime objects
start_time = UTCDateTime(start_time)
end_time = UTCDateTime(end_time)
# Decimate LUT
self.lut = self.lut.decimate(self.decimate)
# Detect uses the non-centred onset by default
if self.onset_centred is None:
self.onset_centred = False
# Define pre-pad as a function of the onset windows
if self.pre_pad is None:
self.pre_pad = max(self.p_onset_win[1],
self.s_onset_win[1]) \
+ 3 * max(self.p_onset_win[0],
self.s_onset_win[0])
msg = "=" * 120 + "\n"
msg += "\tDETECT - Continuous Seismic Processing\n"
msg += "=" * 120 + "\n"
msg += "\n"
msg += "\tParameters specified:\n"
msg += "\t\tStart time = {}\n"
msg += "\t\tEnd time = {}\n"
msg += "\t\tTime step (s) = {}\n"
msg += "\t\tNumber of CPUs = {}\n"
msg += "\n"
msg += "\t\tSampling rate = {}\n"
msg += "\t\tGrid decimation [X, Y, Z] = [{}, {}, {}]\n"
msg += "\t\tBandpass filter P = [{}, {}, {}]\n"
msg += "\t\tBandpass filter S = [{}, {}, {}]\n"
msg += "\t\tOnset P [STA, LTA] = [{}, {}]\n"
msg += "\t\tOnset S [STA, LTA] = [{}, {}]\n"
msg += "\n"
msg += "=" * 120
msg = msg.format(str(start_time), str(end_time), self.time_step,
self.n_cores, self.sampling_rate,
self.decimate[0], self.decimate[1], self.decimate[2],
self.p_bp_filter[0], self.p_bp_filter[1],
self.p_bp_filter[2], self.s_bp_filter[0],
self.s_bp_filter[1], self.s_bp_filter[2],
self.p_onset_win[0], self.p_onset_win[1],
self.s_onset_win[0], self.s_onset_win[1])
self.output.log(msg, self.log)
# Detect max coalescence value and location at each time sample
# within the decimated grid
self._continuous_compute(start_time, end_time)
def locate(self, start_time, end_time):
"""
Re-computes the 3D coalescence on a less decimated grid for a short
time window around each candidate earthquake triggered from the
decimated continuous detect scan. Calculates event location and
uncertainties, makes phase arrival picks, plus multiple optional
plotting / data outputs for further analysis and processing.
Parameters
----------
start_time : str
Start time of locate run: earliest event trigger time that will be
located
end_time : str
End time of locate run: latest event trigger time that will be
located is one sample before this time
log : bool, optional
Write output to a log file (default: False)
"""
# Convert times to UTCDateTime objects
start_time = UTCDateTime(start_time)
end_time = UTCDateTime(end_time)
msg = "=" * 120 + "\n"
msg += "\tLOCATE - Determining earthquake location and uncertainty\n"
msg += "=" * 120 + "\n"
msg += "\n"
msg += "\tParameters specified:\n"
msg += "\t\tStart time = {}\n"
msg += "\t\tEnd time = {}\n"
msg += "\t\tNumber of CPUs = {}\n\n"
msg += "=" * 120 + "\n"
msg = msg.format(str(start_time), str(end_time), self.n_cores)
self.output.log(msg, self.log)
# Decimate LUT
self.lut = self.lut.decimate(self.decimate)
# Locate uses the centred onset by default
if self.onset_centred is None:
self.onset_centred = True
self._locate_events(start_time, end_time)
def _append_coastream(self, coastream, daten, max_coa, max_coa_norm, loc,
sampling_rate):
"""
Append latest timestep of detect() output to obspy.Stream() object.
Multiply by factor of ["1e5", "1e5", "1e6", "1e6", "1e3"] respectively
for channels ["COA", "COA_N", "X", "Y", "Z"], round and convert to
int32 as this dramatically reduces memory usage, and allows the
coastream data to be saved in mSEED format with STEIM2 compression.
The multiplication factor is removed when the data is read back in.
Parameters
----------
coastream : obspy Stream object
Data output by detect() so far
channels: ["COA", "COA_N", "X", "Y", "Z"]
NOTE these values have been multiplied by a factor and converted to
an int
daten : array-like
Array of UTCDateTime time stamps for the time step
max_coa : array-like
Coalescence value through time
max_coa_norm : array-like
Normalised coalescence value through time
loc : array-like
Location of maximum coalescence through time
sampling_rate : int
Sampling rate that detect is run at.
Returns
-------
coastream : obspy Stream object
Data output by detect() so far with most recent timestep appended
channels: ["COA", "COA_N", "X", "Y", "Z"]
NOTE these values have been multiplied by a factor and converted to
an int
"""
# clip max value of COA to prevent int overflow
max_coa[max_coa > 21474.] = 21474.
max_coa_norm[max_coa_norm > 21474.] = 21474.
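# Note added for clarity: 21474. is used because max_coa and max_coa_norm are scaled by 1e5 and
# cast to int32 below; the int32 maximum is 2**31 - 1 = 2147483647, i.e. roughly 21474.8 * 1e5,
# so the clip keeps the scaled values inside the int32 range.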
npts = len(max_coa)
starttime = UTCDateTime(daten[0])
meta = {"network": "NW",
"npts": npts,
"sampling_rate": sampling_rate,
"starttime": starttime}
st = Stream(Trace(data=np.round(max_coa * 1e5).astype(np.int32),
header={**{"station": "COA"}, **meta}))
st += Stream(Trace(data=np.round(max_coa_norm * 1e5).astype(np.int32),
header={**{"station": "COA_N"}, **meta}))
st += Stream(Trace(data=np.round(loc[:, 0] * 1e6).astype(np.int32),
header={**{"station": "X"}, **meta}))
st += Stream(Trace(data=np.round(loc[:, 1] * 1e6).astype(np.int32),
header={**{"station": "Y"}, **meta}))
st += Stream(Trace(data=np.round(loc[:, 2] * 1e3).astype(np.int32),
header={**{"station": "Z"}, **meta}))
if coastream is not None:
coastream = coastream + st
coastream.merge(method=-1)
else:
coastream = st
# Have we moved to the next day? If so write out the file and empty
# coastream
written = False
if coastream[0].stats.starttime.julday != \
coastream[0].stats.endtime.julday:
write_start = coastream[0].stats.starttime
write_end = UTCDateTime(coastream[0].stats.endtime.date) \
- 1 / coastream[0].stats.sampling_rate
self.output.write_coastream(coastream, write_start, write_end)
written = True
coastream.trim(starttime=write_end + 1 / sampling_rate)
return coastream, written
def _continuous_compute(self, start_time, end_time):
"""
Compute coalescence between two time stamps, divided into small time
steps. Outputs coastream and station availability data to file.
Parameters
----------
start_time : UTCDateTime object
Time stamp of first sample
end_time : UTCDateTime object
Time stamp of final sample
"""
coastream = None
t_length = self.pre_pad + self.post_pad + self.time_step
self.pre_pad += np.ceil(t_length * 0.06)
self.post_pad += np.ceil(t_length * 0.06)
try:
nsteps = int(np.ceil((end_time - start_time) / self.time_step))
except AttributeError:
msg = "Error: Time step has not been specified"
self.output.log(msg, self.log)
# Initialise pandas DataFrame object to track availability
stn_ava_data = pd.DataFrame(index=np.arange(nsteps),
columns=self.data.stations)
for i in range(nsteps):
timer = util.Stopwatch()
w_beg = start_time + self.time_step * i - self.pre_pad
w_end = start_time + self.time_step * (i + 1) + self.post_pad
msg = ("~" * 24) + " Processing : {} - {} " + ("~" * 24)
msg = msg.format(str(w_beg), str(w_end))
self.output.log(msg, self.log)
try:
self.data.read_waveform_data(w_beg, w_end, self.sampling_rate)
daten, max_coa, max_coa_norm, loc, map_4d = self._compute(
w_beg, w_end,
self.data.signal,
self.data.availability)
stn_ava_data.loc[i] = self.data.availability
coord = self.lut.xyz2coord(loc)
del loc, map_4d
except util.ArchiveEmptyException:
msg = "!" * 24 + " " * 16
msg += " No files in archive for this time step "
msg += " " * 16 + "!" * 24
self.output.log(msg, self.log)
daten, max_coa, max_coa_norm, coord = self._empty(w_beg, w_end)
stn_ava_data.loc[i] = self.data.availability
except util.DataGapException:
msg = "!" * 24 + " " * 9
msg += "All available data for this time period contains gaps"
msg += " " * 10 + "!" * 24
msg += "\n" + "!" * 24 + " " * 11
msg += "or data not available at start/end of time period"
msg += " " * 12 + "!" * 24
self.output.log(msg, self.log)
daten, max_coa, max_coa_norm, coord = self._empty(w_beg, w_end)
stn_ava_data.loc[i] = self.data.availability
stn_ava_data.rename(index={i: str(w_beg + self.pre_pad)},
inplace=True)
# Append up to the sample before last - if end_time is
# 2014-08-24T00:00:00, your last sample will be 2014-08-23T23:59:59
coastream, written = self._append_coastream(coastream,
daten[:-1],
max_coa[:-1],
max_coa_norm[:-1],
coord[:-1, :],
self.sampling_rate)
del daten, max_coa, max_coa_norm, coord
if self.continuous_scanmseed_write and not written:
self.output.write_coastream(coastream)
written = True
self.output.log(timer(), self.log)
if not written:
self.output.write_coastream(coastream)
del coastream
self.output.write_stn_availability(stn_ava_data)
self.output.log("=" * 120, self.log)
def _locate_events(self, start_time, end_time):
"""
Loop through list of earthquakes read in from trigger results and
re-compute coalescence; output phase picks, event location and
uncertainty, plus optional plots and outputs.
Parameters
----------
start_time : UTCDateTime object
Start time of locate run: earliest event trigger time that will be
located
end_time : UTCDateTime object
End time of locate run: latest event trigger time that will be
located
"""
# Define pre-pad as a function of the onset windows
if self.pre_pad is None:
self.pre_pad = max(self.p_onset_win[1],
self.s_onset_win[1]) \
+ 3 * max(self.p_onset_win[0],
self.s_onset_win[0])
# Adjust pre- and post-pad to take into account cosine taper
t_length = self.pre_pad + 4*self.marginal_window + self.post_pad
self.pre_pad += np.ceil(t_length * 0.06)
self.post_pad += np.ceil(t_length * 0.06)
trig_events = self.output.read_triggered_events(start_time, end_time)
n_evts = len(trig_events)
for i, trig_event in trig_events.iterrows():
event_uid = trig_event["EventID"]
msg = "=" * 120 + "\n"
msg += "\tEVENT - {} of {} - {}\n"
msg += "=" * 120 + "\n\n"
msg += "\tDetermining event location...\n"
msg = msg.format(i + 1, n_evts, event_uid)
self.output.log(msg, self.log)
w_beg = trig_event["CoaTime"] - 2*self.marginal_window \
- self.pre_pad
w_end = trig_event["CoaTime"] + 2*self.marginal_window \
+ self.post_pad
timer = util.Stopwatch()
self.output.log("\tReading waveform data...", self.log)
try:
self._read_event_waveform_data(trig_event, w_beg, w_end)
except util.ArchiveEmptyException:
msg = "\tNo files found in archive for this time period"
self.output.log(msg, self.log)
continue
except util.DataGapException:
msg = "\tAll available data for this time period contains gaps"
msg += "\n\tOR data not available at start/end of time period\n"
self.output.log(msg, self.log)
continue
self.output.log(timer(), self.log)
timer = util.Stopwatch()
self.output.log("\tComputing 4D coalescence grid...", self.log)
daten, max_coa, max_coa_norm, loc, map_4d = self._compute(
w_beg, w_end,
self.data.signal,
self.data.availability)
coord = self.lut.xyz2coord(np.array(loc).astype(int))
event_coa_data = pd.DataFrame(np.array((daten, max_coa,
coord[:, 0],
coord[:, 1],
coord[:, 2])).transpose(),
columns=["DT", "COA", "X", "Y", "Z"])
event_coa_data["DT"] = event_coa_data["DT"].apply(UTCDateTime)
event_coa_data_dtmax = \
event_coa_data["DT"].iloc[event_coa_data["COA"].astype("float").idxmax()]
w_beg_mw = event_coa_data_dtmax - self.marginal_window
w_end_mw = event_coa_data_dtmax + self.marginal_window
if (event_coa_data_dtmax >= trig_event["CoaTime"]
- self.marginal_window) \
and (event_coa_data_dtmax <= trig_event["CoaTime"]
+ self.marginal_window):
w_beg_mw = event_coa_data_dtmax - self.marginal_window
w_end_mw = event_coa_data_dtmax + self.marginal_window
else:
msg = "\n\tEvent {} is outside marginal window.\n"
msg += "\tDefine more realistic error - the marginal window"
msg += " should be an estimate of the origin time uncertainty,"
msg += "\n\tdetermined by the expected spatial uncertainty and"
msg += "the seismic velocity in the region of the earthquake\n"
msg += "\n" + "=" * 120 + "\n"
msg = msg.format(event_uid)
self.output.log(msg, self.log)
continue
event_mw_data = event_coa_data
event_mw_data = event_mw_data[(event_mw_data["DT"] >= w_beg_mw) &
(event_mw_data["DT"] <= w_end_mw)]
map_4d = map_4d[:, :, :,
event_mw_data.index[0]:event_mw_data.index[-1]]
event_mw_data = event_mw_data.reset_index(drop=True)
event_max_coa = event_mw_data.iloc[event_mw_data["COA"].astype("float").idxmax()]
# Update event UID; make out_str
event_uid = str(event_max_coa.values[0])
for char_ in ["-", ":", ".", " ", "Z", "T"]:
event_uid = event_uid.replace(char_, "")
out_str = "{}_{}".format(self.output.name, event_uid)
self.output.log(timer(), self.log)
# Make phase picks
timer = util.Stopwatch()
self.output.log("\tMaking phase picks...", self.log)
phase_picks = self._phase_picker(event_max_coa)
self.output.write_picks(phase_picks["Pick"], event_uid)
self.output.log(timer(), self.log)
# Determining earthquake location error
timer = util.Stopwatch()
self.output.log("\tDetermining earthquake location and uncertainty...", self.log)
loc_spline, loc_gau, loc_gau_err, loc_cov, \
loc_cov_err = self._calculate_location(map_4d)
self.output.log(timer(), self.log)
# Make event dictionary with all final event location data
event = pd.DataFrame([[event_max_coa.values[0],
event_max_coa.values[1],
loc_spline[0], loc_spline[1], loc_spline[2],
loc_gau[0], loc_gau[1], loc_gau[2],
loc_gau_err[0], loc_gau_err[1],
loc_gau_err[2],
loc_cov[0], loc_cov[1], loc_cov[2],
loc_cov_err[0], loc_cov_err[1],
loc_cov_err[2]]],
columns=self.EVENT_FILE_COLS)
self.output.write_event(event, event_uid)
self._optional_locate_outputs(event_mw_data, event, out_str,
phase_picks, event_uid, map_4d)
self.output.log("=" * 120 + "\n", self.log)
del map_4d, event_coa_data, event_mw_data, event_max_coa, \
phase_picks
self.coa_map = None
def _read_event_waveform_data(self, event, w_beg, w_end):
"""
Read waveform data for a triggered event.
Parameters
----------
event : pandas DataFrame
Triggered event output from _trigger_scn().
Columns: ["EventNum", "CoaTime", "COA_V", "COA_X", "COA_Y", "COA_Z",
"MinTime", "MaxTime"]
w_beg : UTCDateTime object
Start datetime to read waveform data
w_end : UTCDateTime object
End datetime to read waveform data
Returns
-------
daten, max_coa, max_coa_norm, coord : array-like
Empty arrays with the correct shape to write to .scanmseed as if
they were coastream outputs from _compute()
"""
# Extra pre- and post-pad default to None
pre_pad = post_pad = None
if self.pre_cut:
# only subtract 1*marginal_window so if the event otime moves by
# this much the selected pre_cut can still be applied
pre_pad = self.pre_cut - self.marginal_window - self.pre_pad
if pre_pad < 0:
msg = "\t\tWarning: specified pre_cut {} is shorter than"
msg += "default pre_pad\n"
msg += "\t\t\tCutting from pre_pad = {}"
msg = msg.format(self.pre_cut, self.pre_pad)
self.output.log(msg, self.log)
pre_pad = None
if self.post_cut:
# only subtract 1*marginal_window so if the event otime moves by
# this much the selected post_cut can still be applied
post_pad = self.post_cut - self.marginal_window - \
self.post_pad
if post_pad < 0:
msg = "\t\tWarning: specified post_cut {} is shorter than"
msg += "default post_pad\n"
msg += "\t\t\tCutting to post_pad = {}"
msg = msg.format(self.post_cut, self.post_pad)
self.output.log(msg, self.log)
post_pad = None
self.data.read_waveform_data(w_beg, w_end, self.sampling_rate, pre_pad,
post_pad)
def _compute(self, w_beg, w_end, signal, station_availability):
"""
Compute 3-D coalescence between two time stamps.
Parameters
----------
w_beg : UTCDateTime object
Time stamp of first sample in window
w_end : UTCDateTime object
Time stamp of final sample in window
signal : array-like
Pre-processed continuous 3-component data stream for all available
stations -- linearly detrended, de-meaned, resampled if necessary
station_availability : array-like
List of available stations
Returns
-------
daten : array-like
UTCDateTime time stamp for each sample between w_beg and w_end
max_coa : array-like
Coalescence value through time
max_coa_norm : array-like
Normalised coalescence value through time
loc : array-like
Location of maximum coalescence through time
map_4d : array-like
4-D coalescence map
"""
avail_idx = np.where(station_availability == 1)[0]
sige = signal[0]
sign = signal[1]
sigz = signal[2]
p_onset_raw, p_onset = self._compute_p_onset(sigz,
self.sampling_rate)
s_onset_raw, s_onset = self._compute_s_onset(sige, sign,
self.sampling_rate)
self.data.p_onset = p_onset
self.data.s_onset = s_onset
self.data.p_onset_raw = p_onset_raw
self.data.s_onset_raw = s_onset_raw
ps_onset = np.concatenate((self.data.p_onset, self.data.s_onset))
ps_onset[np.isnan(ps_onset)] = 0
p_ttime = self.lut.fetch_index("TIME_P", self.sampling_rate)
s_ttime = self.lut.fetch_index("TIME_S", self.sampling_rate)
ttime = np.c_[p_ttime, s_ttime]
del p_ttime, s_ttime
nchan, tsamp = ps_onset.shape
pre_smp = int(round(self.pre_pad * int(self.sampling_rate)))
pos_smp = int(round(self.post_pad * int(self.sampling_rate)))
nsamp = tsamp - pre_smp - pos_smp
# Prep empty 4-D coalescence map and run C-compiled ilib.migrate()
ncell = tuple(self.lut.cell_count)
map_4d = np.zeros(ncell + (nsamp,), dtype=np.float64)
ilib.migrate(ps_onset, ttime, pre_smp, pos_smp, nsamp, map_4d,
self.n_cores)
# Prep empty coa and loc arrays and run C-compiled ilib.find_max_coa()
max_coa =
|
np.zeros(nsamp, np.double)
|
numpy.zeros
|
import numpy as np
from scipy.optimize import minimize
from numba import jit
'''
extract information from other numpy waveforms
'''
class Waveana(object):
def __init__(self, wave=[], eid=0):
self.eid = eid
self.wave = wave
self.meanBaseline = 0
self.std = 0
self.allCharge = 0
self.minPeakCharge = 0
self.minPeakBaseline = 0
self.minPeakStd = 0
self.end5mV = 0
self.begin5mV = 0
self.tpl = []
self.triggerTime = 0
def setWave(self, wave):
self.wave = wave
self.meanBaseline = 0
self.std = 0
self.allCharge = 0
self.minPeakCharge = 0
self.minPeakBaseline = np.average(wave)
self.minPeakStd = np.std(wave)
self.end5mV = 0
self.begin5mV = 0
self.minIndex = np.argmin(self.wave)
def getBaseline(self, nsigma=5, padding=5, threshold=1):
# Remove only the regions that exceed the detection threshold (plus/minus `padding` samples) and average the rest; a negative-pulse criterion is used
std = max(np.std(self.wave), threshold)
meanBaseline = np.average(self.wave)
signalPos = np.where(self.wave<(meanBaseline-nsigma*std))[0]
if signalPos.shape[0]>0:
signalPos = np.unique(np.clip(signalPos.reshape(-1,1)+np.arange(-padding,padding), 0, self.wave.shape[0]-1))
mask = np.ones(self.wave.shape[0]).astype(bool)
mask[signalPos] = False
cutWave = self.wave[mask]
else:
cutWave = self.wave
self.meanBaseline = np.average(cutWave)
self.std = np.std(cutWave)
return self.meanBaseline, self.std
def getBaselineFine(self, minIndex, nsigma=5, expandWidth=500, desiredWidth=100, padding=10):
# extract the wave; where possible use the leading 100 ns / 500 ns of the waveform to estimate the baseline, otherwise use the 500 ns immediately following the peak
if minIndex< desiredWidth:
begin = np.clip(minIndex+100, 0, self.wave.shape[0])
end = np.clip(begin+expandWidth, 0, self.wave.shape[0])
else:
begin = np.clip(minIndex - expandWidth, 0, self.wave.shape[0])
end = minIndex - 10
extractWave = self.wave[begin:end]
self.hist = np.histogram(extractWave, bins=1000, range=[1,1001])[0]
baselineEstimate = np.argmax(self.hist)+1
stdl = np.max([0,baselineEstimate-6])
stdr = np.min([self.hist.shape[0],baselineEstimate+5])
for i in range(baselineEstimate-1, np.max([0,baselineEstimate-6]),-1):
if self.hist[i]<(self.hist[baselineEstimate-1]/2):
stdl=i
break
for i in range(baselineEstimate, np.min([self.hist.shape[0],baselineEstimate+6])):
if self.hist[i]<(self.hist[baselineEstimate-1]/2):
stdr=i
break
stdEstimate = (stdr-stdl)/2.355/2
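# Note added for clarity: 2.355 is the usual FWHM-to-sigma conversion (FWHM = 2*sqrt(2*ln 2)*sigma
# ~= 2.355*sigma), with (stdr - stdl) approximating the full width at half maximum of the baseline
# histogram peak; the additional division by 2 appears to be a deliberately conservative tightening
# of the estimate (it is not documented in the source).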
threshold = np.clip(nsigma*stdEstimate,1,5)
signalPos = np.where(extractWave<(baselineEstimate-threshold))[0]
signalPos = np.unique(np.clip(signalPos.reshape(-1,1)+np.arange(-padding,padding), 0, extractWave.shape[0]-1))
mask = np.ones(extractWave.shape[0]).astype(bool)
mask[signalPos] = False
cutWave = extractWave[mask]
# take the segment with the smaller std, compare it with the estimate above, and keep the better one
cutWave2 = self.getSmallStdWave()
if np.std(cutWave2)<np.std(cutWave) or cutWave.shape[0]<=10:
cutWave = cutWave2
baselineEstimate = np.average(cutWave)
stdEstimate = np.std(cutWave)
# [begin,end] is the interval of baseline
self.begin = begin
self.end = end
if np.isnan(self.minPeakStd):
print(cutWave)
print(self.minPeakStd,self.minPeakBaseline)
exit(1)
self.minPeakBaseline = baselineEstimate
self.minPeakStd = stdEstimate
'''
threshold = np.clip(nsigma*stdEstimate,3,5)
cutWave0 = cutWave[(cutWave>=(baselineEstimate-threshold))&(cutWave<=(baselineEstimate+threshold))]
self.minPeakBaseline = np.average(cutWave0)
self.minPeakStd = np.std(cutWave0)
if np.isnan(self.minPeakStd):
print(cutWave)
print(cutWave0)
print(self.minPeakStd,self.minPeakBaseline)
exit(1)
'''
return self.minPeakBaseline
def getSmallStdWave(self, number=10):
step = int(self.wave.shape[0]/10)
stdArray = np.zeros(number)
for i in range(number):
stdArray[i] = np.std(self.wave[i*step:(i+1)*step])
smallIndex = np.argmin(stdArray)
return self.wave[smallIndex*step:(smallIndex+1)*step]
def findPeakStd(self, npeak=2, nsigma=3, chargeThreshold=15, baselength=50):
'''
use baseline noise std* nsigma as threshold for whether signal. nthreshold for the second signal
'''
peakflag = 0
peakList = []
threshold = self.meanBaseline-self.std*nsigma
self.modifyWave = self.wave+0
# if self.minPeak<self.std*nsigma:
# return peakList
# else:
peakList.append(self.resolve(self.minIndex,nsigma))
for n in range(npeak-1):
# negative wave; discrimina by charge
temp = self.resolve(np.argmin(self.modifyWave),nsigma)
if temp[2]>chargeThreshold:
peakList.append(temp)
return peakList
def resolve(self, peakPos, nsigma):
peak = self.modifyWave[peakPos]
        # find the baseline ahead of the peak, otherwise search behind it; then determine start and end
cursor = peakPos-1
threshold = self.meanBaseline-self.std*nsigma
while cursor>=100 and self.modifyWave[cursor]<=threshold:
cursor = cursor - 1
if cursor>= 100:
baseline = np.average(self.modifyWave[(cursor-100):(cursor-10)])
std = np.std(self.modifyWave[(cursor-100):(cursor-10)])
else:
cursor = peakPos+1
while cursor<=(self.modifyWave.shape[0]-100) and self.modifyWave[cursor]<=threshold:
cursor = cursor + 1
baseline = np.average(self.modifyWave[(cursor+10):(cursor+100)])
std = np.std(self.modifyWave[(cursor+10):(cursor+100)])
# update threshold
threshold = baseline - std*nsigma
cursor = peakPos - 1
while cursor>=0 and self.modifyWave[cursor]<=threshold:
cursor = cursor - 1
begin = cursor
if begin>10:
cBegin = begin-10
else:
cBegin = 0
cursor = peakPos + 1
while cursor<(self.modifyWave.shape[0]) and self.modifyWave[cursor]<=threshold:
cursor = cursor + 1
end = cursor
if end<(self.modifyWave.shape[0]-10):
cEnd = end + 10
else:
cEnd = self.modifyWave.shape[0]
peakRiseTime = peakPos-begin
peakDownTime = end - peakPos
peakCharge = np.sum(baseline-self.modifyWave[cBegin:cEnd])
self.modifyWave[begin:end] = baseline
return (peakPos, peakCharge, peak, peakRiseTime, peakDownTime, baseline, std)
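    # Editor's note: resolve() flattens the region of the peak it just measured back
    # to the local baseline in modifyWave, so the next np.argmin call in findPeakStd
    # finds the next-largest pulse instead of the same one again.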
def integrateWave(self):
baseline = self.minPeakBaseline
self.allCharge =
|
np.sum(baseline-self.wave)
|
numpy.sum
|
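Editor's illustration (not a dataset row): a minimal, self-contained sketch of the baseline-subtract-and-integrate pattern that the completion np.sum(baseline-self.wave) represents; all values below are made up.
import numpy as np
wave = np.full(200, 970.0)              # hypothetical ADC trace with a flat baseline
wave[80:85] -= 30.0                     # inject a small negative pulse
baseline = np.median(wave)              # crude baseline estimate for the sketch
all_charge = np.sum(baseline - wave)    # integrated charge of the pulse (150.0 here)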
from __future__ import division
import json
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from visigoth.tools import flexible_values
from visigoth.stimuli import ElementArray, Point, StimAperture
from psychopy import visual, event
class AttWedge(object):
def __init__(self, win, field_size, wedge_angle,
element_size, element_tex, element_mask, contrast,
sf_distr, prop_color, drift_rate, oddball_coherence):
# Define the length and width of the rectangular area with stims
self.length = length = field_size / 2 + 2 * element_size
self.width = width = 2 * np.tan(np.deg2rad(wedge_angle) / 2) * length
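        # Editor's note: tan(wedge_angle / 2) * length is half the far edge of the
        # wedge, so doubling it gives the full width of the rectangular patch.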
# Use poisson disc sampling to get roughly uniform coverage of the area
xys = poisson_disc_sample(length, width, element_size / 4)
self.xys = xys
# Assign parameters we will need when drawing the stimulus
self.edge_offset = width / 2 + element_size / 2
self.drift_step = drift_rate / win.framerate
self.sf_distr = sf_distr
self.prop_color = prop_color
self.wedge_angle = wedge_angle
self.oddball_coherence = oddball_coherence
self.element_size = element_size
self.element_tex = element_tex
self.element_mask = element_mask
# Initialize the angled bars that will be superimposed to define
# the "wedge" shape of the stimulus
l, w, o = length, width, 2 * element_size
self.edge_verts = [
|
np.array([(-o, 0), (l + o, 0), (l + o, +w), (-o, +w)])
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import gr3
import faulthandler
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import os
import gr
import signal
from . import spinVis_camera
from . import spinVis_coor
from PyQt5.QtWidgets import (QHBoxLayout, QCheckBox, QRadioButton, QButtonGroup, QLineEdit, QVBoxLayout, QPushButton,
                             QLabel, QTableWidgetItem) # Import of the various QtWidgets
from PyQt5.QtCore import Qt
import math
import numpy as np
class MainWindow(QtWidgets.QWidget): #Class MainWindow is the overall window, consisting of a GUI and a canvas part
def __init__(self,ladestyle , *args, **kwargs):
super().__init__(**kwargs) #Takes data from parent
self.inputstyle = ladestyle #Shows if data comes from pipe or file selection
self.initUI() #Initialize User Interfaces
pass
def initUI(self):
self.setWindowTitle('SpinVis2 by PGI/JCNS-TA')
self.draw_window = GLWidget() #Initialisation of the canvas window
self.gui_window = GUIWindow(
            self.draw_window, self.inputstyle) #Initialisation of the GUI with the GLWindow as a parameter
self.draw_window.setMinimumSize(700, 700) #Size 700x700 is the biggest window possible for the laptop display of a MacBook Pro
self.gui_window.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
self.gui_window.setFocusPolicy(Qt.ClickFocus)
        self.complete_window_hbox = QHBoxLayout() # Horizontal layout to place both side by side
self.complete_window_hbox.addWidget(self.draw_window)
self.complete_window_hbox.addWidget(self.gui_window)
self.complete_window_hbox.setContentsMargins(0, 0, 0, 0)
        self.complete_window_hbox.setSpacing(0) # Set the spacing to 0 so both sit directly next to each other
self.setLayout(self.complete_window_hbox)
def keyPressEvent(self, QKeyEvent):
#self.gui_window.p_win.perspective_check.setFocusPolicy(Qt.NoFocus)
#self.gui_window.p_win.orthographic_check.setFocusPolicy(Qt.NoFocus)
if QKeyEvent.key() == QtCore.Qt.Key_Right:
self.draw_window.rotate_right()
if QKeyEvent.key() == QtCore.Qt.Key_Left:
self.draw_window.rotate_left()
if QKeyEvent.key() == QtCore.Qt.Key_Up:
self.draw_window.rotate_up()
if QKeyEvent.key() == QtCore.Qt.Key_Down:
self.draw_window.rotate_down()
if QKeyEvent.key() == QtCore.Qt.Key_A:
self.draw_window.move_left()
if QKeyEvent.key() == QtCore.Qt.Key_D:
self.draw_window.move_right()
if QKeyEvent.key() == QtCore.Qt.Key_W:
self.draw_window.move_up()
if QKeyEvent.key() == QtCore.Qt.Key_S:
self.draw_window.move_down()
if QKeyEvent.key() == QtCore.Qt.Key_Z:
spinVis_camera.zoom(0.1, self.draw_window.width(), self.draw_window.height())
if QKeyEvent.key() == QtCore.Qt.Key_X:
spinVis_camera.zoom(- 0.1, self.draw_window.width(), self.draw_window.height())
self.draw_window.update()
class GUIWindow(
        QtWidgets.QScrollArea): # The GUI window is composed of individual windows stacked vertically below one another
def __init__(self,glwindow, ladestyle, *args, **kwargs):
super().__init__(*args, **kwargs)
self._camera_angle = 0.0
        self._glwindow = glwindow # Store the GL window as a local private variable
self.ladestyle = ladestyle
self.initUI()
pass
def initUI(self):
self.parent_widget = QtWidgets.QWidget()
self.pgroup = QtWidgets.QGroupBox()
self.p_win = ProjectionWindow(self._glwindow)
        self.slide_win = AngleWindow(self._glwindow) # Slider box layout for camera control via sliders
self.t_win = TranslationWindow(self._glwindow)
self.bond_win = BondWindow(self._glwindow)
        self.screen_win = ScreenWindow(self._glwindow) # Screenshot box layout for screenshot control
self.cs_win = SpinColorWindow(self._glwindow)
        self.l_win = DataLoadWindow(self._glwindow, self.ladestyle, self.cs_win) # Load box layout for loading a new data set
        self.c_win = ColorWindow(self._glwindow, self.cs_win) # Color box layout for setting the background and spin colors
self.v_win = VideoWindow(self._glwindow)
self.vbox = QVBoxLayout()
self.vbox.addWidget(self.p_win)
self.vbox.addWidget(self.slide_win)
self.vbox.addWidget(self.t_win)
self.vbox.addWidget(self.bond_win)
self.vbox.addWidget(self.screen_win)
self.vbox.addWidget(self.l_win)
self.vbox.addWidget(self.cs_win)
self.vbox.addWidget(self.c_win)
self.vbox.addWidget(self.v_win)
self.vbox.addStretch(1)
self.parent_widget.setLayout(self.vbox)
self.setWidget(self.parent_widget)
self.setWidgetResizable(True)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
class ProjectionWindow(QtWidgets.QWidget): #
def __init__(self, glwindow, *args, **kwargs):
super().__init__(*args, **kwargs)
self._camera_angle = 0.0
        self._glwindow = glwindow # Store the GL window as a local private variable
self.initUI()
pass
def initUI(self):
self.projectiongroup = QtWidgets.QGroupBox("Projection Window")
self.projectiongroup.setTitle("Projection Window")
        self.projectiongroup.setToolTip("The projection type defines how a 3D object is pictured on a 2D screen. \n"
                                        "The perspective projection simulates the effects of the real world. Objects \n"
                                        "farther away appear smaller. The orthographic projection depicts every object \n"
                                        "at the same size. It may reveal patterns in the data that would otherwise remain hidden")
self.groupbox = QtWidgets.QVBoxLayout()
self.groupbox.addWidget(self.projectiongroup)
        self.projection_box = QHBoxLayout() # One large HBox holding a label and another HBox, which contains two VBox layouts, each with a label and a radio button
# self.projection_box.addStretch(1)
self.projection_label = QtWidgets.QLabel()
        self.projection_label.setText("Choose a perspective type:") # Pop-up that shows a help text
        self.checkboxbox = QHBoxLayout() # checkboxbox holds two HBoxes, each with a label and a radio button
self.perspective_box = QHBoxLayout()
self.perspective_check = QRadioButton()
self.perspective_check.setFocusPolicy(Qt.NoFocus)
self.perspective_label = QLabel()
        self.perspective_label.setText("Perspective")
# self.perspective_check.toggle()
self.perspective_box.addWidget(self.perspective_label)
self.perspective_box.addWidget(self.perspective_check)
        self.checkboxmanagment = QButtonGroup() # The button group makes the radio buttons exclusive
self.orthographic_box = QHBoxLayout()
self.orthographic_check = QRadioButton()
self.orthographic_check.setChecked(True)
self.orthographic_check.setFocusPolicy(Qt.NoFocus)
self.orthographic_label = QLabel()
self.orthographic_label.setText("Orthographic")
self.perspective_box.addWidget(self.orthographic_label)
self.perspective_box.addWidget(self.orthographic_check)
self.checkboxbox.addLayout(self.perspective_box)
self.checkboxbox.addLayout(self.orthographic_box)
self.orthographic_check.clicked.connect(self.radio_clicked)
self.perspective_check.clicked.connect(self.radio_clicked)
self.projection_box.addWidget(self.projection_label)
self.projection_box.addLayout(self.checkboxbox)
self.projectiongroup.setLayout(self.projection_box)
self.groupbox.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.groupbox)
pass
def radio_clicked(self):
if self.orthographic_check.isChecked():
spinVis_camera.is_orthograpghic = True
spinVis_camera.set_projection_type_orthographic()
else:
spinVis_camera.is_orthograpghic = False
spinVis_camera.set_projection_type_perspective()
self._glwindow.update()
def is_orthographic_projection(self):
if self.orthographic_check.isChecked():
return True
else:
return False
class SpinColorWindow(QtWidgets.QWidget):
def __init__(self, glwindow, *args, **kwargs):
super().__init__(*args, **kwargs)
self._camera_angle = 0.0
        self._glwindow = glwindow # Keep the GL window for slider control
self.initUI()
pass
def initUI(self):
self.spincolorgroup = QtWidgets.QGroupBox("Spin Color Window")
self.spincolorgroup.setTitle("Spin Color Window")
self.spincolorgroup.setToolTip("Click on the cell under the symbol of the spins you want to change color.\n"
"If you use the feature further down to change the color of all spins or \n"
"change the data set that is in use, the selection will be reseted. \n"
"If you want to load a color scheme, it must have the same list of symbols \n"
"like the one you have loaded right now. A saved color scheme can be found \n"
"under spinvis_color_save.txt")
self.groupbox = QtWidgets.QVBoxLayout()
self.groupbox.addWidget(self.spincolorgroup)
self.table_box = QtWidgets.QHBoxLayout()
self.button_box = QtWidgets.QVBoxLayout()
self.load_scheme_button = QtWidgets.QPushButton("Load scheme")
self.load_scheme_button.setFixedSize(130,30)
self.load_scheme_button.clicked.connect(self.load_color)
self.save_scheme_button = QtWidgets.QPushButton("Save scheme")
self.save_scheme_button.setFixedSize(130,30)
self.save_scheme_button.clicked.connect(self.save_color)
self.button_box.addWidget(self.save_scheme_button)
self.button_box.addWidget(self.load_scheme_button)
self.color_table = QtWidgets.QTableWidget(0, 0)
self.color_table.setFixedHeight(70)
self.color_table.setFocusPolicy(Qt.ClickFocus)
self.table_box.addWidget(self.color_table)
self.table_box.addLayout(self.button_box)
self.color_table.clicked.connect(self.on_click)
self.spincolorgroup.setLayout(self.table_box)
self.groupbox.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.groupbox)
def load_color(self):
        options = QtWidgets.QFileDialog.Options() # File dialog for selecting the data file
options |= QtWidgets.QFileDialog.DontUseNativeDialog
input, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Choose your data", "",
"All Files (*);;", options=options)
list = []
if (not input):
            pass # If nothing is selected, no new data set is chosen
else:
if not (input.endswith(".txt")):
print("Please make sure that your file is a '.txt'")
return
            try: # Try to open the path in order to catch an exception for a non-existent file (actually unnecessary with the path dialog)
with open(input, 'r') as f:
pass
except FileNotFoundError:
print("This path does not exist, please try again")
return
with open(input,
                      'r') as infile: # Open the file for this block and collect its content line by line
for line in infile.readlines():
line = line.strip()
list.append(line)
if len(list) == self.color_table.columnCount():
i= 0
for line in list:
                    helf = line.split() # whitespace as the separator
if helf[0] == self.color_table.horizontalHeaderItem(i).text().title():
rgb_list = [int(helf[1]), int(helf[2]), int(helf[3])]
rgb = QtGui.QColor.fromRgb(rgb_list[0], rgb_list[1], rgb_list[2])
self.color_table.item(0, i).setBackground(rgb)
spinVis_camera.set_symbol_spin_color([helf[1], helf[2], helf[3]], self.color_table.horizontalHeaderItem(
i).text().title())
i = i+1
def save_color(self):
f = open("spinvis_color_save.txt", "w")
for i in range(self.color_table.columnCount()):
symbol = self.color_table.horizontalHeaderItem(i).text().title()
rgbtuple = self.color_table.item(0, i).background().color().getRgb()
print(symbol + "\t" + str(rgbtuple[0]) + "\t" + str(rgbtuple[1]) + "\t" + str(rgbtuple[2]), file=f)
f.close()
def on_click(self):
spin_rgb = [c * 255 for c in
                    spinVis_camera.spin_rgb] # Convert the current color into the default value, scaling from 0-1 to 0-255
for currentQTableWidgetItem in self.color_table.selectedItems():
currentQTableWidgetItem.setSelected(False)
selectedColor = QtWidgets.QColorDialog.getColor(QtGui.QColor.fromRgb(*spin_rgb))
if selectedColor.isValid():
self.color_table.item(currentQTableWidgetItem.row(), currentQTableWidgetItem.column()).setBackground(selectedColor)
spinVis_camera.set_symbol_spin_color(selectedColor.getRgb(), self.color_table.horizontalHeaderItem(currentQTableWidgetItem.column()).text().title())
self._glwindow.update()
def fillTable(self, list):
symbol_list = np.array(list)
self.color_table.setRowCount(1)
f = open("listtest.txt", 'w')
for e in symbol_list:
print(e, file=f)
self.color_table.setColumnCount(len(symbol_list))
i = 0
symbol_list_int = symbol_list.tolist()
for e in symbol_list_int:
self.color_table.setHorizontalHeaderItem(i, QTableWidgetItem(str(e)))
self.color_table.setItem(0, i, QTableWidgetItem(""))
c = spinVis_camera.get_symbol_color(e)
self.color_table.item(0, i).setBackground(QtGui.QColor.fromRgb(int(c[0]), int(c[1]), int(c[2])))
i = i+1
def color_all_spins(self, rgb):
for e in range(self.color_table.columnCount().__int__()):
self.color_table.item(0, e).setBackground(QtGui.QColor.fromRgb(rgb[0], rgb[1], rgb[2]))
class DataLoadWindow(QtWidgets.QWidget):
def __init__(self, glwindow, ladestyle, spin_colour_win,*args, **kwargs):
super().__init__(*args, **kwargs)
self._camera_angle = 0.0
        self._glwindow = glwindow # Keep the GL window for slider control
self.lade_style = ladestyle
self.spin_colour_win = spin_colour_win
self.initUI()
pass
def initUI(self):
self.loadgroup = QtWidgets.QGroupBox("Data Load Window")
self.loadgroup.setTitle("Data Load Window")
self.loadgroup.setToolTip("Please choose a '.txt' file to load that has the following structure: \n"
"the position of the center of the spin, the direction of the spin, a symbol. \n"
"For example: -1.819 6.300 -25.500 0.022 -0.075 0.355 54. \n"
"Its important that the individual numbers are seperated by tabs. \n"
"But you can also save a data file. If you do so you can find it under \n"
"spinvis_save_data.txt.")
self.groupbox = QtWidgets.QVBoxLayout()
self.groupbox.addWidget(self.loadgroup)
self.load_label = QtWidgets.QLabel()
self.load_label.setText("Load new set of spins from a txt file:")
self.load_button = QPushButton('Load set', self)
self.load_button.setFixedSize(130, 30)
self.load_button.clicked.connect(self.load_file)
self.save_button = QPushButton('Save set', self)
self.save_button.setFixedSize(130, 30)
self.save_button.clicked.connect(self.save_data)
        self.hbox = QHBoxLayout() # HBox with a label and a button for loading
#self.hbox.addStretch(1)
self.hbox.addWidget(self.load_label)
self.hbox.addWidget(self.save_button)
self.hbox.addWidget(self.load_button)
self.loadgroup.setLayout(self.hbox)
self.groupbox.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.groupbox)
def save_data(self):
spinVis_camera.save_file()
def load_file(self):
        options = QtWidgets.QFileDialog.Options() # File dialog for selecting the data file
options |= QtWidgets.QFileDialog.DontUseNativeDialog
input, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Choose your data", "",
"All Files (*);;", options=options)
if (not input):
            pass # If nothing is selected, no new data set is chosen
else:
            self._glwindow.data_path = input # The input string is used and the new data set is selected
#try:
self._glwindow.setDataSet()
''' except TypeError:
typ_err_box = QtWidgets.QMessageBox()
typ_err_box.setIcon(2) # Gives warning Icon
typ_err_box.setText("Error ocurred while trying to load a data set!")
typ_err_box.setInformativeText(
"Something went wrong trying to open the data file. Please make sure that the the selected file is a '.txt'-file with the schematic"
" described in the tooltip.")
typ_err_box.exec_()'''
self.spin_colour_win.fillTable(spinVis_camera.fill_table())
class ColorWindow(QtWidgets.QWidget):
def __init__(self, glwindow,spin_color_window, *args, **kwargs):
super().__init__(*args, **kwargs)
self._camera_angle = 0.0
        self._glwindow = glwindow # Keep the GL window for slider control
self._spin_color_win = spin_color_window
self.initUI()
pass
def initUI(self):
self.colorgroup = QtWidgets.QGroupBox("Color Window")
self.colorgroup.setTitle("Color Window")
self.colorgroup.setToolTip("Click on either button to open a window where you can choose a color \n"
"for either the background or the spins. This color will be saved for \n"
"how long the app is running. However it will be reset if you restart \n"
"the app.")
self.groupbox = QtWidgets.QVBoxLayout()
self.groupbox.addWidget(self.colorgroup)
        self.color_hbox = QHBoxLayout() # HBox with two buttons and a label
self.color_label = QLabel()
self.color_label.setText("Choose a color:")
self.sphere_switch = QtWidgets.QPushButton()
self.sphere_switch.setText("Switch spheres")
self.sphere_switch.clicked.connect(self.switch_sphere)
self.bg_color_button = QPushButton('Background', self)
self.bg_color_button.setFixedSize(100, 30)
self.bg_color_button.clicked.connect(self.get_bg_color)
self.spin_color_button = QPushButton('Spins', self)
self.spin_color_button.setFixedSize(100, 30)
self.spin_color_button.clicked.connect(self.get_spin_color)
        self.color_dialog = QtWidgets.QColorDialog() # Dialog picker for choosing colors
self.color_hbox.addWidget(self.color_label)
self.color_hbox.addWidget(self.sphere_switch)
self.color_hbox.addWidget(self.bg_color_button)
self.color_hbox.addWidget(self.spin_color_button)
self.colorgroup.setLayout(self.color_hbox)
self.groupbox.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.groupbox)
def switch_sphere(self):
if self._glwindow._issphere:
self._glwindow._issphere = False
else:
self._glwindow._issphere = True
self._glwindow.spinDraw()
self._glwindow.update()
def get_bg_color(self):
bg_rgb = [c * 255 for c in
                  spinVis_camera.bg_rgb] # Convert the current color into the default value, scaling from 0-1 to 0-255
selectedColor = QtWidgets.QColorDialog.getColor(QtGui.QColor.fromRgb(
            *bg_rgb)) # Store the selection from the color dialog; the default value is the previously converted color
if selectedColor.isValid():
self._glwindow.set_bg_color(selectedColor.getRgb())
def get_spin_color(self):
spin_rgb = [c * 255 for c in
                    spinVis_camera.spin_rgb] # Convert the current color into the default value, scaling from 0-1 to 0-255
selectedColor = QtWidgets.QColorDialog.getColor(QtGui.QColor.fromRgb(
            *spin_rgb)) # Store the selection from the color dialog; the default value is the previously converted color
if selectedColor.isValid():
self._spin_color_win.color_all_spins(selectedColor.getRgb())
spin_rgb[0] = selectedColor.getRgb()[0] / 255
spin_rgb[1] = selectedColor.getRgb()[1] / 255
spin_rgb[2] = selectedColor.getRgb()[2] / 255
self._glwindow.set_spin_color(selectedColor.getRgb())
class VideoWindow(QtWidgets.QWidget):
def __init__(self, glwindow, *args, **kwargs):
super().__init__(*args, **kwargs)
self._camera_angle = 0.0
        self._glwindow = glwindow # Keep the GL window for slider control
self.initUI()
pass
def initUI(self):
self.vidgroup = QtWidgets.QGroupBox("Video Window")
self.vidgroup.setTitle("Video Window")
self.vidgroup.setToolTip("Create a video by clicking once on the 'Make a video' button. Pressing a second time\n"
"will finish the video. The default parameters are 1920*1920 pixels and 60 fps.\n"
"But if you enter valid value, they will be used instead. The default name is spinvis_output.mp4")
self.groupbox = QtWidgets.QVBoxLayout()
self.vidbox = QHBoxLayout()
self.vidlabel = QLabel()
self.vidlabel.setText("Create a video: ")
self.groupbox.addWidget(self.vidgroup)
self.namebox = QVBoxLayout()
self.namelabel = QLabel()
self.namelabel.setText("Name the video:")
self.vidname = QtWidgets.QTextEdit()
self.vidname.setFixedSize(100,25)
self.namebox.addWidget(self.vidlabel)
self.namebox.addWidget(self.vidname)
self.fpsbox = QVBoxLayout()
self.fpslabel = QLabel()
self.fpslabel.setText("FPS:")
self.validator = QtGui.QIntValidator(1, 120, self)
self.fpscounter = QtWidgets.QLineEdit()
self.fpscounter.setValidator(self.validator)
self.fpscounter.setFixedSize(25,25)
self.fpsbox.addWidget(self.fpslabel)
self.fpsbox.addWidget(self.fpscounter)
self.resolution_validator = QtGui.QIntValidator(1, 4000, self)
self.widthbox = QVBoxLayout()
self.widthlabel = QLabel()
self.widthlabel.setText("Width: ")
self.vidwidth = QtWidgets.QLineEdit()
self.vidwidth.setValidator(self.resolution_validator)
self.vidwidth.setFixedSize(50, 25)
self.widthbox.addWidget(self.widthlabel)
self.widthbox.addWidget(self.vidwidth)
self.heighthbox = QVBoxLayout()
self.heightlabel = QLabel()
self.heightlabel.setText("Height: ")
self.vidheight = QtWidgets.QLineEdit()
self.vidheight.setValidator(self.resolution_validator)
self.vidheight.setFixedSize(50, 25)
self.heighthbox.addWidget(self.heightlabel)
self.heighthbox.addWidget(self.vidheight)
self.vidbutton = QPushButton("Make a video")
self.vidbutton.clicked.connect(self.doVideo)
self.vidbox.addLayout(self.namebox)
self.vidbox.addLayout(self.fpsbox)
self.vidbox.addLayout(self.widthbox)
self.vidbox.addLayout(self.heighthbox)
self.vidbox.addWidget(self.vidbutton)
self.vidgroup.setLayout(self.vidbox)
self.groupbox.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.groupbox)
def doVideo(self):
self._glwindow.make_video(self.vidname.toPlainText().title(), self.fpscounter.text().title(),
self.vidwidth.text().title(), self.vidheight.text().title())
pass
class ScreenWindow(QtWidgets.QWidget):
def __init__(self, glwindow, *args, **kwargs):
super().__init__(*args, **kwargs)
self._camera_angle = 0.0
        self._glwindow = glwindow # Keep the GL window for slider control
self.initUI()
pass
def initUI(self):
self.screengroup = QtWidgets.QGroupBox("Screenshot Window")
self.screengroup.setTitle("Screenshot Window")
self.screengroup.setToolTip("Click the button to generate a screenshot. This screenshot contains only the\n"
"coloured part of the window, not the gui. Currently length is the fixed size \n"
"of 1000 * 1000px, but we are working on a setting for that. The screenshot can \n"
"either be in the PNG or the HTML-format. Latter can be opend with your browser")
self.outer_box = QVBoxLayout() # Position the screengroup on the widget (self)
self.outer_box.addWidget(self.screengroup)
        self.fullbox = QHBoxLayout() # fullbox holds the screenshot button, the radio buttons and the input line, each in a VBox with labels
        self.screenbox = QVBoxLayout() # screenbox holds the label and the button named Screenshot
self.lbl = QLabel()
self.screenbutton = QPushButton('Screenshot', self)
self.screenbutton.setFixedSize(100, 30)
self.screenbutton.clicked.connect(self.doScreenshot)
self.lbl.setText("Screenshot")
self.lbl.setFixedSize(100, 30)
self.screenbox.addWidget(self.lbl)
self.screenbox.addWidget(self.screenbutton)
        self.fileVBox = QVBoxLayout() # fileVBox holds the input line and the label
self.fileName = QLineEdit()
self.fileName.setFocusPolicy(Qt.ClickFocus)
self.fileLabel = QLabel()
self.fileLabel.setText("Filename:")
self.fileVBox.addWidget(self.fileLabel)
self.fileVBox.addWidget(self.fileName)
        self.checkboxbox = QHBoxLayout() # checkboxbox holds two HBoxes, each with a label and a radio button
self.pngbox = QVBoxLayout()
self.pngcheck = QRadioButton()
self.pngcheck.setChecked(True)
self.pnglabel = QLabel()
self.pnglabel.setText("PNG")
self.pngcheck.toggle()
self.pngbox.addWidget(self.pnglabel)
self.pngbox.addWidget(self.pngcheck)
        self.checkboxmanagment = QButtonGroup() # The button group makes the radio buttons exclusive
self.povbox = QVBoxLayout()
self.povcheck = QRadioButton()
self.povcheck.setChecked(False)
self.povlabel = QLabel()
self.povlabel.setText("POV")
self.htmlbox = QVBoxLayout()
self.htmlcheck = QRadioButton()
self.htmlcheck.setChecked(False)
self.htmllabel = QLabel()
self.htmllabel.setText("HTML")
# self.pngcheck..connect(self.pngChange)
# self.htmlcheck.stateChanged.connect(self.htmlChange)
self.checkboxmanagment.addButton(
            self.pngcheck) # Add the radio buttons and then set the group to exclusive
self.checkboxmanagment.addButton(self.povcheck)
self.checkboxmanagment.addButton(self.htmlcheck)
        self.checkboxmanagment.setExclusive(True) # Exclusive, so that exactly one button can be active at a time
self.povbox.addWidget(self.povlabel)
self.povbox.addWidget(self.povcheck)
self.htmlbox.addWidget(self.htmllabel)
self.htmlbox.addWidget(self.htmlcheck)
self.checkboxbox.addLayout(self.pngbox)
self.checkboxbox.addLayout(self.povbox)
self.checkboxbox.addLayout(self.htmlbox)
        self.fullbox.addLayout(self.screenbox) # Add the individual boxes to the overall box
self.fullbox.addLayout(self.checkboxbox)
self.fullbox.addLayout(self.fileVBox)
self.screengroup.setLayout(self.fullbox)
self.outer_box.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.outer_box)
self.warning_box = QtWidgets.QMessageBox()
self.warning_box.setText("The name must be at least one character long")
pass
def doScreenshot(self):
        if self.pngcheck.isChecked(): # If the PNG box is checked, store the file name in a variable and hand everything to the
            dataname = self.fileName.text() # GL window, since errors occur otherwise
if dataname != "":
self._glwindow.export(dataname)
else:
self.warning_box.show()
else:
if self.htmlcheck.isChecked():
format = "html"
else:
format = "pov"
filename = spinVis_camera.make_screenshot(self.fileName.text(), format, 1920,
                                                      1920) # make_screenshot calls gr3.export with html/pov
spinVis_camera.render_povray(filename, block=False)
self._glwindow.update()
self.update()
pass
class AngleWindow(QtWidgets.QWidget):
def __init__(self, glwindow, *args, **kwargs):
super().__init__(*args, **kwargs)
self._camera_angle = 0.0
        self._glwindow = glwindow # Store the GL window
self.initUI()
pass
def initUI(self):
self.anglegroup = QtWidgets.QGroupBox("Angle Window")
self.anglegroup.setTitle("Angle Window")
        self.anglegroup.setToolTip("These 3 angles allow you to set a specific camera position. While phi and theta\n"
                                   "describe your camera position, alpha is used to show the rotation of the up vector.\n"
                                   "Any triple you enter gives a unique view. ")
self.groupbox = QtWidgets.QHBoxLayout()
self.groupbox.addWidget(self.anglegroup)
self.anglebox = QHBoxLayout()
self.theta_box = QHBoxLayout()
self.theta_lbl = QLabel()
self.theta_lbl.setText("Theta: ")
self.theta_input = QtWidgets.QLineEdit()
self.theta_input.setFocusPolicy(Qt.ClickFocus)
self.theta_input.setFixedSize(70,25)
self.theta_box.addWidget(self.theta_lbl)
self.theta_box.addWidget(self.theta_input)
#self.theta_validator = QtGui.QDoubleValidator(-1/2 * math.pi, math.pi/2,5, self)
#self.theta_input.setValidator(self.theta_validator)
self.phi_box = QHBoxLayout()
self.phi_lbl = QLabel()
self.phi_lbl.setText("Phi: ")
self.phi_input = QtWidgets.QLineEdit()
self.phi_input.setFocusPolicy(Qt.ClickFocus)
self.phi_input.setFixedSize(75,25)
self.phi_box.addWidget(self.phi_lbl)
self.phi_box.addWidget(self.phi_input)
#self.phi_validator = QtGui.QDoubleValidator(-1* math.pi, math.pi,5, self)
#self.phi_input.setValidator(self.phi_validator)
self.up_box = QHBoxLayout()
self.up_lbl = QLabel()
self.up_lbl.setText("Alpha: ")
self.up_input = QtWidgets.QLineEdit()
self.up_input.setFocusPolicy(Qt.ClickFocus)
self.up_input.setFixedSize(75, 25)
self.up_box.addWidget(self.up_lbl)
self.up_box.addWidget(self.up_input)
self.angle_button = QPushButton("Set camera")
self.angle_button.setMaximumSize(150, 25)
self.angle_button.clicked.connect(self.camera_change_from_angle)
euler_norm = np.linalg.norm(spinVis_coor.camera_koordinates)
euler_theta = math.acos(spinVis_coor.camera_koordinates[2] / euler_norm)
euler_phi = np.arctan2(spinVis_coor.camera_koordinates[1], spinVis_coor.camera_koordinates[0])
self.theta_input.setText(str(round(euler_theta, 5)))
self.phi_input.setText(str(round(euler_phi, 5)))
r = np.array([-math.sin(euler_phi), math.cos(euler_phi), 0])
v = np.array([-math.cos(euler_phi) * math.cos(euler_theta), -math.sin(euler_phi) * math.cos(euler_theta),
math.sin(euler_theta)])
alpha = math.atan2(-1 * np.dot(spinVis_camera.up_vector, r), np.dot(spinVis_camera.up_vector, v))
self.up_input.setText(str(round(alpha, 5)))
self.anglebox.addLayout(self.theta_box)
self.anglebox.addLayout(self.phi_box)
self.anglebox.addLayout(self.up_box)
self.anglebox.addWidget(self.angle_button)
self.anglegroup.setLayout(self.anglebox)
self._glwindow.register(self.theta_input)
self._glwindow.register(self.phi_input)
self._glwindow.register(self.up_input)
self.groupbox.setContentsMargins(0,0,0,0)
self.setLayout(self.groupbox)
def keyPressEvent(self, QKeyEvent):
if QKeyEvent.key() == QtCore.Qt.Key_Return or QKeyEvent.key() == QtCore.Qt.Key_Enter:
self.camera_change_from_angle()
def camera_change_from_angle(self):
try:
theta = float(self.theta_input.text().__str__())
phi = float(self.phi_input.text().__str__())
input_up = float(self.up_input.text().__str__())
r = np.array([-math.sin(phi), math.cos(phi), 0])
v = np.array([-math.cos(phi) * math.cos(theta), -math.sin(phi) * math.cos(theta), math.sin(theta)])
self._glwindow.new_up_v = v * math.cos(input_up) - r * math.sin(input_up)
spinVis_coor.euler_angles_to_koordinates(theta, phi, np.linalg.norm(spinVis_coor.camera_koordinates),
input_up)
self._glwindow.update()
except ValueError:
val_err_box = QtWidgets.QMessageBox()
val_err_box.setIcon(2) # Gives warning Icon
            val_err_box.setText("Error occurred while trying to recalculate the camera position!")
            val_err_box.setInformativeText("Your entered value was not a floating-point number. Please make sure that your input is right before trying to change the camera.")
val_err_box.exec_()
class BondWindow(QtWidgets.QWidget):
def __init__(self, glwindow, *args, **kwargs):
super().__init__(*args, **kwargs)
self._distance_threshold = 0.0
        self._glwindow = glwindow # Store the GL window
self.initUI()
spinVis_camera.bond_distance_threshold_callback = lambda value: self.threshold_input.setText(str(round(value, 5)))
def initUI(self):
self.bondgroup = QtWidgets.QGroupBox("Bond Window")
self.bondgroup.setTitle("Bond Window")
self.bondgroup.setToolTip("Set a distance threshold for bond calculation. The default value is 150 per cent\n"
"of the minimum distance between the centers of two spins.")
self.groupbox = QtWidgets.QHBoxLayout()
self.groupbox.addWidget(self.bondgroup)
self.bondbox = QHBoxLayout()
self.threshold_box = QHBoxLayout()
self.threshold_checkbox = QCheckBox("Show bonds")
self.threshold_checkbox.stateChanged.connect(self.update_bond_distance_threshold)
self.threshold_lbl = QLabel()
self.threshold_lbl.setText("Distance threshold: ")
self.threshold_input = QtWidgets.QLineEdit()
self.threshold_input.setFocusPolicy(Qt.ClickFocus)
self.threshold_input.setFixedSize(70, 25)
self.threshold_input.returnPressed.connect(self.update_bond_distance_threshold)
self.threshold_box.addWidget(self.threshold_checkbox)
self.threshold_box.addWidget(self.threshold_lbl)
self.threshold_box.addWidget(self.threshold_input)
self.bond_button = QPushButton("Set threshold")
self.bond_button.setMaximumSize(150, 25)
self.bond_button.clicked.connect(self.update_bond_distance_threshold)
self.bondbox.addLayout(self.threshold_box)
self.bondbox.addWidget(self.bond_button)
self.bondgroup.setLayout(self.bondbox)
self.groupbox.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.groupbox)
def update_bond_distance_threshold(self):
try:
if self.threshold_input.text().strip() != "":
threshold = float(self.threshold_input.text())
spinVis_camera.bond_distance_threshold = threshold
else:
spinVis_camera.bond_distance_threshold = None
spinVis_camera.bond_is_activated = self.threshold_checkbox.isChecked()
self._glwindow.spinDraw()
self._glwindow.update()
except ValueError:
val_err_box = QtWidgets.QMessageBox()
val_err_box.setIcon(2) # Gives warning Icon
            val_err_box.setText("Error occurred while trying to recalculate the bonds!")
            val_err_box.setInformativeText("Your entered value was not a floating-point number. Please make sure that your input is right before trying to change the bond distance threshold.")
val_err_box.exec_()
class TranslationWindow(QtWidgets.QWidget):
def __init__(self, glwindow, *args, **kwargs):
super().__init__(*args, **kwargs)
self._camera_angle = 0.0
        self._glwindow = glwindow # Store the GL window
self.initUI()
pass
def initUI(self):
self.translationgroup = QtWidgets.QGroupBox("Angle Window")
self.translationgroup.setTitle("Translation Window")
        self.translationgroup.setToolTip("These three coordinates let you translate the focus point of the camera.\n"
                                         "Enter X, Y and Z values and press 'Translate focus' to move the point\n"
                                         "the camera looks at. ")
self.groupbox = QtWidgets.QHBoxLayout()
self.groupbox.addWidget(self.translationgroup)
self.translationbox = QHBoxLayout()
self.x_box = QHBoxLayout()
self.x_lbl = QLabel()
self.x_lbl.setText("X: ")
self.x_input = QtWidgets.QLineEdit()
self.x_input.setFocusPolicy(Qt.ClickFocus)
self.x_input.setFixedSize(70,25)
self.x_box.addWidget(self.x_lbl)
self.x_box.addWidget(self.x_input)
#self.theta_validator = QtGui.QDoubleValidator(-1/2 * math.pi, math.pi/2,5, self)
#self.theta_input.setValidator(self.theta_validator)
self._glwindow._focus_observer.append(self.x_input.setText)
self.y_box = QHBoxLayout()
self.y_lbl = QLabel()
self.y_lbl.setText("Y: ")
self.y_input = QtWidgets.QLineEdit()
self.y_input.setFocusPolicy(Qt.ClickFocus)
self.y_input.setFixedSize(75,25)
self.y_box.addWidget(self.y_lbl)
self.y_box.addWidget(self.y_input)
self._glwindow._focus_observer.append(self.y_input.setText)
#self.phi_validator = QtGui.QDoubleValidator(-1* math.pi, math.pi,5, self)
#self.phi_input.setValidator(self.phi_validator)
self.z_box = QHBoxLayout()
self.z_lbl = QLabel()
self.z_lbl.setText("Z: ")
self.z_input = QtWidgets.QLineEdit()
self.z_input.setFocusPolicy(Qt.ClickFocus)
self.z_input.setFixedSize(75, 25)
self.z_box.addWidget(self.z_lbl)
self.z_box.addWidget(self.z_input)
self._glwindow._focus_observer.append(self.z_input.setText)
self.translation_button = QPushButton("Translate focus")
self.translation_button.setMaximumSize(150, 25)
self.translation_button.clicked.connect(self.change_focus_point)
self.x_input.setText(str(0))
self.y_input.setText(str(0))
self.z_input.setText(str(0))
self.translationbox.addLayout(self.x_box)
self.translationbox.addLayout(self.y_box)
self.translationbox.addLayout(self.z_box)
self.translationbox.addWidget(self.translation_button)
self.translationgroup.setLayout(self.translationbox)
self.groupbox.setContentsMargins(0,0,0,0)
self.setLayout(self.groupbox)
def keyPressEvent(self, QKeyEvent):
if QKeyEvent.key() == QtCore.Qt.Key_Return or QKeyEvent.key() == QtCore.Qt.Key_Enter:
self.change_focus_point()
def change_focus_point(self):
spinVis_camera.focus_point = np.array([float(self.x_input.text()), float(self.y_input.text()), float(self.z_input.text())])
spinVis_camera.grLookAt()
self._glwindow.update()
class GLWidget(QtWidgets.QOpenGLWidget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._first_paint = True
self._make_video = False
self._camera_angle = 0.0
        self._draw_spin = True # Prevents the spins from being redrawn on every camera movement
        self._export_screen = False # _export_screen is set to True on button press and thereby triggers export()
self.point_c_vektor = np.array([spinVis_coor.camera_koordinates[0], spinVis_coor.camera_koordinates[1],
spinVis_coor.camera_koordinates[
                                        2]]) # position vector of the point where the camera movement ends
self.current_camera_vector = np.array([spinVis_coor.camera_koordinates[0], spinVis_coor.camera_koordinates[1],
                                               spinVis_coor.camera_koordinates[2]]) # camera vector
        self.camera_vektor_length = np.linalg.norm(spinVis_coor.camera_koordinates) # norm of the camera vector
self.point_a_vektor = np.array([spinVis_coor.camera_koordinates[0], spinVis_coor.camera_koordinates[1],
spinVis_coor.camera_koordinates[
                                        2]]) # position vector of the point where the camera movement starts
        self._radius = 2 # radius of the arcball
        self.focus_point = np.array([0.0, 0.0, 0.0]) # focus point
self.new_up_v = np.array([0.0, 0.0, 1.0]) #
self._pressed_first_time = False
self._mouseY = 1.0
self._maus_z = 2.0
self.first_rot = True
self.upslidval = 0
self.sideslidval = 0
self.vid_timer = QtCore.QTimer()
self.vid_timer.timeout.connect(self.video_connect)
self._angle_observers = []
self._angle_blocker = []
self._focus_observer = []
self._issphere = spinVis_camera.IS_SPHERE_DEFAULT
self.data_path = ""
self.setAcceptDrops(True)
self.initUI()
pass
    def export(self, stringname): # Export function in the GL window to avoid a freeze
self._export_screen = True
        self.screendateiname = stringname # uses the file name that was passed in
self.update()
def dragEnterEvent(self, event):
data = event.mimeData()
urls = data.urls()
if urls and urls[0].scheme() == "file":
event.acceptProposedAction()
def dropEvent(self, event):
data = event.mimeData()
urls = data.urls()
if urls and urls[0].scheme() == "file":
filepath = str(urls[0].path())
self.data_path = filepath
self.setDataSet()
def initUI(self):
x = 0
y = 0
pass
def initializeGL(self):
gr3.init()
# test.eingabe()
gr3.usecurrentframebuffer()
spinVis_camera.grSetUp(self.width(),
                               self.height()) # grSetUp with the zoom variable and the angles for the sphere equation
def resizeGL(self, width, height):
spinVis_camera.grSetUp(self.width(), self.height())
def spinDraw(self):
spinVis_camera.grDrawSpin(None, self._issphere, skip_colors=True)
def paintGL(self):
gr3.usecurrentframebuffer()
if self._make_video:
gr.clearws()
gr3.drawimage(0, 1, 0, 1,
self.width(), self.height(),
gr3.GR3_Drawable.GR3_DRAWABLE_GKS)
gr.updatews()
        if self._export_screen: # take a screenshot and reset _export_screen to False for the next pass
spinVis_camera.grSetUp(1920, 1920)
spinVis_camera.make_screenshot(self.screendateiname, "png", 1920, 1920)
spinVis_camera.grSetUp(self.width(), self.height())
self._export_screen = False
gr3.drawimage(0, self.devicePixelRatio() * self.width(), 0, self.devicePixelRatio() * self.height(),
self.devicePixelRatio() * self.width(), self.devicePixelRatio() * self.height(),
gr3.GR3_Drawable.GR3_DRAWABLE_OPENGL)
def trackballCameraChange(self):
spinVis_camera.grCameraArcBallChange(self.current_camera_vector)
self.update()
def rotation_matrix(self, axis,
                        theta): # Computes, from an axis and an angle, the rotation matrix used to rotate the camera point
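        # Editor's note: this is the Euler-Rodrigues formula; (a, b, c, d) are the
        # components of the unit quaternion for a rotation of `theta` about `axis`.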
axis = np.array(axis)
if (math.sqrt(np.dot(axis, axis)) != 0):
axis = axis / math.sqrt(np.dot(axis, axis))
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
def mouseMoveEvent(self, e):
        if self._mouseX == e.x() and self._mouseY == self.height() - e.y(): # If there was no rotation, abort (the computation is unnecessary)
return
self._mouseX = e.x()
        self._mouseY = self.height() - e.y() # put the Y origin at the bottom left instead of the top left
        self.point_c_vektor[0] = 2 * self._mouseX / self.width() - 1 # map the values from 0..window size to the -1..1 range
self.point_c_vektor[1] = 2 * self._mouseY / self.height() - 1
        self.calculate_koordinates_from_mouseclick(self.point_c_vektor) # compute the 3D point of the camera movement
self.change_camera(self.point_a_vektor, self.point_c_vektor)
        self.point_a_vektor = list(self.point_c_vektor) # update vector a for the next computation
self.update()
def calculate_koordinates_from_mouseclick(self, list):
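        # Editor's note: standard arcball mapping - inside radius/sqrt(2) the point is
        # lifted onto the sphere z = sqrt(r^2 - x^2 - y^2), outside onto the hyperbolic
        # sheet z = r^2 / (2*(x^2 + y^2)), which keeps z continuous across the boundary.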
if (math.sqrt(list[0] ** 2 + list[1] ** 2) <= self._radius / math.sqrt(
                2)): # compute the 3D point via the arcball mapping
list[2] = math.sqrt(self._radius ** 2 - (list[0] ** 2 + list[1] ** 2))
else:
list[2] = self._radius ** 2 / (2 * (list[0] ** 2 + list[1] ** 2))
def wheelEvent(self, event):
pixels = event.pixelDelta()
degrees = event.angleDelta() / 8
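        # Editor's note: pixelDelta() is filled on devices with pixel-precise scrolling
        # (e.g. trackpads); angleDelta() is reported in eighths of a degree, hence the
        # division by 8 for ordinary mouse wheels.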
if not pixels.isNull():
zoom = pixels.y() / 10
else:
zoom = degrees.y() / 5
spinVis_camera.zoom(zoom, self.width(), self.height())
self.camera_vektor_length = np.linalg.norm(spinVis_coor.camera_koordinates)
# self.current_camera_vector = koor.camera_koordinates
self.update()
def mousePressEvent(self, e):
self._mouseX = e.x()
self._mouseY = self.height() - e.y()
        self.point_a_vektor[0] = 2 * self._mouseX / self.width() - 1 # map the values from 0..window size to the -1..1 range
self.point_a_vektor[1] = 2 * self._mouseY / self.height() - 1
self.calculate_koordinates_from_mouseclick(self.point_a_vektor)
pass
def recalculate_up_vector(self, forward_vector,
                              up_vector): # Computes the new up_vector from the new forward_vector
right_vector = np.cross(forward_vector, up_vector)
up_vector = np.cross(right_vector, forward_vector)
return up_vector / np.linalg.norm(up_vector)
def change_camera(self, start_point, end_point):
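        # Editor's note: arcball rotation - build the rotation axis from the cross
        # product of the two picked points, express it in world coordinates via the
        # camera's right/up/forward basis, rotate both the camera position and the up
        # vector, then push the new Euler angles back into the registered text fields.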
i = 0
self.new_up_v = self.recalculate_up_vector(spinVis_coor.camera_koordinates,
                                                   self.new_up_v) # update the up vector
        skalar = np.dot(start_point, end_point) # dot product of the two end points of the camera movement
        if skalar: # If the dot product is 0, i.e. the position vectors are orthogonal, nothing is computed
u = np.cross(start_point, end_point)
            up_vector = self.new_up_v # local instance of the up vector
forward_vector = spinVis_coor.camera_koordinates - spinVis_camera.focus_point
right_vector = np.cross(forward_vector, up_vector)
up_vector = np.cross(right_vector, forward_vector)
forward_vector /= np.linalg.norm(forward_vector)
right_vector /= np.linalg.norm(right_vector)
up_vector /= np.linalg.norm(up_vector)
u = u[0] * right_vector - u[1] * up_vector + u[2] * forward_vector
norm = np.linalg.norm(u)
theta = np.arctan(norm / skalar)
if norm:
self.new_up_v = np.dot(self.rotation_matrix(u, theta), self.new_up_v)
camera_vector = spinVis_coor.camera_koordinates - spinVis_camera.focus_point
spinVis_coor.camera_koordinates = np.dot(self.rotation_matrix(u, theta),
spinVis_coor.camera_koordinates)
spinVis_coor.camera_koordinates /= np.linalg.norm(spinVis_coor.camera_koordinates)
spinVis_coor.camera_koordinates *= self.camera_vektor_length
spinVis_coor.camera_koordinates = np.dot(self.rotation_matrix(u, theta),
spinVis_coor.camera_koordinates - spinVis_camera.focus_point) + spinVis_camera.focus_point
euler_norm = np.linalg.norm(spinVis_coor.camera_koordinates - spinVis_camera.focus_point)
euler_theta = math.acos((spinVis_coor.camera_koordinates[2] - spinVis_camera.focus_point[2]) / euler_norm)
euler_phi = np.arctan2(spinVis_coor.camera_koordinates[1] - spinVis_camera.focus_point[1], spinVis_coor.camera_koordinates[0] - spinVis_camera.focus_point[0])
for observer in self._angle_observers:
if (i == 0):
observer.setText(str(round(euler_theta, 5)))
i += 1
elif i == 1:
observer.setText(str(round(euler_phi, 5)))
i += 1
else:
r = np.array([-math.sin(euler_phi), math.cos(euler_phi), 0])
v = np.array([-math.cos(euler_phi)*math.cos(euler_theta), -math.sin(euler_phi)*math.cos(euler_theta), math.sin(euler_theta)])
alpha = math.atan2(-1 * np.dot(up_vector, r), np.dot(up_vector, v))
observer.setText(str(round(alpha, 5)))
spinVis_camera.setUpVektor(self.new_up_v)
spinVis_camera.grCameraArcBallChange(spinVis_coor.camera_koordinates)
def rotate_right(self):
center_point_vektor = np.array([0.0, 0.0, 0.0])
self.calculate_koordinates_from_mouseclick(center_point_vektor)
rotate_point_vektor = np.array([0.02, 0.0, 0.0])
self.calculate_koordinates_from_mouseclick(rotate_point_vektor)
self.change_camera(center_point_vektor, rotate_point_vektor)
def rotate_left(self):
center_point_vektor = np.array([0.0, 0.0, 0.0])
self.calculate_koordinates_from_mouseclick(center_point_vektor)
rotate_point_vektor = np.array([-0.02, 0.0, 0.0])
self.calculate_koordinates_from_mouseclick(rotate_point_vektor)
self.change_camera(center_point_vektor, rotate_point_vektor)
def rotate_up(self):
center_point_vektor = np.array([0.0, 0.0, 0.0])
self.calculate_koordinates_from_mouseclick(center_point_vektor)
rotate_point_vektor = np.array([0.0, 0.02, 0.0])
self.calculate_koordinates_from_mouseclick(rotate_point_vektor)
self.change_camera(center_point_vektor, rotate_point_vektor)
def rotate_down(self):
center_point_vektor =
|
np.array([0.0, 0.0, 0.0])
|
numpy.array
|
""" mateqn.py
Matrix equation solvers (Lyapunov, Riccati)
Implementation of the functions lyap, dlyap, care and dare
for solution of Lyapunov and Riccati equations. """
# Python 3 compatibility (needs to go here)
from __future__ import print_function
"""Copyright (c) 2011, All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the project author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
Author: <NAME>
"""
from numpy import shape, size, array, asarray, copy, zeros, eye, dot
from scipy.linalg import eigvals, solve_discrete_are, solve
from .exception import ControlSlycot, ControlArgument
from .statesp import _ssmatrix
__all__ = ['lyap', 'dlyap', 'dare', 'care']
#### Lyapunov equation solvers lyap and dlyap
def lyap(A, Q, C=None, E=None):
"""X = lyap(A, Q) solves the continuous-time Lyapunov equation
:math:`A X + X A^T + Q = 0`
where A and Q are square matrices of the same dimension.
Further, Q must be symmetric.
X = lyap(A,Q,C) solves the Sylvester equation
:math:`A X + X Q + C = 0`
where A and Q are square matrices.
X = lyap(A,Q,None,E) solves the generalized continuous-time
Lyapunov equation
:math:`A X E^T + E X A^T + Q = 0`
where Q is a symmetric matrix and A, Q and E are square matrices
of the same dimension.
"""
# Make sure we have access to the right slycot routines
try:
from slycot import sb03md
except ImportError:
raise ControlSlycot("can't find slycot module 'sb03md'")
try:
from slycot import sb04md
except ImportError:
raise ControlSlycot("can't find slycot module 'sb04md'")
# Reshape 1-d arrays
if len(shape(A)) == 1:
A = A.reshape(1, A.size)
if len(shape(Q)) == 1:
Q = Q.reshape(1, Q.size)
if C is not None and len(shape(C)) == 1:
C = C.reshape(1, C.size)
if E is not None and len(shape(E)) == 1:
E = E.reshape(1, E.size)
# Determine main dimensions
if size(A) == 1:
n = 1
else:
n = size(A, 0)
if size(Q) == 1:
m = 1
else:
m = size(Q, 0)
# Solve standard Lyapunov equation
if C is None and E is None:
# Check input data for consistency
if shape(A) != shape(Q):
raise ControlArgument("A and Q must be matrices of identical \
sizes.")
if size(A) > 1 and shape(A)[0] != shape(A)[1]:
raise ControlArgument("A must be a quadratic matrix.")
if size(Q) > 1 and shape(Q)[0] != shape(Q)[1]:
raise ControlArgument("Q must be a quadratic matrix.")
if not (asarray(Q) == asarray(Q).T).all():
raise ControlArgument("Q must be a symmetric matrix.")
# Solve the Lyapunov equation by calling Slycot function sb03md
try:
X,scale,sep,ferr,w = sb03md(n,-Q,A,eye(n,n),'C',trana='T')
except ValueError as ve:
if ve.info < 0:
e = ValueError(ve.message)
e.info = ve.info
elif ve.info == n+1:
e = ValueError("The matrix A and -A have common or very \
close eigenvalues.")
e.info = ve.info
else:
e = ValueError("The QR algorithm failed to compute all \
the eigenvalues (see LAPACK Library routine DGEES).")
e.info = ve.info
raise e
# Solve the Sylvester equation
elif C is not None and E is None:
# Check input data for consistency
if size(A) > 1 and shape(A)[0] != shape(A)[1]:
raise ControlArgument("A must be a quadratic matrix.")
if size(Q) > 1 and shape(Q)[0] != shape(Q)[1]:
raise ControlArgument("Q must be a quadratic matrix.")
if (size(C) > 1 and shape(C)[0] != n) or \
(size(C) > 1 and shape(C)[1] != m) or \
(size(C) == 1 and size(A) != 1) or (size(C) == 1 and size(Q) != 1):
raise ControlArgument("C matrix has incompatible dimensions.")
# Solve the Sylvester equation by calling the Slycot function sb04md
try:
X = sb04md(n,m,A,Q,-C)
except ValueError as ve:
if ve.info < 0:
e = ValueError(ve.message)
e.info = ve.info
elif ve.info > m:
e = ValueError("A singular matrix was encountered whilst \
                solving for the %i-th column of matrix X." % (ve.info-m))
e.info = ve.info
else:
e = ValueError("The QR algorithm failed to compute all the \
eigenvalues (see LAPACK Library routine DGEES).")
e.info = ve.info
raise e
# Solve the generalized Lyapunov equation
elif C is None and E is not None:
# Check input data for consistency
if (size(Q) > 1 and shape(Q)[0] != shape(Q)[1]) or \
(size(Q) > 1 and shape(Q)[0] != n) or \
(size(Q) == 1 and n > 1):
raise ControlArgument("Q must be a square matrix with the same \
dimension as A.")
if (size(E) > 1 and shape(E)[0] != shape(E)[1]) or \
(size(E) > 1 and shape(E)[0] != n) or \
(size(E) == 1 and n > 1):
raise ControlArgument("E must be a square matrix with the same \
dimension as A.")
if not (asarray(Q) == asarray(Q).T).all():
raise ControlArgument("Q must be a symmetric matrix.")
        # Make sure we have access to the right slycot routine
try:
from slycot import sg03ad
except ImportError:
raise ControlSlycot("can't find slycot module 'sg03ad'")
# Solve the generalized Lyapunov equation by calling Slycot
# function sg03ad
try:
A,E,Q,Z,X,scale,sep,ferr,alphar,alphai,beta = \
sg03ad('C','B','N','T','L',n,A,E,eye(n,n),eye(n,n),-Q)
except ValueError as ve:
if ve.info < 0 or ve.info > 4:
e = ValueError(ve.message)
e.info = ve.info
elif ve.info == 1:
e = ValueError("The matrix contained in the upper \
Hessenberg part of the array A is not in \
upper quasitriangular form")
e.info = ve.info
elif ve.info == 2:
e = ValueError("The pencil A - lambda * E cannot be \
reduced to generalized Schur form: LAPACK \
routine DGEGS has failed to converge")
e.info = ve.info
elif ve.info == 4:
e = ValueError("The pencil A - lambda * E has a \
degenerate pair of eigenvalues. That is, \
lambda_i = lambda_j for some i and j, where \
lambda_i and lambda_j are eigenvalues of \
A - lambda * E. Hence, the equation is \
singular; perturbed values were \
used to solve the equation (but the matrices \
A and E are unchanged)")
e.info = ve.info
raise e
# Invalid set of input parameters
else:
raise ControlArgument("Invalid set of input parameters")
return _ssmatrix(X)
def dlyap(A,Q,C=None,E=None):
""" dlyap(A,Q) solves the discrete-time Lyapunov equation
:math:`A X A^T - X + Q = 0`
where A and Q are square matrices of the same dimension. Further
Q must be symmetric.
dlyap(A,Q,C) solves the Sylvester equation
:math:`A X Q^T - X + C = 0`
where A and Q are square matrices.
dlyap(A,Q,None,E) solves the generalized discrete-time Lyapunov
equation
:math:`A X A^T - E X E^T + Q = 0`
where Q is a symmetric matrix and A, Q and E are square matrices
of the same dimension. """
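    # Editor's example (sketch, scalar case): dlyap(array([[0.5]]), array([[0.75]]))
    # should return [[1.]], since A X A^T - X + Q = 0.25 X - X + 0.75 = 0.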
# Make sure we have access to the right slycot routines
try:
from slycot import sb03md
except ImportError:
raise ControlSlycot("can't find slycot module 'sb03md'")
try:
from slycot import sb04qd
except ImportError:
raise ControlSlycot("can't find slycot module 'sb04qd'")
try:
from slycot import sg03ad
except ImportError:
raise ControlSlycot("can't find slycot module 'sg03ad'")
# Reshape 1-d arrays
if len(shape(A)) == 1:
A = A.reshape(1,A.size)
if len(shape(Q)) == 1:
Q = Q.reshape(1,Q.size)
if C is not None and len(shape(C)) == 1:
C = C.reshape(1,C.size)
if E is not None and len(shape(E)) == 1:
E = E.reshape(1,E.size)
# Determine main dimensions
if size(A) == 1:
n = 1
else:
n = size(A,0)
if size(Q) == 1:
m = 1
else:
m = size(Q,0)
# Solve standard Lyapunov equation
if C is None and E is None:
# Check input data for consistency
if shape(A) != shape(Q):
raise ControlArgument("A and Q must be matrices of identical \
sizes.")
if size(A) > 1 and shape(A)[0] != shape(A)[1]:
raise ControlArgument("A must be a quadratic matrix.")
if size(Q) > 1 and shape(Q)[0] != shape(Q)[1]:
raise ControlArgument("Q must be a quadratic matrix.")
if not (asarray(Q) == asarray(Q).T).all():
raise ControlArgument("Q must be a symmetric matrix.")
# Solve the Lyapunov equation by calling the Slycot function sb03md
try:
X,scale,sep,ferr,w = sb03md(n,-Q,A,eye(n,n),'D',trana='T')
except ValueError as ve:
if ve.info < 0:
e = ValueError(ve.message)
e.info = ve.info
else:
e = ValueError("The QR algorithm failed to compute all the \
eigenvalues (see LAPACK Library routine DGEES).")
e.info = ve.info
raise e
# Solve the Sylvester equation
elif C is not None and E is None:
# Check input data for consistency
if size(A) > 1 and shape(A)[0] != shape(A)[1]:
raise ControlArgument("A must be a quadratic matrix")
if size(Q) > 1 and shape(Q)[0] != shape(Q)[1]:
raise ControlArgument("Q must be a quadratic matrix")
if (size(C) > 1 and shape(C)[0] != n) or \
(size(C) > 1 and shape(C)[1] != m) or \
(size(C) == 1 and size(A) != 1) or (size(C) == 1 and size(Q) != 1):
raise ControlArgument("C matrix has incompatible dimensions")
# Solve the Sylvester equation by calling Slycot function sb04qd
try:
X = sb04qd(n,m,-A,asarray(Q).T,C)
except ValueError as ve:
if ve.info < 0:
e = ValueError(ve.message)
e.info = ve.info
elif ve.info > m:
e = ValueError("A singular matrix was encountered whilst \
                solving for the %i-th column of matrix X." % (ve.info-m))
e.info = ve.info
else:
e = ValueError("The QR algorithm failed to compute all the \
eigenvalues (see LAPACK Library routine DGEES)")
e.info = ve.info
raise e
# Solve the generalized Lyapunov equation
elif C is None and E is not None:
# Check input data for consistency
if (size(Q) > 1 and shape(Q)[0] != shape(Q)[1]) or \
(size(Q) > 1 and shape(Q)[0] != n) or \
(size(Q) == 1 and n > 1):
raise ControlArgument("Q must be a square matrix with the same \
dimension as A.")
if (size(E) > 1 and shape(E)[0] != shape(E)[1]) or \
(size(E) > 1 and shape(E)[0] != n) or \
(size(E) == 1 and n > 1):
raise ControlArgument("E must be a square matrix with the same \
dimension as A.")
if not (asarray(Q) == asarray(Q).T).all():
raise ControlArgument("Q must be a symmetric matrix.")
# Solve the generalized Lyapunov equation by calling Slycot
# function sg03ad
try:
A,E,Q,Z,X,scale,sep,ferr,alphar,alphai,beta = \
sg03ad('D','B','N','T','L',n,A,E,eye(n,n),eye(n,n),-Q)
except ValueError as ve:
if ve.info < 0 or ve.info > 4:
e = ValueError(ve.message)
e.info = ve.info
elif ve.info == 1:
e = ValueError("The matrix contained in the upper \
Hessenberg part of the array A is not in \
upper quasitriangular form")
e.info = ve.info
elif ve.info == 2:
e = ValueError("The pencil A - lambda * E cannot be \
reduced to generalized Schur form: LAPACK \
routine DGEGS has failed to converge")
e.info = ve.info
elif ve.info == 3:
e = ValueError("The pencil A - lambda * E has a \
pair of reciprocal eigenvalues. That is, \
lambda_i = 1/lambda_j for some i and j, \
where lambda_i and lambda_j are eigenvalues \
of A - lambda * E. Hence, the equation is \
singular; perturbed values were \
used to solve the equation (but the \
matrices A and E are unchanged)")
e.info = ve.info
raise e
# Invalid set of input parameters
else:
raise ControlArgument("Invalid set of input parameters")
return _ssmatrix(X)
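# Illustrative sketch (added for clarity; not part of the original module): a
# minimal dlyap() check for the standard discrete-time Lyapunov equation
# A X A^T - X + Q = 0.  The helper name and the sample matrices are arbitrary,
# and slycot must be installed for dlyap() to succeed.
def _demo_dlyap():
    import numpy as np
    A_demo = np.array([[0.5, 0.1], [0.0, 0.3]])   # Schur-stable A (|eig| < 1)
    Q_demo = np.eye(2)                            # symmetric weighting matrix
    X_demo = np.asarray(dlyap(A_demo, Q_demo))
    # The residual of A X A^T - X + Q should be numerically zero
    return np.allclose(A_demo @ X_demo @ A_demo.T - X_demo + Q_demo, 0.0)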
#### Riccati equation solvers care and dare
def care(A, B, Q, R=None, S=None, E=None, stabilizing=True):
""" (X,L,G) = care(A,B,Q,R=None) solves the continuous-time algebraic Riccati
equation
:math:`A^T X + X A - X B R^{-1} B^T X + Q = 0`
where A and Q are square matrices of the same dimension. Further,
Q and R are symmetric matrices. If R is None, it is set to the
identity matrix. The function returns the solution X, the gain
matrix G = R^{-1} B^T X and the closed-loop eigenvalues L, i.e., the
eigenvalues of A - B G.
(X,L,G) = care(A,B,Q,R,S,E) solves the generalized continuous-time
algebraic Riccati equation
:math:`A^T X E + E^T X A - (E^T X B + S) R^{-1} (B^T X E + S^T) + Q = 0`
where A, Q and E are square matrices of the same
dimension. Further, Q and R are symmetric matrices. If R is None,
it is set to the identity matrix. The function returns the
solution X, the gain matrix G = R^{-1} (B^T X E + S^T) and the
closed-loop eigenvalues L, i.e., the generalized eigenvalues of (A - B G, E)."""
# Make sure we can import required slycot routine
try:
from slycot import sb02md
except ImportError:
raise ControlSlycot("can't find slycot module 'sb02md'")
try:
from slycot import sb02mt
except ImportError:
raise ControlSlycot("can't find slycot module 'sb02mt'")
# Make sure we can find the required slycot routine
try:
from slycot import sg02ad
except ImportError:
raise ControlSlycot("can't find slycot module 'sg02ad'")
# Reshape 1-d arrays
if len(shape(A)) == 1:
A = A.reshape(1,A.size)
if len(shape(B)) == 1:
B = B.reshape(1,B.size)
if len(shape(Q)) == 1:
Q = Q.reshape(1,Q.size)
if R is not None and len(shape(R)) == 1:
R = R.reshape(1,R.size)
if S is not None and len(shape(S)) == 1:
S = S.reshape(1,S.size)
if E is not None and len(shape(E)) == 1:
E = E.reshape(1,E.size)
# Determine main dimensions
if size(A) == 1:
n = 1
else:
n = size(A,0)
if size(B) == 1:
m = 1
else:
m = size(B,1)
if R is None:
R = eye(m,m)
# Solve the standard algebraic Riccati equation
if S is None and E is None:
# Check input data for consistency
if size(A) > 1 and shape(A)[0] != shape(A)[1]:
raise ControlArgument("A must be a quadratic matrix.")
if (size(Q) > 1 and shape(Q)[0] != shape(Q)[1]) or \
(size(Q) > 1 and shape(Q)[0] != n) or \
size(Q) == 1 and n > 1:
raise ControlArgument("Q must be a quadratic matrix of the same \
dimension as A.")
if (size(B) > 1 and shape(B)[0] != n) or \
size(B) == 1 and n > 1:
raise ControlArgument("Incompatible dimensions of B matrix.")
if not (asarray(Q) == asarray(Q).T).all():
raise ControlArgument("Q must be a symmetric matrix.")
if not (asarray(R) == asarray(R).T).all():
raise ControlArgument("R must be a symmetric matrix.")
# Create back-up of arrays needed for later computations
R_ba = copy(R)
B_ba = copy(B)
# Solve the standard algebraic Riccati equation by calling Slycot
# functions sb02mt and sb02md
try:
A_b,B_b,Q_b,R_b,L_b,ipiv,oufact,G = sb02mt(n,m,B,R)
except ValueError as ve:
if ve.info < 0:
e = ValueError(ve.message)
e.info = ve.info
elif ve.info == m+1:
e = ValueError("The matrix R is numerically singular.")
e.info = ve.info
else:
e = ValueError("The %i-th element of d in the UdU (LdL) \
factorization is zero." % ve.info)
e.info = ve.info
raise e
try:
if stabilizing:
sort = 'S'
else:
sort = 'U'
X, rcond, w, S_o, U, A_inv = sb02md(n, A, G, Q, 'C', sort=sort)
except ValueError as ve:
if ve.info < 0 or ve.info > 5:
e = ValueError(ve.message)
e.info = ve.info
elif ve.info == 1:
e = ValueError("The matrix A is (numerically) singular in \
continuous-time case.")
e.info = ve.info
elif ve.info == 2:
e = ValueError("The Hamiltonian or symplectic matrix H cannot \
be reduced to real Schur form.")
e.info = ve.info
elif ve.info == 3:
e = ValueError("The real Schur form of the Hamiltonian or \
symplectic matrix H cannot be appropriately ordered.")
e.info = ve.info
elif ve.info == 4:
e = ValueError("The Hamiltonian or symplectic matrix H has \
less than n stable eigenvalues.")
e.info = ve.info
elif ve.info == 5:
e = ValueError("The N-th order system of linear algebraic \
equations is singular to working precision.")
e.info = ve.info
raise e
# Calculate the gain matrix G
if size(R_b) == 1:
G = dot(dot(1/(R_ba), asarray(B_ba).T), X)
else:
G = dot(solve(R_ba, asarray(B_ba).T), X)
# Return the solution X, the closed-loop eigenvalues L and
# the gain matrix G
return (_ssmatrix(X) , w[:n] , _ssmatrix(G))
# Solve the generalized algebraic Riccati equation
elif S is not None and E is not None:
# Check input data for consistency
if size(A) > 1 and shape(A)[0] != shape(A)[1]:
raise ControlArgument("A must be a quadratic matrix.")
if (size(Q) > 1 and shape(Q)[0] != shape(Q)[1]) or \
(size(Q) > 1 and shape(Q)[0] != n) or \
size(Q) == 1 and n > 1:
raise ControlArgument("Q must be a quadratic matrix of the same \
dimension as A.")
if (size(B) > 1 and shape(B)[0] != n) or \
size(B) == 1 and n > 1:
raise ControlArgument("Incompatible dimensions of B matrix.")
if (size(E) > 1 and shape(E)[0] != shape(E)[1]) or \
(size(E) > 1 and shape(E)[0] != n) or \
size(E) == 1 and n > 1:
raise ControlArgument("E must be a quadratic matrix of the same \
dimension as A.")
if (size(R) > 1 and shape(R)[0] != shape(R)[1]) or \
(size(R) > 1 and shape(R)[0] != m) or \
size(R) == 1 and m > 1:
raise ControlArgument("R must be a quadratic matrix of the same \
dimension as the number of columns in the B matrix.")
if (size(S) > 1 and shape(S)[0] != n) or \
(size(S) > 1 and shape(S)[1] != m) or \
size(S) == 1 and n > 1 or \
size(S) == 1 and m > 1:
raise ControlArgument("Incompatible dimensions of S matrix.")
if not (asarray(Q) == asarray(Q).T).all():
raise ControlArgument("Q must be a symmetric matrix.")
if not (asarray(R) == asarray(R).T).all():
raise ControlArgument("R must be a symmetric matrix.")
# Create back-up of arrays needed for later computations
R_b = copy(R)
B_b = copy(B)
E_b = copy(E)
S_b = copy(S)
# Solve the generalized algebraic Riccati equation by calling the
# Slycot function sg02ad
try:
if stabilizing:
sort = 'S'
else:
sort = 'U'
rcondu, X, alfar, alfai, beta, S_o, T, U, iwarn = \
sg02ad('C', 'B', 'N', 'U', 'N', 'N', sort, 'R', n, m, 0, A, E, B, Q, R, S)
except ValueError as ve:
if ve.info < 0 or ve.info > 7:
e = ValueError(ve.message)
e.info = ve.info
elif ve.info == 1:
e = ValueError("The computed extended matrix pencil is \
singular, possibly due to rounding errors.")
e.info = ve.info
elif ve.info == 2:
e = ValueError("The QZ algorithm failed.")
e.info = ve.info
elif ve.info == 3:
e = ValueError("Reordering of the generalized eigenvalues \
failed.")
e.info = ve.info
elif ve.info == 4:
e = ValueError("After reordering, roundoff changed values of \
some complex eigenvalues so that leading \
eigenvalues in the generalized Schur form no \
longer satisfy the stability condition; this \
could also be caused due to scaling.")
e.info = ve.info
elif ve.info == 5:
e = ValueError("The computed dimension of the solution does \
not equal N.")
e.info = ve.info
elif ve.info == 6:
e = ValueError("The spectrum is too close to the boundary of \
the stability domain.")
e.info = ve.info
elif ve.info == 7:
e = ValueError("A singular matrix was encountered during the \
computation of the solution matrix X.")
e.info = ve.info
raise e
# Calculate the closed-loop eigenvalues L
L = zeros((n, 1), dtype='complex64')
for i in range(n):
L[i] = (alfar[i] + alfai[i]*1j)/beta[i]
# Calculate the gain matrix G
if size(R_b) == 1:
G = dot(1/(R_b), dot(asarray(B_b).T, dot(X,E_b)) + asarray(S_b).T)
else:
G = solve(R_b, dot(asarray(B_b).T, dot(X, E_b)) + asarray(S_b).T)
# Return the solution X, the closed-loop eigenvalues L and
# the gain matrix G
return (_ssmatrix(X), L, _ssmatrix(G))
# Invalid set of input parameters
else:
raise ControlArgument("Invalid set of input parameters.")
def dare(A, B, Q, R, S=None, E=None, stabilizing=True):
""" (X,L,G) = dare(A,B,Q,R) solves the discrete-time algebraic Riccati
equation
:math:`A^T X A - X - A^T X B (B^T X B + R)^{-1} B^T X A + Q = 0`
where A and Q are square matrices of the same dimension. Further, Q
is a symmetric matrix. The function returns the solution X, the gain
matrix G = (B^T X B + R)^-1 B^T X A and the closed loop eigenvalues L,
i.e., the eigenvalues of A - B G.
(X,L,G) = dare(A,B,Q,R,S,E) solves the generalized discrete-time algebraic
Riccati equation
:math:`A^T X A - E^T X E - (A^T X B + S) (B^T X B + R)^{-1} (B^T X A + S^T) + Q = 0`
where A, Q and E are square matrices of the same dimension. Further, Q and
R are symmetric matrices. The function returns the solution X, the gain
matrix :math:`G = (B^T X B + R)^{-1} (B^T X A + S^T)` and the closed-loop
eigenvalues L, i.e., the generalized eigenvalues of (A - B G, E).
"""
if S is not None or E is not None or not stabilizing:
return dare_old(A, B, Q, R, S, E, stabilizing)
else:
Rmat = _ssmatrix(R)
Qmat = _ssmatrix(Q)
X = solve_discrete_are(A, B, Qmat, Rmat)
G = solve(B.T.dot(X).dot(B) + Rmat, B.T.dot(X).dot(A))
L = eigvals(A - B.dot(G))
return _ssmatrix(X), L, _ssmatrix(G)
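# Illustrative sketch (added for clarity; not part of the original module): a
# minimal dare() call, exercised through the scipy solve_discrete_are branch
# above.  The helper name and the sample matrices are arbitrary.
def _demo_dare():
    import numpy as np
    A_demo = np.array([[1.0, 0.1], [0.0, 1.0]])   # discrete double integrator
    B_demo = np.array([[0.0], [0.1]])
    Q_demo = np.eye(2)
    R_demo = np.eye(1)
    X_demo, L_demo, G_demo = dare(A_demo, B_demo, Q_demo, R_demo)
    G_demo = np.asarray(G_demo)
    # All closed-loop eigenvalues of A - B G should lie inside the unit circle
    return bool(np.all(np.abs(np.linalg.eigvals(A_demo - B_demo @ G_demo)) < 1))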
def dare_old(A, B, Q, R, S=None, E=None, stabilizing=True):
# Make sure we can import required slycot routine
try:
from slycot import sb02md
except ImportError:
raise ControlSlycot("can't find slycot module 'sb02md'")
try:
from slycot import sb02mt
except ImportError:
raise ControlSlycot("can't find slycot module 'sb02mt'")
# Make sure we can find the required slycot routine
try:
from slycot import sg02ad
except ImportError:
raise ControlSlycot("can't find slycot module 'sg02ad'")
# Reshape 1-d arrays
if len(shape(A)) == 1:
A = A.reshape(1,A.size)
if len(shape(B)) == 1:
B = B.reshape(1,B.size)
if len(shape(Q)) == 1:
Q = Q.reshape(1,Q.size)
if R is not None and len(shape(R)) == 1:
R = R.reshape(1,R.size)
if S is not None and len(shape(S)) == 1:
S = S.reshape(1,S.size)
if E is not None and len(shape(E)) == 1:
E = E.reshape(1,E.size)
# Determine main dimensions
if size(A) == 1:
n = 1
else:
n = size(A,0)
if size(B) == 1:
m = 1
else:
m = size(B,1)
# Solve the standard algebraic Riccati equation
if S is None and E is None:
# Check input data for consistency
if size(A) > 1 and shape(A)[0] != shape(A)[1]:
raise ControlArgument("A must be a quadratic matrix.")
if (size(Q) > 1 and shape(Q)[0] != shape(Q)[1]) or \
(size(Q) > 1 and shape(Q)[0] != n) or \
size(Q) == 1 and n > 1:
raise ControlArgument("Q must be a quadratic matrix of the same \
dimension as A.")
if (size(B) > 1 and shape(B)[0] != n) or \
size(B) == 1 and n > 1:
raise ControlArgument("Incompatible dimensions of B matrix.")
if not (asarray(Q) == asarray(Q).T).all():
raise ControlArgument("Q must be a symmetric matrix.")
if not (asarray(R) == asarray(R).T).all():
raise ControlArgument("R must be a symmetric matrix.")
# Create back-up of arrays needed for later computations
A_ba = copy(A)
R_ba = copy(R)
B_ba = copy(B)
# Solve the standard algebraic Riccati equation by calling Slycot
# functions sb02mt and sb02md
try:
A_b,B_b,Q_b,R_b,L_b,ipiv,oufact,G = sb02mt(n,m,B,R)
except ValueError as ve:
if ve.info < 0:
e = ValueError(ve.message)
e.info = ve.info
elif ve.info == m+1:
e = ValueError("The matrix R is numerically singular.")
e.info = ve.info
else:
e = ValueError("The %i-th element of d in the UdU (LdL) \
factorization is zero." % ve.info)
e.info = ve.info
raise e
try:
if stabilizing:
sort = 'S'
else:
sort = 'U'
X, rcond, w, S, U, A_inv = sb02md(n, A, G, Q, 'D', sort=sort)
except ValueError as ve:
if ve.info < 0 or ve.info > 5:
e = ValueError(ve.message)
e.info = ve.info
elif ve.info == 1:
e = ValueError("The matrix A is (numerically) singular in \
discrete-time case.")
e.info = ve.info
elif ve.info == 2:
e = ValueError("The Hamiltonian or symplectic matrix H cannot \
be reduced to real Schur form.")
e.info = ve.info
elif ve.info == 3:
e = ValueError("The real Schur form of the Hamiltonian or \
symplectic matrix H cannot be appropriately ordered.")
e.info = ve.info
elif ve.info == 4:
e = ValueError("The Hamiltonian or symplectic matrix H has \
less than n stable eigenvalues.")
e.info = ve.info
elif ve.info == 5:
e = ValueError("The N-th order system of linear algebraic \
equations is singular to working precision.")
e.info = ve.info
raise e
# Calculate the gain matrix G
if size(R_b) == 1:
G = dot(1/(dot(asarray(B_ba).T, dot(X, B_ba)) + R_ba), \
dot(asarray(B_ba).T, dot(X, A_ba)))
else:
G = solve(dot(asarray(B_ba).T, dot(X, B_ba)) + R_ba, \
dot(asarray(B_ba).T, dot(X, A_ba)))
# Return the solution X, the closed-loop eigenvalues L and
# the gain matrix G
return (_ssmatrix(X) , w[:n], _ssmatrix(G))
# Solve the generalized algebraic Riccati equation
elif S is not None and E is not None:
# Check input data for consistency
if size(A) > 1 and shape(A)[0] != shape(A)[1]:
raise ControlArgument("A must be a quadratic matrix.")
if (size(Q) > 1 and shape(Q)[0] != shape(Q)[1]) or \
(size(Q) > 1 and shape(Q)[0] != n) or \
size(Q) == 1 and n > 1:
raise ControlArgument("Q must be a quadratic matrix of the same \
dimension as A.")
if (size(B) > 1 and shape(B)[0] != n) or \
size(B) == 1 and n > 1:
raise ControlArgument("Incompatible dimensions of B matrix.")
if (size(E) > 1 and shape(E)[0] != shape(E)[1]) or \
(size(E) > 1 and shape(E)[0] != n) or \
size(E) == 1 and n > 1:
raise ControlArgument("E must be a quadratic matrix of the same \
dimension as A.")
if (size(R) > 1 and shape(R)[0] != shape(R)[1]) or \
(size(R) > 1 and shape(R)[0] != m) or \
size(R) == 1 and m > 1:
raise ControlArgument("R must be a quadratic matrix of the same \
dimension as the number of columns in the B matrix.")
if (
|
size(S)
|
numpy.size
|
"""
This script takes a folder of GCaMP6f experiments and analyzes their data.
This script is extremely rigid, and requires the folder to be organized in
a very specific way. Image acquisition and folder arrangement are described here:
https://github.com/swharden/ROI-Analysis-Pipeline/tree/master/pyROI
"""
import os
import sys
import glob
import numpy as np
import matplotlib.pyplot as plt
def plotDataLines(times, data, AVG, ERR, b1, b2, title, showLines=False, showAvg=False, saveAs=False):
print(f"Creating new figure: {title}")
plt.figure(figsize=(8, 6))
plt.grid(alpha=.4, ls='--')
plt.axvspan(b1, b2, color='b', alpha=.1, label="baseline", lw=0)
#plt.axvspan(10, 13, color='r', alpha=.1, label="ANG-II", lw=0)
plt.axhline(0, color='k', ls='--')
if len(times)<len(data):
print(f"There's a time point mismatch!")
print(f"number of times seen: {len(times)}")
print(f"number of data points: {len(data)}")
averageTimeSpacing = times[-1]/len(times)
print(f"average space between times: {averageTimeSpacing}")
while len(times)<len(data):
additionalTimePoint = times[-1]+averageTimeSpacing
print(f"adding a time point to compensate: {additionalTimePoint}")
times = np.append(times, additionalTimePoint)
if showLines:
nLines = len(data[0])
print(f"Graphing {len(times)} time points as {nLines} individual lines")
for i in range(nLines):
thisData = data[:, i]
plt.plot(times, thisData, label="ROI-%02d" % i, alpha=.8)
if showAvg:
print(f"Graphing {len(times)} time points as individual lines")
plt.plot(times, AVG, color='k', lw=3, alpha=.7)
plt.fill_between(times, AVG+ERR, AVG-ERR, color='k', alpha=.2, lw=0)
plt.ylabel(r'$\Delta$ F/F', fontsize=16) # delta F(neuron)/F(background)
plt.xlabel("Experiment Duration (minutes)", fontsize=16)
plt.title(title, fontsize=24)
plt.legend(framealpha=1, shadow=True, fancybox=True, facecolor="#EEEEEE")
plt.tight_layout()
plt.margins(0, .1)
if saveAs:
print("saving",saveAs)
plt.savefig(saveAs)
plt.close()
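# Minimal sketch (not part of the original script) of the dF/F normalization
# performed inside analyzeExperiment() below: each ROI trace is divided by the
# baseline/background ROI, scaled to percent, then re-zeroed to its mean over
# the baseline window.  The argument names here are illustrative only.
def _dff_sketch(roiTrace, baselineRoiTrace, baselineSlice):
    ratio = roiTrace / baselineRoiTrace * 100        # percent of baseline ROI
    return ratio - np.average(ratio[baselineSlice])  # zero the baseline window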
def analyzeExperiment(path, recalculate=False, baselineMinutes=[7, 9]):
path = os.path.abspath(path)
title = os.path.basename(path)
print("Analyzing experiment:", path)
# create folder where output images will go
if not os.path.exists(path+"/swhlab/"):
print("making swhlab subfolder to hold output data")
os.mkdir(path+"/swhlab/")
# figure out the time axis
#times = np.arange(len(data))/framerate
times = createTimesCSVfile(path)
avgFrameRate = len(times)/(times[-1]-times[0])
# load the data
data = np.loadtxt(path+"/results.xls", skiprows=1, delimiter="\t")
data = data[:, 1:] # remove first column (just ascending numbers)
baselineROI = data[:, 0] # the first ROI is always a baseline
data = data[:, 1:] # remove the baseline column now
# do the analysis / ratios
for i in range(len(data[0])):
thisRow = data[:, i]
thisRow = thisRow/baselineROI # report intensity as a fraction relative to baseline
thisRow = thisRow*100 # convert it to percent
b1, b2 = baselineMinutes
baseline = np.average(thisRow[int(b1*avgFrameRate):int(b2*avgFrameRate)])
data[:, i] = thisRow - baseline
# calculate average and stderr
AVG = np.average(data, axis=1)
ERR = np.std(data, axis=1)/np.power(data.shape[1], .5) # standard error across ROIs
# save what we've calculated
|
np.savetxt(path+"/dataRaw.csv", data, delimiter=',', fmt="%f")
|
numpy.savetxt
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 20:34:58 2019
@author: kenneth
"""
import numpy as np
class Perceptron(object):
def __init__(self, activation = False, norm = None, lamda = None):
self.activation = activation
self.norm = norm
self.lamda = lamda
if self.norm == 'l2':
if self.lamda is None:
self.lamda = 0.001
else:
self.lamda = lamda
elif self.norm == 'l1':
if self.lamda is None:
self.lamda = 0.001
else:
self.lamda = lamda
elif self.norm == 'ElasticNet':
if self.lamda is None:
self.lamda = 0.001
else:
self.lamda = lamda
return
@staticmethod
def sigmoid(X, beta):
'''
:params: X: training data at ith iteration
:return: sigmoid activation, a value in the open interval (0, 1)
'''
return 1/(1 + np.exp(-(np.dot(X, beta))))
@staticmethod
def relu(X, beta):
'''
:params: X: training data at ith iteration
:return: elementwise max(0, X.dot(beta))
'''
return np.maximum(np.dot(X, beta), 0)
@staticmethod
def tanh(X, beta):
'''
:params: X: training data at ith iteration
:return: tanh(X.dot(beta)), a value in (-1, 1)
'''
return (np.exp(np.dot(X, beta)) - np.exp(-np.dot(X, beta)))/\
(np.exp(np.dot(X, beta)) + np.exp(-np.dot(X, beta)))
def cost(self, X, Y, beta):
'''
:params: X: training data at ith iteration
:return: scalar cost for the current beta
'''
if not self.norm:
if not self.activation or self.activation == 'sigmoid':
return -(1/len(Y)) * np.sum((Y*np.log(Perceptron.sigmoid(X, beta))) + ((1 - Y)*np.log(1 - Perceptron.sigmoid(X, beta))))
elif self.activation == 'relu':
return -(1/len(Y)) * (np.sum((Y*np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.relu(X, beta)))) + \
((1 - Y)*np.log(1 - np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.relu(X, beta)))))))
elif self.activation == 'tanh':
return -(1/len(Y)) * (np.sum((Y*np.where(np.log(Perceptron.tanh(X, beta)) == 1, 0.99, np.log(Perceptron.tanh(X, beta)))) + \
((1 - Y)*np.log(1 - np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.tanh(X, beta)))))))
elif self.norm == 'l2':
if not self.activation or self.activation == 'sigmoid':
return -(1/len(Y)) * (np.sum((Y*np.log(Perceptron.sigmoid(X, beta))) + ((1 - Y)*np.log(1 - Perceptron.sigmoid(X, beta)))) + ((self.lamda/2)*np.sum(np.square(beta))))
elif self.activation == 'relu':
return -(1/len(Y)) * (np.sum((Y*np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.relu(X, beta)))) + \
((1 - Y)*np.log(1 - np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.relu(X, beta)))))) + \
((self.lamda/2)*np.sum(np.square(beta))))
elif self.activation == 'tanh':
return -(1/len(Y)) * (np.sum((Y*np.where(np.log(Perceptron.tanh(X, beta)) == 1, 0.99, np.log(Perceptron.tanh(X, beta)))) + \
((1 - Y)*np.log(1 - np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.tanh(X, beta)))))) + \
((self.lamda/2)*np.sum(np.square(beta))))
elif self.norm == 'l1':
if not self.activation or self.activation == 'sigmoid':
return -(1/len(Y)) * (np.sum((Y*np.log(Perceptron.sigmoid(X, beta))) + ((1 - Y)*np.log(1 - Perceptron.sigmoid(X, beta)))) + ((self.lamda)*np.sum(beta)))
elif self.activation == 'relu':
return -(1/len(Y)) * (np.sum((Y*np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.relu(X, beta)))) + \
((1 - Y)*np.log(1 - np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.relu(X, beta)))))) + \
((self.lamda)*np.sum(beta)))
elif self.activation == 'tanh':
return -(1/len(Y)) * (np.sum((Y*np.where(np.log(Perceptron.tanh(X, beta)) == 1, 0.99, np.log(Perceptron.tanh(X, beta)))) + \
((1 - Y)*np.log(1 - np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.tanh(X, beta)))))) + \
((self.lamda)*np.sum(beta)))
elif self.norm == 'ElasticNet':
if not self.activation or self.activation == 'sigmoid':
return -(1/len(Y)) * (np.sum((Y*np.log(Perceptron.sigmoid(X, beta))) + ((1 - Y)*np.log(1 - Perceptron.sigmoid(X, beta)))) + ((self.lamda/2)*np.sum(np.square(beta))) + ((self.lamda)*np.sum(beta)))
elif self.activation == 'relu':
return -(1/len(Y)) * (np.sum((Y*np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.relu(X, beta)))) + \
((1 - Y)*np.log(1 - np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.relu(X, beta)))))) + \
((self.lamda/2)*np.sum(np.square(beta))) + ((self.lamda)*np.sum(beta)))
elif self.activation == 'tanh':
return -(1/len(Y)) * (np.sum((Y*np.where(np.log(Perceptron.tanh(X, beta)) == 1, 0.99, np.log(Perceptron.tanh(X, beta)))) + \
((1 - Y)*np.log(1 - np.where(np.log(Perceptron.relu(X, beta)) == 1, 0.99, np.log(Perceptron.tanh(X, beta)))))) + \
((self.lamda/2)*np.sum(np.square(beta))) + ((self.lamda)*np.sum(beta)))
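# Illustrative usage sketch (not from the original file) of the fit() method
# defined below, assuming X already carries a bias column and Y is a column
# vector of 0/1 labels:
#
#   X = np.hstack([np.ones((200, 1)), np.random.randn(200, 2)])
#   Y = (X[:, 1:2] + X[:, 2:3] > 0).astype(float)
#   clf = Perceptron(activation='sigmoid', norm='l2').fit(X, Y, alpha=0.1, iterations=10)
#   predictions = Perceptron.sigmoid(X, clf.beta) >= 0.5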
def fit(self, X, Y, alpha, iterations):
self.alpha = alpha
self.iterations = iterations
self.beta = np.zeros(X.shape[1]).reshape(-1, 1)
self.cost_rec = np.zeros(self.iterations)
self.beta_rec = np.zeros((self.iterations, X.shape[1]))
#--Unregularized
if not self.norm:
if not self.activation or self.activation == 'sigmoid':
ylen = len(Y)
for ii in range(self.iterations):
#compute stochastic gradient
sampledCost = 0
for ij in range(ylen):
random_samples = np.random.randint(1, ylen)
X_samp = X[:random_samples]
Y_samp = Y[:random_samples]
self.beta = self.beta + (1/len(Y_samp)) *(self.alpha) * X_samp.T.dot(Y_samp - Perceptron.sigmoid(X_samp, self.beta))
self.beta_rec[ii, :] = self.beta.T
sampledCost += self.cost(X_samp, Y_samp, self.beta)
self.cost_rec[ii] = sampledCost
print('*'*40)
print('%s iteration, cost = %s'%(ii, self.cost_rec[ii]))
return self
elif self.activation == 'relu':
ylen = len(Y)
for ii in range(self.iterations):
#compute stochastic gradient
sampledCost = 0
for ij in range(ylen):
random_samples = np.random.randint(1, ylen)
X_samp = X[:random_samples]
Y_samp = Y[:random_samples]
self.beta = self.beta + (1/len(Y_samp)) *(self.alpha) * X_samp.T.dot(Y_samp - Perceptron.relu(X_samp, self.beta))
self.beta_rec[ii, :] = self.beta.T
sampledCost += self.cost(X_samp, Y_samp, self.beta)
self.cost_rec[ii] = sampledCost
print('*'*40)
print('%s iteration, cost = %s'%(ii, self.cost_rec[ii]))
return self
elif self.activation == 'tanh':
ylen = len(Y)
for ii in range(self.iterations):
#compute stochastic gradient
sampledCost = 0
for ij in range(ylen):
random_samples = np.random.randint(1, ylen)
X_samp = X[:random_samples]
Y_samp = Y[:random_samples]
self.beta = self.beta + (1/len(Y_samp)) *(self.alpha) * X_samp.T.dot(Y_samp - Perceptron.tanh(X_samp, self.beta))
self.beta_rec[ii, :] = self.beta.T
sampledCost += self.cost(X_samp, Y_samp, self.beta)
self.cost_rec[ii] = sampledCost
print('*'*40)
print('%s iteration, cost = %s'%(ii, self.cost_rec[ii]))
return self
#--l2
elif self.norm == 'l2':
if not self.activation or self.activation == 'sigmoid':
ylen = len(Y)
for ii in range(self.iterations):
#compute stochastic gradient
sampledCost = 0
for ij in range(ylen):
random_samples = np.random.randint(1, ylen)
X_samp = X[:random_samples]
Y_samp = Y[:random_samples]
self.beta = self.beta + (1/len(Y_samp)) *(self.alpha) * (X_samp.T.dot(Y_samp - Perceptron.sigmoid(X_samp, self.beta)) +\
((self.lamda/len(Y))*self.beta))
self.beta_rec[ii, :] = self.beta.T
sampledCost += self.cost(X_samp, Y_samp, self.beta)
self.cost_rec[ii] = sampledCost
print('*'*40)
print('%s iteration, cost = %s'%(ii, self.cost_rec[ii]))
return self
elif self.activation == 'relu':
ylen = len(Y)
for ii in range(self.iterations):
#compute stochastic gradient
sampledCost = 0
for ij in range(ylen):
random_samples = np.random.randint(1, ylen)
X_samp = X[:random_samples]
Y_samp = Y[:random_samples]
self.beta = self.beta + (1/len(Y_samp)) *(self.alpha) * (X_samp.T.dot(Y_samp - Perceptron.relu(X_samp, self.beta)) +\
((self.lamda/len(Y))*self.beta))
self.beta_rec[ii, :] = self.beta.T
sampledCost += self.cost(X_samp, Y_samp, self.beta)
self.cost_rec[ii] = sampledCost
print('*'*40)
print('%s iteration, cost = %s'%(ii, self.cost_rec[ii]))
return self
elif self.activation == 'tanh':
ylen = len(Y)
for ii in range(self.iterations):
#compute stochastic gradient
sampledCost = 0
for ij in range(ylen):
random_samples = np.random.randint(1, ylen)
X_samp = X[:random_samples]
Y_samp = Y[:random_samples]
self.beta = self.beta + (1/len(Y_samp)) *(self.alpha) * (X_samp.T.dot(Y_samp - Perceptron.tanh(X_samp, self.beta)) +\
((self.lamda/len(Y))*self.beta))
self.beta_rec[ii, :] = self.beta.T
sampledCost += self.cost(X_samp, Y_samp, self.beta)
self.cost_rec[ii] = sampledCost
print('*'*40)
print('%s iteration, cost = %s'%(ii, self.cost_rec[ii]))
return self
#--l1
elif self.norm == 'l1':
if not self.activation or self.activation == 'sigmoid':
ylen = len(Y)
for ii in range(self.iterations):
#compute stochastic gradient
sampledCost = 0
for ij in range(ylen):
random_samples = np.random.randint(1, ylen)
X_samp = X[:random_samples]
Y_samp = Y[:random_samples]
self.beta = self.beta + (1/len(Y_samp)) *(self.alpha) * (X_samp.T.dot(Y_samp - Perceptron.sigmoid(X_samp, self.beta)) +\
(self.lamda*np.sign(self.beta)))
self.beta_rec[ii, :] = self.beta.T
sampledCost += self.cost(X_samp, Y_samp, self.beta)
self.cost_rec[ii] = sampledCost
print('*'*40)
print('%s iteration, cost = %s'%(ii, self.cost_rec[ii]))
return self
elif self.activation == 'relu':
ylen = len(Y)
for ii in range(self.iterations):
#compute stochastic gradient
sampledCost = 0
for ij in range(ylen):
random_samples = np.random.randint(1, ylen)
X_samp = X[:random_samples]
Y_samp = Y[:random_samples]
self.beta = self.beta + (1/len(Y_samp)) *(self.alpha) * (X_samp.T.dot(Y_samp - Perceptron.relu(X_samp, self.beta)) +\
(self.lamda*np.sign(self.beta)))
self.beta_rec[ii, :] = self.beta.T
sampledCost += self.cost(X_samp, Y_samp, self.beta)
self.cost_rec[ii] = sampledCost
print('*'*40)
print('%s iteration, cost = %s'%(ii, self.cost_rec[ii]))
return self
elif self.activation == 'tanh':
ylen = len(Y)
for ii in range(self.iterations):
#compute stochastic gradient
sampledCost = 0
for ij in range(ylen):
random_samples = np.random.randint(1, ylen)
X_samp = X[:random_samples]
Y_samp = Y[:random_samples]
self.beta = self.beta + (1/len(Y_samp)) *(self.alpha) * (X_samp.T.dot(Y_samp - Perceptron.tanh(X_samp, self.beta)) +\
(self.lamda*
|
np.sign(self.beta)
|
numpy.sign
|
import tensorflow as tf
import numpy as np
import time
from sklearn.base import BaseEstimator
from tensorflow import keras
# tf.enable_eager_execution()
#print(tf.executing_eagerly())
# tfe = tf.contrib.eager
class Model(object):
"""Fuzzy Cognitive Maps model
"""
def __init__(self, **kwargs):
# print(kwargs)
self.dim = kwargs.get('dim')
self.act = kwargs.get('act')
self.depth = kwargs.get('depth', 5)
self.init_method = kwargs.get('init_method', None)
random_state = kwargs.get('random_state')
if random_state is not None:
np.random.seed(random_state)
weights = kwargs.get('weights')
if weights is None:
w,b = self._generate_inital_weights()
else:
w=weights[0]
b=weights[1]
self.dim = w.shape[0]
if (w.shape[0] != w.shape[1]):
raise ValueError('Expecting square matrix W, while the shape is (%d,%d)' % w.shape)
if (w.shape[0] != b.shape[0]):
raise ValueError('W and b shapes should be equal')
self.W = tf.Variable(w)
self.b = tf.Variable(b.reshape(b.size, 1))
def _generate_inital_weights(self):
if self.init_method == 'random':
w = np.random.randn(self.dim, self.dim) * 0.01
b = np.random.randn(self.dim) * 0.01
if self.init_method == 'xavier':
w =
|
np.random.randn(self.dim, self.dim)
|
numpy.random.randn
|
r"""This module contains code to generate synthetic diffusion signal
attenuation following several state-of-the-art model, including diffusion
tensor [1]_, stick model [2]_, hindered diffusion in cylinder [3]_, and
mixture of the above. All the models described here have axial and antipodal
symmetry, and we define an abstract class to serve as in interface.
References
----------
.. [1] <NAME>, <NAME>, and <NAME>. "MR diffusion
tensor spectroscopy and imaging." Biophysical journal 66, no. 1 (1994):
259-267
.. [2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
"Characterization and propagation of uncertainty in diffusion-weighted
MR Imaging." Magnetic Resonance in Medicine 50, no. 5 (2003): 1077-1088.
.. [3] Soderman, Olle, and <NAME>. "Restricted diffusion in cylindrical
geometry." Journal of Magnetic Resonance, Series A 117, no. 1 (1995):
94-97.
"""
import numpy as np
from phantomas.utils import shm
from numpy.polynomial.legendre import Legendre
class AxiallySymmetricModel():
"""This class is an abstract class which defines the interface for all the
synthetic models in use in phantomas.
"""
def signal(self, qnorms, thetas, tau=1 / (4 * np.pi**2)):
r"""
Returns the simulated signal attenuation. The angles thetas correspond
to the angles between the sampling directions and the principal axis
of the diffusion model. Must be implemented in subclasses.
Parameters
----------
qnorms : array-like shape (K, )
The norm of q vectors in mm\ :superscript:`-1`.
thetas : array-like, shape (K, )
Angles between the sampling directions and the axis.
tau : double
Diffusion time in s.
"""
raise NotImplementedError("The method signal must be implemented in "
"subclasses.")
def odf(self, thetas, tau=1 / (4 * np.pi**2)):
"""Returns the ground truth ODF, when available.
Parameters
----------
thetas : array-like, shape (K, )
Angles between the sampling directions and the axis.
tau : double
The diffusion time in s.
"""
raise NotImplementedError("The method signal must be implemented in "
"subclasses")
def signal_convolution_sh(self, order, qnorm, tau=1 / (4 * np.pi**2),
nb_samples=100):
r"""
Returns the convolution operator in spherical harmonics basis, using
the Funk-Hecke theorem as described in [1]_.
Parameters
----------
order : int
The (even) spherical harmonics truncation order.
qnorm : double
The norm of q vector in mm\ :superscript:`-1`.
tau : double
The diffusion time in s.
nb_samples : int
The number of samples controling the accuracy of the numerical
integral.
Note
----
The function implemented here is the general, numerical implementation
of the Funk-Hecke theorem. It is eventually replaced by an analytical
formula (when available) in subclasses.
References
----------
.. [1] Descoteaux, Maxime. "High angular resolution diffusion MRI: from
local estimation to segmentation and tractography." PhD diss.,
Universite de Nice Sophia-Antipolis, France, 2010.
"""
cos_thetas = np.linspace(0, 1, nb_samples)
thetas = np.arccos(cos_thetas)
qnorms = qnorm * np.ones(nb_samples)
fir = self.signal(qnorms, thetas, tau)
H = np.zeros((order + 1, nb_samples))
dim_sh = shm.dimension(order)
for l in range(0, order + 1, 2):
coeffs = np.zeros(l + 1)
coeffs[l] = 1.0
H[l, :] = Legendre(coeffs)(cos_thetas)
ls = [shm.sh_degree(j) for j in range(dim_sh)]  # degree of each SH coefficient
rs = np.dot(H, fir) / nb_samples
return rs[ls]
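# Illustrative sketch (not part of the original module): the closed-form
# Stejskal-Tanner attenuation of an axially symmetric Gaussian tensor, for
# comparison with GaussianModel.signal below.  With b = 4 * pi**2 * q**2 * tau,
# the attenuation at angle theta from the principal axis is
# exp(-b * (lambda2 + (lambda1 - lambda2) * cos(theta)**2)).
def _gaussian_attenuation_sketch(qnorms, thetas, lambda1=1.7e-3, lambda2=0.2e-3,
                                 tau=1 / (4 * np.pi**2)):
    bvals = 4 * np.pi**2 * qnorms**2 * tau
    return np.exp(-bvals * (lambda2 + (lambda1 - lambda2) * np.cos(thetas)**2))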
class GaussianModel(AxiallySymmetricModel):
r"""
This class models a Gaussian diffusion tensor with axial symmetry.
Typically, the eigenvalues of such a tensor are
:math:`\lambda_1 \gg \lambda_2 = \lambda_3`.
Parameters
----------
lambda1 : double
The eigenvalue associated with the principal direction, in
mm\ :sup:`2`/s.
lambda2 : double
The eigenvalue associated with the two minor eigenvectors, in
mm\ :sup:`2`/s.
"""
def __init__(self, lambda1=1.7e-3, lambda2=0.2e-3):
self.lambda1 = lambda1
self.lambda2 = lambda2
def signal(self, qnorms, thetas, tau=1 / (4 * np.pi**2)):
r"""Returns the simulated signal attenuation, following the Stejskal
and Tanner [1]_ equation. The angles thetas correspond to the angles
between the sampling directions and the principal axis of the
diffusion tensor.
Parameters
----------
qnorms : array-like shape (K, )
The norm of q vectors in mm\ :superscript:`-1`.
thetas : array-like, shape (K, )
Angles between the sampling directions and the axis.
tau : double
Diffusion time in the Stejskal and Tanner sequence in s.
"""
bvals = 4 * np.pi**2 * qnorms**2 * tau
signal =
|
np.exp(-bvals * self.lambda2)
|
numpy.exp
|
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from functools import reduce
import numpy as np
def plot_word_clouds(classifiers, feature_names_list, top_features=20):
f, axs = plt.subplots(len(classifiers), 2, figsize=(50, 25))
for classifier, feature_names, ax in zip(classifiers, feature_names_list, axs):
coef = classifier.coef_.ravel()
top_positive_coefficients = np.argsort(coef)[-top_features:]
top_negative_coefficients = np.argsort(coef)[:top_features]
positives = [int(1000 * x) for x in coef[top_positive_coefficients]]
negatives = [abs(int(1000 * x)) for x in coef[top_negative_coefficients]]
positive_text = ' '.join(
reduce(
lambda x, y: x + y,
[[pos_word] * pos_coef for pos_word, pos_coef in zip(np.array(feature_names)[top_positive_coefficients], positives)]
)
)
negative_text = ' '.join(
reduce(
lambda x, y: x + y,
[[neg_word] * neg_coef for neg_word, neg_coef in zip(np.array(feature_names)[top_negative_coefficients], negatives)]
)
)
pos_wordcloud = WordCloud(collocations=False, max_words=top_features).generate(positive_text)
neg_wordcloud = WordCloud(collocations=False, max_words=top_features).generate(negative_text)
ax[0].imshow(pos_wordcloud, interpolation="bilinear")
ax[0].axis("off")
ax[0].set_title('Top-{} positive words'.format(top_features), fontsize=45)
ax[1].imshow(neg_wordcloud, interpolation="bilinear")
ax[1].axis("off")
ax[1].set_title('Top-{} negative words'.format(top_features), fontsize=45)
plt.tight_layout()
plt.show()
def plot_coefficients(classifiers, feature_names_list, top_features=20):
f, axs = plt.subplots(1, len(classifiers), sharey=False, sharex=False, figsize=(30, 50))
for classifier, feature_names, ax in zip(classifiers, feature_names_list, axs):
coef = classifier.coef_.ravel()
top_positive_coefficients = np.argsort(coef)[-top_features:]
top_negative_coefficients =
|
np.argsort(coef)
|
numpy.argsort
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import glob
import openpyxl
import copy
import gc as garc
import time
from fcgadgets.macgyver import utilities_general as gu
from fcgadgets.macgyver import utilities_inventory as invu
from fcgadgets.silviculture import economics as econo
from fcgadgets.taz import aspatial_stat_models as asm
#%% CONVERT LUT NUMBER TO STRING NAME
def lut_n2s(dc,numb):
if numb!=-999:
vals=np.fromiter(dc.values(),dtype=float)
keys=np.fromiter(dc.keys(),dtype='<U70')
ind=np.where(vals==numb)[0]
s=keys[ind]
else:
s=np.array(['Unidentified'],ndmin=1)
return s
#%% Index to batch
def IndexToBatch(meta,iBat):
iStart=meta['Project']['Batch Interval']*iBat
iStop=np.minimum(meta['Project']['N Stand'],iStart+meta['Project']['Batch Interval'])
indBat=np.arange(iStart,iStop,1)
return indBat
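# Example (sketch): with meta['Project']['Batch Interval'] = 1000 and
# meta['Project']['N Stand'] = 2500, iBat = 2 yields indices 2000..2499
# (the final, shorter batch).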
#%% QUERY RESULTS CODES FOR MANAGEMENT ACTIVITY TYPES
def QueryResultsActivity(d):
# Convert to arrays with at least 1d
for key in d:
d[key]=np.array(d[key],ndmin=1)
Name=[]
for i in range(d['SILV_BASE_CODE'].size):
if (d['SILV_BASE_CODE'][i]=='FE') & (d['SILV_TECHNIQUE_CODE'][i]=='CA'):
Name.append('Fertilization Aerial')
elif (d['SILV_BASE_CODE'][i]=='FE') & (d['SILV_TECHNIQUE_CODE'][i]=='CG') & (d['SILV_METHOD_CODE'][i]!='BAGS'):
Name.append('Fertilization Hand')
elif (d['SILV_BASE_CODE'][i]=='FE') & (d['SILV_TECHNIQUE_CODE'][i]=='CG') & (d['SILV_METHOD_CODE'][i]=='BAGS'):
Name.append('Fertilization Teabag')
elif (d['SILV_BASE_CODE'][i]=='FE') & (d['SILV_TECHNIQUE_CODE'][i]=='OG'):
Name.append('Fertilization Organic')
elif (d['SILV_BASE_CODE'][i]=='PL') & (d['SILV_METHOD_CODE'][i]!='LAYOT'):
# The planting in road rehab projects falls into this milestone type
Name.append('Planting')
elif (d['SILV_BASE_CODE'][i]=='DS') & (d['SILV_TECHNIQUE_CODE'][i]!='GS'):
# Everything except grass seeding
Name.append('Direct Seeding')
elif (d['SILV_BASE_CODE'][i]=='PC') & (d['SILV_TECHNIQUE_CODE'][i]=='MA') & (d['SILV_OBJECTIVE_CODE_1'][i]=='DM'):
# This will exclude SILV_TECHNIQUE_CODE=BI. Virtually all of it is mechanical.
Name.append('Dwarf Mistletoe Control')
elif (d['SILV_BASE_CODE'][i]=='PC') & (d['SILV_TECHNIQUE_CODE'][i]=='CA') & (d['SILV_OBJECTIVE_CODE_1'][i]=='ID'):
Name.append('IDW Control')
elif (d['SILV_BASE_CODE'][i]=='RD') & (d['SILV_TECHNIQUE_CODE'][i]=='UP'):
Name.append('Road Rehab')
elif (d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='BU') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='ME') & (d['SILV_METHOD_CODE'][i]=='PBURN'):
Name.append('Slashpile Burn')
elif (d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='MA') & (d['SILV_METHOD_CODE'][i]=='Unidentified') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='MA') & (d['SILV_METHOD_CODE'][i]=='GUARD') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='MA') & (d['SILV_METHOD_CODE'][i]=='HAND') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='MA') & (d['SILV_METHOD_CODE'][i]=='KNOCK') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='MA') & (d['SILV_METHOD_CODE'][i]=='POWER') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='MA') & (d['SILV_METHOD_CODE'][i]=='MANCT') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='MA') & (d['SILV_METHOD_CODE'][i]=='MDOWN') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='MA') & (d['SILV_METHOD_CODE'][i]=='PILE') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='MA') & (d['SILV_METHOD_CODE'][i]=='SNAG') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='ME') & (d['SILV_METHOD_CODE'][i]=='CABLE') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='ME') & (d['SILV_METHOD_CODE'][i]=='GUARD') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='ME') & (d['SILV_METHOD_CODE'][i]=='MDOWN') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='ME') & (d['SILV_METHOD_CODE'][i]=='PILE') | \
(d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='ME') & (d['SILV_METHOD_CODE'][i]=='PUSH'):
Name.append('Knockdown')
elif (d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='ME') & (d['SILV_METHOD_CODE'][i]=='BRIP'):
Name.append('Ripping')
elif (d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='ME') & (d['SILV_METHOD_CODE'][i]=='DISC'):
Name.append('Disc Trenching')
elif (d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='ME') & (d['SILV_METHOD_CODE'][i]=='MULCH'):
Name.append('Mulching')
elif (d['SILV_BASE_CODE'][i]=='SP') & (d['SILV_TECHNIQUE_CODE'][i]=='ME') & (d['SILV_METHOD_CODE'][i]=='HARV'):
Name.append('Harvest Salvage')
elif (d['SILV_BASE_CODE'][i]=='LB') & (d['SILV_TECHNIQUE_CODE'][i]=='GR'):
Name.append('LB-GR')
elif (d['SILV_BASE_CODE'][i]=='SU'):
Name.append('Surveys')
else:
Name.append('Undefined')
return Name
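# Example (sketch): a record with SILV_BASE_CODE 'PL' and a SILV_METHOD_CODE
# other than 'LAYOT' maps to 'Planting'; a record with SILV_BASE_CODE 'SU'
# maps to 'Surveys'; anything unmatched falls through to 'Undefined'.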
#%% CONVERT DICTIONARY TO DATA STRUCTURE CLASS
class BunchDictionary(dict):
def __init__(self, *args, **kwds):
super(BunchDictionary, self).__init__(*args, **kwds)
self.__dict__ = self
#%% Build event chronology from spreadsheet
def BuildEventChronologyFromSpreadsheet(meta):
# Import inventory to get BGC zone
iScn=0
iBat=0
inv=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Inventory_Bat' + FixFileNum(iBat) + '.pkl')
# Simulate wildfires
if (meta['Scenario'][iScn]['Wildfire Status Pre-modern']=='On') | (meta['Scenario'][iScn]['Wildfire Status Modern']=='On') | (meta['Scenario'][iScn]['Wildfire Status Future']=='On'):
asm.SimulateWildfireFromAAO(meta,inv)
# Simulate MPB
if (meta['Scenario'][iScn]['MPB Status Pre-modern']=='On') | (meta['Scenario'][iScn]['MPB Status Modern']=='On') | (meta['Scenario'][iScn]['MPB Status Future']=='On'):
asm.SimulateIBMFromAAO(meta,inv)
for iScn in range(meta['Project']['N Scenario']):
for iEns in range(meta['Project']['N Ensemble']):
# Import wildfire simulations from Taz
if (meta['Scenario'][iScn]['Wildfire Status Pre-modern']=='On') | (meta['Scenario'][iScn]['Wildfire Status Modern']=='On') | (meta['Scenario'][iScn]['Wildfire Status Future']=='On'):
wf_sim=gu.ipickle(meta['Paths']['Project'] + '\\Inputs\\Ensembles\\wf_sim_Scn' + FixFileNum(iScn) + '_Ens' + FixFileNum(iEns) + '.pkl')
if 'idx' in wf_sim:
idx=wf_sim['idx']
tmp=wf_sim.copy()
for v in ['Occurrence','Mortality']:
wf_sim[v]=np.zeros((meta['Project']['N Time'],meta['Project']['N Stand']),dtype='int16')
wf_sim[v][idx[0],idx[1]]=tmp[v]
del tmp
# Import IBM
if (meta['Scenario'][iScn]['MPB Status Pre-modern']=='On') | (meta['Scenario'][iScn]['MPB Status Modern']=='On') | (meta['Scenario'][iScn]['MPB Status Future']=='On'):
ibm_sim=gu.ipickle(meta['Paths']['Project'] + '\\Inputs\\Ensembles\\ibm_sim_Scn' + FixFileNum(iScn) + '_Ens' + FixFileNum(iEns) + '.pkl')
if 'idx' in ibm_sim:
idx=ibm_sim['idx']
tmp=ibm_sim.copy()
for v in ['Occurrence','Mortality']:
ibm_sim[v]=np.zeros((meta['Project']['N Time'],meta['Project']['N Stand']),dtype='int16')
ibm_sim[v][idx[0],idx[1]]=tmp[v]
del tmp
for iBat in range(meta['Project']['N Batch']):
# Index to batch
indBat=IndexToBatch(meta,iBat)
# Always just one stand
iS=0
tv=np.arange(meta['Project']['Year Start'],meta['Project']['Year End']+1,1)
# Initialize dictionary
ec={}
ec['ID_Type']=np.zeros((meta['Year'].size,indBat.size,meta['Core']['Max Events Per Year']),dtype='int16')
ec['MortalityFactor']=np.zeros((meta['Year'].size,indBat.size,meta['Core']['Max Events Per Year']),dtype='int16')
ec['GrowthFactor']=np.zeros((meta['Year'].size,indBat.size,meta['Core']['Max Events Per Year']),dtype='int16')
ec['ID_GrowthCurve']=np.zeros((meta['Year'].size,indBat.size,meta['Core']['Max Events Per Year']),dtype='int16')
#----------------------------------------------------------
# Add spinup events
#----------------------------------------------------------
ivl_spin=meta['Project']['Spinup Disturbance Return Inverval']
YearRef=meta['Scenario'][iScn]['Year1_DisFromInv']
AgeRef=meta['Scenario'][iScn]['Age1_DisFromInv']
if AgeRef>=0:
Year=np.arange(YearRef-AgeRef-100*ivl_spin,YearRef-AgeRef+ivl_spin,ivl_spin)
else:
Year1=meta['Project']['Year Start']+ivl_spin
Year2=meta['Project']['Spinup Year End']
Year=np.arange(Year1,Year2+1,meta['Project']['Spinup Disturbance Return Inverval'])
for iYr in range(Year.size):
iT=np.where(tv==Year[iYr])[0]
ec['ID_Type'][iT,iS,0]=meta['LUT']['Dist'][meta['Project']['Spinup Disturbance Type']]
ec['MortalityFactor'][iT,iS,0]=100
ec['GrowthFactor'][iT,iS,0]=0
ec['ID_GrowthCurve'][iT,iS,0]=meta['Project']['Spinup Growth Curve ID']
#----------------------------------------------------------
# Add events from inventory
#----------------------------------------------------------
for iYr in range(1,8):
if np.isnan(meta['Scenario'][iScn]['Year' + str(iYr) + '_DisFromInv'])==True:
continue
# If IDW, convert IDW class to growth and mortality factor
sc=np.array(['IDW-T','IDW-L','IDW-M','IDW-S','IDW-V','IDW-MM','IDW-MS','IDW-MV','IDW-SS','IDW-SV','IDW-VV'])
flg_i=0
indSc=np.where(sc==meta['Scenario'][iScn]['Type' + str(iYr) + '_DisFromInv'])[0]
if indSc.size!=0:
if flg_i==0:
dfParDistBySC=pd.read_excel(meta['Paths']['Model Code'] + '\\Parameters\\Parameters_DisturbanceBySeverityClass.xlsx')
flg_i=1
indPar=np.where( (dfParDistBySC['Name']=='IDW') & (dfParDistBySC['SeverityCD']==sc[indSc[0]][4:]) )[0]
ID_TypeN=meta['LUT']['Dist']['IDW']
MF=dfParDistBySC.loc[indPar,'MortalityFactor']
GF=dfParDistBySC.loc[indPar,'GrowthFactor']
else:
ID_TypeS=meta['Scenario'][iScn]['Type' + str(iYr) + '_DisFromInv']
try:
ID_TypeN=meta['LUT']['Dist'][ID_TypeS]
except:
print(iScn)
print(iYr)
print(ID_TypeS)
MF=meta['Scenario'][iScn]['Severity' + str(iYr) + '_DisFromInv']
GF=0
Year=meta['Scenario'][iScn]['Year' + str(iYr) + '_DisFromInv']
iT=np.where(tv==Year)[0]
if iT.size==0:
    print('Warning: An event was scheduled outside the timeframe of the simulation.')
    continue
iE=np.where(ec['ID_Type'][iT,iS,:]==0)[1]
ec['ID_Type'][iT,iS,iE[0]]=ID_TypeN
ec['MortalityFactor'][iT,iS,iE[0]]=MF
ec['GrowthFactor'][iT,iS,iE[0]]=GF
ec['ID_GrowthCurve'][iT,iS,iE[0]]=meta['Scenario'][iScn]['GrowthCurve' + str(iYr) + '_DisFromInv']
#----------------------------------------------------------
# Add simulated wildfire from Taz
#----------------------------------------------------------
ind=np.array([],dtype=int)
if meta['Scenario'][iScn]['Wildfire Status Pre-modern']=='On':
ind0=np.where( (wf_sim['Occurrence'][:,iS]==1) & (meta['Year']<1920) )[0]
ind=np.append(ind,ind0)
if meta['Scenario'][iScn]['Wildfire Status Modern']=='On':
ind0=np.where( (wf_sim['Occurrence'][:,iS]==1) & (meta['Year']>=1920) & (meta['Year']<meta['Project']['Year Project']) )[0]
ind=np.append(ind,ind0)
if meta['Scenario'][iScn]['Wildfire Status Future']=='On':
ind0=np.where( (wf_sim['Occurrence'][:,iS]==1) & (meta['Year']>=meta['Project']['Year Project']) )[0]
ind=np.append(ind,ind0)
if ind.size>0:
ID_Type=meta['LUT']['Dist']['Wildfire']*np.ones(ind.size)
Year=tv[ind]
MortF=wf_sim['Mortality'][ind,iS]
GrowthF=0*np.ones(ind.size)
ID_GrowthCurve=1*np.ones(ind.size)
for iYr in range(Year.size):
iT=np.where(tv==Year[iYr])[0]
ec['ID_Type'][iT,iS,0]=ID_Type[iYr]
ec['MortalityFactor'][iT,iS,0]=MortF[iYr]
ec['GrowthFactor'][iT,iS,0]=GrowthF[iYr]
ec['ID_GrowthCurve'][iT,iS,0]=ID_GrowthCurve[iYr]
#----------------------------------------------------------
# Add simulated MPB from Taz
#----------------------------------------------------------
ind=np.array([],dtype=int)
if meta['Scenario'][iScn]['MPB Status Pre-modern']=='On':
ind0=np.where( (ibm_sim['Occurrence'][:,iS]==1) & (meta['Year']<1920) )[0]
ind=np.append(ind,ind0)
if meta['Scenario'][iScn]['MPB Status Modern']=='On':
ind0=np.where( (ibm_sim['Occurrence'][:,iS]==1) & (meta['Year']>=1920) & (meta['Year']<meta['Project']['Year Project']) )[0]
ind=np.append(ind,ind0)
if meta['Scenario'][iScn]['MPB Status Future']=='On':
ind0=np.where( (ibm_sim['Occurrence'][:,iS]==1) & (meta['Year']>=meta['Project']['Year Project']) )[0]
ind=np.append(ind,ind0)
if ind.size>0:
ID_Type=meta['LUT']['Dist']['IBM']*np.ones(ind.size)
Year=tv[ind]
MortF=ibm_sim['Mortality'][ind,iS]
GrowthF=0*np.ones(ind.size)
ID_GrowthCurve=1*np.ones(ind.size)
for iYr in range(Year.size):
iT=np.where(tv==Year[iYr])[0]
ec['ID_Type'][iT,iS,0]=ID_Type[iYr]
ec['MortalityFactor'][iT,iS,0]=MortF[iYr]
ec['GrowthFactor'][iT,iS,0]=GrowthF[iYr]
ec['ID_GrowthCurve'][iT,iS,0]=ID_GrowthCurve[iYr]
#--------------------------------------------------------------
# Save to file
#--------------------------------------------------------------
gu.opickle(meta['Paths']['Input Scenario'][iScn] + '\\Events_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl',ec)
return
#def BuildEventChronologyFromSpreadsheet_old(meta):
#
# #--------------------------------------------------------------------------
# # Simulated wildfire (different among stands, the same among scenarios)
# #--------------------------------------------------------------------------
#
# iScn=0
# iBat=0
#
# # Import inventory to get BGC zone
# inv=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Inventory_Bat' + FixFileNum(iBat) + '.pkl')
#
# for iScn in range(meta['Project']['N Scenario']):
#
# wf_sim=asm.SimulateWildfireFromAAO_StandsActAsEnsembles(meta,inv,iScn)
#
# for iEns in range(meta['Project']['N Ensemble']):
# for iBat in range(meta['Project']['N Batch']):
#
# # Index to batch
# indBat=IndexToBatch(meta,iBat)
# N_StandsInBatch=len(indBat)
#
# tv=np.arange(meta['Project']['Year Start'],meta['Project']['Year End']+1,1)
#
# # Initialize dictionary
# ec={}
# ec['ID_Type']=np.zeros((meta['Year'].size,indBat.size,meta['Core']['Max Events Per Year']),dtype='int16')
# ec['MortalityFactor']=np.zeros((meta['Year'].size,indBat.size,meta['Core']['Max Events Per Year']),dtype='int16')
# ec['GrowthFactor']=np.zeros((meta['Year'].size,indBat.size,meta['Core']['Max Events Per Year']),dtype='int16')
# ec['ID_GrowthCurve']=np.zeros((meta['Year'].size,indBat.size,meta['Core']['Max Events Per Year']),dtype='int16')
#
# for iS in range(N_StandsInBatch):
#
# #----------------------------------------------------------
# # Add spinup events
# #----------------------------------------------------------
#
# ivl_spin=meta['Project']['Spinup Disturbance Return Inverval']
# YearRef=meta['Scenario'][iScn]['Year1_DisFromInv']
# AgeRef=meta['Scenario'][iScn]['Age1_DisFromInv']
# if AgeRef>=0:
# Year=np.arange(YearRef-AgeRef-100*ivl_spin,YearRef-AgeRef+ivl_spin,ivl_spin)
# else:
# Year1=meta['Project']['Year Start']+ivl_spin
# Year2=meta['Project']['Spinup Year End']
# Year=np.arange(Year1,Year2+1,meta['Project']['Spinup Disturbance Return Inverval'])
#
# for iYr in range(Year.size):
# iT=np.where(tv==Year[iYr])[0]
# ec['ID_Type'][iT,iS,0]=meta['LUT']['Dist'][meta['Project']['Spinup Disturbance Type']]
# ec['MortalityFactor'][iT,iS,0]=100
# ec['GrowthFactor'][iT,iS,0]=0
# ec['ID_GrowthCurve'][iT,iS,0]=meta['Project']['Spinup Growth Curve ID']
#
# #----------------------------------------------------------
# # Add simulated constant disturbances
# #----------------------------------------------------------
#
## # Historical disturbance from simulation 1
## ri=meta['Scenario'][iScn]['ReturnInterval1_Hist_DisFromSim']
## if (ri!=0) & (np.isnan(ri)==False):
##
## p_Dist=1/ri
## p_Rand=np.random.uniform(0,1,size=(meta['Year'].size))
## it=np.where((p_Rand<p_Dist) & (meta['Year']>meta['Project']['Spinup Year End']) & (meta['Year']<meta['Project']['Year Project']))[0]
## Year=meta['Year'][it]
## for iYr in range(Year.size):
## iT=np.where(tv==Year[iYr])[0]
## ec['ID_Type'][iT,iS,0]=meta['LUT']['Dist'][meta['Scenario'][iScn]['Type1_Hist_DisFromSim']]
## ec['MortalityFactor'][iT,iS,0]=100
## ec['GrowthFactor'][iT,iS,0]=0
## ec['ID_GrowthCurve'][iT,iS,0]=meta['Project']['Spinup Growth Curve ID']
#
## # Historical disturbance from simulation 2
## ri=meta['Scenario'][iScn]['ReturnInterval2_Hist_DisFromSim']
## if (ri!=0) & (np.isnan(ri)==False):
##
## p_Dist=1/ri
## p_Rand=np.random.uniform(0,1,size=(meta['Year'].size))
## it=np.where((p_Rand<p_Dist) & (meta['Year']>meta['Project']['Spinup Year End']) & (meta['Year']<meta['Project']['Year Project']))[0]
## Year=meta['Year'][it]
## for iYr in range(Year.size):
## iT=np.where(tv==Year[iYr])[0]
## ec['ID_Type'][iT,iS,0]=meta['LUT']['Dist'][meta['Scenario'][iScn]['Type2_Hist_DisFromSim']]
## ec['MortalityFactor'][iT,iS,0]=100
## ec['GrowthFactor'][iT,iS,0]=0
## ec['ID_GrowthCurve'][iT,iS,0]=meta['Project']['Spinup Growth Curve ID']
#
# #----------------------------------------------------------
# # Add events from inventory
# #----------------------------------------------------------
#
# for iYr in range(1,8):
#
# if np.isnan(meta['Scenario'][iScn]['Year' + str(iYr) + '_DisFromInv'])==True:
# continue
#
# # If IDW, convert IDW class to growth and mortality factor
# sc=np.array(['IDW-T','IDW-L','IDW-M','IDW-S','IDW-V','IDW-MM','IDW-MS','IDW-MV','IDW-SS','IDW-SV','IDW-VV'])
# flg_i=0
# indSc=np.where(sc==meta['Scenario'][iScn]['Type' + str(iYr) + '_DisFromInv'])[0]
# if indSc.size!=0:
# if flg_i==0:
# dfParDistBySC=pd.read_excel(meta['Paths']['Model Code'] + '\\Parameters\\Parameters_DisturbanceBySeverityClass.xlsx')
# flg_i=1
# indPar=np.where( (dfParDistBySC['Name']=='IDW') & (dfParDistBySC['SeverityCD']==sc[indSc[0]][4:]) )[0]
# ID_TypeN=meta['LUT']['Dist']['IDW']
# MF=dfParDistBySC.loc[indPar,'MortalityFactor']
# GF=dfParDistBySC.loc[indPar,'GrowthFactor']
# else:
# ID_TypeS=meta['Scenario'][iScn]['Type' + str(iYr) + '_DisFromInv']
# ID_TypeN=meta['LUT']['Dist'][ID_TypeS]
# MF=meta['Scenario'][iScn]['Severity' + str(iYr) + '_DisFromInv']
# GF=0
#
# Year=meta['Scenario'][iScn]['Year' + str(iYr) + '_DisFromInv']
# iT=np.where(tv==Year)[0]
#
# iE=np.where(ec['ID_Type'][iT,iS,:]==0)[1]
#
# ec['ID_Type'][iT,iS,iE[0]]=ID_TypeN
# ec['MortalityFactor'][iT,iS,iE[0]]=MF
# ec['GrowthFactor'][iT,iS,iE[0]]=GF
# ec['ID_GrowthCurve'][iT,iS,iE[0]]=meta['Scenario'][iScn]['GrowthCurve' + str(iYr) + '_DisFromInv']
#
# #----------------------------------------------------------
# # Add simulated constant future disturbances
# #----------------------------------------------------------
#
## # Future disturbance from simulation 1
## ri=meta['Scenario'][iScn]['ReturnInterval1_Fut_DisFromSim']
## if (ri!=0) & (np.isnan(ri)==False):
##
## p_Dist=1/ri
## p_Rand=np.random.uniform(0,1,size=(meta['Year'].size))
## it=np.where((p_Rand<p_Dist) & (meta['Year']>meta['Project']['Year Project']))[0]
## Year=meta['Year'][it]
## for iYr in range(Year.size):
## iT=np.where(tv==Year[iYr])[0]
## ec['ID_Type'][iT,iS,0]=meta['LUT']['Dist'][meta['Scenario'][iScn]['Type1_Fut_DisFromSim']]
## ec['MortalityFactor'][iT,iS,0]=100
## ec['GrowthFactor'][iT,iS,0]=0
## ec['ID_GrowthCurve'][iT,iS,0]=meta['Spinup Growth Curve ID']
##
## # Future disturbance from simulation 2
## ri=meta['Scenario'][iScn]['ReturnInterval2_Fut_DisFromSim']
## if (ri!=0) & (np.isnan(ri)==False):
##
## p_Dist=1/ri
## p_Rand=np.random.uniform(0,1,size=(meta['Year'].size))
## it=np.where((p_Rand<p_Dist) & (meta['Year']>meta['Project']['Year Project']))[0]
## Year=meta['Year'][it]
## for iYr in range(Year.size):
## iT=np.where(tv==Year[iYr])[0]
## ec['ID_Type'][iT,iS,0]=meta['LUT']['Dist'][meta['Scenario'][iScn]['Type2_Fut_DisFromSim']]
## ec['MortalityFactor'][iT,iS,0]=100
## ec['GrowthFactor'][iT,iS,0]=0
## ec['ID_GrowthCurve'][iT,iS,0]=meta['Spinup Growth Curve ID']
#
# #----------------------------------------------------------
# # Add simulated wildfire from Taz
# #----------------------------------------------------------
#
# ind=np.array([])
# if meta['Scenario'][iScn]['Wildfire Status Pre-modern']=='On':
# ind0=np.where( (wf_sim['Occurrence'][:,iS]==1) & (meta['Year']<1920) )[0]
# ind=np.append(ind,ind0)
# if meta['Scenario'][iScn]['Wildfire Status Modern']=='On':
# ind0=np.where( (wf_sim['Occurrence'][:,iS]==1) & (meta['Year']>=1920) & (meta['Year']<meta['Project']['Year Project']) )[0]
# ind=np.append(ind,ind0)
# if meta['Scenario'][iScn]['Wildfire Status Future']=='On':
# ind0=np.where( (wf_sim['Occurrence'][:,iS]==1) & (meta['Year']>=meta['Project']['Year Project']) )[0]
# ind=np.append(ind,ind0)
#
# if ind.size>0:
#
# ID_Type=meta['LUT']['Dist']['Wildfire']*np.ones(ind.size)
# Year=tv[ind]
# MortF=wf_sim['Mortality'][ind,iS]
# GrowthF=0*np.ones(ind.size)
# ID_GrowthCurve=1*np.ones(ind.size)
#
# for iYr in range(Year.size):
# iT=np.where(tv==Year[iYr])[0]
# ec['ID_Type'][iT,iS,0]=ID_Type[iYr]
# ec['MortalityFactor'][iT,iS,0]=MortF[iYr]
# ec['GrowthFactor'][iT,iS,0]=GrowthF[iYr]
# ec['ID_GrowthCurve'][iT,iS,0]=ID_GrowthCurve[iYr]
#
# #----------------------------------------------------------
# # Add simulated MPB from Taz
# #----------------------------------------------------------
#
# # To do list...
#
# #--------------------------------------------------------------
# # Save to file
# #--------------------------------------------------------------
#
# gu.opickle(meta['Paths']['Input Scenario'][iScn] + '\\Events_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl',ec)
#%% Decompress event chronology
def EventChronologyDecompress(meta,ec,iScn,iEns,iBat):
# Uncompress event chronology if it has been compressed
if 'idx' in ec:
idx=ec['idx']
tmp=ec.copy()
for v in ['ID_Type','MortalityFactor','GrowthFactor','ID_GrowthCurve']:
ec[v]=np.zeros((meta['Project']['N Time'],meta['Project']['Batch Size'][iBat],meta['Core']['Max Events Per Year']),dtype='int16')
ec[v][idx[0],idx[1],idx[2]]=tmp[v]
del tmp
    return ec
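# Illustrative sketch (not executed): a compressed chronology is assumed to store the
# indices of the non-zero entries under 'idx' plus the corresponding values; decompression
# scatters them back into full (time x stand x event) arrays, e.g.:
#   ec=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Events_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl')
#   ec=EventChronologyDecompress(meta,ec,iScn,iEns,iBat)
#   ec['ID_Type'].shape # -> (N Time, Batch Size, Max Events Per Year)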
#%% Fix ensemble name and numbering
def FixFileNum(ind):
indStrFixed=str(ind+1)
if len(indStrFixed)==1:
indStrFixed='000' + indStrFixed
elif len(indStrFixed)==2:
indStrFixed='00' + indStrFixed
elif len(indStrFixed)==3:
indStrFixed='0' + indStrFixed
return indStrFixed
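# Illustrative examples (not executed): FixFileNum zero-pads the 1-based index to four
# digits for consistent file naming:
#   FixFileNum(0)   # -> '0001'
#   FixFileNum(9)   # -> '0010'
#   FixFileNum(123) # -> '0124'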
#%% Configure project
def ImportProjectConfig(meta):
#--------------------------------------------------------------------------
# Initialize nested dictionaries
#--------------------------------------------------------------------------
if 'Project' not in meta:
meta['Project']={}
if 'Core' not in meta:
meta['Core']={}
#--------------------------------------------------------------------------
# Import project parameters from spreadsheet
#--------------------------------------------------------------------------
df=pd.read_excel(meta['Paths']['Project'] + '\\Inputs\\ProjectConfig.xlsx',sheet_name='Project')
for i in range(len(df)):
Name=df['Name'].iloc[i]
Value=df['Value'].iloc[i]
if Name[-1]==':':
# Exclude headers
continue
meta['Project'][Name]=Value
#--------------------------------------------------------------------------
# Import look-up tables
#--------------------------------------------------------------------------
meta=invu.Load_LUTs(meta)
#--------------------------------------------------------------------------
# Define pool names
#--------------------------------------------------------------------------
# Pool names (ecosystem)
# *** If you change this, you need to change the same list in "Update Parameters" ***
meta['Core']['Name Pools Eco']=['StemMerch','StemNonMerch','Foliage','Branch','Bark','RootCoarse','RootFine', \
'PiledStemMerch','PiledStemNonMerch','PiledBranch','PiledBark','PiledSnagStem','PiledSnagBranch', \
'LitterVF','LitterF','LitterM','LitterS','SnagStem','SnagBranch','SoilVF','SoilF','SoilS']
# Used in fert work: ,'LitterDecomp'
# Number of ecosystem pools
meta['Core']['N Pools Eco']=len(meta['Core']['Name Pools Eco'])
# Pool names (products)
meta['Core']['Name Pools Pro']=['SFH','MFH','Comm','Furn','Ship','Repairs', \
'Other','Paper','EffluentPulp','PowerFacilityDom','PowerFacilityFor','PowerGrid', \
'Pellets','LogExport','FirewoodDom','FirewoodFor','DumpWood', \
'DumpPaper','LandfillWoodDegradable','LandfillWoodNonDegradable', \
'LandfillPaperDegradable','LandfillPaperNonDegradable','E_CO2','E_CH4']
# Number of product pools
meta['Core']['N Pools Pro']=len(meta['Core']['Name Pools Pro'])
#--------------------------------------------------------------------------
# Define indices to each pool
#--------------------------------------------------------------------------
    # Indices to ecosystem pools
meta['Core']['iEP']={}; cnt=0
for nam in meta['Core']['Name Pools Eco']:
meta['Core']['iEP'][nam]=cnt
cnt=cnt+1
iEP=meta['Core']['iEP']
meta['Core']['iEP']['BiomassTotal']=np.array([iEP['StemMerch'],iEP['StemNonMerch'],iEP['Foliage'],iEP['Branch'],iEP['Bark'],iEP['RootCoarse'],iEP['RootFine']])
meta['Core']['iEP']['BiomassAboveground']=np.array([iEP['StemMerch'],iEP['StemNonMerch'],iEP['Foliage'],iEP['Branch'],iEP['Bark']])
meta['Core']['iEP']['BiomassBelowground']=np.array([iEP['RootCoarse'],iEP['RootFine']])
meta['Core']['iEP']['DeadWood']=np.array([iEP['PiledStemMerch'],iEP['PiledStemNonMerch'],iEP['PiledBranch'],iEP['PiledBark'],iEP['SnagStem'],iEP['SnagBranch']])
meta['Core']['iEP']['Litter']=np.array([iEP['LitterVF'],iEP['LitterF'],iEP['LitterM'],iEP['LitterS']])
meta['Core']['iEP']['Piled']=np.array([iEP['PiledStemMerch'],iEP['PiledStemNonMerch'],iEP['PiledBranch'],iEP['PiledBark'],iEP['PiledSnagStem'],iEP['PiledSnagBranch']])
meta['Core']['iEP']['Soil']=np.array([iEP['SoilVF'],iEP['SoilF'],iEP['SoilS']])
    # Indices to product pools
meta['Core']['iPP']={}; cnt=0
for nam in meta['Core']['Name Pools Pro']:
meta['Core']['iPP'][nam]=cnt
cnt=cnt+1
iPP=meta['Core']['iPP']
meta['Core']['iPP']['InUse']=np.array([ iPP['SFH'],iPP['MFH'],iPP['Comm'],iPP['Furn'],iPP['Ship'],iPP['Repairs'],iPP['Other'],iPP['Paper'] ])
meta['Core']['iPP']['DumpLandfill']=np.array([ iPP['DumpWood'],iPP['DumpPaper'],iPP['LandfillWoodDegradable'],iPP['LandfillWoodNonDegradable'],iPP['LandfillPaperDegradable'],iPP['LandfillPaperNonDegradable'] ])
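    # Illustrative sketch (not executed): the index arrays above are intended for slicing
    # the pool dimension of the carbon arrays, e.g. total biomass per stand and year
    # (C_Eco_Pools is assumed to be a time x stand x pool array produced by the model):
    #   iEP=meta['Core']['iEP']
    #   C_Biomass_Tot=np.sum(C_Eco_Pools[:,:,iEP['BiomassTotal']],axis=2)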
#--------------------------------------------------------------------------
# Maximum number of events per year
# 8 appears to be sufficient but this may need to be changed for some
# special projects
#--------------------------------------------------------------------------
meta['Core']['Max Events Per Year']=8
#--------------------------------------------------------------------------
# Define time
#--------------------------------------------------------------------------
# Calendar year
meta['Year']=np.arange(meta['Project']['Year Start'],meta['Project']['Year End']+1,1)
meta['Project']['N Time']=meta['Year'].size
#--------------------------------------------------------------------------
# Dimensions of simulation
#--------------------------------------------------------------------------
# Number of stands
if meta['Project']['Scenario Source']=='Spreadsheet':
meta['Project']['N Stand']=1
elif meta['Project']['Scenario Source']=='Portfolio':
meta['Project']['N Stand']=meta['Project']['N Stand per Activity Type']*meta['Project']['AIL']['N AT']*meta['Project']['AIL']['N Years']
# Number of batches
meta['Project']['N Batch']=np.ceil(meta['Project']['N Stand']/meta['Project']['Batch Interval']).astype(int)
# Initialize list that can keep track of batch sizes
meta['Project']['Batch Size']=[None]*meta['Project']['N Batch']
for iBat in range(meta['Project']['N Batch']):
meta['Project']['Batch Size'][iBat]=IndexToBatch(meta,iBat).size
#--------------------------------------------------------------------------
# Import model parameters
#--------------------------------------------------------------------------
meta=ImportParameters(meta)
#--------------------------------------------------------------------------
# Define scenario parameters
#--------------------------------------------------------------------------
if meta['Project']['Scenario Source']!='Portfolio':
df=pd.read_excel(meta['Paths']['Project'] + '\\Inputs\\ProjectConfig.xlsx',sheet_name='Scenarios',usecols='A:OM')
df=df.iloc[:,df.iloc[0,:].isnull().values==False]
meta['Scenario']=list()
for i in range(1,df.shape[1]):
pScn0={}
for j in range(df.shape[0]):
if df.iloc[j,0][-1]==':':
# Exclude headers
continue
pScn0.update({df.iloc[j,0]:df.iat[j,i]})
meta['Scenario'].append(pScn0)
# Number of scenarios
meta['Project']['N Scenario']=np.sum([i['Scenario Status']=='On' for i in meta['Scenario']])
#--------------------------------------------------------------------------
# Initialize project folders if they do not exist
#--------------------------------------------------------------------------
meta['Paths']['Input Scenario']=[]
meta['Paths']['Output Scenario']=[]
for iScn in range(0,meta['Project']['N Scenario']):
meta['Paths']['Input Scenario'].append(meta['Paths']['Project'] + '\\Inputs\\Scenario' + FixFileNum(iScn))
if os.path.exists(meta['Paths']['Input Scenario'][iScn])==False:
os.mkdir(meta['Paths']['Input Scenario'][iScn])
meta['Paths']['Output Scenario'].append(meta['Paths']['Project'] + '\\Outputs\\Scenario' + FixFileNum(iScn))
if os.path.exists(meta['Paths']['Output Scenario'][iScn])==False:
os.mkdir(meta['Paths']['Output Scenario'][iScn])
#--------------------------------------------------------------------------
# Scale factors
#--------------------------------------------------------------------------
# *** Scale factor for saving results (this needs to be 100, 10 does not
# capture carbon fluxes and it will affect GHG benefit estimates) ***
# One variable ('CO2e_E_Products') requires the big one
meta['Core']['Scale Factor Export Small']=0.001
meta['Core']['Scale Factor Export Big']=0.001
#--------------------------------------------------------------------------
# Growth curve information
#--------------------------------------------------------------------------
meta['GC']={}
meta['GC']['N Growth Curves']=5
meta['GC']['ID GC Unique']=np.array([1,2,3,4,5])
meta['GC']['BatchTIPSY Maximum Age']=200
meta['GC']['BatchTIPSY Column Names']=['Age','VolTot0','VolMerch125',
'VolMerch175','ODT_Bark','ODT_Branch','ODT_Foliage','ODT_Roots',
'ODT_Stem','MortalityVolumeTotal']
# Scale factor for growth curves
# Note: Do not change this to 0.1 - aerial fertilization response will not work properly at 0.1
meta['GC']['Scale Factor']=0.001
#--------------------------------------------------------------------------
# Growth factor information
# *** Not currently used ***
#--------------------------------------------------------------------------
# # Default status of growth factors
# meta['Scenario Switch']['Net Growth Factor Status']=[None]*meta['Project']['N Scenario']
# meta['Scenario Switch']['Mortality Factor Status']=[None]*meta['Project']['N Scenario']
# for iScn in range(0,meta['Project']['N Scenario']):
# meta['Scenario Switch']['Net Growth Factor Status'][iScn]='Off'
# meta['Scenario Switch']['Mortality Factor Status'][iScn]='Off'
# #meta['Scenario Switch'][iScn]['Status Net Growth Factor']='Off'
# #meta['Scenario'][iScn]['Status Mortality Factor']='Off'
#--------------------------------------------------------------------------
# Harvested wood product information
# Year to start calling annual HWP methods - running it before 1800 is a
# waste of time.
#--------------------------------------------------------------------------
meta['Core']['HWP Year Start']=1800
#--------------------------------------------------------------------------
# Nutrient management information (for compatibility with "silviculture" module)
#--------------------------------------------------------------------------
# Initialize dictionary
meta['Nutrient Management']={}
# Initialize index to stands affected by nutrient application
meta['Nutrient Management']['iApplication']=[]
# BGC zone exclusions (for on-the-fly application scheduler)
meta['Nutrient Management']['BGC Zone Exclusion CD']=['PP','IDF','MH','BAFA','BG','CMA','IMA']
meta['Nutrient Management']['BGC Zone Exclusion ID']=np.zeros(len(meta['Nutrient Management']['BGC Zone Exclusion CD']))
for iZ in range(len(meta['Nutrient Management']['BGC Zone Exclusion CD'])):
meta['Nutrient Management']['BGC Zone Exclusion ID'][iZ]=meta['LUT']['VRI']['BEC_ZONE_CODE'][ meta['Nutrient Management']['BGC Zone Exclusion CD'][iZ] ]
#--------------------------------------------------------------------------
# Simulate random numbers that can be used for simulating harvest on the fly
# The annual numbers will be the same among scenarios, but vary by ensemble
#--------------------------------------------------------------------------
    # *** If you assign completely random numbers, random variation will occur among
    # scenarios, which can add considerable noise and demand many ensembles.
    # Conversely, if you assign these pre-set sequences, the random component will
    # vary among ensembles, but not among scenarios. ***
meta['Project']['On the Fly']={}
meta['Project']['On the Fly']['Random Numbers']={}
meta['Project']['On the Fly']['Random Numbers']['Scale Factor']=0.0001
# Only create these files if they will be used
# Not needed for portfolio projects
if meta['Project']['Scenario Source']!='Portfolio':
flg_h=0
for iScn in range(meta['Project']['N Scenario']):
if (meta['Scenario'][iScn]['Harvest Status Historical']=='On') | (meta['Scenario'][iScn]['Harvest Status Future']=='On'):
flg_h=1
break
flg_b=0
for iScn in range(meta['Project']['N Scenario']):
if (meta['Scenario'][iScn]['Breakup Status']=='On'):
flg_b=1
break
# Create random numbers and save them
for iEns in range(meta['Project']['N Ensemble']):
for iBat in range(meta['Project']['N Batch']):
if flg_h==1:
rn=np.random.random( (meta['Project']['N Time'],meta['Project']['Batch Size'][iBat]) )
rn=rn/meta['Project']['On the Fly']['Random Numbers']['Scale Factor']
rn=rn.astype('int16')
gu.opickle(meta['Paths']['Project'] + '\\Inputs\\Ensembles\\RandomNumbers_Harvest_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl',rn)
if flg_b==1:
rn=np.random.random( (meta['Project']['N Time'],meta['Project']['Batch Size'][iBat]) )
rn=rn/meta['Project']['On the Fly']['Random Numbers']['Scale Factor']
rn=rn.astype('int16')
gu.opickle(meta['Paths']['Project'] + '\\Inputs\\Ensembles\\RandomNumbers_Breakup_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl',rn)
#meta['Project']['On the Fly']['Random Numbers']['Harvest']=np.random.random((meta['Project']['N Time'],meta['Project']['N Ensemble']))
#meta['Project']['On the Fly']['Random Numbers']['Breakup']=np.random.random((meta['Project']['N Time'],meta['Project']['N Ensemble']))
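    # Illustrative sketch (not executed): the saved arrays are uniform random numbers
    # divided by the scale factor and stored as int16 to save space. To recover the
    # probabilities when applying on-the-fly disturbances (assumed usage):
    #   rn=gu.ipickle(meta['Paths']['Project'] + '\\Inputs\\Ensembles\\RandomNumbers_Harvest_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl')
    #   p_Rand=rn.astype(float)*meta['Project']['On the Fly']['Random Numbers']['Scale Factor']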
#--------------------------------------------------------------------------
# Parameter uncertainty by ensemble
#--------------------------------------------------------------------------
# Initialize list
meta['Param']['By Ensemble']=[None]*meta['Project']['N Ensemble']
for iEns in range(meta['Project']['N Ensemble']):
# Initialize dictionary
meta['Param']['By Ensemble'][iEns]={}
#----------------------------------------------------------------------
# Biomass turnover
#----------------------------------------------------------------------
if meta['Project']['Uncertainty Status Biomass Turnover']=='On':
meta['Param']['By Ensemble'][iEns]['Biomass Turnover']={}
for k in meta['Param']['BE']['Biomass Turnover'].keys():
mu=meta['Param']['BE']['Biomass Turnover'][k]
sig=meta['Param']['Sigma']['Biomass Turnover'][k]
bl=meta['Param']['BL']['Biomass Turnover'][k]
bu=meta['Param']['BU']['Biomass Turnover'][k]
r=np.random.normal(loc=mu,scale=mu*sig)
if (bl!=-9999) & (bu!=-9999):
r=gu.Clamp(r,bl,bu)
elif (bl!=-9999) & (bu==-9999):
r=np.maximum(bl,r)
elif (bl==-9999) & (bu!=-9999):
r=np.minimum(bu,r)
else:
r=r
meta['Param']['By Ensemble'][iEns]['Biomass Turnover'][k]=r
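        # Illustrative sketch (not executed): each parameter draw above follows the same
        # pattern - a normal draw with mean mu and standard deviation mu*sig, clamped to
        # the lower/upper bounds when they are provided (-9999 flags no bound):
        #   r=np.random.normal(loc=mu,scale=mu*sig)
        #   if bl!=-9999: r=np.maximum(bl,r)
        #   if bu!=-9999: r=np.minimum(bu,r)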
#----------------------------------------------------------------------
# Decomposition
#----------------------------------------------------------------------
if meta['Project']['Uncertainty Status Decomposition']=='On':
meta['Param']['By Ensemble'][iEns]['Decomp']={}
for k in meta['Param']['BE']['Decomp'].keys():
mu=meta['Param']['BE']['Decomp'][k]
sig=meta['Param']['Sigma']['Decomp'][k]
bl=meta['Param']['BL']['Decomp'][k]
bu=meta['Param']['BU']['Decomp'][k]
r=np.random.normal(loc=mu,scale=mu*sig)
if (bl!=-9999) & (bu!=-9999):
r=gu.Clamp(r,bl,bu)
elif (bl!=-9999) & (bu==-9999):
r=np.maximum(bl,r)
elif (bl==-9999) & (bu!=-9999):
r=np.minimum(bu,r)
else:
r=r
meta['Param']['By Ensemble'][iEns]['Decomp'][k]=r
#----------------------------------------------------------------------
# Harvesting
#----------------------------------------------------------------------
if meta['Project']['Uncertainty Status Harvest Utilization']=='On':
meta['Param']['By Ensemble'][iEns]['Dist']={}
EventList=['Harvest','Harvest Salvage']
for Event in EventList:
ID_Type=meta['LUT']['Dist'][Event]
meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]={}
#--------------------------------------------------------------
# Biomass merch
#--------------------------------------------------------------
# Removed fraction
mu=meta['Param']['BE']['Dist'][ID_Type]['BiomassMerch_Removed']
sig=np.array([0.1])
bl=np.array([0.0])
bu=np.array([1.0])
r_Removed=np.random.normal(loc=mu,scale=mu*sig)
r_Removed=gu.Clamp(r_Removed,bl,bu)
meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassMerch_Removed']=r_Removed
# Total fraction that is piled and dispersed
r_PiledAndDispersed=1.0-r_Removed
# Specific piled fraction
mu=np.array([0.60]) # Specific fraction that is piled
sig=np.array([0.1])
bl=np.array([0.0])
bu=np.array([1.0])
rSpecific_Piled=np.random.normal(loc=mu,scale=mu*sig)
rSpecific_Piled=gu.Clamp(rSpecific_Piled,bl,bu)
# Piled fraction
meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassMerch_Piled']=r_PiledAndDispersed*rSpecific_Piled
# Specific dispersed fraction
rSpecific_Dispersed=1.0-rSpecific_Piled
# Dispersed fraction
meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassMerch_LeftOnSite']=r_PiledAndDispersed*rSpecific_Dispersed
#print(meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassMerch_Removed']+meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassMerch_Piled']+meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassMerch_LeftOnSite'])
#--------------------------------------------------------------
# Biomass non-merch
#--------------------------------------------------------------
# Removed fraction
mu=meta['Param']['BE']['Dist'][ID_Type]['BiomassNonMerch_Removed']
sig=np.array([0.1])
bl=np.array([0.0])
bu=np.array([1.0])
r_Removed=np.random.normal(loc=mu,scale=mu*sig)
r_Removed=gu.Clamp(r_Removed,bl,bu)
meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassNonMerch_Removed']=r_Removed
# Total fraction that is piled and dispersed
r_PiledAndDispersed=1.0-r_Removed
# Specific piled fraction
mu=np.array([0.60]) # Specific fraction that is piled
sig=np.array([0.1])
bl=np.array([0.0])
bu=np.array([1.0])
rSpecific_Piled=np.random.normal(loc=mu,scale=mu*sig)
rSpecific_Piled=gu.Clamp(rSpecific_Piled,bl,bu)
# Piled fraction
meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassNonMerch_Piled']=r_PiledAndDispersed*rSpecific_Piled
# Specific dispersed fraction
rSpecific_Dispersed=1.0-rSpecific_Piled
# Dispersed fraction
meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassNonMerch_LeftOnSite']=r_PiledAndDispersed*rSpecific_Dispersed
#print(meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassNonMerch_Removed']+meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassNonMerch_Piled']+meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['BiomassNonMerch_LeftOnSite'])
#--------------------------------------------------------------
# Snags
#--------------------------------------------------------------
# Removed fraction
mu=meta['Param']['BE']['Dist'][ID_Type]['Snags_Removed']
sig=np.array([0.1])
bl=np.array([0.0])
bu=np.array([1.0])
r_Removed=np.random.normal(loc=mu,scale=mu*sig)
r_Removed=gu.Clamp(r_Removed,bl,bu)
meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['Snags_Removed']=r_Removed
# Total fraction that is piled and dispersed
r_PiledAndDispersed=1.0-r_Removed
# Specific piled fraction
mu=np.array([0.60]) # Specific fraction that is piled
sig=np.array([0.1])
bl=np.array([0.0])
bu=np.array([1.0])
rSpecific_Piled=np.random.normal(loc=mu,scale=mu*sig)
rSpecific_Piled=gu.Clamp(rSpecific_Piled,bl,bu)
# Piled fraction
meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['Snags_Piled']=r_PiledAndDispersed*rSpecific_Piled
# Specific dispersed fraction
rSpecific_Dispersed=1.0-rSpecific_Piled
# Dispersed fraction
meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['Snags_LeftOnSite']=r_PiledAndDispersed*rSpecific_Dispersed
#print(meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['Snags_Removed']+meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['Snags_Piled']+meta['Param']['By Ensemble'][iEns]['Dist'][ID_Type]['Snags_LeftOnSite'])
#----------------------------------------------------------------------
# Substitution effects
#----------------------------------------------------------------------
if meta['Project']['Uncertainty Status Substitution']=='On':
meta['Param']['By Ensemble'][iEns]['Substitution']={}
lv=['LumberDisplacementFactor','PanelDisplacementFactor']
for k in lv:
mu=meta['Param']['BE']['Substitution'][k]
sig=meta['Param']['Sigma']['Substitution'][k]
r=np.array(np.random.normal(loc=mu,scale=mu*sig),dtype=float)
r=np.maximum(0,r)
meta['Param']['By Ensemble'][iEns]['Substitution'][k]=r
lv0=np.array(['PowerFacilityDomFracDisplacingRenewables','PowerFacilityDomFracDisplacingCoal','PowerFacilityDomFracDisplacingDiesel','PowerFacilityDomFracDisplacingNaturalGas','PowerFacilityDomFracDisplacingOil'])
lv=lv0[np.argsort(np.random.random(len(lv0)))]
r_remain=1.0
for k in lv:
mu=meta['Param']['BE']['Substitution'][k]
sig=meta['Param']['Sigma']['Substitution'][k]
r=np.array(np.random.normal(loc=mu,scale=mu*sig),dtype=float)
r=gu.Clamp(r,0.0,r_remain)
meta['Param']['By Ensemble'][iEns]['Substitution'][k]=r
r_remain=r_remain-r
lv0=np.array(['PowerFacilityForFracDisplacingRenewables','PowerFacilityForFracDisplacingCoal','PowerFacilityForFracDisplacingDiesel','PowerFacilityForFracDisplacingNaturalGas','PowerFacilityForFracDisplacingOil'])
lv=lv0[np.argsort(np.random.random(len(lv0)))]
r_remain=1.0
for k in lv:
mu=meta['Param']['BE']['Substitution'][k]
sig=meta['Param']['Sigma']['Substitution'][k]
r=np.array(np.random.normal(loc=mu,scale=mu*sig),dtype=float)
r=gu.Clamp(r,0.0,r_remain)
meta['Param']['By Ensemble'][iEns]['Substitution'][k]=r
r_remain=r_remain-r
lv0=np.array(['PelletFracDisplacingRenewables','PelletFracDisplacingCoal','PelletFracDisplacingDiesel','PelletFracDisplacingNaturalGas','PelletFracDisplacingOil'])
lv=lv0[np.argsort(np.random.random(len(lv0)))]
r_remain=1.0
for k in lv:
mu=meta['Param']['BE']['Substitution'][k]
sig=meta['Param']['Sigma']['Substitution'][k]
r=np.array(np.random.normal(loc=mu,scale=mu*sig),dtype=float)
r=gu.Clamp(r,0.0,r_remain)
meta['Param']['By Ensemble'][iEns]['Substitution'][k]=r
r_remain=r_remain-r
lv0=np.array(['FirewoodDomFracDisplacingRenewables','FirewoodDomFracDisplacingCoal','FirewoodDomFracDisplacingDiesel','FirewoodDomFracDisplacingNaturalGas','FirewoodDomFracDisplacingOil'])
lv=lv0[np.argsort(np.random.random(len(lv0)))]
r_remain=1.0
for k in lv:
mu=meta['Param']['BE']['Substitution'][k]
sig=meta['Param']['Sigma']['Substitution'][k]
r=np.array(np.random.normal(loc=mu,scale=mu*sig),dtype=float)
r=gu.Clamp(r,0.0,r_remain)
meta['Param']['By Ensemble'][iEns]['Substitution'][k]=r
r_remain=r_remain-r
lv0=np.array(['FirewoodForFracDisplacingRenewables','FirewoodForFracDisplacingCoal','FirewoodForFracDisplacingDiesel','FirewoodForFracDisplacingNaturalGas','FirewoodForFracDisplacingOil'])
lv=lv0[np.argsort(np.random.random(len(lv0)))]
r_remain=1.0
for k in lv:
mu=meta['Param']['BE']['Substitution'][k]
sig=meta['Param']['Sigma']['Substitution'][k]
r=np.array(np.random.normal(loc=mu,scale=mu*sig),dtype=float)
r=gu.Clamp(r,0.0,r_remain)
meta['Param']['By Ensemble'][iEns]['Substitution'][k]=r
r_remain=r_remain-r
lv0=np.array(['PowerGridFracDisplacingRenewables','PowerGridFracDisplacingCoal','PowerGridFracDisplacingDiesel','PowerGridFracDisplacingNaturalGas','PowerGridFracDisplacingOil'])
lv=lv0[np.argsort(np.random.random(len(lv0)))]
r_remain=1.0
for k in lv:
mu=meta['Param']['BE']['Substitution'][k]
sig=meta['Param']['Sigma']['Substitution'][k]
r=np.array(np.random.normal(loc=mu,scale=mu*sig),dtype=float)
r=gu.Clamp(r,0.0,r_remain)
meta['Param']['By Ensemble'][iEns]['Substitution'][k]=r
r_remain=r_remain-r
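        # Illustrative sketch (not executed): the displacement fractions for each fuel
        # category are drawn in a random order and each draw is clamped to the fraction
        # still remaining, so the sampled fractions never sum to more than 1 (mu and sig
        # are looked up per parameter as above):
        #   r_remain=1.0
        #   for k in np.random.permutation(lv0):
        #       r=gu.Clamp(np.random.normal(loc=mu,scale=mu*sig),0.0,r_remain)
        #       r_remain=r_remain-r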
#----------------------------------------------------------------------
        # Nutrient management
#----------------------------------------------------------------------
if meta['Project']['Uncertainty Status Nutrient Application']=='On':
meta['Param']['By Ensemble'][iEns]['Nutrient Management']={}
for k in meta['Param']['BE']['Nutrient Management'].keys():
mu=meta['Param']['BE']['Nutrient Management'][k]
sig=meta['Param']['Sigma']['Nutrient Management'][k]
r=np.random.normal(loc=mu,scale=mu*sig)
r=np.maximum(0,r)
meta['Param']['By Ensemble'][iEns]['Nutrient Management'][k]=r
#--------------------------------------------------------------------------
# Parameter override options
#--------------------------------------------------------------------------
meta['Project']['Override Default Parameters']={}
#--------------------------------------------------------------------------
# Scenario info for portfolio projects
#--------------------------------------------------------------------------
if meta['Project']['Scenario Source']=='Portfolio':
# Index to rows with implementation
indAT=np.where(np.sum(meta['Project']['AIL']['Area'],axis=0)>0)[0]
meta['Project']['Portfolio']={}
meta['Project']['Portfolio']['ID Portfolio']=np.zeros(meta['Project']['N Stand'],dtype=int)
meta['Project']['Portfolio']['ID AT']=np.zeros(meta['Project']['N Stand'],dtype=int)
meta['Project']['Portfolio']['ID AT Unique']=np.zeros(meta['Project']['N Stand'],dtype=int)
meta['Project']['Portfolio']['Area']=np.zeros(meta['Project']['N Stand'])
meta['Project']['Portfolio']['Year']=np.zeros(meta['Project']['N Stand'])
cnt=0
for iA in range(meta['Project']['AIL']['N AT']):
for iY in range(meta['Project']['AIL']['N Years']):
for iS in range(meta['Project']['N Stand per Activity Type']):
meta['Project']['Portfolio']['ID Portfolio'][cnt]=meta['Project']['AIL']['ID Portfolio'][indAT[iA]]
meta['Project']['Portfolio']['ID AT'][cnt]=meta['Project']['AIL']['ID AT'][indAT[iA]]
meta['Project']['Portfolio']['ID AT Unique'][cnt]=meta['Project']['AIL']['ID AT Unique'][indAT[iA]]
meta['Project']['Portfolio']['Area'][cnt]=meta['Project']['AIL']['Area'][iY,indAT[iA]]
meta['Project']['Portfolio']['Year'][cnt]=meta['Project']['AIL']['Year'][iY]
cnt=cnt+1
# Scenario information
        # Will this tool ever be used with on-the-fly disturbances? Currently set
        # to "Off".
meta['Scenario']=[None]*meta['Project']['N Scenario']
for iScn in range(meta['Project']['N Scenario']):
meta['Scenario'][iScn]={}
# *** This is super awkward - simulations can't change between activities or scenarios!!!
# Do we need that type of functionality for the PT? ***
meta['Scenario'][iScn]['Wildfire Scenario ID']=meta['Project']['Activities']['Wildfire Scenario ID'][0]
meta['Scenario'][iScn]['Wildfire Status Pre-modern']=meta['Project']['Activities']['Wildfire Status Pre-modern'][0]
meta['Scenario'][iScn]['Wildfire Status Modern']=meta['Project']['Activities']['Wildfire Status Modern'][0]
meta['Scenario'][iScn]['Wildfire Status Future']=meta['Project']['Activities']['Wildfire Status Future'][0]
meta['Scenario'][iScn]['Harvest Status Historical']='Off'
meta['Scenario'][iScn]['Harvest Status Future']='Off'
meta['Scenario'][iScn]['Breakup Status']='Off'
meta['Scenario'][iScn]['Nutrient Application Status']='Off'
return meta
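# Illustrative sketch (not executed), assuming meta['Paths'] has already been populated
# with the project root and the other paths used by this module (the path below is
# hypothetical):
#   meta={'Paths':{'Project':r'C:\Projects\MyProject'}}
#   meta=ImportProjectConfig(meta)
#   meta['Project']['N Scenario'], meta['Project']['N Batch']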
#%% Load single output file
# Return the output of a single scenario-ensemble-batch combination, with scale
# factors applied and derived variables added.
def LoadSingleOutputFile(meta,iScn,iEns,iBat):
# Extract indices
iEP=meta['Core']['iEP']
# Extract biophysical parameters
bB=meta['Param']['BEV']['Biophysical']
# Open batch results
pth=meta['Paths']['Project'] + '\\Outputs\\Scenario' + FixFileNum(iScn) + '\\Data_Scn' + FixFileNum(iScn) + '_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl'
v0=gu.ipickle(pth)
# Convert to float and apply scale factor
for k in v0.keys():
# Skip mortality summary by agent
if (k=='C_M_ByAgent'):
continue
v0[k]=v0[k].astype(float)
if (k=='E_CO2e_LULUCF_HWP') | (k=='E_CO2e_ESC_Comb') | (k=='E_CO2e_ET_Comb') | (k=='E_CO2e_IPPU_Comb'):
v0[k]=v0[k]*meta['Core']['Scale Factor Export Big']
else:
v0[k]=v0[k]*meta['Core']['Scale Factor Export Small']
#--------------------------------------------------------------------------
# Add derived variables
#--------------------------------------------------------------------------
if meta['Project']['Save Biomass Pools']=='On':
# Aggregate variables not yet generated
# Aggregate pools
v0['C_Biomass_Tot']=np.sum(v0['C_Eco_Pools'][:,:,iEP['BiomassTotal']],axis=2)
v0['C_Piled_Tot']=np.sum(v0['C_Eco_Pools'][:,:,iEP['Piled']],axis=2)
v0['C_Litter_Tot']=np.sum(v0['C_Eco_Pools'][:,:,iEP['Litter']],axis=2)
v0['C_DeadWood_Tot']=np.sum(v0['C_Eco_Pools'][:,:,iEP['DeadWood']],axis=2)
v0['C_Soil_Tot']=np.sum(v0['C_Eco_Pools'][:,:,iEP['Soil']],axis=2)
v0['C_InUse_Tot']=np.sum(v0['C_Pro_Pools'][:,:,meta['Core']['iPP']['InUse']],axis=2)
v0['C_DumpLandfill_Tot']=np.sum(v0['C_Pro_Pools'][:,:,meta['Core']['iPP']['DumpLandfill']],axis=2)
# Aggregate fluxes
v0['C_G_Gross_Tot']=np.sum(v0['C_G_Gross'],axis=2)
v0['C_G_Net_Tot']=np.sum(v0['C_G_Net'],axis=2)
v0['C_M_Reg_Tot']=np.sum(v0['C_M_Reg'],axis=2)
v0['C_LF_Tot']=np.sum(v0['C_LF'],axis=2)
v0['C_RH_Tot']=np.sum(v0['C_RH'],axis=2)
# Net primary productivity (MgC/ha/yr)
v0['C_NPP_Tot']=v0['C_G_Net_Tot']+v0['C_M_Reg_Tot']+v0['C_LF_Tot']
# Harvest removals (MgC/ha/yr)
v0['C_ToMill']=v0['C_ToMillMerch']+v0['C_ToMillNonMerch']+v0['C_ToMillSnagStem']
# Net ecosystem production (tCO2e/ha/yr)
v0['E_CO2e_LULUCF_NEE']=-1*bB['Ratio_CO2_to_C']*(v0['C_NPP_Tot']-v0['C_RH_Tot'])
# Ecosystem fire total (tCO2e/ha/yr)
v0['E_CO2e_LULUCF_Fire']=v0['E_CO2e_LULUCF_Wildfire']+v0['E_CO2e_LULUCF_OpenBurning']
        # For unknown reasons, reversing the sign at the point of calculation corrupts
        # the array, so the sign is reversed here instead.
v0['E_CO2e_ESC_SubBM']=-1*v0['E_CO2e_ESC_SubBM']
v0['E_CO2e_ESC_SubE']=-1*v0['E_CO2e_ESC_SubE']
# Atmospheric GHG balance (tCO2e/ha/yr)
v0['E_CO2e_AGHGB_WSub']=v0['E_CO2e_LULUCF_NEE']+v0['E_CO2e_LULUCF_Wildfire']+v0['E_CO2e_LULUCF_OpenBurning']+ \
v0['E_CO2e_LULUCF_EcoOther']+v0['E_CO2e_LULUCF_HWP']+v0['E_CO2e_ESC_SubE']+v0['E_CO2e_ESC_SubBM']+v0['E_CO2e_ESC_Comb']+v0['E_CO2e_ET_Comb']+v0['E_CO2e_IPPU_Comb']
v0['E_CO2e_AGHGB_WOSub']=v0['E_CO2e_LULUCF_NEE']+v0['E_CO2e_LULUCF_Wildfire']+v0['E_CO2e_LULUCF_OpenBurning']+ \
v0['E_CO2e_LULUCF_EcoOther']+v0['E_CO2e_LULUCF_HWP']+v0['E_CO2e_ESC_Comb']+v0['E_CO2e_ET_Comb']+v0['E_CO2e_IPPU_Comb']
# Add cumulative
v0['E_CO2e_AGHGB_WSub_cumu']=np.cumsum(v0['E_CO2e_AGHGB_WSub'],axis=0)
v0['E_CO2e_AGHGB_WOSub_cumu']=np.cumsum(v0['E_CO2e_AGHGB_WOSub'],axis=0)
# Add year
it=np.where(meta['Year']>=meta['Project']['Year Start Saving'])[0]
v0['Year']=meta['Year'][it]
# Add cumulative (starting from a specified start year)
iT=np.where(v0['Year']>=meta['Project']['Year Start Cumulative'])[0]
v0['E_CO2e_AGHGB_WSub_cumu_from_tref']=np.zeros(v0['A'].shape)
v0['E_CO2e_AGHGB_WSub_cumu_from_tref'][iT,:]=np.cumsum(v0['E_CO2e_AGHGB_WSub'][iT,:],axis=0)
v0['E_CO2e_AGHGB_WOSub_cumu_from_tref']=np.zeros(v0['A'].shape)
v0['E_CO2e_AGHGB_WOSub_cumu_from_tref'][iT,:]=np.cumsum(v0['E_CO2e_AGHGB_WOSub'][iT,:],axis=0)
return v0
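# Illustrative sketch (not executed): load the first batch of the first ensemble for
# scenario 0 and inspect a variable (available keys depend on project configuration):
#   v0=LoadSingleOutputFile(meta,0,0,0)
#   v0['E_CO2e_AGHGB_WSub'].shape # -> (saved years, stands in batch)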
#%% Load scenario results
# Return a list of dictionaries, one per scenario. If multiple ensembles were run,
# the function returns the ensemble average.
def LoadScenarioResults(meta):
# Initialize list that will contain scenarios
v1=[]
for iScn in range(meta['Project']['N Scenario']):
for iEns in range(meta['Project']['N Ensemble']):
for iBat in range(meta['Project']['N Batch']):
#--------------------------------------------------------------
# Open batch results
#--------------------------------------------------------------
data_batch=LoadSingleOutputFile(meta,iScn,iEns,iBat)
# Import event chronology
if (meta['Scenario'][iScn]['Harvest Status Future']=='On') | (meta['Scenario'][iScn]['Breakup Status']=='On'):
ec=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Modified_Events_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl')
else:
ec=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Events_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl')
# Uncompress event chronology if it has been compressed
ec=EventChronologyDecompress(meta,ec,iScn,iEns,iBat)
# Inventory
inv=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Inventory_Bat' + FixFileNum(iBat) + '.pkl')
# Cashflow
econ=econo.CalculateNetRevenue(meta,iScn,iEns,iBat,inv,ec,data_batch)
data_batch.update(econ)
#--------------------------------------------------------------
# Accumulate data in each batch
#--------------------------------------------------------------
if iBat==0:
data_all=data_batch
else:
for key1 in data_batch.keys():
if key1=='Year':
# Only needed once
continue
elif (key1=='C_M_ByAgent'):
# Nested dictionary
for key2 in data_batch[key1].keys():
data_all[key1][key2]=np.append(data_all[key1][key2],data_batch[key1][key2],axis=1)
else:
# No nested dictionary
data_all[key1]=np.append(data_all[key1],data_batch[key1],axis=1)
#------------------------------------------------------------------
# Sum across ensembles
#------------------------------------------------------------------
if iEns==0:
data_sum2ave=data_all
else:
for key1 in data_batch.keys():
if (key1=='C_M_ByAgent'):
# Nested dictionary
for key2 in data_batch[key1].keys():
data_sum2ave[key1][key2]=data_sum2ave[key1][key2]+data_all[key1][key2]
else:
# No nested dictionary
data_sum2ave[key1]=data_sum2ave[key1]+data_all[key1]
#----------------------------------------------------------------------
# If the simulation includes ensembles, calculate average
#----------------------------------------------------------------------
for key1 in data_batch.keys():
# Skip mortality summary by agent
if (key1=='C_M_ByAgent'):
                # Nested dictionary
for key2 in data_batch[key1].keys():
data_sum2ave[key1][key2]=data_sum2ave[key1][key2]/meta['Project']['N Ensemble']
else:
# No nested dictionary
data_sum2ave[key1]=data_sum2ave[key1]/meta['Project']['N Ensemble']
#----------------------------------------------------------------------
# Add year
#----------------------------------------------------------------------
it=np.where(meta['Year']>=meta['Project']['Year Start Saving'])[0]
data_sum2ave['Year']=meta['Year'][it]
#----------------------------------------------------------------------
# Append to list
#----------------------------------------------------------------------
v1.append(data_sum2ave)
return v1
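# Illustrative sketch (not executed): load ensemble-averaged results for all scenarios and
# compare the cumulative GHG balance of a project scenario against a baseline (the scenario
# indices below are hypothetical):
#   v1=LoadScenarioResults(meta)
#   dGHG=v1[1]['E_CO2e_AGHGB_WSub_cumu']-v1[0]['E_CO2e_AGHGB_WSub_cumu']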
#%% Model Output Statistics
def ModelOutputStats(meta,**kwargs):
#--------------------------------------------------------------------------
    # Keyword arguments
#--------------------------------------------------------------------------
if 'KeepEachEnsemble' in kwargs.keys():
flag_KeepEnsembles=1
else:
# Default is to not save individual ensembles
flag_KeepEnsembles=0
if 'StandsToInclude' in kwargs.keys():
flag_stands_to_include=kwargs['StandsToInclude']
else:
flag_stands_to_include=[]
if 'IncludeArea' in kwargs.keys():
flag_Area=1
else:
        # Default is to not include area
flag_Area=0
if 'IncludeMortalityByAgents' in kwargs.keys():
flag_MortAg=1
else:
        # Default is to not include mortality by agents
flag_MortAg=0
flag_save=1
# Confidence intervals based on multiplier x S.E.
sigma_multiplier=2.0
#--------------------------------------------------------------------------
# Time
#--------------------------------------------------------------------------
tv=np.arange(meta['Project']['Year Start Saving'],meta['Project']['Year End']+1,1)
tv_full=np.arange(meta['Project']['Year Start'],meta['Project']['Year End']+1,1)
#--------------------------------------------------------------------------
# Initialize MOS list
#--------------------------------------------------------------------------
mos=[None]*meta['Project']['N Scenario']
#--------------------------------------------------------------------------
# Loop through scenarios
#--------------------------------------------------------------------------
for iScn in range(meta['Project']['N Scenario']):
#t0=time.time()
# Operations
oper=['Mean','Sum']
#----------------------------------------------------------------------
# Initialize structure
#----------------------------------------------------------------------
mosScn={}
# GHG balance
mosScn['v1']={}
mosScn['v1']['Mean']={}
mosScn['v1']['Sum']={}
d1=LoadSingleOutputFile(meta,0,0,0)
        # Variables to exclude
v2exclude_v1=['Year','C_M_ByAgent']
for k in d1.keys():
if np.isin(k,v2exclude_v1)==True:
continue
for op in oper:
mosScn['v1'][op][k]={}
mosScn['v1'][op][k]['Ensembles']=np.zeros((tv.size,meta['Project']['N Ensemble']))
mosScn['v1'][op][k]['Ensemble Mean']=np.zeros(tv.size)
mosScn['v1'][op][k]['Ensemble SD']=np.zeros(tv.size)
mosScn['v1'][op][k]['Ensemble CIL']=np.zeros(tv.size)
mosScn['v1'][op][k]['Ensemble CIH']=np.zeros(tv.size)
mosScn['v1'][op][k]['Ensemble P1']=np.zeros(tv.size)
mosScn['v1'][op][k]['Ensemble P10']=np.zeros(tv.size)
mosScn['v1'][op][k]['Ensemble P90']=np.zeros(tv.size)
mosScn['v1'][op][k]['Ensemble P99']=np.zeros(tv.size)
# Cashflow
mosScn['Cashflow']={}
mosScn['Cashflow']['Mean']={}
mosScn['Cashflow']['Sum']={}
# Import event chronology
iEns=0
iBat=0
if (meta['Scenario'][iScn]['Harvest Status Future']=='On') | (meta['Scenario'][iScn]['Breakup Status']=='On'):
ec=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Modified_Events_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl')
else:
ec=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Events_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl')
# Uncompress event chronology if it has been compressed
ec=EventChronologyDecompress(meta,ec,iScn,iEns,iBat)
# Inventory
inv=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Inventory_Bat' + FixFileNum(iBat) + '.pkl')
# Cashflow
econ0=econo.CalculateNetRevenue(meta,iScn,iEns,iBat,inv,ec,d1)
del ec,inv
        # Variables to exclude
v2exclude_cashflow=['Price Lumber','Price Plywood','Price OSB','Price MDF','Price Newsprint','Price PowerFacilityDom','Price PowerGrid','Price Pellets','Price LogExport','Price FirewoodDom','Exchange Rate US','Exchange Rate Euro']
for k in econ0.keys():
# Exclude
if np.isin(k,v2exclude_cashflow)==True:
continue
for op in oper:
mosScn['Cashflow'][op][k]={}
mosScn['Cashflow'][op][k]['Ensembles']=np.zeros((tv.size,meta['Project']['N Ensemble']))
mosScn['Cashflow'][op][k]['Ensemble Mean']=np.zeros(tv.size)
mosScn['Cashflow'][op][k]['Ensemble SD']=np.zeros(tv.size)
mosScn['Cashflow'][op][k]['Ensemble CIL']=np.zeros(tv.size)
mosScn['Cashflow'][op][k]['Ensemble CIH']=np.zeros(tv.size)
mosScn['Cashflow'][op][k]['Ensemble P1']=np.zeros(tv.size)
mosScn['Cashflow'][op][k]['Ensemble P10']=np.zeros(tv.size)
mosScn['Cashflow'][op][k]['Ensemble P90']=np.zeros(tv.size)
mosScn['Cashflow'][op][k]['Ensemble P99']=np.zeros(tv.size)
if flag_MortAg==1:
mosScn['C_M_ByAgent']={}
mosScn['C_M_ByAgent']['Mean']={}
mosScn['C_M_ByAgent']['Sum']={}
for k in d1['C_M_ByAgent'].keys():
for op in oper:
mosScn['C_M_ByAgent'][op][k]={}
mosScn['C_M_ByAgent'][op][k]['Ensembles']=np.zeros((tv.size,meta['Project']['N Ensemble']))
mosScn['C_M_ByAgent'][op][k]['Ensemble Mean']=np.zeros(tv.size)
mosScn['C_M_ByAgent'][op][k]['Ensemble SD']=np.zeros(tv.size)
if flag_Area==1:
mosScn['Area']={}
for k in meta['LUT']['Dist'].keys():
mosScn['Area'][k]={}
mosScn['Area'][k]['Ensembles']=np.zeros((tv.size,meta['Project']['N Ensemble']))
mosScn['Area'][k]['Ensemble Mean']=np.zeros(tv.size)
mosScn['Area'][k]['Ensemble SD']=np.zeros(tv.size)
#----------------------------------------------------------------------
# Loop through ensembles
#----------------------------------------------------------------------
for iEns in range(meta['Project']['N Ensemble']):
# Initialize dictionaries for each ensemble
v1={}
cashflow={}
C_M_ByAgent={}
N_Stands_Included=0
for iBat in range(meta['Project']['N Batch']):
# Include specific subset of stands
if len(flag_stands_to_include)==0:
ikp=np.arange(0,meta['Project']['Batch Size'][iBat],1,dtype=int)
else:
ikp=np.where(flag_stands_to_include[iScn][iEns][iBat]==1)[0]
N_Stands_Included=N_Stands_Included+ikp.size
# Load basic output
d1=LoadSingleOutputFile(meta,iScn,iEns,iBat)
                # Some projects may elect to keep each biomass pool in the output ("Save Biomass Pools"="On").
                # This function does not handle individual pools, so they are summed here first.
if meta['Project']['Save Biomass Pools']=='On':
List=['C_Eco_Pools','C_Pro_Pools','C_G_Gross','C_G_Net','C_M_Reg','C_LF','C_RH']
for iList in range(len(List)):
nam=List[iList]
d1[nam]=np.sum(d1[nam][:,ikp],axis=2)
for k in d1.keys():
if np.isin(k,v2exclude_v1)==True:
continue
if iBat==0:
v1[k]=np.sum(d1[k][:,ikp],axis=1)
else:
v1[k]=v1[k]+np.sum(d1[k][:,ikp],axis=1)
# Mortality summary by agent
if flag_MortAg==1:
for k in d1['C_M_ByAgent'].keys():
if iBat==0:
C_M_ByAgent[k]=d1['C_M_ByAgent'][k].flatten()
else:
C_M_ByAgent[k]=C_M_ByAgent[k]+d1['C_M_ByAgent'][k].flatten()
# Import event chronology
if (meta['Scenario'][iScn]['Harvest Status Future']=='On') | (meta['Scenario'][iScn]['Breakup Status']=='On'):
ec=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Modified_Events_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl')
else:
ec=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Events_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl')
# Uncompress event chronology if it has been compressed
ec=EventChronologyDecompress(meta,ec,iScn,iEns,iBat)
# Inventory
inv=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Inventory_Bat' + FixFileNum(iBat) + '.pkl')
# Cashflow
econ=econo.CalculateNetRevenue(meta,iScn,iEns,iBat,inv,ec,d1)
for k in econ0.keys():
if np.isin(k,v2exclude_cashflow)==True:
continue
if iBat==0:
cashflow[k]=np.sum(econ[k][:,ikp],axis=1)
else:
cashflow[k]=cashflow[k]+np.sum(econ[k][:,ikp],axis=1)
# Area
if flag_Area==1:
for iYr in range(tv_full.size):
it=np.where(tv==tv_full[iYr])[0]
if it.size==0:
continue
ID_Type0=ec['ID_Type'][iYr,ikp,:].flatten()
u=np.unique(ID_Type0)
for iU in range(u.size):
if u[iU]==0:
continue
id=lut_n2s(meta['LUT']['Dist'],u[iU])[0]
ind=np.where(ID_Type0==u[iU])[0]
mosScn['Area'][id]['Ensembles'][it,iEns]=mosScn['Area'][id]['Ensembles'][it,iEns]+ind.size
# Delete variables from workspace
del d1,ec,econ
garc.collect()
#------------------------------------------------------------------
# Populate MOS for each scenario
#------------------------------------------------------------------
for k in v1.keys():
if np.isin(k,v2exclude_v1)==True:
continue
mosScn['v1']['Sum'][k]['Ensembles'][:,iEns]=v1[k].copy()
mosScn['v1']['Mean'][k]['Ensembles'][:,iEns]=v1[k].copy()/N_Stands_Included #meta['Project']['N Stand']
for k in econ0.keys():
if np.isin(k,v2exclude_cashflow)==True:
continue
mosScn['Cashflow']['Sum'][k]['Ensembles'][:,iEns]=cashflow[k].copy()
mosScn['Cashflow']['Mean'][k]['Ensembles'][:,iEns]=cashflow[k].copy()/N_Stands_Included #meta['Project']['N Stand']
if flag_MortAg==1:
for k in C_M_ByAgent.keys():
mosScn['C_M_ByAgent']['Sum'][k]['Ensembles'][:,iEns]=C_M_ByAgent[k].copy()
mosScn['C_M_ByAgent']['Mean'][k]['Ensembles'][:,iEns]=C_M_ByAgent[k].copy()/N_Stands_Included #meta['Project']['N Stand']
#----------------------------------------------------------------------
# Calculate statistics
#----------------------------------------------------------------------
# Basic output
for k in v1.keys():
if np.isin(k,v2exclude_v1)==True:
continue
mosScn['v1']['Sum'][k]['Ensemble Mean']=np.mean(mosScn['v1']['Sum'][k]['Ensembles'],axis=1)
mosScn['v1']['Sum'][k]['Ensemble SD']=np.std(mosScn['v1']['Sum'][k]['Ensembles'],axis=1)
mosScn['v1']['Sum'][k]['Ensemble CIL']=np.mean(mosScn['v1']['Sum'][k]['Ensembles'],axis=1)-sigma_multiplier*np.std(mosScn['v1']['Sum'][k]['Ensembles'],axis=1)/np.sqrt(meta['Project']['N Ensemble'])
mosScn['v1']['Sum'][k]['Ensemble CIH']=np.mean(mosScn['v1']['Sum'][k]['Ensembles'],axis=1)+sigma_multiplier*np.std(mosScn['v1']['Sum'][k]['Ensembles'],axis=1)/np.sqrt(meta['Project']['N Ensemble'])
mosScn['v1']['Sum'][k]['Ensemble P1']=np.percentile(mosScn['v1']['Sum'][k]['Ensembles'],1,axis=1)
mosScn['v1']['Sum'][k]['Ensemble P10']=np.percentile(mosScn['v1']['Sum'][k]['Ensembles'],10,axis=1)
mosScn['v1']['Sum'][k]['Ensemble P90']=np.percentile(mosScn['v1']['Sum'][k]['Ensembles'],90,axis=1)
mosScn['v1']['Sum'][k]['Ensemble P99']=np.percentile(mosScn['v1']['Sum'][k]['Ensembles'],99,axis=1)
mosScn['v1']['Mean'][k]['Ensemble Mean']=np.mean(mosScn['v1']['Mean'][k]['Ensembles'],axis=1)
mosScn['v1']['Mean'][k]['Ensemble SD']=np.std(mosScn['v1']['Mean'][k]['Ensembles'],axis=1)
mosScn['v1']['Mean'][k]['Ensemble CIL']=np.mean(mosScn['v1']['Mean'][k]['Ensembles'],axis=1)-sigma_multiplier*np.std(mosScn['v1']['Mean'][k]['Ensembles'],axis=1)/np.sqrt(meta['Project']['N Ensemble'])
mosScn['v1']['Mean'][k]['Ensemble CIH']=np.mean(mosScn['v1']['Mean'][k]['Ensembles'],axis=1)+sigma_multiplier*np.std(mosScn['v1']['Mean'][k]['Ensembles'],axis=1)/np.sqrt(meta['Project']['N Ensemble'])
mosScn['v1']['Mean'][k]['Ensemble P1']=np.percentile(mosScn['v1']['Mean'][k]['Ensembles'],1,axis=1)
mosScn['v1']['Mean'][k]['Ensemble P10']=np.percentile(mosScn['v1']['Mean'][k]['Ensembles'],10,axis=1)
mosScn['v1']['Mean'][k]['Ensemble P90']=np.percentile(mosScn['v1']['Mean'][k]['Ensembles'],90,axis=1)
mosScn['v1']['Mean'][k]['Ensemble P99']=np.percentile(mosScn['v1']['Mean'][k]['Ensembles'],99,axis=1)
if flag_KeepEnsembles==0:
del mosScn['v1']['Sum'][k]['Ensembles']
del mosScn['v1']['Mean'][k]['Ensembles']
# By mortality agent
if flag_MortAg==1:
for k in C_M_ByAgent.keys():
mosScn['C_M_ByAgent']['Sum'][k]['Ensemble Mean']=np.mean(mosScn['C_M_ByAgent']['Sum'][k]['Ensembles'],axis=1)
mosScn['C_M_ByAgent']['Sum'][k]['Ensemble SD']=np.std(mosScn['C_M_ByAgent']['Sum'][k]['Ensembles'],axis=1)
mosScn['C_M_ByAgent']['Mean'][k]['Ensemble Mean']=np.mean(mosScn['C_M_ByAgent']['Mean'][k]['Ensembles'],axis=1)
mosScn['C_M_ByAgent']['Mean'][k]['Ensemble SD']=np.std(mosScn['C_M_ByAgent']['Mean'][k]['Ensembles'],axis=1)
if flag_KeepEnsembles==0:
del mosScn['C_M_ByAgent']['Sum'][k]['Ensembles']
del mosScn['C_M_ByAgent']['Mean'][k]['Ensembles']
# Area impacted
if flag_Area==1:
for k in mosScn['Area'].keys():
                mosScn['Area'][k]['Ensemble Mean']=np.mean(mosScn['Area'][k]['Ensembles'],axis=1)
                mosScn['Area'][k]['Ensemble SD']=np.std(mosScn['Area'][k]['Ensembles'],axis=1)
if flag_KeepEnsembles==0:
del mosScn['Area'][k]['Ensembles']
# Cashflow
for k in econ0.keys():
if np.isin(k,v2exclude_cashflow)==True:
continue
mosScn['Cashflow']['Sum'][k]['Ensemble Mean']=np.mean(mosScn['Cashflow']['Sum'][k]['Ensembles'],axis=1)
mosScn['Cashflow']['Sum'][k]['Ensemble SD']=np.std(mosScn['Cashflow']['Sum'][k]['Ensembles'],axis=1)
mosScn['Cashflow']['Sum'][k]['Ensemble CIL']=np.mean(mosScn['Cashflow']['Sum'][k]['Ensembles'],axis=1)-sigma_multiplier*np.std(mosScn['Cashflow']['Sum'][k]['Ensembles'],axis=1)/np.sqrt(meta['Project']['N Ensemble'])
mosScn['Cashflow']['Sum'][k]['Ensemble CIH']=np.mean(mosScn['Cashflow']['Sum'][k]['Ensembles'],axis=1)+sigma_multiplier*np.std(mosScn['Cashflow']['Sum'][k]['Ensembles'],axis=1)/np.sqrt(meta['Project']['N Ensemble'])
mosScn['Cashflow']['Sum'][k]['Ensemble P1']=np.percentile(mosScn['Cashflow']['Sum'][k]['Ensembles'],1,axis=1)
mosScn['Cashflow']['Sum'][k]['Ensemble P10']=np.percentile(mosScn['Cashflow']['Sum'][k]['Ensembles'],10,axis=1)
mosScn['Cashflow']['Sum'][k]['Ensemble P90']=np.percentile(mosScn['Cashflow']['Sum'][k]['Ensembles'],90,axis=1)
mosScn['Cashflow']['Sum'][k]['Ensemble P99']=np.percentile(mosScn['Cashflow']['Sum'][k]['Ensembles'],99,axis=1)
mosScn['Cashflow']['Mean'][k]['Ensemble Mean']=np.mean(mosScn['Cashflow']['Mean'][k]['Ensembles'],axis=1)
mosScn['Cashflow']['Mean'][k]['Ensemble SD']=np.std(mosScn['Cashflow']['Mean'][k]['Ensembles'],axis=1)
            mosScn['Cashflow']['Mean'][k]['Ensemble CIL']=np.mean(mosScn['Cashflow']['Mean'][k]['Ensembles'],axis=1)-sigma_multiplier*np.std(mosScn['Cashflow']['Mean'][k]['Ensembles'],axis=1)/np.sqrt(meta['Project']['N Ensemble'])
            mosScn['Cashflow']['Mean'][k]['Ensemble CIH']=np.mean(mosScn['Cashflow']['Mean'][k]['Ensembles'],axis=1)+sigma_multiplier*np.std(mosScn['Cashflow']['Mean'][k]['Ensembles'],axis=1)/np.sqrt(meta['Project']['N Ensemble'])
mosScn['Cashflow']['Mean'][k]['Ensemble P1']=np.percentile(mosScn['Cashflow']['Mean'][k]['Ensembles'],1,axis=1)
mosScn['Cashflow']['Mean'][k]['Ensemble P10']=np.percentile(mosScn['Cashflow']['Mean'][k]['Ensembles'],10,axis=1)
mosScn['Cashflow']['Mean'][k]['Ensemble P90']=np.percentile(mosScn['Cashflow']['Mean'][k]['Ensembles'],90,axis=1)
mosScn['Cashflow']['Mean'][k]['Ensemble P99']=np.percentile(mosScn['Cashflow']['Mean'][k]['Ensembles'],99,axis=1)
if flag_KeepEnsembles==0:
del mosScn['Cashflow']['Sum'][k]['Ensembles']
del mosScn['Cashflow']['Mean'][k]['Ensembles']
#t1=time.time()
# Add scenario results to MOS dictionary
mos[iScn]=copy.deepcopy(mosScn)
#--------------------------------------------------------------------------
# Save
#--------------------------------------------------------------------------
if flag_save==1:
gu.opickle(meta['Paths']['Project'] + '\\Outputs\\MOS.pkl',mos)
return mos
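# Illustrative sketch (not executed): the optional keyword switches above are only checked
# for presence, so any value turns them on, e.g.:
#   mos=ModelOutputStats(meta,IncludeArea='On',IncludeMortalityByAgents='On')
#   mos[0]['v1']['Mean']['E_CO2e_AGHGB_WSub']['Ensemble Mean'] # mean GHG balance, scenario 0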
#%% Save MOS GHG output variables by multipolygon subset
def MosByMPSubset_GHGB(meta,ListSubsetMP):
t0=time.time()
# Error multiplier
sigma_multiplier=1.0
# Import multipolygons
atu_multipolygons=gu.ipickle(meta['Paths']['Geospatial'] + '\\atu_multipolygons.pkl')
# Import sxy
sxy=gu.ipickle(meta['Paths']['Geospatial'] + '\\sxy.pkl')
# Create listed index (faster than indexing on the fly)
Crosswalk_sxy_to_mp=[None]*len(ListSubsetMP)
for iMP in range(len(ListSubsetMP)):
d={}
d['Index']=np.where(sxy['ID_atu_multipolygons']==ListSubsetMP[iMP])[0]
Crosswalk_sxy_to_mp[iMP]=d
# Area
Area=np.zeros( len(ListSubsetMP) )
for iMP in range(len(ListSubsetMP)):
Area0=atu_multipolygons[ListSubsetMP[iMP]]['ACTUAL_TREATMENT_AREA']
        if Area0 is None:
            print('Encountered no area, using zero')
            Area0=0
Area[iMP]=Area0
# Time series of saved results
tv_saving=np.arange(meta['Project']['Year Start Saving'],meta['Project']['Year End']+1,1)
# Operations
oper=['Mean','Sum']
# GHG balance variables
d0=LoadSingleOutputFile(meta,0,0,0)
# All (excluding 'C_M_ByAgent' and 'Year')
v2include=['A', 'V_StemMerch', 'V_StemMerchToMill', 'LogSizeEnhancement', 'C_Biomass_Tot', 'C_Piled_Tot', 'C_Litter_Tot', 'C_DeadWood_Tot', 'C_Soil_Tot',
'C_InUse_Tot', 'C_DumpLandfill_Tot', 'C_M_Dist', 'C_G_Gross_Tot', 'C_G_Net_Tot', 'C_M_Reg_Tot', 'C_LF_Tot', 'C_RH_Tot', 'C_ToMillMerch',
'C_ToMillNonMerch', 'C_ToMillSnagStem', 'C_ToSlashpileBurn', 'C_ToLumber', 'C_ToPlywood', 'C_ToOSB', 'C_ToMDF', 'C_ToPaper', 'C_ToPowerFacilityDom',
'C_ToPowerFacilityFor', 'C_ToPowerGrid', 'C_ToPellets', 'C_ToFirewoodDom', 'C_ToFirewoodFor', 'C_ToLogExport', 'E_CO2e_LULUCF_NEE', 'E_CO2e_LULUCF_Wildfire',
'E_CO2e_LULUCF_OpenBurning', 'E_CO2e_LULUCF_EcoOther', 'E_CO2e_LULUCF_HWP', 'E_CO2e_ESC_Comb', 'E_CO2e_ESC_SubE', 'E_CO2e_ESC_SubBM', 'E_CO2e_ET_Comb',
'E_CO2e_IPPU_Comb', 'C_NPP_Tot', 'C_ToMill', 'E_CO2e_LULUCF_Fire', 'E_CO2e_AGHGB_WSub', 'E_CO2e_AGHGB_WOSub', 'E_CO2e_AGHGB_WSub_cumu', 'E_CO2e_AGHGB_WOSub_cumu',
'E_CO2e_AGHGB_WSub_cumu_from_tref', 'E_CO2e_AGHGB_WOSub_cumu_from_tref']
#--------------------------------------------------------------------------
# Initialize data by multipolygon structure
#--------------------------------------------------------------------------
mos=[None]*meta['Project']['N Scenario']
for iScn in range(meta['Project']['N Scenario']):
d={}
for op in oper:
d[op]={}
for k in d0.keys():
if np.isin(k,v2include)==False:
continue
for op in oper:
d[op][k]={}
d[op][k]['Ensemble Mean']=np.zeros((tv_saving.size,len(ListSubsetMP)))
d[op][k]['Ensemble CIL']=np.zeros((tv_saving.size,len(ListSubsetMP)))
d[op][k]['Ensemble CIH']=np.zeros((tv_saving.size,len(ListSubsetMP)))
mos[iScn]=d
#--------------------------------------------------------------------------
# Loop through scenarios
#--------------------------------------------------------------------------
for iScn in range(meta['Project']['N Scenario']):
# Initialize data matrix
Data={}
for op in oper:
            Data[op]=np.zeros( (tv_saving.size,meta['Project']['N Ensemble'],len(ListSubsetMP),len(v2include)) ,dtype=float)
#----------------------------------------------------------------------
# Loop through ensembles
#----------------------------------------------------------------------
for iEns in range(meta['Project']['N Ensemble']):
print(iEns)
#------------------------------------------------------------------
# Import batches
#------------------------------------------------------------------
# Initialize temporary data structure for full simulation
            DataSXY=np.zeros( (tv_saving.size,meta['Project']['N Stand'],len(v2include)) ,dtype=float)
# Import batches
for iBat in range(meta['Project']['N Batch']):
indBat=IndexToBatch(meta,iBat)
d1=LoadSingleOutputFile(meta,iScn,iEns,iBat)
cnt_k=0
for k in d0.keys():
if np.isin(k,v2include)==False:
continue
DataSXY[:,indBat,cnt_k]=d1[k].copy()
cnt_k=cnt_k+1
del d1
#garc.collect()
#------------------------------------------------------------------
# Calculate mean for multipolygon subset
#------------------------------------------------------------------
for iMP in range(len(ListSubsetMP)):
indMP=Crosswalk_sxy_to_mp[iMP]['Index']
mu=np.mean(DataSXY[:,indMP,:],axis=1)
for op in oper:
if op=='Mean':
Data[op][:,iEns,iMP,:]=mu
else:
Data[op][:,iEns,iMP,:]=np.sum(Area[iMP])*mu
#----------------------------------------------------------------------
# Add stats to MOS structure
#----------------------------------------------------------------------
cnt_k=0
for k in d0.keys():
if np.isin(k,v2include)==False:
continue
for op in oper:
mu=np.mean(Data[op][:,:,:,cnt_k],axis=1)
sd=np.std(Data[op][:,:,:,cnt_k],axis=1)
cil=mu-sigma_multiplier*sd/np.sqrt(meta['Project']['N Ensemble'])
cih=mu+sigma_multiplier*sd/np.sqrt(meta['Project']['N Ensemble'])
mos[iScn][op][k]['Ensemble Mean']=mu
mos[iScn][op][k]['Ensemble CIL']=cil
mos[iScn][op][k]['Ensemble CIH']=cih
cnt_k=cnt_k+1
#--------------------------------------------------------------------------
# Save
#--------------------------------------------------------------------------
gu.opickle(meta['Paths']['Project'] + '\\Outputs\\MosByMPSubset_GHGB.pkl',mos)
t1=time.time()
print((t1-t0)/60)
return
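# Illustrative sketch (not executed): summarize the GHG balance for a hypothetical subset
# of multipolygon IDs (values must exist in sxy['ID_atu_multipolygons']); results are
# pickled to the project Outputs folder:
#   ListSubsetMP=[1,5,12]
#   MosByMPSubset_GHGB(meta,ListSubsetMP)
#   mos=gu.ipickle(meta['Paths']['Project'] + '\\Outputs\\MosByMPSubset_GHGB.pkl')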
#%% Save MOS GHG output variables by multipolygon
#def MosByMP_GHG(meta,switch_area):
#
# sigma_multiplier=1.0
#
# # Import multipolygons
# atu_multipolygons=gu.ipickle(meta['Paths']['Geospatial'] + '\\atu_multipolygons.pkl')
#
# # Import sxy
# sxy=gu.ipickle(meta['Paths']['Geospatial'] + '\\sxy.pkl')
#
# # Unique MPs
# uMP=np.unique(sxy['ID_atu_multipolygons'])
#
# # Create listed index (faster than indexing on the fly)
# Crosswalk_sxy_to_mp=[None]*uMP.size
# for iMP in range(uMP.size):
# d={}
# d['Index']=np.where(sxy['ID_atu_multipolygons']==uMP[iMP])[0]
# Crosswalk_sxy_to_mp[iMP]=d
#
# # Time series of saved results
# tv_full=np.arange(meta['Project']['Year Start'],meta['Project']['Year End']+1,1)
# tv_saving=np.arange(meta['Project']['Year Start Saving'],meta['Project']['Year End']+1,1)
# it=np.where( (tv_full>=tv_saving[0]) & (tv_full<=tv_saving[-1]) )[0]
#
# # Operations
# oper=['Mean','Sum']
#
# # GHG balance variables
# d0=LoadSingleOutputFile(meta,0,0,0)
#
# v2exclude_v1=['Year','C_M_ByAgent','LogSizeEnhancement','C_Piled_Tot','C_M_Dist','C_G_Gross_Tot','C_G_Net_Tot','C_M_Reg_Tot','C_LF_Tot','C_RH_Tot', \
# 'C_ToMillNonMerch','C_ToMillSnagStem','C_ToSlashpileBurn','C_ToLumber','C_ToPlywood','C_ToOSB','C_ToMDF','C_ToPaper','C_ToPowerFacilityDom','C_ToPowerFacilityFor', \
# 'C_ToPowerGrid','C_ToPellets','C_ToFirewoodDom','C_ToFirewoodFor','C_ToLogExport','E_CO2e_LULUCF_OpenBurning','E_CO2e_LULUCF_EcoOther','E_CO2e_LULUCF_HWP', \
# 'E_CO2e_ESC_Comb','E_CO2e_ESC_SubE','E_CO2e_ESC_SubBM','E_CO2e_ET_Comb','E_CO2e_IPPU_Comb','C_NPP_Tot','C_ToMill','E_CO2e_LULUCF_Fire']
#
# # Scale factor used to temporarily store data
# ScaleFactor=0.001
#
# #--------------------------------------------------------------------------
# # Initialize data by multipolygon structure
# #--------------------------------------------------------------------------
#
# MosByMP=[None]*meta['Project']['N Scenario']
#
# for iScn in range(meta['Project']['N Scenario']):
#
# d={}
# d['v1']={}
# d['v1']['Mean']={}
# d['v1']['Sum']={}
# for k in d0.keys():
# if np.isin(k,v2exclude_v1)==True:
# continue
# for op in oper:
# d['v1'][op][k]={}
# d['v1'][op][k]['Ensemble Mean']=np.zeros((tv_saving.size,uMP.size))
# d['v1'][op][k]['Ensemble CIL']=np.zeros((tv_saving.size,uMP.size))
# d['v1'][op][k]['Ensemble CIH']=np.zeros((tv_saving.size,uMP.size))
#
# if switch_area=='On':
# d['Area']={}
# for k in meta['LUT']['Dist'].keys():
# d['Area'][k]={}
# d['Area'][k]['Ensemble Mean']=np.zeros((tv_saving.size,uMP.size))
#
# MosByMP[iScn]=d
#
# #--------------------------------------------------------------------------
# # Loop through scenarios
# #--------------------------------------------------------------------------
#
# for iScn in range(meta['Project']['N Scenario']):
#
# for iEns in range(meta['Project']['N Ensemble']):
#
# #------------------------------------------------------------------
# # Initialize temporary data structure for full simulation
# #------------------------------------------------------------------
#
# Data={}
#
# Data['v1']={}
# for k in d0.keys():
# if np.isin(k,v2exclude_v1)==True:
# continue
# Data['v1'][k]=np.zeros((tv_saving.size,meta['Project']['N Stand']),dtype=int)
#
# if switch_area=='On':
# Data['Area']={}
# for k in MosByMP[iScn]['Area']:
# Data['Area'][k]=np.zeros((tv_saving.size,meta['Project']['N Stand']),dtype=int)
#
# #------------------------------------------------------------------
# # Populate full simulation results
# #------------------------------------------------------------------
#
# for iBat in range(meta['Project']['N Batch']):
#
# indBat=IndexToBatch(meta,iBat)
#
# d1=LoadSingleOutputFile(meta,iScn,iEns,iBat)
#
# for k in d0.keys():
# if np.isin(k,v2exclude_v1)==True:
# continue
# tmp=d1[k]/ScaleFactor
# Data['v1'][k][:,indBat]=tmp.copy().astype(int)
#
# if (switch_area=='On' ):
#
# # Import event chronology
# if (meta['Scenario'][iScn]['Harvest Status Future']=='On') | (meta['Scenario'][iScn]['Breakup Status']=='On'):
# ec=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Modified_Events_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl')
# else:
# ec=gu.ipickle(meta['Paths']['Input Scenario'][iScn] + '\\Events_Ens' + FixFileNum(iEns) + '_Bat' + FixFileNum(iBat) + '.pkl')
#
# # Uncompress event chronology if it has been compressed
# ec=EventChronologyDecompress(meta,ec,iScn,iEns,iBat)
#
# if switch_area=='On':
# for k in Data['Area'].keys():
# Data0=np.zeros((tv_saving.size,indBat.size))
# for iEY in range(meta['Core']['Max Events Per Year']):
# ind=np.where(ec['ID_Type'][it,:,iEY]==meta['LUT']['Dist'][k])[0]
# Data0[ind]=Data0[ind]+1
# Data['Area'][k][:,indBat]=Data0
#
# del d1,ec
# garc.collect()
#
# if switch_area=='On':
# # Populating the final structure with area data is slow - get a flag
# # indicator of whether each event ID can be skipped because it has no
# # info
# flag_Area={}
# for k in Data['Area'].keys():
# if np.sum(Data['Area'][k])>0:
# flag_Area[k]=1
# else:
# flag_Area[k]=0
#
# #------------------------------------------------------------------
# # Calculate stats and populate results for each treatment area
# #------------------------------------------------------------------
#
# for iMP in range(uMP.size):
#
# ATA=atu_multipolygons[uMP[iMP]]['ACTUAL_TREATMENT_AREA']
# if ATA==None:
# print('Encountered no area, using zero')
# ATA=0
#
# ind=Crosswalk_sxy_to_mp[iMP]['Index']
#
# for k in d0.keys():
#
# if np.isin(k,v2exclude_v1)==True:
# continue
#
# tmp=ScaleFactor*Data['v1'][k][:,ind].astype(float)
#
# mu=np.mean(tmp,axis=1)
# sd=np.std(tmp,axis=1)
# cil=mu-sigma_multiplier*sd/np.sqrt(ind.size*meta['Project']['N Ensemble'])
# cih=mu+sigma_multiplier*sd/np.sqrt(ind.size*meta['Project']['N Ensemble'])
#
# MosByMP[iScn]['v1']['Mean'][k]['Ensemble Mean'][:,iMP]=MosByMP[iScn]['v1']['Mean'][k]['Ensemble Mean'][:,iMP]+mu
# MosByMP[iScn]['v1']['Mean'][k]['Ensemble CIL'][:,iMP]=MosByMP[iScn]['v1']['Mean'][k]['Ensemble CIL'][:,iMP]+cil
# MosByMP[iScn]['v1']['Mean'][k]['Ensemble CIH'][:,iMP]=MosByMP[iScn]['v1']['Mean'][k]['Ensemble CIH'][:,iMP]+cih
#
# MosByMP[iScn]['v1']['Sum'][k]['Ensemble Mean'][:,iMP]=MosByMP[iScn]['v1']['Sum'][k]['Ensemble Mean'][:,iMP]+ATA*mu
# MosByMP[iScn]['v1']['Sum'][k]['Ensemble CIL'][:,iMP]=MosByMP[iScn]['v1']['Sum'][k]['Ensemble CIL'][:,iMP]+ATA*cil
# MosByMP[iScn]['v1']['Sum'][k]['Ensemble CIH'][:,iMP]=MosByMP[iScn]['v1']['Sum'][k]['Ensemble CIH'][:,iMP]+ATA*cih
#
# if switch_area=='On':
# for k in MosByMP[iScn]['Area']:
# if flag_Area[k]==1:
# # Only continue if there are some events
# MosByMP[iScn]['Area'][k]['Ensemble Mean'][:,iMP]=MosByMP[iScn]['Area'][k]['Ensemble Mean'][:,iMP]+np.sum(Data['Area'][k][:,ind],axis=1)
#
# #--------------------------------------------------------------------------
# # Divide by number of ensembles
# #--------------------------------------------------------------------------
#
# for iScn in range(meta['Project']['N Scenario']):
#
# for k in d0.keys():
# if np.isin(k,v2exclude_v1)==True:
# continue
# for op in oper:
# MosByMP[iScn]['v1'][op][k]['Ensemble Mean']=MosByMP[iScn]['v1'][op][k]['Ensemble Mean']/meta['Project']['N Ensemble']
# MosByMP[iScn]['v1'][op][k]['Ensemble CIL']=MosByMP[iScn]['v1'][op][k]['Ensemble CIL']/meta['Project']['N Ensemble']
# MosByMP[iScn]['v1'][op][k]['Ensemble CIH']=MosByMP[iScn]['v1'][op][k]['Ensemble CIH']/meta['Project']['N Ensemble']
#
# if switch_area=='On':
# for iV in MosByMP[iScn]['Area']:
# MosByMP[iScn]['Area'][iV]['Ensemble Mean'][:,iMP]=MosByMP[iScn]['Area'][iV]['Ensemble Mean'][:,iMP]/meta['Project']['N Ensemble']
#
# #--------------------------------------------------------------------------
# # Apply scale factor to data
# #--------------------------------------------------------------------------
#
# ListV=['Ensemble Mean','Ensemble CIL','Ensemble CIH']
#
# for iScn in range(meta['Project']['N Scenario']):
#
# for k in d0.keys():
#
# if np.isin(k,v2exclude_v1)==True:
# continue
#
# for op in oper:
#
# for vnam in ListV:
#
# if (k=='E_CO2e_LULUCF_HWP') | (k=='E_CO2e_ESC_Comb') | (k=='E_CO2e_ET_Comb') | (k=='E_CO2e_IPPU_Comb'):
# MosByMP[iScn]['v1'][op][k][vnam]=MosByMP[iScn]['v1'][op][k][vnam]/meta['Core']['Scale Factor Export Big']
# else:
# MosByMP[iScn]['v1'][op][k][vnam]=MosByMP[iScn]['v1'][op][k][vnam]/meta['Core']['Scale Factor Export Small']
#
# if np.max(MosByMP[iScn]['v1'][op][k][vnam])<32767:
# MosByMP[iScn]['v1'][op][k][vnam]=MosByMP[iScn]['v1'][op][k][vnam].astype('int16')
# else:
# MosByMP[iScn]['v1'][op][k][vnam]=MosByMP[iScn]['v1'][op][k][vnam].astype(int)
#
# if switch_area=='On':
# for k in MosByMP[iScn]['Area']:
# MosByMP[iScn]['Area'][k]['Ensemble Mean'][:,iMP]=MosByMP[iScn]['Area'][k]['Ensemble Mean'][:,iMP]/meta['Core']['Scale Factor Export Small']
#
# #--------------------------------------------------------------------------
# # Save
# #--------------------------------------------------------------------------
#
# gu.opickle(meta['Paths']['Project'] + '\\Outputs\\MosByMP_GHGB.pkl',MosByMP)
# return
#%% Model output statistics by project type and year (GHG balance variables only)
def MosByPTAndYear_GHGB(meta):
t0=time.time()
# Error multiplier
sigma_multiplier=2.0
# Import multipolygons
atu_multipolygons=gu.ipickle(meta['Paths']['Geospatial'] + '\\atu_multipolygons.pkl')
# Import sxy
sxy=gu.ipickle(meta['Paths']['Geospatial'] + '\\sxy.pkl')
# Unique MPs
uMP=np.unique(sxy['ID_atu_multipolygons'])
# Unique project types
uPT=np.unique(meta['Project']['ProjectType'])
# Create listed index (faster than indexing on the fly)
Crosswalk_sxy_to_mp=[None]*uMP.size
for iMP in range(uMP.size):
d={}
d['Index']=np.where(sxy['ID_atu_multipolygons']==uMP[iMP])[0]
Crosswalk_sxy_to_mp[iMP]=d
#--------------------------------------------------------------------------
# Project type by uMP
#--------------------------------------------------------------------------
id_pt=np.zeros(uMP.size)
for iMP in range(uMP.size):
id_pt[iMP]=meta['Project']['ProjectTypeByMP'][uMP[iMP]]
#--------------------------------------------------------------------------
# Area
#--------------------------------------------------------------------------
ATA=np.zeros( uMP.size )
for iMP in range(uMP.size):
ata0=atu_multipolygons[uMP[iMP]]['ACTUAL_TREATMENT_AREA']
if ata0==None:
print('Encountered no area, using zero')
ata0=0
ATA[iMP]=ata0
#--------------------------------------------------------------------------
# Year
#--------------------------------------------------------------------------
Year=np.zeros( uMP.size )
for iMP in range(uMP.size):
yr0=atu_multipolygons[uMP[iMP]]['Year']
if yr0==None:
print('Encountered no year, using zero')
yr0=0
Year[iMP]=yr0
# Time series of saved results
tv_saving=np.arange(meta['Project']['Year Start Saving'],meta['Project']['Year End']+1,1)
# Years of implementation
uT=np.unique(Year)
tv_imp=np.arange(np.min(uT),np.max(uT)+1,1)
# Operations
oper=['Mean','Sum']
# GHG balance variables
d0=LoadSingleOutputFile(meta,0,0,0)
# All (excluding 'C_M_ByAgent' and 'Year')
v2include=['A', 'V_StemMerch', 'V_StemMerchToMill', 'LogSizeEnhancement', 'C_Biomass_Tot', 'C_Piled_Tot', 'C_Litter_Tot', 'C_DeadWood_Tot', 'C_Soil_Tot',
'C_InUse_Tot', 'C_DumpLandfill_Tot', 'C_M_Dist', 'C_G_Gross_Tot', 'C_G_Net_Tot', 'C_M_Reg_Tot', 'C_LF_Tot', 'C_RH_Tot', 'C_ToMillMerch',
'C_ToMillNonMerch', 'C_ToMillSnagStem', 'C_ToSlashpileBurn', 'C_ToLumber', 'C_ToPlywood', 'C_ToOSB', 'C_ToMDF', 'C_ToPaper', 'C_ToPowerFacilityDom',
'C_ToPowerFacilityFor', 'C_ToPowerGrid', 'C_ToPellets', 'C_ToFirewoodDom', 'C_ToFirewoodFor', 'C_ToLogExport', 'E_CO2e_LULUCF_NEE', 'E_CO2e_LULUCF_Wildfire',
'E_CO2e_LULUCF_OpenBurning', 'E_CO2e_LULUCF_EcoOther', 'E_CO2e_LULUCF_HWP', 'E_CO2e_ESC_Comb', 'E_CO2e_ESC_SubE', 'E_CO2e_ESC_SubBM', 'E_CO2e_ET_Comb',
'E_CO2e_IPPU_Comb', 'C_NPP_Tot', 'C_ToMill', 'E_CO2e_LULUCF_Fire', 'E_CO2e_AGHGB_WSub', 'E_CO2e_AGHGB_WOSub', 'E_CO2e_AGHGB_WSub_cumu', 'E_CO2e_AGHGB_WOSub_cumu',
'E_CO2e_AGHGB_WSub_cumu_from_tref', 'E_CO2e_AGHGB_WOSub_cumu_from_tref']
#--------------------------------------------------------------------------
# Initialize data by multipolygon structure (by PT)
#--------------------------------------------------------------------------
MosByPT=[None]*meta['Project']['N Scenario']
for iScn in range(meta['Project']['N Scenario']):
d={}
for op in oper:
d[op]={}
for k in d0.keys():
if np.isin(k,v2include)==False:
continue
for op in oper:
d[op][k]={}
d[op][k]['Ensemble Mean']=np.zeros((tv_saving.size,uPT.size))
d[op][k]['Ensemble CIL']=np.zeros((tv_saving.size,uPT.size))
d[op][k]['Ensemble CIH']=np.zeros((tv_saving.size,uPT.size))
MosByPT[iScn]=d
#--------------------------------------------------------------------------
# Initialize data by multipolygon structure (by PT and Year)
#--------------------------------------------------------------------------
MosByPTAndYr=[None]*meta['Project']['N Scenario']
for iScn in range(meta['Project']['N Scenario']):
d={}
for t in tv_imp:
d[t]={}
for op in oper:
d[t][op]={}
for k in d0.keys():
if np.isin(k,v2include)==False:
continue
for t in tv_imp:
for op in oper:
d[t][op][k]={}
d[t][op][k]['Ensemble Mean']=np.zeros((tv_saving.size,uPT.size))
d[t][op][k]['Ensemble CIL']=np.zeros((tv_saving.size,uPT.size))
d[t][op][k]['Ensemble CIH']=np.zeros((tv_saving.size,uPT.size))
MosByPTAndYr[iScn]=d
#--------------------------------------------------------------------------
# Loop through scenarios
#--------------------------------------------------------------------------
for iScn in range(meta['Project']['N Scenario']):
DataByPT={}
for op in oper:
DataByPT[op]=np.zeros( (tv_saving.size,meta['Project']['N Ensemble'],uPT.size,len(v2include)) ,dtype=float)
DataByPTAndYr={}
for t in tv_imp:
DataByPTAndYr[t]={}
for op in oper:
DataByPTAndYr[t][op]=np.zeros( (tv_saving.size,meta['Project']['N Ensemble'],uPT.size,len(v2include)) ,dtype=float)
for iEns in range(meta['Project']['N Ensemble']):
# Initialize temporary data structure for full simulation
DataSXY=np.zeros( (tv_saving.size,meta['Project']['N Stand'],len(v2include)) ,dtype=float)
# Import batches
for iBat in range(meta['Project']['N Batch']):
indBat=IndexToBatch(meta,iBat)
d1=LoadSingleOutputFile(meta,iScn,iEns,iBat)
cnt_k=0
for k in d0.keys():
if np.isin(k,v2include)==False:
continue
DataSXY[:,indBat,cnt_k]=d1[k].copy()
cnt_k=cnt_k+1
del d1
garc.collect()
# Convert to MP
DataMP=np.zeros( (tv_saving.size,uMP.size,len(v2include)) )
for iMP in range(uMP.size):
indMP=Crosswalk_sxy_to_mp[iMP]['Index']
DataMP[:,iMP,:]=np.mean(DataSXY[:,indMP,:],axis=1)
for iPT in range(uPT.size):
# Summarize by PT
ind=np.where( (id_pt==uPT[iPT]) )[0]
for op in oper:
mu=np.mean(DataMP[:,ind,:],axis=1)
if op=='Mean':
DataByPT[op][:,iEns,iPT,:]=mu
else:
DataByPT[op][:,iEns,iPT,:]=np.sum(ATA[ind])*mu
# Summarize by PT and year
for t in tv_imp:
ind=np.where( (id_pt==uPT[iPT]) & (Year==t) )[0]
for op in oper:
mu=np.mean(DataMP[:,ind,:],axis=1)
if op=='Mean':
DataByPTAndYr[t][op][:,iEns,iPT,:]=mu
else:
DataByPTAndYr[t][op][:,iEns,iPT,:]=np.sum(ATA[ind])*mu
cnt_k=0
for k in d0.keys():
if np.isin(k,v2include)==False:
continue
for op in oper:
# Mos by PT
mu=np.mean(DataByPT[op][:,:,:,cnt_k],axis=1)
sd=np.std(DataByPT[op][:,:,:,cnt_k],axis=1)
cil=mu-sigma_multiplier*sd/np.sqrt(meta['Project']['N Ensemble'])
cih=mu+sigma_multiplier*sd/np.sqrt(meta['Project']['N Ensemble'])
MosByPT[iScn][op][k]['Ensemble Mean']=mu
MosByPT[iScn][op][k]['Ensemble CIL']=cil
MosByPT[iScn][op][k]['Ensemble CIH']=cih
# Mos by PT and year
for t in tv_imp:
mu=np.mean(DataByPTAndYr[t][op][:,:,:,cnt_k],axis=1)
sd=np.std(DataByPTAndYr[t][op][:,:,:,cnt_k],axis=1)
cil=mu-sigma_multiplier*sd/
|
np.sqrt(meta['Project']['N Ensemble'])
|
numpy.sqrt
|
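# A minimal, self-contained sketch of the numpy.sqrt completion above: the
# prompt builds a confidence interval as mu +/- k*sd/sqrt(n). Values below are
# illustrative, not taken from the dataset.
import numpy as np
samples = np.array([2.0, 3.5, 1.0, 4.0, 2.5])
mu, sd, k = np.mean(samples), np.std(samples), 2.0
se = sd / np.sqrt(samples.size)          # standard error of the mean
cil, cih = mu - k * se, mu + k * se      # lower/upper confidence bounds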
import numpy as np
import glfw
import math
from enum import IntEnum
from cyberdesk.paperspace import Paper
from cyberdesk.graphics2d import draw_text_centered
from cyberdesk.graphics3d import CanvasTexture, Material, quad_shader, QuadGeometry
from cyberdesk.input import Gamepad, GamepadButton, GamepadAxis
from cyberdesk import Color
large_button_size = 0.6
small_button_size = 0.4
gamepad_image_buttons = [
(np.array((2.5, 2.5)), large_button_size, GamepadButton.UP),
(
|
np.array((1.5, 3.5))
|
numpy.array
|
from sklearn.neighbors import KernelDensity as kde
import numpy as np
import pickle as pkl
import matplotlib.pyplot as plt
class gaussian_kde(object):
def __init__(self, data, bandwidth=0.03):
self.training_data = data
self.bandwidth = bandwidth
self.kde = kde(kernel='gaussian', bandwidth=self.bandwidth).fit(self.training_data)
def update(self, new_data):
self.training_data = np.concatenate([self.training_data, new_data], axis = 0)
self.kde.fit(self.training_data)
return self
def comp_prob(self, x):
if isinstance(x, (float, np.float32, np.float64)):
x = np.array([[x]])
elif isinstance(x, (list, np.ndarray)):
x = np.expand_dims(np.array(x), axis=-1)
x = np.exp(self.kde.score_samples(x))
return x.squeeze()
class object_belief(object):
def __init__(self):
self.belief = np.array([0.5, 0.5])
def update(self, score, kde):
neg_prob = kde[0].comp_prob(score)
pos_prob = kde[1].comp_prob(score)
self.belief *= [neg_prob, pos_prob]
self.belief /= self.belief.sum()
return self.belief
def reset(self):
self.belief = np.array([0.5, 0.5])
if __name__=="__main__":
with open("../../density_esti_train_data.pkl") as f:
data = pkl.load(f)
data = data["ground"]
pos_data = []
neg_data = []
for d in data:
for i, score in enumerate(d["scores"]):
if str(i) in d["gt"]:
pos_data.append(score)
else:
neg_data.append(score)
pos_data = np.expand_dims(np.array(pos_data), axis=-1)
pos_data = np.sort(pos_data, axis=0)[5:-5]
neg_data = np.expand_dims(
|
np.array(neg_data)
|
numpy.array
|
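# A minimal sketch of the numpy.array completion above: scikit-learn's
# KernelDensity expects 2-D input, so a flat list of scores is wrapped with
# np.array and given a trailing axis. The score values are illustrative.
import numpy as np
scores = [0.12, 0.87, 0.45, 0.91]
X = np.expand_dims(np.array(scores), axis=-1)
assert X.shape == (4, 1)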
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 8 18:14:19 2020
@author: agarwal.270a
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import detrend
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow import keras
import pickle
import glob
import pandas as pd
from tensorflow.keras.activations import relu
import datetime
from utils import make_data_pipe, Rpeak2HR, sliding_window_fragmentation
tf.keras.backend.set_floatx('float32')
lrelu_pt2=lambda x: relu(x, alpha=0.2)
#tf.keras.backend.clear_session()
#TODO: Added to run only on CPU when needed
#tf.config.set_visible_devices([], 'GPU')
#%% Loading Data
def get_train_data(path,val_files=[],test_files=[],
win_len=8,step=1,Fs_pks=100):
'''
Use all files in the folder 'path' except the val_files and test_files
'''
def get_clean_ppg_and_ecg(files):
list_clean_ppg=[];list_arr_pks=[]
for i in range(len(files)):
df=pd.read_csv(files[i],header=None)
arr=df.values
if 'clean' in files[i]:
arr[:,41:45]=(detrend(arr[:,41:45].reshape(-1),0,'constant')
).reshape((-1,4))
list_clean_ppg+=[np.concatenate([arr[:,29:30],arr[:,41:45]],
axis=-1),arr[:,30:31],arr[:,39:40],arr[:,40:41]]
list_arr_pks+=[arr[:,45:49].reshape(-1)]
return list_clean_ppg,list_arr_pks
files=glob.glob(path+'*.csv')
#files=[fil for fil in files if 'WZ' in fil] #get wenxiao's data
#separate val and test files
s3=set(files);s4=set(val_files+test_files)
files_2=list(s3.difference(s4))
#files_2=[files_2[0]]
#files_2=[fil for fil in files if not((val_names[0] in fil))]
list_clean_ppg,list_arr_pks=get_clean_ppg_and_ecg(files_2)
dsample_factr=4
Fs_pks=int(Fs_pks/dsample_factr)
win_len=win_len*Fs_pks
list_r_pk_locs=[np.arange(len(arr_pks))[arr_pks.astype(bool)] for
arr_pks in list_arr_pks]
#get nearest dsampled idx
#TODO: Started using round instead of floor
list_r_pk_locs_dsampled=[np.round(r_pk_locs/dsample_factr).astype(int) for
r_pk_locs in list_r_pk_locs]
#print([np.max(r_pks) for r_pks in list_r_pk_locs_dsampled])
#print([len(ppg) for ppg in list_clean_ppg[::4]])
list_arr_pks_dsampled=[]
for j in range(len(list_arr_pks)):
arr_pks_dsampled=np.zeros([int(len(list_arr_pks[j])/dsample_factr),1])
#check & correct for rare rounding up issue in the last element
if list_r_pk_locs_dsampled[j][-1]==len(arr_pks_dsampled):
list_r_pk_locs_dsampled[j][-1]-=1
arr_pks_dsampled[list_r_pk_locs_dsampled[j]]=1
list_arr_pks_dsampled.append(arr_pks_dsampled)
#print([len(ppg) for ppg in list_arr_pks_dsampled])
list_HR=[4*[Rpeak2HR(arr_pks,win_len,step,Fs_pks)]
for arr_pks in list_arr_pks_dsampled]
list_HR=sum(list_HR,[])
#list_HR=[HR[::dsample_factr] for HR in list_HR]
return list_clean_ppg,list_HR
def get_test_data(file_path,win_len,step,Fs_pks):
df=pd.read_csv(file_path,header=None)
arr=df.values
test_in=[np.concatenate([arr[:,29:30],arr[:,41:45]],axis=-1),arr[:,30:31],
arr[:,39:40],arr[:,40:41]]
arr_pks=arr[:,45:49].reshape(-1)
dsample_factr=4
Fs_pks=int(Fs_pks/dsample_factr)
win_len=win_len*Fs_pks
r_pk_locs=np.arange(len(arr_pks))[arr_pks.astype(bool)]
#get nearest dsampled idx
#TODO: Started using round instead of floor
r_pk_locs_dsampled=np.round(r_pk_locs/dsample_factr).astype(int)
#print([np.max(r_pks) for r_pks in list_r_pk_locs_dsampled])
#print([len(ppg) for ppg in list_clean_ppg[::4]])
arr_pks_dsampled=np.zeros([len(test_in[0]),1])
#check & correct for rare rounding up issue in the last element
if r_pk_locs_dsampled[-1]==len(arr_pks_dsampled):
r_pk_locs_dsampled[-1]-=1
arr_pks_dsampled[r_pk_locs_dsampled]=1
#print([len(ppg) for ppg in list_arr_pks_dsampled])
list_HR=4*[Rpeak2HR(arr_pks_dsampled,win_len,step,Fs_pks)]
test_in=[ppg.astype('float32') for ppg in test_in]
test_out=[HR[:,0].astype('float32') for HR in list_HR]
return test_in,test_out
#%%
def create_model(in_shape,HR_win_len=200):
expand_dims = layers.Lambda(lambda x: tf.expand_dims(x,axis=-1),
name='expand_dims')
#RNN model via. Functional API
rnn = layers.GRU(64, return_sequences=True, return_state=True)
sig_in = layers.Input(shape=in_shape)
#x = expand_dims(sig_in)
x = sig_in
_, final_state=rnn(x[:,:-HR_win_len,:]) #warm-up RNN
rnn_out, _ = rnn(x[:,-HR_win_len:,:],initial_state=final_state)
HR_hat=layers.Conv1D(filters=1,kernel_size=1, strides=1,padding='same',
activation=None,name='Conv_{}'.format(1))(rnn_out)
HR_hat=layers.Flatten()(HR_hat)
model = keras.Model(sig_in, HR_hat, name='model_HR')
return model
def create_infer_model(in_shape):
#RNN model via. Functional API
rnn = layers.GRU(64, return_sequences=True, return_state=True)
initial_state = layers.Input(shape=(64,))
sig_in = layers.Input(shape=in_shape)
rnn_out, final_state = rnn(sig_in,initial_state=initial_state)
HR_hat=layers.Conv1D(filters=1,kernel_size=1, strides=1,padding='same',
activation=None,name='Conv_{}'.format(1))(rnn_out)
HR_hat=layers.Flatten()(HR_hat)
model = keras.Model([initial_state,sig_in],[final_state,HR_hat], name='model_infer_HR')
return model
#%%
def main():
#Get Train Data
plt.close('all')
path_prefix='data/pre-training'
exp_id='1_3'
log_prefix='data/post-training/experiments/{}'.format(exp_id)
path=(path_prefix+'/')
val_files=[path+'2019092801_3154_clean.csv']
test_files=[path+'2019092820_5701_clean.csv']
win_len=8 #in sec
step=1 #in n_samples
Fs_pks=100 #in Hz
#input_list,output_list=[],[]
list_sigs,list_HR=get_train_data(path,val_files,test_files,win_len,
step,Fs_pks)
#Pre-process data
dsample_factr=4;Fs_new=int(Fs_pks/dsample_factr)
sample_win_len,step_size=win_len*Fs_new,2*Fs_new
HR_win_len=sample_win_len*3 #TODO: Can change this later, 4 is arbitrary choice after profs suggestion
ppg_win_len=sample_win_len+HR_win_len
model_sigs_in,model_HR_out=[],[]
for j in range(len(list_HR)):
#HR=list_HR[j][list_arr_pks[j].astype(bool)]
ppg,HR=list_sigs[j][:,0:1],list_HR[j]
ppg=sliding_window_fragmentation([ppg],ppg_win_len,step_size)
HR=sliding_window_fragmentation([HR],HR_win_len,step_size)
print(len(ppg),len(HR))
model_sigs_in.append(ppg)
model_HR_out.append(HR[:len(ppg)]) #clipping extra HRs at the end
model_sigs_in=np.concatenate(model_sigs_in,axis=0)
model_HR_out=
|
np.concatenate(model_HR_out,axis=0)
|
numpy.concatenate
|
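# A minimal sketch of the numpy.concatenate completion above: per-record window
# arrays with identical trailing dimensions are stacked along axis 0 into one
# training array. Shapes are illustrative.
import numpy as np
windows_a = np.zeros((10, 800, 1))   # (n_windows, win_len, channels)
windows_b = np.zeros((4, 800, 1))
batch = np.concatenate([windows_a, windows_b], axis=0)
assert batch.shape == (14, 800, 1)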
import re
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.linear_model import LogisticRegression
from .transform import WOETransformer, Combiner, ELSE_GROUP, EMPTY_BIN
from .utils import to_ndarray, bin_by_splits, save_json, read_json
RE_NUM = r'-?\d+(.\d+)?'
RE_SEP = r'[~-]'
RE_BEGIN = r'(-inf|{num})'.format(num = RE_NUM)
RE_END = r'(inf|{num})'.format(num = RE_NUM)
RE_RANGE = r'\[{begin}\s*{sep}\s*{end}\)'.format(
begin = RE_BEGIN,
end = RE_END,
sep = RE_SEP,
)
NUMBER_EMPTY = -9999999
NUMBER_INF = 1e10
FACTOR_EMPTY = 'MISSING'
FACTOR_UNKNOWN = 'UNKNOWN'
class ScoreCard(BaseEstimator):
def __init__(self, pdo = 60, rate = 2, base_odds = 35, base_score = 750,
card = None, combiner = {}, transer = None, **kwargs):
"""
Args:
card (dict|str|IOBase): dict of card or io to read json
combiner (toad.Combiner)
transer (toad.WOETransformer)
"""
self.pdo = pdo
self.rate = rate
self.base_odds = base_odds
self.base_score = base_score
self.factor = pdo / np.log(rate)
self.offset = base_score - self.factor * np.log(base_odds)
self.combiner = combiner
self.transer = transer
self.model = LogisticRegression(**kwargs)
if card is not None:
self.generate_card(card = card)
@property
def coef_(self):
""" coef of LR model
"""
return self.weight
def generate_card(self, card = None):
"""
Args:
card (dict|str|IOBase): dict of card or io to read json
"""
if card is not None:
if not isinstance(card, dict):
card = read_json(card)
return self.set_card(card)
rules = self._get_rules(self.combiner, self.transer)
self.set_combiner(rules)
map = self.generate_map(self.transer, self.model)
self.set_score(map)
return self
def fit(self, X, y):
"""
Args:
X (2D DataFrame)
Y (array-like)
"""
self.features_ = X.columns.tolist()
for f in self.features_:
if f not in self.transer._rules:
raise Exception('column \'{f}\' is not in transer'.format(f = f))
self.model.fit(X, y)
self.generate_card()
return self
def _transer_to_rules(self, transer):
c = dict()
for k in transer._rules:
c[k] = np.reshape(transer._rules[k]['value'], (-1, 1)).tolist()
return c
def _merge_combiner(self, cbs):
res = dict()
for item in cbs[::-1]:
if isinstance(item, Combiner):
item = item.export()
res.update(item)
return res
def _get_rules(self, combiner, transer):
transer_rules = self._transer_to_rules(transer)
if isinstance(combiner, list):
combiner = self._merge_combiner(combiner)
elif isinstance(combiner, Combiner):
combiner = combiner.export()
if self._check_rules(combiner, transer_rules):
transer_rules.update(combiner)
return transer_rules
def _check_rules(self, combiner, transer):
for col in combiner:
if col not in transer:
continue
l_c = len(combiner[col])
l_t = len(transer[col])
if l_c == 0:
continue
if isinstance(combiner[col][0], (int, float)):
if l_c != l_t - 1:
raise Exception('column \'{col}\' is not matched, assert {l_t} bins but given {l_c}'.format(col = col, l_t = l_t, l_c = l_c + 1))
else:
if l_c != l_t:
raise Exception('column \'{col}\' is not matched, assert {l_t} bins but given {l_c}'.format(col = col, l_t = l_t, l_c = l_c))
return True
def _parse_range(self, bins):
exp = re.compile(RE_RANGE)
l = list()
for item in bins:
if item == 'nan':
l.append(np.nan)
continue
m = exp.match(item)
# if is not range
if m is None:
return None
# get the end number of range
split = m.group(3)
if split == 'inf':
split = np.inf
else:
split = float(split)
l.append(split)
return np.array(l)
def _parse_card(self, card):
bins = card.keys()
scores = card.values()
scores = np.array(list(scores))
groups = self._parse_range(bins)
# if is continuous
if groups is not None:
ix = np.argsort(groups)
scores = scores[ix]
groups = groups[ix[:-1]]
else:
groups = list()
for item in bins:
if item == ELSE_GROUP:
groups.append(item)
else:
groups.append(item.split(','))
groups = np.array(groups)
return groups, scores
def set_card(self, card):
"""set card dict
"""
combiner = dict()
map = dict()
for feature in card:
bins, scores = self._parse_card(card[feature])
combiner[feature] = bins
map[feature] = scores
self.set_combiner(combiner)
self.set_score(map)
return self
def set_combiner(self, combiner):
"""set combiner
"""
if not isinstance(combiner, Combiner):
combiner = Combiner().load(combiner)
self.combiner = combiner
def set_score(self, map):
"""set score map by dict
"""
sm = dict()
for key in map:
s = map[key]
if isinstance(s, np.ndarray):
sm[key] = np.copy(s)
else:
sm[key] = np.array(s)
self.score_map = sm
def predict(self, X, **kwargs):
"""predict score
Args:
X (2D array-like): X to predict
return_sub (bool): if need to return sub score, default `False`
Returns:
array-like: predicted score
DataFrame: sub score for each feature
"""
select = list(self.score_map.keys())
bins = self.combine(X[select])
return self.bin_to_score(bins, **kwargs)
def proba_to_score(self, prob):
"""covert probability to score
"""
odds = (1 - prob) / prob
return self.factor * np.log(odds) + self.offset
def combine(self, X):
return self.combiner.transform(X)
def bin_to_score(self, bins, return_sub = False):
"""predict score from bins
"""
res = bins.copy()
for col in self.score_map:
s_map = self.score_map[col]
b = bins[col].values
# set default group to min score
b[b == EMPTY_BIN] = np.argmin(s_map)
# replace score
res[col] = s_map[b]
score = np.sum(res.values, axis = 1)
if return_sub is False:
return score
return score, res
def woe_to_score(self, woe, weight = None):
"""calculate score by woe
"""
woe = to_ndarray(woe)
if weight is None:
weight = self.weight
b = self.offset - self.factor * self.bias
s = -self.factor * weight * woe
# drop score whose weight is 0
mask = 1
if isinstance(weight, np.ndarray):
mask = (weight != 0).astype(int)
return (s + b / self.n_features_) * mask
def set_model(self, model):
"""set logistic regression model
"""
self.weight = model.coef_[0]
self.bias = model.intercept_[0]
self.n_features_ = (self.weight != 0).sum()
def generate_map(self, transer, model):
"""calculate score map by woe
"""
self.set_model(model)
s_map = dict()
for i, k in enumerate(self.features_):
weight = self.weight[i]
# skip feature whose weight is 0
if weight == 0:
continue
woe = transer._rules[k]['woe']
s_map[k] = self.woe_to_score(woe, weight = weight)
return s_map
def export(self, to_frame = False, to_json = None, to_csv = None, decimal = 2):
"""generate a scorecard object
Args:
to_frame (bool): return DataFrame of card
to_json (str|IOBase): io to write json file
to_csv (filepath|IOBase): file to write csv
Returns:
dict
"""
card = dict()
combiner = self.combiner.export(format = True)
for col in self.score_map:
bins = combiner[col]
card[col] = dict()
for i in range(len(bins)):
card[col][bins[i]] = round(self.score_map[col][i], decimal)
if to_json is not None:
save_json(card, to_json)
if to_frame or to_csv is not None:
rows = list()
for name in card:
for value, score in card[name].items():
rows.append({
'name': name,
'value': value,
'score': score,
})
card = pd.DataFrame(rows)
if to_csv is not None:
return card.to_csv(to_csv)
return card
def _generate_testing_frame(self, maps, size = 'max', mishap = True, gap = 1e-2):
"""
Args:
maps (dict): map of values or splits to generate frame
size (int|str): size of frame. 'max' (default), 'lcm'
mishap (bool): is need to add mishap patch to test frame
gap (float): size of gap for testing border
Returns:
DataFrame
"""
number_patch = np.array([NUMBER_EMPTY, NUMBER_INF])
factor_patch = np.array([FACTOR_EMPTY, FACTOR_UNKNOWN])
values = []
cols = []
for k, v in maps.items():
v = np.array(v)
if np.issubdtype(v.dtype, np.number):
items = np.concatenate((v, v - gap))
patch = number_patch
else:
# remove else group
mask = np.argwhere(v == ELSE_GROUP)
if mask.size > 0:
v =
|
np.delete(v, mask)
|
numpy.delete
|
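# A minimal sketch of the numpy.delete completion above: np.argwhere returns
# the indices of a sentinel value (the 'else' group in the prompt) and
# np.delete drops those positions. Values are illustrative.
import numpy as np
v = np.array([3, -1, 7, -1, 5])
mask = np.argwhere(v == -1)
if mask.size > 0:
    v = np.delete(v, mask)   # array([3, 7, 5])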
#IMPORTS.......................................................................
import pandas as pd
from numpy import log2 as log
from sklearn.metrics import confusion_matrix
import seaborn as sns
import os # accessing directory structure
import numpy as np
import matplotlib.pyplot as plt
import random
eps = np.finfo(float).eps #Small value such that log won't fail
Directory = ""
os.chdir(Directory)
#%% DATASETS...................................................................
def get_wine_dataset():
df = pd.read_csv('wine.data', delimiter=',')
df.columns=['Class', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids','Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity','Hue' , 'OD280/OD315 of diluted wines','Proline' ]
#df.to_excel("outputttt.xlsx")
return df
#%% LOAD DATASET...............................................................
df = get_wine_dataset()
class_attribute = 'Class'
class_column = df[class_attribute]
df.drop(labels=[class_attribute], axis=1,inplace = True) #Removes the column with class labels
df.insert(len(df.columns), class_attribute, class_column) # inserts class label column at the end of the dataframe
#df.to_excel("test_cont.xlsx")
print(df)
#%%FUNCTIONS...................................................................
def accuracy(y_true, y_predicted):
'''Reports the accuracy of two lists of true and predicted values'''
correct = 0
count = 0
for true, pred in zip(y_true, y_predicted):
if int(true) == int(pred):
correct += 1
else:
print(count)
count += 1
accuracy = correct/len(y_predicted)*100
print('Accuracy of classifer {:0.2f} %' .format(accuracy))
return accuracy
def print_conf_mat(y_true, y_predicted):
'''Prints the confusion matrix from the true and predicted class labels'''
mat1 = confusion_matrix(y_true, y_predicted) #labels=["positive", "negative"])
#true_mat = confusion_matrix(y_true,y_true,labels=["positive", "negative"])
#plt.figure(0)
ax= plt.subplot()
sns.heatmap(mat1, square=True, annot=True, cbar=False,fmt="d", ax = ax)
ax.set_title('Predicted Matrix')
ax.set_xlabel('predicted value')
ax.set_ylabel('true value')
plt.show()
return
def entropy(dataframe, target_attribute):
'''Calculates the Entropy of a dataset for the target attribute'''
entropy = 0 #Initialize Entropy
values = dataframe[target_attribute].unique() #Play has two options 'Yes', 'No'
play_data = list(dataframe[target_attribute].values)
for value in values:
proportion = play_data.count(value)/len(play_data) #Proportion of given value in the dataset
entropy += -proportion*
|
log(proportion)
|
numpy.log2
|
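# A worked sketch of the entropy term above: H = -sum(p * log2(p)) over class
# proportions, with eps guarding log2(0) as in the prompt. Labels are made up.
import numpy as np
eps = np.finfo(float).eps
labels = ['a', 'a', 'b', 'b', 'b', 'c']
_, counts = np.unique(labels, return_counts=True)
p = counts / counts.sum()
H = -np.sum(p * np.log2(p + eps))   # about 1.459 bits for proportions 2/6, 3/6, 1/6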
'''
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_admin_components as dac
from dash.dependencies import Input, Output, State, ALL
import plotly.graph_objs as go
'''
from helpers import constants, hertz_to_microtone
# import score_component as sc
import pickle
# import dash
# from dash.exceptions import PreventUpdate
import pretty_midi
import search_the_like
import numpy as np
import soundfile as sf
import base64
import io
from scipy.signal import convolve
from masking_slice import spectral_centroid
import alternate_mfcc
import combine_peaks
import maskingCurve_peakInput
from helpers.constants import instrument_data_path, ir_data_path
#path='/Users/admin-upu10438/Sync/Score-Tool'
#path='N:\Cloud folders (Dropbox - Sync and OneDrive)\Sync\Sync\Score-Tool'
#with open(path+'/database/no_data_orchestra.pickle', 'rb') as handle:
# orchestra = pickle.load(handle)
#app = dash.Dash(__name__)
#server = app.server
#instrument_data_path='N:/Score-Tool iowa samples/out'
# instrument_data_path = 'c:/sample_database'
#instrument_data_path='/home/uljas/sample_library'
#ir_data_path='N:/Score-Tool iowa samples'
# ir_data_path = 'c:/sample_database/musatalo'
#ir_data_path='/home/uljas/sample_library/musatalo'
from helpers.constants import instrument_data_path, ir_data_path
def cutSample(data):
fadeamount = 300
maxindex =
|
np.argmax(data > 0.01)
|
numpy.argmax
|
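# A minimal sketch of the numpy.argmax completion above: on a boolean array it
# returns the first True position, i.e. the first sample above the threshold.
# Note it returns 0 when no element qualifies, so guard with np.any if needed.
import numpy as np
data = np.array([0.0, 0.004, 0.02, 0.3, 0.5])
first = np.argmax(data > 0.01)   # 2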
'''
Utility functions for GAIN.
(1) normalization: MinMax Normalizer
(2) renormalization: Recover the data from normalzied data
(3) rounding: Handle categorical variables after imputation
(4) rmse_loss: Evaluate imputed data in terms of RMSE
(5) xavier_init: Xavier initialization
(6) binary_sampler: sample binary random variables
(7) uniform_sampler: sample uniform random variables
(8) sample_batch_index: sample random batch index
(9) show_results : Show results after training the model
(10) Crop : Cropping the data from center with a shape
'''
# Necessary packages
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from evaluations import nrmse
def data_preprocessor(data, miss_rate, x_dim, y_dim):
samples, rows, cols, _ = data.shape
startx, starty = cols//2, rows//2
temp_data = np.zeros(shape=(samples, x_dim, y_dim, 1), dtype=np.float32)
for s in range(samples):
temp_data[s] = data[s][startx-x_dim//2:starty+x_dim//2, startx-x_dim//2:starty+x_dim//2, :]
data_x = np.reshape(temp_data, [samples, x_dim*y_dim]).astype(np.float32)
# Parameters
no, dim = data_x.shape
data_x[0][0:3] = np.nan
# Introduce missing data
data_m = binary_sampler(1-miss_rate, no, dim)
miss_data_x = data_x.copy()
miss_data_x[data_m == 0] = np.nan
return data_x, miss_data_x, data_m
def normalization (data, parameters=None):
'''
Normalize data in [0, 1] range.
Args:
- data: original data
Returns:
- norm_data: normalized data
- norm_parameters: min_val, max_val for each feature for renormalization
'''
# Parameters
_, dim = data.shape
norm_data = data.copy()
if parameters is None:
# MinMax normalization
min_val = np.zeros(dim)
max_val = np.zeros(dim)
# For each dimension
for i in range(dim):
min_val[i] = np.nanmin(norm_data[:,i])
norm_data[:,i] = norm_data[:,i] - np.nanmin(norm_data[:,i])
max_val[i] = np.nanmax(norm_data[:,i])
norm_data[:,i] = norm_data[:,i] / (np.nanmax(norm_data[:,i]) + 1e-6)
# Return norm_parameters for renormalization
norm_parameters = {'min_val': min_val,
'max_val': max_val}
else:
min_val = parameters['min_val']
max_val = parameters['max_val']
# For each dimension
for i in range(dim):
norm_data[:,i] = norm_data[:,i] - min_val[i]
norm_data[:,i] = norm_data[:,i] / (max_val[i] + 1e-6)
norm_parameters = parameters
return norm_data, norm_parameters
def renormalization (norm_data, norm_parameters):
'''
Renormalize data from [0, 1] range to the original range.
Args:
- norm_data: normalized data
- norm_parameters: min_val, max_val for each feature for renormalization
Returns:
- renorm_data: renormalized original data
'''
min_val = norm_parameters['min_val']
max_val = norm_parameters['max_val']
_, dim = norm_data.shape
renorm_data = norm_data.copy()
for i in range(dim):
renorm_data[:,i] = renorm_data[:,i] * (max_val[i] + 1e-6)
renorm_data[:,i] = renorm_data[:,i] + min_val[i]
return renorm_data
def rounding (imputed_data, data_x):
'''
Round imputed data for categorical variables.
Args:
- imputed_data: imputed data
- data_x: original data with missing values
Returns:
- rounded_data: rounded imputed data
'''
_, dim = data_x.shape
rounded_data = imputed_data.copy()
for i in range(dim):
temp = data_x[~np.isnan(data_x[:, i]), i]
# Only for the categorical variable
if len(np.unique(temp)) < 20:
rounded_data[:, i] = np.round(rounded_data[:, i])
return rounded_data
def rmse_loss (ori_data, imputed_data, data_m):
'''
Compute RMSE loss between ori_data and imputed_data
Args:
- ori_data: original data without missing values
- imputed_data: imputed data
- data_m: indicator matrix for missingness
Returns:
- rmse: Root Mean Squared Error
'''
ori_data, norm_parameters = normalization(ori_data)
imputed_data, _ = normalization(imputed_data, norm_parameters)
# Only for missing values
nominator = np.sum(((1-data_m) * ori_data - (1-data_m) * imputed_data)**2)
denominator = np.sum(1-data_m)
rmse = np.sqrt(nominator/float(denominator))
return rmse
def xavier_init(size):
'''
Xavier initialization.
Args:
- size: vector size
Returns:
- initialized random vector.
'''
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape = size, stddev = xavier_stddev)
def binary_sampler(p, rows, cols):
'''
Sample binary random variables.
Args:
- p: probability of 1
- rows: the number of rows
- cols: the number of columns
Returns:
- binary_random_matrix: generated binary random matrix.
'''
unif_random_matrix = np.random.uniform(0., 1., size = [rows, cols])
binary_random_matrix = 1*(unif_random_matrix < p)
return binary_random_matrix
def uniform_sampler(low, high, rows, cols):
'''
Sample uniform random variables.
Args:
- low: low limit
- high: high limit
- rows: the number of rows
- cols: the number of columns
Returns:
- uniform_random_matrix: generated uniform random matrix.
'''
return np.random.uniform(low, high, size = [rows, cols])
def sample_batch_index(total, batch_size):
'''
Sample index of the mini-batch.
Args:
- total: total number of samples
- batch_size: batch size
Returns:
- batch_idx: batch index
'''
total_idx = np.random.permutation(total)
batch_idx = total_idx[:batch_size]
return batch_idx
def show_results(data_x, miss_data_x, data_m, imputed_data, num_examples=1, cmap=None):
for i in range(num_examples):
i = np.random.randint(0, len(imputed_data))
print('Image : %d'%i)
# drop original NaN
actual = data_x[i][np.logical_not(np.isnan(data_x[i]))]
mask = data_m[i][np.logical_not(np.isnan(data_x[i]))]
predicted = imputed_data[i][np.logical_not(
|
np.isnan(data_x[i])
|
numpy.isnan
|
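# A minimal sketch of the numpy.isnan completion above: a boolean NaN mask lets
# the originally-missing entries be dropped before comparing imputed values.
import numpy as np
row = np.array([1.0, np.nan, 3.0, np.nan, 5.0])
valid = np.logical_not(np.isnan(row))
observed = row[valid]   # array([1., 3., 5.])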
"""
Item-based k-NN collaborative filtering.
"""
import pathlib
import logging
import pandas as pd
import numpy as np
import scipy.sparse as sps
import scipy.sparse.linalg as spla
from numba import njit, prange
from lenskit import util, matrix
from . import Predictor
_logger = logging.getLogger(__name__)
@njit(nogil=True)
def _predict_weighted_average(model, nitems, nrange, ratings, targets):
min_nbrs, max_nbrs = nrange
scores = np.full(nitems, np.nan, dtype=np.float_)
for i in prange(targets.shape[0]):
iidx = targets[i]
rptr = model.rowptrs[iidx]
rend = model.rowptrs[iidx + 1]
num = 0
denom = 0
nnbrs = 0
for j in range(rptr, rend):
nidx = model.colinds[j]
if np.isnan(ratings[nidx]):
continue
nnbrs = nnbrs + 1
num = num + ratings[nidx] * model.values[j]
denom = denom + np.abs(model.values[j])
if max_nbrs > 0 and nnbrs >= max_nbrs:
break
if nnbrs < min_nbrs:
continue
scores[iidx] = num / denom
return scores
@njit(nogil=True)
def _predict_sum(model, nitems, nrange, ratings, targets):
min_nbrs, max_nbrs = nrange
scores = np.full(nitems, np.nan, dtype=np.float_)
for i in prange(targets.shape[0]):
iidx = targets[i]
rptr = model.rowptrs[iidx]
rend = model.rowptrs[iidx + 1]
score = 0
nnbrs = 0
for j in range(rptr, rend):
nidx = model.colinds[j]
if np.isnan(ratings[nidx]):
continue
nnbrs = nnbrs + 1
score = score + model.values[j]
if max_nbrs > 0 and nnbrs >= max_nbrs:
break
if nnbrs < min_nbrs:
continue
scores[iidx] = score
return scores
_predictors = {
'weighted-average': _predict_weighted_average,
'sum': _predict_sum
}
class ItemItem(Predictor):
"""
Item-item nearest-neighbor collaborative filtering with ratings. This item-item implementation
is not terribly configurable; it hard-codes design decisions found to work well in the previous
Java-based LensKit code.
Attributes:
item_index_(pandas.Index): the index of item IDs.
item_means_(numpy.ndarray): the mean rating for each known item.
item_counts_(numpy.ndarray): the number of saved neighbors for each item.
sim_matrix_(matrix.CSR): the similarity matrix.
user_index_(pandas.Index): the index of known user IDs for the rating matrix.
rating_matrix_(matrix.CSR): the user-item rating matrix for looking up users' ratings.
"""
def __init__(self, nnbrs, min_nbrs=1, min_sim=1.0e-6, save_nbrs=None,
center=True, aggregate='weighted-average'):
"""
Args:
nnbrs(int):
the maximum number of neighbors for scoring each item (``None`` for unlimited)
min_nbrs(int): the minimum number of neighbors for scoring each item
min_sim(double): minimum similarity threshold for considering a neighbor
save_nbrs(double):
the number of neighbors to save per item in the trained model
(``None`` for unlimited)
center(bool):
whether to normalize (mean-center) rating vectors. Turn this off when working
with unary data and other data types that don't respond well to centering.
aggregate:
the type of aggregation to do. Can be ``weighted-average`` or ``sum``.
"""
self.nnbrs = nnbrs
if self.nnbrs is not None and self.nnbrs < 1:
self.nnbrs = -1
self.min_nbrs = min_nbrs
if self.min_nbrs is not None and self.min_nbrs < 1:
self.min_nbrs = 1
self.min_sim = min_sim
self.save_nbrs = save_nbrs
self.center = center
self.aggregate = aggregate
try:
self._predict_agg = _predictors[aggregate]
except KeyError:
raise ValueError('unknown aggregator {}'.format(aggregate))
def fit(self, ratings):
"""
Train a model.
The model-training process depends on ``save_nbrs`` and ``min_sim``, but *not* on other
algorithm parameters.
Args:
ratings(pandas.DataFrame):
(user,item,rating) data for computing item similarities.
"""
# Training proceeds in 2 steps:
# 1. Normalize item vectors to be mean-centered and unit-normalized
# 2. Compute similarities with pairwise dot products
self._timer = util.Stopwatch()
init_rmat, users, items = matrix.sparse_ratings(ratings)
n_items = len(items)
_logger.info('[%s] made sparse matrix for %d items (%d ratings from %d users)',
self._timer, len(items), init_rmat.nnz, len(users))
rmat, item_means = self._mean_center(ratings, init_rmat, items)
rmat = self._normalize(rmat)
_logger.info('[%s] computing similarity matrix', self._timer)
smat = self._compute_similarities(rmat)
_logger.info('[%s] got neighborhoods for %d of %d items',
self._timer, np.sum(np.diff(smat.rowptrs) > 0), n_items)
_logger.info('[%s] computed %d neighbor pairs', self._timer, smat.nnz)
self.item_index_ = items
self.item_means_ = item_means
self.item_counts_ = np.diff(smat.rowptrs)
self.sim_matrix_ = smat
self.user_index_ = users
self.rating_matrix_ = init_rmat
return self
def _mean_center(self, ratings, rmat, items):
if not self.center:
return rmat, None
item_means = ratings.groupby('item').rating.mean()
item_means = item_means.reindex(items).values
mcvals = rmat.values - item_means[rmat.colinds]
nmat = matrix.CSR(rmat.nrows, rmat.ncols, rmat.nnz,
rmat.rowptrs.copy(), rmat.colinds.copy(), mcvals)
_logger.info('[%s] computed means for %d items', self._timer, len(item_means))
return nmat, item_means
def _normalize(self, rmat):
rmat = matrix.csr_to_scipy(rmat)
# compute column norms
norms = spla.norm(rmat, 2, axis=0)
# and multiply by a diagonal to normalize columns
recip_norms = norms.copy()
is_nz = recip_norms > 0
recip_norms[is_nz] = np.reciprocal(recip_norms[is_nz])
norm_mat = rmat @ sps.diags(recip_norms)
assert norm_mat.shape[1] == rmat.shape[1]
# and reset NaN
norm_mat.data[np.isnan(norm_mat.data)] = 0
_logger.info('[%s] normalized rating matrix columns', self._timer)
return matrix.csr_from_scipy(norm_mat, False)
def _compute_similarities(self, rmat):
mkl = matrix.mkl_ops()
if mkl is None:
return self._scipy_similarities(rmat)
else:
return self._mkl_similarities(mkl, rmat)
def _scipy_similarities(self, rmat):
nitems = rmat.ncols
sp_rmat = matrix.csr_to_scipy(rmat)
_logger.info('[%s] multiplying matrix with scipy', self._timer)
smat = sp_rmat.T @ sp_rmat
smat = smat.tocoo()
rows, cols, vals = smat.row, smat.col, smat.data
rows = rows[:smat.nnz]
cols = cols[:smat.nnz]
vals = vals[:smat.nnz]
rows, cols, vals = self._filter_similarities(rows, cols, vals)
csr = self._select_similarities(nitems, rows, cols, vals)
return csr
def _mkl_similarities(self, mkl, rmat):
nitems = rmat.ncols
assert rmat.values is not None
_logger.info('[%s] multiplying matrix with MKL', self._timer)
smat = mkl.csr_syrk(rmat)
rows = matrix.csr_rowinds(smat)
cols = smat.colinds
vals = smat.values
rows, cols, vals = self._filter_similarities(rows, cols, vals)
del smat
nnz = len(rows)
_logger.info('[%s] making matrix symmetric (%d nnz)', self._timer, nnz)
rows = np.resize(rows, nnz * 2)
cols = np.resize(cols, nnz * 2)
vals = np.resize(vals, nnz * 2)
rows[nnz:] = cols[:nnz]
cols[nnz:] = rows[:nnz]
vals[nnz:] = vals[:nnz]
csr = self._select_similarities(nitems, rows, cols, vals)
return csr
def _filter_similarities(self, rows, cols, vals):
"Threshold similarites & remove self-similarities."
_logger.info('[%s] filtering %d similarities', self._timer, len(rows))
# remove self-similarity
mask = rows != cols
# remove too-small similarities
if self.min_sim is not None:
mask = np.logical_and(mask, vals >= self.min_sim)
_logger.info('[%s] filter keeps %d of %d entries', self._timer, np.sum(mask), len(rows))
return rows[mask], cols[mask], vals[mask]
def _select_similarities(self, nitems, rows, cols, vals):
_logger.info('[%s] ordering similarities', self._timer)
csr = matrix.csr_from_coo(rows, cols, vals, shape=(nitems, nitems))
csr.sort_values()
if self.save_nbrs is None or self.save_nbrs <= 0:
return csr
_logger.info('[%s] picking %d top similarities', self._timer, self.save_nbrs)
counts = csr.row_nnzs()
_logger.debug('have %d rows in size range [%d,%d]',
len(counts), np.min(counts), np.max(counts))
ncounts =
|
np.fmin(counts, self.save_nbrs)
|
numpy.fmin
|
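# A minimal sketch of the numpy.fmin completion above: each row's neighbor
# count is clamped element-wise at the save_nbrs cap (np.fmin also ignores
# NaNs, unlike np.minimum). Values are illustrative.
import numpy as np
counts = np.array([3, 120, 47, 0])
ncounts = np.fmin(counts, 50)   # array([ 3, 50, 47,  0])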
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.autodiff import GradManager
from meg_networks import FullyConnectedLayer
batch_size = 2
in_channels = 512
w_dim = 512
lr = 0.1
# activation = 'linear'
# activation = 'lrelu'
# activation = 'relu'
# activation = 'tanh'
activation = 'sigmoid'
# activation = 'elu'
# activation = 'selu'
# activation = 'softplus'
# activation = 'swish'
model = FullyConnectedLayer(w_dim, in_channels, activation=activation, bias_init=1)
model.train()
optimizer = mge.optimizer.SGD(model.parameters(), lr=lr, momentum=0.9)
model.load_state_dict(mge.load("pytorch_fullyConnectedLayer.pkl", map_location="cpu"))
dic2 =
|
np.load('01_fullyConnectedLayer_grad.npz')
|
numpy.load
|
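# A minimal sketch of the numpy.load completion above: loading a .npz archive
# returns a dict-like NpzFile keyed by the array names used at save time. The
# file and key names here are hypothetical.
import numpy as np
np.savez('example_grads.npz', w_grad=np.zeros((4, 4)), b_grad=np.zeros(4))
dic = np.load('example_grads.npz')
w_grad = dic['w_grad']   # (4, 4) array of zeros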
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Verify Rate of Change QC test
"""
import numpy as np
from numpy import ma
from cotede.qctests import RateOfChange, rate_of_change
from ..data import DummyData
from .compare import compare_feature_input_types, compare_input_types
def test_rate_of_change():
"""Basic test on feature rate of change
"""
x = [1, -1, 2, 2, 3, 2, 4]
y = rate_of_change(x)
output = [np.nan, -2.0, 3.0, 0.0, 1.0, -1.0, 2.0]
assert isinstance(y, np.ndarray)
assert np.allclose(y, output, equal_nan=True)
def test_feature_input_types():
x = np.array([1, -1, 2, 2, 3, 2, 4])
compare_feature_input_types(rate_of_change, x)
def test_standard_dataset():
"""Test RateOfChange procedure with a standard dataset
"""
profile = DummyData()
features = {
"rate_of_change": [
np.nan,
0.02,
0.0,
-0.03,
-0.32,
-1.53,
-1.61,
-3.9,
-2.56,
-4.31,
-4.15,
1,
-2.22,
-2.13,
np.nan,
]
}
flags = {"rate_of_change": [0, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1, 1, 1, 9]}
cfg = {"threshold": 4, "flag_good": 1, "flag_bad": 4}
y = RateOfChange(profile, "TEMP", cfg)
for f in features:
assert np.allclose(y.features[f], features[f], equal_nan=True)
for f in flags:
assert
|
np.allclose(y.flags[f], flags[f], equal_nan=True)
|
numpy.allclose
|
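# A minimal sketch of the numpy.allclose assertion above: equal_nan=True makes
# NaNs at matching positions compare equal, which the default comparison does
# not. Values are illustrative.
import numpy as np
a = np.array([np.nan, 0.02, 0.0])
b = np.array([np.nan, 0.02, 0.0])
assert not np.allclose(a, b)                 # NaN != NaN by default
assert np.allclose(a, b, equal_nan=True)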
'''
To do:
test for endianness by checking for sane values, store in flag
Use:
data = SU("/path/to/inputFile.su")
data.read("/path/to/raw.npy")
'''
import numpy as np, sys, os
import mmap
from .headers import su_header_dtype
import pprint
def memory():
"""
Get node total memory and memory usage
"""
with open('/proc/meminfo', 'r') as mem:
ret = {}
tmp = 0
for i in mem:
sline = i.split()
if str(sline[0]) == 'MemTotal:':
ret['total'] = int(sline[1])
elif str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):
tmp += int(sline[1])
ret['free'] = tmp
ret['used'] = int(ret['total']) - int(ret['free'])
return ret
class SU(object):
'''
reading and writing SU files, including those larger than RAM,
to and from .npy files
'''
def __init__(self, _file):
self.params = {}
self.params['byteswap'] = False
self._file = self.params['filename'] = _file
self.readNS()
self.calculateChunks()
self.report()
def readNS(self):
raw = open(self._file, 'rb').read(240)
self.ns = ns1 = np.fromstring(raw, dtype=su_header_dtype, count=1)['ns'][0]
ns2 =
|
np.fromstring(raw, dtype=su_header_dtype, count=1)
|
numpy.fromstring
|
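# A minimal sketch of the numpy.fromstring completion above. For binary data
# np.fromstring is deprecated; np.frombuffer is the drop-in replacement for
# unpacking a fixed header record. The dtype below is illustrative, not the
# real SU header layout.
import numpy as np
header_dtype = np.dtype([('tracl', '<i4'), ('ns', '<u2')])
raw = np.zeros(1, dtype=header_dtype).tobytes()
ns = np.frombuffer(raw, dtype=header_dtype, count=1)['ns'][0]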
from T2GEORES import geometry as geometry
import numpy as np
import re as re
import subprocess
import datetime
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import shutil
import os
import itertools
import json
import shapefile
import pylab as plb
import math
import sys
from scipy.spatial import ConvexHull
from scipy.interpolate import griddata
from scipy.spatial.distance import cdist
from scipy.spatial import cKDTree
import pandas as pd
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,AutoMinorLocator)
import pyvista as pv
import vtk
import sqlite3
import geopandas as gpd
import string
from lloydRelax import Field
class py2amesh:
"""It creates a mesh based on well positions and irregular blocks
The main characteristics are:
-It generates the mesh based on defined boundaries
-The inner section is called wellfield and can contain elements with hexagonal or square shape
-The wellfield can be delimited by a shapefile or by fixed squared boundaries.
-It allows generating elements along a line to represent structures.
-The voronoi elements are generated by AMESH.
-The element names cannot contain two consecutive zeros.
-The well blocks are the first on the list
-It creates the input files to work on Steinar (RockEditor)
-It can export a defined layer in shapefile format
-It can plot a selected layer (it is recommended to use the function plot_voronoi() to plot)
-Two json files are generated with the correlative block for each well, from which it can be tracked during all the modelling steps.
Parameters
----------
filename : str
File name with well feedzone location
filepath : str
Path of input files
Xmin : float
Minimum X coordinates for the grid
Xmax : float
Maximum X coordinates for the grid
Ymin : float
Minimum Y coordinates for the grid
Ymax : float
Maximum Y coordinates for the grid
toler : float
AMESH parameter
layers : dictionary
Name (correlative) and thickness of every layer on the model, keyword on input_dictionary
layer_to_plot : int
In case it is specified a voronoi plot will be performed
x_space : float
Horizontal distance between elements for the outerfield
y_space : float
Vertical distance between elements for the outerfield
radius_criteria: float
Minimum distance between a well location and a regular element
x_from_boarder: float
Horizontal distance from the first element to the east border
y_from_boarder: float
Vertical distance from the first element to the south border
x_gap_min: float
Minimum X coordinates on the grid for the well field
x_gap_max: float
Maximum X coordinates on the grid for the well field
x_gap_space: float
Horizontal distance between elements for the farfield
y_gap_min: float
Minimum Y coordinates on the grid for the well field
y_gap_max: float
Maximum Y coordinates on the grid for the well field
y_gap_space: float
Vertical distance between elements for the farfield
plot_names: bool
If true it plots the name of the blocks from the selected layer to plot
plot_centers: bool
If true it plots the centers of the blocks from the selected layer to plot
z0_level: float
Reference level (elevation) for all the grid, keyword on input_dictionary
mesh_creation: bool
If true the mesh is created
plot_layer: bool
If true it plots the selected layer
to_steinar: bool
If true it creates the input files for steinar
to_GIS: bool
If true it generates a shapefile of the selected layer
plot_all_GIS: bool
If true it generates a shapefile of all layers
from_leapfrog: bool
reads the Leapfrog files ../mesh/from_leapfrog/LF_geometry.dat and ../mesh/from_leapfrog/LF_t2.dat; however, the symbology used in Leapfrog is lost and neither the regular mesh nor the wells are used. The mesh is created with AMESH only
line_file: str
It defines the path and name of a line file that can represent a fault or other structure on the mesh. The input file must contain the header ID,X,Y in csv format. ID refers to the same
structure, thus more than one structure can be defined in a single file.
fault_distance: float
In case a line_file is defined, some parallel elements will be created at a defined distance
with_polygon: bool
If true a shapefile will be read to define the wellfield.
polygon_shape: str
The shapefile defines the wellfield boundaries. The shape must not contain any cavity
set_inac_from_poly: bool
If true all the elements on the outside of the shapefile are defined as inactive
set_inac_from_inner:bool
If true all the elements on the outerfield are defined as inactive
rotate: bool
If true it rotates the mesh a defined angle
angle: float
Angle in degrees
inner_mesh_type: string
Type of mesh on the inner part of the mesh, it could be 'honeycomb' or 'regular'
Returns
-------
file
eleme: list of blocks from the grid
file
conne : list of connections on the grid
shapefile
mesh_{field}_layer_{layer} : shapefile of a defined (or all) layer including rock distribution
plot
Voronoi plot (in case it is specified)
Attention
---------
A copy of AMESH must be on the path or directory
"""
def __init__(self,filename,filepath,Xmin,Xmax,Ymin,Ymax,\
toler,layers,layer_to_plot,x_space,y_space,radius_criteria,\
x_from_boarder,y_from_boarder,\
x_gap_min,x_gap_max,x_gap_space,y_gap_min,y_gap_max,y_gap_space,\
plot_names,plot_centers,z0_level,plot_all_GIS,from_leapfrog,line_file,fault_distance,with_polygon,polygon_shape,set_inac_from_poly,set_inac_from_inner,rotate,angle,inner_mesh_type,\
distance_points,fault_rows,relaxation_times,points_around_well,distance_points_around_well):
self.filename=filename
self.filepath=filepath
self.layers=layers
self.number_of_layer=len(layers)
self.Xmin=Xmin
self.Xmax=Xmax
self.Ymin=Ymin
self.Ymax=Ymax
self.z0_level=z0_level
self.layer_to_plot=layer_to_plot
self.radius_criteria=radius_criteria
self.x_space=x_space
self.y_space=y_space
self.x_from_boarder=x_from_boarder
self.y_from_boarder=y_from_boarder
self.z=0
self.delf_rock="101" #Layer index
self.filename_out="in"
self.toler=toler
self.x_gap_min=x_gap_min
self.x_gap_max=x_gap_max
self.x_gap_space=x_gap_space
self.y_gap_min=y_gap_min
self.y_gap_max=y_gap_max
self.y_gap_space=y_gap_space
self.plot_names=plot_names
self.plot_centers=plot_centers
self.plot_all_GIS=plot_all_GIS
self.from_leapfrog=from_leapfrog
self.line_file=line_file
self.fault_distance=fault_distance
self.with_polygon=with_polygon
self.set_inac_from_poly=set_inac_from_poly
self.set_inac_from_inner=set_inac_from_inner
self.rotate=rotate
self.angle=angle
self.inner_mesh_type=inner_mesh_type
self.polygon_shape=polygon_shape
self.distance_points=distance_points
self.fault_rows=fault_rows
self.relaxation_times=relaxation_times
self.points_around_well=points_around_well
self.distance_points_around_well=distance_points_around_well
if self.with_polygon:
shape = shapefile.Reader(polygon_shape)
#first feature of the shapefile
feature = shape.shapeRecords()[0]
points = feature.shape.__geo_interface__
self.polygon=[]
for n in points:
for v in points[n]:
if n=='coordinates':
self.polygon.append([v[0],v[1]]) # (GeoJSON format)
#Read border to clip write into in files
borders=gpd.read_file('../../GIS/reservoir/reservoir_limits_1_pol.shp')
border_points=[]
for line in borders.iterrows():
pointList = line[1].geometry.exterior.coords.xy
for point in zip(pointList[0],pointList[1]):
border_points.append([point[0],point[1]])
self.polygon_external=border_points[::-1][0:-1]
self.color_dict = {1:[['AA','AB','AC','AD','AE','AF','AG'],'ROCK1','red'],\
2:[['BA','BB','BC','BD','BE','BF','BG'],'ROCK2','white'],\
3:[['CA','CB','CC','CD','CE','CF','CG'],'ROCK3','yellow'],\
4:[['DA','DB','DC','DD','DE','DF','DG'],'ROCK4','blue'],\
5:[['EA','EB','EC','ED','EE','EF','EG'],'ROCK5','green'],\
6:[['FA','FB','FC','FD','FE','FF','FG'],'ROCK6','purple'],\
7:[['GA','GB','GC','GD','GE','GF','GG'],'ROCK7','#ff69b4'],\
8:[['HA','HB','HC','HD','HE','HF','HG'],'ROCK8','darkorange'],\
9:[['IA','IB','IC','ID','IE','IF','IG'],'ROCK9','cyan'],\
10:[['JA','JB','JC','JD','JE','JF','JG'],'ROK10','magenta'],\
11:[['KA','KB','KC','KD','KE','KF','KG'],'ROK11','#faebd7'],\
12:[['LA','LB','LC','LD','LE','LF','LG'],'ROK12','#2e8b57'],\
13:[['MA','MB','MC','MD','ME','MF','MG'],'ROK13','#eeefff'],\
14:[['NA','NB','NC','ND','NE','NF','NG'],'ROK14','#da70d6'],\
15:[['OA','OB','OC','OD','OE','OF','OG'],'ROK15','#ff7f50'],\
16:[['PA','PB','PC','PD','PE','PF','PG'],'ROK16','#cd853f'],\
17:[['QA','QB','QC','QD','QE','QF','QG'],'ROK17','#bc8f8f'],\
18:[['RA','RB','RC','RD','RE','RF','RG'],'ROK18','#5f9ea0'],\
19:[['SA','SB','SC','SD','SE','SF','SG'],'ROK19','#daa520'],
20:[['TA','TB','SC','SD','SE','SF','SG'],'ROK20','#daa520'],
21:[['UA','UB','UC','UD','UE','UF','UG'],'ROK21','#daa520'],
22:[['VA','VB','VC','VD','VE','VF','VG'],'ROK22','#daa520'],
23:[['WA','WB','WC','WD','WE','WF','WG'],'ROK23','#daa520'],
24:[['XA','XB','XC','XD','XE','XF','XG'],'ROK24','#daa520'],
25:[['YA','YB','YC','YD','YE','YF','YG'],'ROK25','#daa520'],
26:[['ZA','ZB','ZC','ZD','ZE','ZF','ZG'],'ROK26','#daa520']}
self.rock_dict={}
prof_cont=0
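#z_real is the elevation of the centre of layer jk: z0_level minus the total
#thickness of the layers above it minus half the thickness of layer jk itself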
for jk in range(1,len(layers)+1):
if jk==1:
prof_cont=layers[jk-1]*0.5
z_real=z0_level-prof_cont
elif jk>1:
prof_cont=prof_cont+layers[jk-1]*0.5+layers[jk-2]*0.5
z_real=z0_level-prof_cont
self.rock_dict[jk]=[self.color_dict[jk][0][0],self.color_dict[jk][1],\
self.color_dict[jk][2],self.color_dict[jk][0][0],z_real,self.layers[jk-1]]
def regular_mesh(self):
"""Genera malla regular en en toda la extension de la region definida por Xmin,Xmax,Ymin y Ymax
"""
x_regular=range(self.Xmin+self.x_from_boarder,self.Xmax+self.x_space-self.x_from_boarder,self.x_space)
y_regular=range(self.Ymin+self.y_from_boarder,self.Ymax+self.y_space-self.y_from_boarder,self.y_space)
x_regular_small=range(self.x_gap_min,self.x_gap_max+self.x_gap_space,self.x_gap_space)
y_regular_small=range(self.y_gap_min,self.y_gap_max+self.y_gap_space,self.y_gap_space)
self.mesh_array=[]
for nx in x_regular:
for ny in y_regular:
if ((nx<self.x_gap_min) or (nx>self.x_gap_max)) or ((ny<self.y_gap_min) or (ny>self.y_gap_max)):
self.mesh_array.append([nx,ny])
#Refined (small-spacing) mesh points are generated inside the inner gap region
for nxx in x_regular_small:
cnt=0
for nyy in y_regular_small:
if [nxx,nyy] not in self.mesh_array:
if self.inner_mesh_type=='honeycomb':
if cnt%2==0:
self.mesh_array.append([nxx,nyy])
else:
self.mesh_array.append([nxx+self.x_gap_space/2,nyy])
elif self.inner_mesh_type=='regular':
self.mesh_array.append([nxx,nyy])
cnt+=1
if self.rotate:
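#Rotate every mesh point by `angle` degrees counter-clockwise about the corner (Xmin,Ymin)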
angle=self.angle
for pair in range(len(self.mesh_array)):
x1=self.mesh_array[pair][0]-self.Xmin
y1=self.mesh_array[pair][1]-self.Ymin
self.mesh_array[pair][0]=x1*math.cos(math.pi*angle/180)-y1*math.sin(math.pi*angle/180)+self.Xmin
self.mesh_array[pair][1]=x1*math.sin(math.pi*angle/180)+y1*math.cos(math.pi*angle/180)+self.Ymin
return np.array(self.mesh_array)
def check_in_out(self,position,point,source):
"""Verifica si un punto de la malla del campo cercano esta dentro o fuera del poligo definido por el shapefile de entrada o del campo cercano
"""
if position=='internal':
polygon=self.polygon
elif position=='external':
polygon=self.polygon_external
boolean=False
if source=='shapefile':
cnt=0
for n in range(len(polygon)):
if n+1!=len(polygon):
m,b=plb.polyfit([polygon[n][0],polygon[n+1][0]],[polygon[n][1],polygon[n+1][1]],1)
val_range=[polygon[n][1],polygon[n+1][1]]
elif n+1==len(polygon):
m,b=plb.polyfit([polygon[-1][0],polygon[0][0]],[polygon[-1][1],polygon[0][1]],1)
val_range=[polygon[-1][1],polygon[0][1]]
x=(point[1]-b)/m
if point[0]<x and min(val_range)<point[1] and point[1]<max(val_range):
cnt+=1
if cnt==1:
boolean=True
elif source=='inner':
Xarray_inner=np.array([self.x_gap_min,self.x_gap_max+self.x_gap_space])
Yarray_inner=np.array([self.y_gap_min,self.y_gap_max+self.y_gap_space])
if Xarray_inner[0]<point[0] and Xarray_inner[1]>point[0] and Yarray_inner[0]<point[1] and Yarray_inner[1]>point[1]:
boolean=True
return boolean
def reg_pol_mesh(self):
"""Crea malla regular cuando existe un poligono de entrada
"""
#x_regular=np.arange(self.Xmin+self.x_from_boarder,self.Xmax+self.x_space-self.x_from_boarder,self.x_space)
#y_regular=np.arange(self.Ymin+self.y_from_boarder,self.Ymax+self.y_space-self.y_from_boarder,self.y_space)
nx=40 #number of elements in one direction
n_times=4
ny=int((self.Ymax+2*n_times*self.x_from_boarder-self.Ymin)*nx/(self.Xmax-self.Xmin+n_times*2*self.x_from_boarder))
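#ny is chosen so that the expanded domain keeps roughly the same node spacing in X and Y (assumed intent)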
x_regular=np.linspace(self.Xmin-n_times*self.x_from_boarder,self.Xmax+n_times*self.x_from_boarder,nx,endpoint=True) #generation of regular grid on X
y_regular=np.linspace(self.Ymin-n_times*self.y_from_boarder,self.Ymax+n_times*self.x_from_boarder,ny,endpoint=True) #generation of regular grid on Y
self.mesh_array=[]
for nx in x_regular:
for ny in y_regular:
self.mesh_array.append([nx,ny])
if self.rotate:
angle=self.angle
for pair in range(len(self.mesh_array)):
x1=self.mesh_array[pair][0]-(self.Xmin-n_times*self.x_from_boarder)
y1=self.mesh_array[pair][1]-(self.Ymin-n_times*self.x_from_boarder)
self.mesh_array[pair][0]=x1*math.cos(math.pi*angle/180)-y1*math.sin(math.pi*angle/180)+self.Xmin-n_times*self.x_from_boarder
self.mesh_array[pair][1]=x1*math.sin(math.pi*angle/180)+y1*math.cos(math.pi*angle/180)+self.Ymin-n_times*self.x_from_boarder
x_pol=[]
y_pol=[]
for n in range(len(self.polygon)):
x_pol.append(int(self.polygon[n][0]))
y_pol.append(int(self.polygon[n][1]))
x_pol.append(int(self.polygon[0][0]))
y_pol.append(int(self.polygon[0][1]))
x_gap_min=self.x_gap_min#min(x_pol)
x_gap_max=self.x_gap_max#max(x_pol)
y_gap_min=self.y_gap_min#min(y_pol)
y_gap_max=self.y_gap_max#max(y_pol)
small_mesh=[]
#x_regular_small=np.arange(x_gap_min,x_gap_max+self.x_gap_space,self.x_gap_space)
#y_regular_small=np.arange(y_gap_min,y_gap_max+self.y_gap_space,self.y_gap_space)
x_regular_small=np.arange(x_gap_min,x_gap_max,self.x_gap_space)
y_regular_small=np.arange(y_gap_min,y_gap_max,self.y_gap_space)
for nxx in x_regular_small:
cnt=0
for nyy in y_regular_small:
if [nxx,nyy] not in small_mesh:
if self.inner_mesh_type=='honeycomb':
if cnt%2==0:
small_mesh.append([nxx,nyy])
else:
small_mesh.append([nxx+self.x_gap_space/2,nyy])
elif self.inner_mesh_type=='regular':
small_mesh.append([nxx,nyy])
cnt+=1
if self.rotate:
angle=self.angle
for pair in range(len(small_mesh)):
x1=small_mesh[pair][0]-self.x_gap_min
y1=small_mesh[pair][1]-self.y_gap_min
small_mesh[pair][0]=x1*math.cos(math.pi*angle/180)-y1*math.sin(math.pi*angle/180)+self.x_gap_min
small_mesh[pair][1]=x1*math.sin(math.pi*angle/180)+y1*math.cos(math.pi*angle/180)+self.y_gap_min
to_delete=[]
for v in range(len(self.mesh_array)):
point=[self.mesh_array[v][0],self.mesh_array[v][1]]
check=self.check_in_out('internal',point,source='shapefile')
if check:
to_delete.append(v)
self.mesh_array=np.delete(self.mesh_array, to_delete, 0)
to_delete=[]
for v in range(len(small_mesh)):
point=[small_mesh[v][0],small_mesh[v][1]]
check=self.check_in_out('internal',point,source='shapefile')
if not check:
to_delete.append(v)
small_mesh=np.delete(small_mesh, to_delete, 0)
mesh_pol=[]
for vk in range(len(self.mesh_array)):
mesh_pol.append([self.mesh_array[vk][0],self.mesh_array[vk][1]])
for vk in range(len(small_mesh)):
mesh_pol.append([small_mesh[vk][0],small_mesh[vk][1]])
return np.array(mesh_pol)
def radius_select(self,x0,y0,xr,yr,type_i='mesh'):
"""Verifica si dos puntos estan mas cerca que el criterio seleccionado
"""
r=((x0-xr)**2+(y0-yr)**2)**0.5
if type_i=='mesh':
cx=self.radius_criteria
elif type_i=='well':
cx=10*0.4*2**0.5
elif type_i=='fault':
cx=15
if r<cx:
boolean=1
else:
boolean=0
return boolean
def from_leapfrog_mesh(self):
"""Extrae puntos mas extremos y la posicion de los elementos de un set de datos provinientes de leapfrog, sin embargo no considera los elementos asociados a la roca ATM 0
"""
geometry_file="../mesh/from_leapfrog/LF_geometry.dat"
leapfrog_t2_file="../mesh/from_leapfrog/LF_t2.dat"
#Creates a dictionary using the layers
printlayer=False
layer_min=[]
layers={}
with open(geometry_file,'r') as f:
for line in f.readlines():
if line.rstrip()=='LAYERS':
printlayer=True
continue
elif line.rstrip()=='SURFA' or line.rstrip()=='':
printlayer=False
if printlayer:
layer=line.rstrip()[0:2]
if layer==' 0':
layer_min.append(line.rstrip()[2:13])
layer_middle=line.rstrip()[13:23]
layer_max=line.rstrip()[13:23]
continue
else:
layer_max=layer_min[-1]
layer_min.append(line.rstrip()[2:13])
layer_middle=line.rstrip()[13:23]
layers[int(layer)]=[float(layer_max),float(layer_middle),float(layer_min[-1])]
max_layer=max(layers.keys())
xc=[]
yc=[]
self.LP_mesh=[]
printeleme=False
#Takes the elements at the selected layer
with open(leapfrog_t2_file,'r') as f:
for line in f.readlines():
if line.rstrip()=='ELEME':
printeleme=True
continue
elif line.rstrip()=='CONNE' or line.rstrip()=='':
printeleme=False
if printeleme and line.rstrip()[0:5]!="ATM 0" and int(line.rstrip()[3:5])==max_layer:
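#The fixed-width slices below are assumed to hold the element centroid X (columns 51-60) and Y (columns 60-70)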
xc=float(line.rstrip()[51:60])
yc=float(line.rstrip()[60:70])
self.LP_mesh.append([xc,yc])
#Reads the VERTICES section to find the bounding box of the mesh
printvertices=False
self.x_min=1E20
self.x_max=0
self.y_min=1E20
self.y_max=0
with open(geometry_file,'r') as f:
for line in f.readlines():
#It will read between the keywords VERTICES and GRID
if line.rstrip()=='VERTICES':
printvertices=True
continue
elif line.rstrip()=='GRID' or line.rstrip()=="":
printvertices=False
if printvertices:
vertice_x=float(line.rstrip()[4:13])
vertice_y=float(line.rstrip()[13:23])
if vertice_x>self.x_max:
self.x_max=vertice_x
if vertice_y>self.y_max:
self.y_max=vertice_y
if vertice_x<self.x_min:
self.x_min=vertice_x
if vertice_y<self.y_min:
self.y_min=vertice_y
return self.x_max,self.x_min, self.y_min,self.y_max, self.LP_mesh
def data(self):
"""Define los puntos que ingresaran al archivo de entrada para amesh. Adicionalmente, en caso de definir un linea en el archivo de entrada <i><lines_data/i> se procedera a ingresar estos puntos y crear puntos paralelos en ambos extremos de la linea
"""
self.raw_data=np.genfromtxt(self.filepath+self.filename,dtype={'names':('ID','MD','X','Y','Z','TYPE'),'formats':('<U7','f4','f4','f4','f4','<U10')},delimiter=',',skip_header=True)
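#Hypothetical example of the expected input layout (header row plus comma-separated records):
#ID,MD,X,Y,Z,TYPE
#WELL-1,0,458000.0,320000.0,650.0,producer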
self.IDXY={}
if not self.from_leapfrog:
if self.with_polygon:
regular_mesh=self.reg_pol_mesh()
else:
regular_mesh=self.regular_mesh()
else:
x,x1,y1,y2,regular_mesh=self.from_leapfrog_mesh()
for n in range(len(self.raw_data['ID'])):
#Store the data from wells
self.IDXY["%s"%(str(self.raw_data['ID'][n]))]=[self.raw_data['X'][n],self.raw_data['Y'][n],self.raw_data['TYPE'][n]]
#self.IDXY["%s"%(str(self.raw_data['ID'][n])).split("'")[1]]=[self.raw_data['X'][n],self.raw_data['Y'][n],self.raw_data['TYPE'][n]]
to_delete=[]
x0=self.raw_data['X'][n]
y0=self.raw_data['Y'][n]
#Delete the regular points close to the wells
for ngrid in range(len(regular_mesh)):
if abs(x0-regular_mesh[ngrid][0])<self.radius_criteria or abs(y0-regular_mesh[ngrid][1])<self.radius_criteria:
boolean=self.radius_select(x0,y0,regular_mesh[ngrid][0],regular_mesh[ngrid][1])
if boolean==1:
to_delete.append(ngrid)
regular_mesh=
|
np.delete(regular_mesh, to_delete, 0)
|
numpy.delete
|
"""
I/O for VTU.
<https://vtk.org/Wiki/VTK_XML_Formats>
<https://vtk.org/wp-content/uploads/2015/04/file-formats.pdf>
"""
import base64
import re
import sys
import zlib
import numpy as np
from ..__about__ import __version__
from .._common import info, join_strings, raw_from_cell_data, replace_space, warn
from .._exceptions import CorruptionError, ReadError
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
from .._vtk_common import meshio_to_vtk_order, meshio_to_vtk_type, vtk_cells_from_data
# Paraview 5.8.1's built-in Python doesn't have lzma.
try:
import lzma
except ModuleNotFoundError:
lzma = None
def num_bytes_to_num_base64_chars(num_bytes):
# Rounding up in integer division works by double negation since Python
# always rounds down.
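# For example, 10 bytes -> -(-10 // 3) * 4 = 4 * 4 = 16 base64 characters
# (including padding).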
return -(-num_bytes // 3) * 4
def _polyhedron_cells_from_data(offsets, faces, faceoffsets, cell_data_raw):
# In general the number of faces will vary between cells, and the
# number of nodes vary between faces for each cell. The information
# will be stored as a List (one item per cell) of lists (one item
# per face of the cell) of np-arrays of node indices.
cells = {}
cell_data = {}
# The data format for face-cells is:
# num_faces_cell_0,
# num_nodes_face_0, node_ind_0, node_ind_1, ..
# num_nodes_face_1, node_ind_0, node_ind_1, ..
# ...
# num_faces_cell_1,
# ...
# See https://vtk.org/Wiki/VTK/Polyhedron_Support for more.
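# Illustrative example (not taken from any particular file): a single
# tetrahedron over nodes 0..3 would be encoded as
#   faces       = [4, 3,0,1,2, 3,0,1,3, 3,0,2,3, 3,1,2,3]   (length 17)
#   faceoffsets = [17], which is switched below to the start offset [0]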
# The faceoffsets describes the end of the face description for each
# cell. Switch faceoffsets to give start points, not end points
faceoffsets = np.append([0], faceoffsets[:-1])
# Double loop over cells then faces.
# This will be slow, but seems necessary to cover all cases
for cell_start in faceoffsets:
num_faces_this_cell = faces[cell_start]
faces_this_cell = []
next_face = cell_start + 1
for _ in range(num_faces_this_cell):
num_nodes_this_face = faces[next_face]
faces_this_cell.append(
np.array(
faces[next_face + 1 : (next_face + num_nodes_this_face + 1)],
dtype=int,
)
)
# Increase by number of nodes just read, plus the item giving
# number of nodes per face
next_face += num_nodes_this_face + 1
# Done with this cell
# Find number of nodes for this cell
num_nodes_this_cell = np.unique(
|
np.hstack([v for v in faces_this_cell])
|
numpy.hstack
|
import numpy as np
import pytest
from ..interpolate import (
interpolate_image_and_noise,
copy_masked_edges_image_and_noise,
interpolate_image_at_mask,
)
def test_interpolate_image_at_mask():
# linear image interp should be perfect for regions smaller than the
# patches used for interpolation
y, x = np.mgrid[0:100, 0:100]
image = (10 + x*5).astype(np.float32)
bmask = np.zeros_like(image, dtype=bool)
bmask[30:35, 40:45] = True
# put nans here to make sure interp is done ok
image[bmask] = np.nan
iimage = interpolate_image_at_mask(
image=image,
bad_msk=bmask,
)
assert np.allclose(iimage, 10 + x*5)
def test_interpolate_image_at_mask_allbad():
# linear image interp should be perfect for regions smaller than the
# patches used for interpolation
y, x = np.mgrid[0:100, 0:100]
image = (10 + x*5).astype(np.float32)
bmask = np.zeros_like(image, dtype=bool)
bmask[:, :] = True
iimage = interpolate_image_at_mask(
image=image,
bad_msk=bmask,
)
assert iimage is None
def test_interpolate_image_and_noise_weight():
# linear image interp should be perfect for regions smaller than the
# patches used for interpolation
y, x = np.mgrid[0:100, 0:100]
image = (10 + x*5).astype(np.float32)
weight = np.ones_like(image)
bmask = np.zeros_like(image, dtype=np.int32)
bad_flags = 0
weight[30:35, 40:45] = 0.0
# put nans here to make sure interp is done ok
msk = weight <= 0
image[msk] = np.nan
rng = np.random.RandomState(seed=42)
noises = [
rng.normal(size=image.shape),
rng.normal(size=image.shape),
rng.normal(size=image.shape),
]
iimage, inoises = interpolate_image_and_noise(
image=image,
weight=weight,
bmask=bmask,
bad_flags=bad_flags,
noises=noises)
assert np.allclose(iimage, 10 + x*5)
# make sure noise field was interpolated
rng = np.random.RandomState(seed=42)
noises = [
rng.normal(size=image.shape),
rng.normal(size=image.shape),
rng.normal(size=image.shape),
]
for noise, inoise in zip(noises, inoises):
assert not np.allclose(noise[msk], inoise[msk])
assert np.allclose(noise[~msk], inoise[~msk])
def test_interpolate_image_and_noise_weight_fill():
# linear image interp should be perfect for regions smaller than the
# patches used for interpolation
y, x = np.mgrid[0:100, 0:100]
image = (10 + x*5).astype(np.float32)
weight = np.ones_like(image)
bmask = np.zeros_like(image, dtype=np.int32)
bad_flags = 0
weight[30:50, 40:60] = 0.0
# put nans here to make sure interp is done ok
msk = weight <= 0
image[msk] = np.nan
rng = np.random.RandomState(seed=42)
noises = [
rng.normal(size=image.shape),
rng.normal(size=image.shape),
rng.normal(size=image.shape),
]
rng = np.random.RandomState(seed=42)
iimage, inoises = interpolate_image_and_noise(
image=image,
weight=weight,
bmask=bmask,
bad_flags=bad_flags,
noises=noises,
rng=rng,
fill_isolated_with_noise=True,
)
assert not np.allclose(iimage, 10 + x*5)
assert np.allclose(np.mean(iimage[35:45, 45:55]), 0, rtol=0, atol=0.2)
# make sure noise field was interpolated
rng = np.random.RandomState(seed=42)
noises = [
rng.normal(size=image.shape),
rng.normal(size=image.shape),
rng.normal(size=image.shape),
]
for noise, inoise in zip(noises, inoises):
assert not np.allclose(noise[msk], inoise[msk])
assert np.allclose(noise[~msk], inoise[~msk])
def test_interpolate_image_and_noise_bmask():
# linear image interp should be perfect for regions smaller than the
# patches used for interpolation
y, x = np.mgrid[0:100, 0:100]
image = (10 + x*5).astype(np.float32)
weight = np.ones_like(image)
bmask = np.zeros_like(image, dtype=np.int32)
bad_flags = 1
rng = np.random.RandomState(seed=42)
bmask[30:35, 40:45] = 1
bmask[:, 0] = 2
bmask[:, -1] = 4
# put nans here to make sure interp is done ok
msk = (bmask & bad_flags) != 0
image[msk] = np.nan
rng = np.random.RandomState(seed=42)
noises = [
rng.normal(size=image.shape),
rng.normal(size=image.shape),
rng.normal(size=image.shape),
]
iimage, inoises = interpolate_image_and_noise(
image=image,
weight=weight,
bmask=bmask,
bad_flags=bad_flags,
noises=noises)
assert np.allclose(iimage, 10 + x*5)
# make sure noise field was interpolated
rng = np.random.RandomState(seed=42)
noises = [
rng.normal(size=image.shape),
rng.normal(size=image.shape),
rng.normal(size=image.shape),
]
for noise, inoise in zip(noises, inoises):
assert not np.allclose(noise[msk], inoise[msk])
assert np.allclose(noise[~msk], inoise[~msk])
def test_interpolate_image_and_noise_big_missing():
y, x = np.mgrid[0:100, 0:100]
image = (10 + x*5).astype(np.float32)
weight = np.ones_like(image)
bmask = np.zeros_like(image, dtype=np.int32)
bad_flags = 1
rng = np.random.RandomState(seed=42)
nse = rng.normal(size=image.shape)
bmask[15:80, 15:80] = 1
# put nans here to make sure interp is done ok
msk = (bmask & bad_flags) != 0
image[msk] = np.nan
iimage, inoises = interpolate_image_and_noise(
image=image,
weight=weight,
bmask=bmask,
bad_flags=bad_flags,
noises=[nse])
# interp will be way off but should have happened
assert np.all(np.isfinite(iimage))
# make sure noise field was interpolated
rng = np.random.RandomState(seed=42)
noise = rng.normal(size=image.shape)
assert not np.allclose(noise[msk], inoises[0][msk])
assert np.allclose(noise[~msk], inoises[0][~msk])
def test_interpolate_image_and_noise_allbad():
# linear image interp should be perfect for regions smaller than the
# patches used for interpolation
y, x = np.mgrid[0:100, 0:100]
image = (10 + x*5).astype(np.float32)
weight = np.ones_like(image)
bmask = np.zeros_like(image, dtype=np.int32)
bad_flags = 1
rng = np.random.RandomState(seed=42)
bmask[:, :] = 1
# put nans here to make sure interp is done ok
msk = (bmask & bad_flags) != 0
image[msk] = np.nan
rng = np.random.RandomState(seed=42)
noises = [
rng.normal(size=image.shape),
rng.normal(size=image.shape),
rng.normal(size=image.shape),
]
iimage, inoises = interpolate_image_and_noise(
image=image,
weight=weight,
bmask=bmask,
bad_flags=bad_flags,
noises=noises)
assert iimage is None
assert inoises is None
def test_interpolate_gauss_image(show=False):
"""
test that our interpolation works decently for a linear
piece missing from a gaussian image
"""
rng = np.random.RandomState(seed=31415)
noise = 0.001
sigma = 4.0
is2 = 1.0/sigma**2
dims = 51, 51
cen = (np.array(dims)-1.0)/2.0
rows, cols = np.mgrid[
0:dims[0],
0:dims[1],
]
rows = rows - cen[0]
cols = cols - cen[1]
image_unmasked = np.exp(-0.5*(rows**2 + cols**2)*is2)
weight = image_unmasked*0 + 1.0/noise**2
noise_image = rng.normal(scale=noise, size=image_unmasked.shape)
badcol = int(cen[1]-3)
bw = 3
rr = badcol-bw, badcol+bw+1
weight[rr[0]:rr[1], badcol] = 0.0
image_masked = image_unmasked.copy()
image_masked[rr[0]:rr[1], badcol] = 0.0
bmask = np.zeros_like(image_unmasked, dtype=np.int32)
bad_flags = 0
iimage, inoises = interpolate_image_and_noise(
image=image_masked,
weight=weight,
bmask=bmask,
bad_flags=bad_flags,
noises=[noise_image],
)
maxdiff = np.abs(image_unmasked-iimage).max()
if show:
import images
images.view_mosaic([image_masked, weight])
images.compare_images(
image_unmasked,
iimage,
width=2000,
height=int(2000*2/3),
)
print('max diff:', maxdiff)
assert maxdiff < 0.0025
@pytest.mark.parametrize("kind", ["x", "y"])
@pytest.mark.parametrize("i,ii", [(0, 1), (-1, -2)])
def test_copy_masked_edges_image_and_noise_weight(i, ii, kind):
rng = np.random.RandomState(seed=56)
image = rng.uniform(size=(100, 100))
weight = np.ones_like(image)
bmask = np.zeros_like(image, dtype=np.int32)
bad_flags = 0
if kind == "x":
weight[:, i] = 0
else:
weight[i, :] = 0
# put nans here to make sure interp is done ok
msk = weight <= 0
image[msk] = np.nan
rng = np.random.RandomState(seed=42)
noises = [
rng.normal(size=image.shape),
rng.normal(size=image.shape),
rng.normal(size=image.shape),
]
iimage, inoises, ibmask, iweight = copy_masked_edges_image_and_noise(
image=image,
weight=weight,
bmask=bmask,
bad_flags=bad_flags,
noises=noises)
if kind == "x":
assert np.allclose(iimage[:, i], image[:, ii])
assert np.allclose(ibmask[:, i], bmask[:, ii])
assert np.allclose(iweight[:, i], weight[:, ii])
for noise, inoise in zip(noises, inoises):
assert np.allclose(inoise[:, i], noise[:, ii])
else:
assert np.allclose(iimage[i, :], image[ii, :])
assert np.allclose(ibmask[i, :], bmask[ii, :])
assert np.allclose(iweight[i, :], weight[ii, :])
for noise, inoise in zip(noises, inoises):
assert np.allclose(inoise[i, :], noise[ii, :])
@pytest.mark.parametrize("kind", ["x", "y"])
@pytest.mark.parametrize("i,ii", [(0, 1), (-1, -2)])
def test_copy_masked_edges_image_and_noise_bmask(i, ii, kind):
rng =
|
np.random.RandomState(seed=56)
|
numpy.random.RandomState
|
import unittest
import sys
import numpy as np
import os
import warnings
from pyiron.atomistics.structure.atom import Atom
from pyiron.atomistics.structure.atoms import Atoms, CrystalStructure
from pyiron.atomistics.structure.sparse_list import SparseList
from pyiron.atomistics.structure.periodic_table import PeriodicTable, ChemicalElement
from pyiron.base.generic.hdfio import FileHDFio
class TestAtoms(unittest.TestCase):
@classmethod
def tearDownClass(cls):
if sys.version_info[0] >= 3:
file_location = os.path.dirname(os.path.abspath(__file__))
if os.path.isfile(os.path.join(file_location, "../../static/atomistics/test_hdf")):
os.remove(os.path.join(file_location, "../../static/atomistics/test_hdf"))
@classmethod
def setUpClass(cls):
C = Atom('C').element
cls.C3 = Atoms([C, C, C], positions=[[0, 0, 0], [0, 0, 2], [0, 2, 0]])
cls.C2 = Atoms(2 * [Atom('C')])
def setUp(self):
# These atoms are reset before every test.
self.CO2 = Atoms("CO2", positions=[[0, 0, 0], [0, 0, 1.5], [0, 1.5, 0]])
def test__init__(self):
pos, cell = generate_fcc_lattice()
pse = PeriodicTable()
el = pse.element("Al")
basis = Atoms()
self.assertIsInstance(basis, Atoms)
self.assertIsInstance(basis.info, dict)
self.assertIsInstance(basis.arrays, dict)
self.assertIsInstance(basis.adsorbate_info, dict)
self.assertIsInstance(basis.units, dict)
self.assertIsInstance(basis.pbc, (bool, list, np.ndarray))
self.assertIsInstance(basis.indices, np.ndarray)
self.assertIsNone(basis.positions)
self.assertIsInstance(basis.species, list)
self.assertIsInstance(basis.elements, np.ndarray)
self.assertIsNone(basis.cell)
basis = Atoms(symbols='Al', positions=pos, cell=cell)
self.assertIsInstance(basis, Atoms)
self.assertEqual(basis.get_spacegroup()["Number"], 225)
basis = Atoms(elements='Al', positions=pos, cell=cell)
self.assertIsInstance(basis, Atoms)
basis = Atoms(elements=['Al'], positions=pos, cell=cell)
self.assertIsInstance(basis, Atoms)
self.assertRaises(ValueError, Atoms, symbols="Pt", elements='Al', positions=pos, cell=cell)
basis = Atoms(numbers=[13], positions=pos, cell=cell)
self.assertEqual(basis.get_majority_species()['symbol'], "Al")
basis = Atoms(species=[el], indices=[0], positions=pos, cell=cell)
self.assertEqual(basis.get_majority_species()['symbol'], "Al")
self.assertIsInstance(basis, Atoms)
self.assertIsInstance(basis.info, dict)
self.assertIsInstance(basis.arrays, dict)
self.assertIsInstance(basis.adsorbate_info, dict)
self.assertIsInstance(basis.units, dict)
self.assertIsInstance(basis.pbc, (bool, list, np.ndarray))
self.assertIsInstance(basis.indices, np.ndarray)
self.assertIsInstance(basis.species, list)
self.assertIsInstance(basis.cell, np.ndarray)
self.assertIsInstance(basis.positions, np.ndarray)
self.assertIsInstance(basis.get_scaled_positions(), np.ndarray)
self.assertIsInstance(basis.elements, np.ndarray)
def test_set_species(self):
pos, cell = generate_fcc_lattice()
pse = PeriodicTable()
el = pse.element("Pt")
basis = Atoms(symbols='Al', positions=pos, cell=cell)
self.assertEqual(basis.get_chemical_formula(), "Al")
basis.set_species([el])
self.assertEqual(basis.get_chemical_formula(), "Pt")
self.assertTrue("Al" not in [sp.Abbreviation] for sp in basis._species_to_index_dict.keys())
self.assertTrue("Pt" in [sp.Abbreviation] for sp in basis._species_to_index_dict.keys())
def test_new_array(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis.set_repeat([10, 10, 10])
spins = np.ones(len(basis))
basis.new_array(name="spins", a=spins)
self.assertTrue(np.array_equal(basis.arrays['spins'], spins))
def test_set_array(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis.set_repeat([10, 10, 10])
spins = np.ones(len(basis), dtype=float)
basis.set_array(name="spins", a=2*spins, dtype=int)
self.assertTrue(np.array_equal(basis.arrays['spins'], 2 * spins))
def test_get_array(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis.set_repeat([10, 10, 10])
spins = np.ones(len(basis), dtype=float)
basis.set_array(name="spins", a=2*spins, dtype=int)
self.assertTrue(np.array_equal(basis.arrays['spins'], 2 * spins))
self.assertTrue(np.array_equal(basis.get_array(name="spins"), 2 * spins))
def test_add_tags(self):
self.CO2.add_tag(test_tag="a")
self.assertIsInstance(self.CO2.test_tag, SparseList)
self.assertEqual(self.CO2.test_tag[0], "a")
self.assertEqual(self.CO2.test_tag[0], self.CO2.test_tag[2])
self.assertIsInstance(self.CO2.test_tag.list(), list)
self.CO2.add_tag(selective_dynamics=[True, True, True])
self.CO2.selective_dynamics[1] = [True, False, True]
self.assertEqual(self.CO2.selective_dynamics[1], [True, False, True])
self.assertIsInstance(self.CO2.selective_dynamics.list(), list)
def test_get_tags(self):
self.CO2.add_tag(test_tag="a")
self.assertIsInstance(self.CO2.test_tag, SparseList)
self.assertIsInstance(self.CO2.get_tags(), type(dict().keys()))
def test_get_pbc(self):
self.assertTrue(np.array_equal(self.CO2.pbc, self.CO2.get_pbc()))
self.assertEqual(len(self.CO2.get_pbc()), 3)
def test_set_pbc(self):
self.CO2.set_pbc(value=[True, True, False])
self.assertTrue(np.array_equal(self.CO2.pbc, self.CO2.get_pbc()))
self.assertTrue(np.array_equal([True, True, False], self.CO2.get_pbc()))
self.CO2.set_pbc(value=False)
self.assertTrue(np.array_equal([False, False, False], self.CO2.get_pbc()))
self.assertTrue(np.array_equal(self.CO2.pbc, self.CO2.get_pbc()))
def test_chemical_element(self):
conv = self.CO2.convert_element('C')
self.assertIsInstance(conv, ChemicalElement)
self.assertIsInstance(self.CO2.convert_element(conv), ChemicalElement)
self.assertIsInstance(self.CO2.convert_element(self.CO2[0]), ChemicalElement)
with self.assertRaises(AssertionError):
self.assertIsInstance(self.CO2.convert_element(self.CO2), ChemicalElement)
self.assertEqual(len(self.CO2.species), 2)
def test_copy(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis_copy = basis.copy()
self.assertEqual(basis, basis_copy)
basis_copy[:] = "Pt"
self.assertNotEqual(basis, basis_copy)
def test_numbers_to_elements(self):
num_list = [1, 12, 13, 6]
self.assertTrue(np.array_equal([el.Abbreviation for el in self.CO2.numbers_to_elements(num_list)],
['H', 'Mg', 'Al', 'C']))
def test_scaled_pos_xyz(self):
basis = Atoms(symbols='AlAl', positions=[3*[0], 3*[1]], cell=2*np.eye(3))
pos_xyz = basis.pos_xyz()
self.assertAlmostEqual(np.linalg.norm(pos_xyz[0]-np.array([0, 1])), 0)
scaled_pos_xyz = basis.scaled_pos_xyz()
self.assertAlmostEqual(np.linalg.norm(pos_xyz[0]-basis.cell[0,0]*scaled_pos_xyz[0]), 0)
def test_to_hdf(self):
if sys.version_info[0] >= 3:
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../static/atomistics/test_hdf")
abs_filename = os.path.abspath(filename)
hdf_obj = FileHDFio(abs_filename)
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis.set_repeat([2, 2, 2])
basis.to_hdf(hdf_obj, "test_structure")
self.assertTrue(np.array_equal(hdf_obj["test_structure/positions"], basis.positions))
basis_new = Atoms().from_hdf(hdf_obj, "test_structure")
self.assertEqual(basis, basis_new)
def test_from_hdf(self):
if sys.version_info[0] >= 3:
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../static/atomistics/test_hdf")
abs_filename = os.path.abspath(filename)
hdf_obj = FileHDFio(abs_filename)
pos, cell = generate_fcc_lattice()
basis_store = Atoms(symbols='Al', positions=pos, cell=cell)
basis_store.set_repeat([2, 2, 2])
basis_store.to_hdf(hdf_obj, "simple_structure")
basis = Atoms().from_hdf(hdf_obj, group_name="simple_structure")
self.assertEqual(len(basis), 8)
self.assertEqual(basis.get_majority_species()['symbol'], "Al")
self.assertEqual(basis.get_spacegroup()['Number'], 225)
def test_create_Fe_bcc(self):
self.pse = PeriodicTable()
self.pse.add_element("Fe", "Fe_up", spin="up", pseudo_name='GGA')
self.pse.add_element("Fe", "Fe_down", spin="down", pseudo_name='GGA')
Fe_up = self.pse.element("Fe_up")
Fe_down = self.pse.element("Fe_down")
self.Fe_bcc = Atoms([Fe_up, Fe_down], scaled_positions=[[0, 0, 0], [0.25, 0.25, 0.25]], cell=np.identity(3))
self.Fe_bcc.add_tag("group")
self.Fe_bcc.group[:] = 0
def test_convert_formula(self):
self.assertEqual(self.CO2.convert_formula('C'), ['C'])
self.assertEqual(self.CO2.convert_formula('C3'), ['C', 'C', 'C'])
self.assertEqual(self.CO2.convert_formula('CO2'), ['C', 'O', 'O'])
self.assertEqual(self.CO2.convert_formula('CO2Fe'), ['C', 'O', 'O', 'Fe'])
self.assertEqual(self.CO2.convert_formula('CO2FeF21'), ['C', 'O', 'O', 'Fe', 'F', 'F'])
def test__getitem__(self):
self.assertEqual(self.CO2[0].symbol, 'C')
self.assertEqual(self.C3[2].position.tolist(), [0, 2, 0])
self.assertTrue((self.C3[1:].positions == np.array([[0, 0, 2], [0, 2, 0]])).all())
short_basis = self.CO2[0]
self.assertIsInstance(short_basis, Atom)
short_basis = self.CO2[[0]]
self.assertIsInstance(short_basis, Atoms)
self.assertEqual(short_basis.indices[0], 0)
self.assertEqual(len(short_basis.species), 1)
short_basis = self.CO2[[2]]
self.assertIsInstance(short_basis, Atoms)
self.assertEqual(short_basis.indices[0], 0)
self.assertEqual(len(short_basis.species), 1)
basis_Mg = CrystalStructure("Mg", bravais_basis="fcc", lattice_constant=4.2)
basis_O = CrystalStructure("O", bravais_basis="fcc", lattice_constant=4.2)
basis_O.positions += [0., 0., 0.5]
basis = basis_Mg + basis_O
basis.center_coordinates_in_unit_cell()
basis.set_repeat([3, 3, 3])
mg_indices = basis.select_index("Mg")
o_indices = basis.select_index("O")
basis_new = basis[mg_indices] + basis[o_indices]
self.assertEqual(len(basis_new._tag_list), len(basis[mg_indices]) + len(basis[o_indices]))
self.assertEqual(basis_new.get_spacegroup()["Number"], 225)
def test_positions(self):
self.assertEqual(self.CO2[1:].positions[1:].tolist(), [[0.0, 1.5, 0.0]])
self.CO2.positions[1][0] = 5.
self.assertEqual(self.CO2.positions[1].tolist(), [5.0, 0, 1.5])
def test_set_positions(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell)
basis.set_positions(np.array([[2.5, 2.5, 2.5]]))
self.assertTrue(np.array_equal(basis.positions, [[2.5, 2.5, 2.5]]))
def test_set_scaled_positions(self):
pos, cell = generate_fcc_lattice()
basis = Atoms(symbols='Al', positions=pos, cell=cell, a=4.2)
basis.set_scaled_positions(np.array([[0.5, 0.5, 0.5]]))
self.assertTrue(np.array_equal(basis.get_scaled_positions(), [[0.5, 0.5, 0.5]]))
self.assertTrue(np.array_equal(basis.positions, np.dot([[0.5, 0.5, 0.5]], basis.cell)))
with warnings.catch_warnings(record=True):
basis.scaled_positions = np.array([[0.5, 0.5, 0.5]])
self.assertTrue(np.array_equal(basis.scaled_positions, [[0.5, 0.5, 0.5]]))
def test_cell(self):
CO = Atoms("CO",
positions=[[0, 0, 0], [0, 0, 2]],
cell=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
pbc=[True, True, True])
self.assertTrue((CO.get_cell() == np.identity(3)).all())
self.assertTrue((CO.cell == np.identity(3)).all())
CO.cell[2][2] = 10.
self.assertTrue(CO.cell[2, 2] == 10.)
self.assertAlmostEqual(CO.get_volume(), 10)
self.assertAlmostEqual(CO.get_volume(per_atom=True), 0.5*10)
with self.assertRaises(ValueError):
CO.cell = -np.eye(3)
with self.assertRaises(ValueError):
CO.cell = [2,1]
def test_add(self):
COX = self.C2 + Atom("O", position=[0, 0, -2])
COX += Atom("O", position=[0, 0, -4])
COX += COX
n_objects = len(set(COX.get_species_objects()))
n_species = len(set(COX.get_chemical_elements()))
self.assertEqual(n_objects, n_species)
def test_pbc(self):
CO = Atoms("CO",
positions=[[0, 0, 0], [0, 0, 2]],
cell=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
pbc=[True, True, True])
self.assertTrue((CO.pbc == np.array([True, True, True])).all())
CO.set_pbc((True, True, False))
def test_get_masses_DOF(self):
self.assertEqual(len(self.CO2.get_masses_dof()), len(self.CO2.positions.flatten()))
def test_get_center_of_mass(self):
basis = Atoms(elements='AlFe', positions=[3*[0.5], 3*[1.5]], cell=2*np.eye(3))
mass = np.array(basis.get_masses())
self.assertAlmostEqual((mass[0]*0.5+mass[1]*1.5)/mass.sum(), basis.get_center_of_mass()[0])
basis.set_repeat(2)
self.assertAlmostEqual((mass[0]*0.5+mass[1]*1.5)/mass.sum()+1, basis.get_center_of_mass()[0])
def test_rotate(self):
unitcell = Atoms(elements='AlFe', positions=[3*[0], 3*[1]], cell=2*np.eye(3))
basis = unitcell.copy()
basis.rotate(vector=[0, 0, 0.1*np.pi])
self.assertAlmostEqual(np.arccos(basis.positions[1, :2].sum()/2)/np.pi, 0.1)
basis = unitcell.copy()
basis.rotate(vector=[0, 0, 1], angle=0.1*np.pi)
self.assertAlmostEqual(np.arccos(basis.positions[1, :2].sum()/2)/np.pi, 0.1)
basis = unitcell.copy()
center_of_mass = basis.get_center_of_mass()
basis.rotate(vector=[0, 0, 0.1*np.pi], center='com')
self.assertTrue(np.allclose(basis.get_center_of_mass(), center_of_mass))
basis = unitcell.copy()
center_of_positions = basis.positions.mean(axis=0)
basis.rotate(vector=[0, 0, 1], center='cop')
self.assertTrue(np.allclose(center_of_positions, basis.positions.mean(axis=0)))
basis = unitcell.copy()
position = basis.positions[1]
basis.rotate(vector=[0, 0, 1], center='cou')
self.assertTrue(np.allclose(position, basis.positions[1]))
basis = unitcell.copy()
basis.rotate(vector=np.random.random(3), rotate_cell=True)
self.assertAlmostEqual(basis.get_scaled_positions()[1,0], 0.5)
basis = unitcell.copy()
basis.rotate(vector=np.random.random(3), index_list=[0])
self.assertTrue(np.allclose(unitcell.positions.flatten(), basis.positions.flatten()))
def test_rotate_euler(self):
unitcell = Atoms(elements='AlFe', positions=[3*[0], 3*[1]], cell=2*np.eye(3))
basis = unitcell.copy()
basis.rotate_euler(phi=0.1*np.pi)
self.assertAlmostEqual(np.arccos(basis.positions[1, :2].sum()/2)/np.pi, 0.1)
basis = unitcell.copy()
center_of_mass = basis.get_center_of_mass()
basis.rotate_euler(phi=0.1*np.pi, center='com')
self.assertTrue(np.allclose(basis.get_center_of_mass(), center_of_mass))
basis = unitcell.copy()
center_of_positions = basis.positions.mean(axis=0)
basis.rotate_euler(phi=0.1*np.pi, center='cop')
self.assertTrue(np.allclose(center_of_positions, basis.positions.mean(axis=0)))
basis = unitcell.copy()
position = basis.positions[1]
basis.rotate_euler(phi=0.1*np.pi, center='cou')
self.assertTrue(np.allclose(position, basis.positions[1]))
def test_get_parent_basis(self):
periodic_table = PeriodicTable()
periodic_table.add_element(parent_element="O", new_element="O_up")
O_up = periodic_table.element("O_up")
O_basis = Atoms([O_up], cell=10.0 * np.eye(3), scaled_positions=[[0.5, 0.5, 0.5]])
O_simple = Atoms(["O"], cell=10.0 * np.eye(3), scaled_positions=[[0.5, 0.5, 0.5]])
O_parent = O_basis.get_parent_basis()
self.assertNotEqual(O_basis, O_parent)
self.assertEqual(O_simple, O_parent)
self.assertEqual(O_parent[0].symbol, "O")
periodic_table.add_element(parent_element="O", new_element="O_down")
O_down = periodic_table.element("O_down")
O_basis = Atoms([O_up, O_down], cell=10.0 * np.eye(3), scaled_positions=[[0.5, 0.5, 0.5], [0, 0, 0]])
O_simple = Atoms(["O", "O"], cell=10.0 * np.eye(3), scaled_positions=[[0.5, 0.5, 0.5]])
O_parent = O_basis.get_parent_basis()
self.assertNotEqual(O_basis, O_parent)
self.assertEqual(O_simple, O_parent)
self.assertEqual(O_parent.get_chemical_formula(), "O2")
self.assertEqual(len(O_basis.species), 2)
self.assertEqual(len(O_simple.species), 1)
self.assertEqual(len(O_parent.species), 1)
def test_profiling(self):
num = 1000
C100 = Atoms(num * ["C"], positions=[(0, 0, 0) for _ in range(num)])
self.assertEqual(len(C100), num)
def test_Au(self):
a = 4.05 # Gold lattice constant
b = a / 2.
fcc = Atoms(['Au'],
cell=[(0, b, b), (b, 0, b), (b, b, 0)],
pbc=True)
# print fcc
# print "volume: ", fcc.get_volume()
def test_set_absolute(self):
a = 4.05 # Gold lattice constant
b = a / 2.
positions = np.array([(0.5, 0.4, 0.)])
fcc = Atoms(symbols=['Au'],
scaled_positions=positions,
cell=[(0, b, b), (b, 0, b), (b, b, 0)],
pbc=True)
# fcc.set_absolute()
# print fcc.positions
# fcc.set_relative()
self.assertTrue(np.linalg.norm(fcc.get_scaled_positions() - positions) < 1e-10)
def test_set_relative(self):
lattice = CrystalStructure(element='Al', bravais_basis='fcc', lattice_constants=4)
basis_relative = lattice.copy()
basis_relative.set_relative()
basis_relative.cell[0,0] = 6
basis_absolute = lattice.copy()
basis_absolute.set_absolute()
basis_absolute.cell[0,0] = 6
self.assertAlmostEqual(basis_relative.positions[-1,0]*1.5, basis_absolute.positions[-1,0])
basis = lattice.copy()
self.assertAlmostEqual(basis.get_scaled_positions()[-1,0], basis_relative.get_scaled_positions()[-1,0])
basis.cell[0,0] = 6
self.assertAlmostEqual(basis.positions[-1,0], basis_absolute.positions[-1,0])
basis = lattice.copy()
basis_relative = lattice.copy()
basis_relative.set_relative()
basis.positions[-1,0] = 0.5
basis_relative.positions[-1,0] = 0.5
self.assertAlmostEqual(basis.positions[-1,0], basis_relative.positions[-1,0])
basis.cell = 3*np.ones(3)
self.assertAlmostEqual(basis.get_volume(), 27)
basis.cell = np.append(np.ones(3), 90-np.random.random(3)).flatten()
self.assertLess(basis.get_volume(), 1)
def test_repeat(self):
basis_Mg = CrystalStructure("Mg", bravais_basis="fcc", lattice_constant=4.2)
basis_O = CrystalStructure("O", bravais_basis="fcc", lattice_constant=4.2)
basis_O.set_scaled_positions(basis_O.get_scaled_positions()+[0., 0., 0.5])
basis = basis_Mg + basis_O
basis.center_coordinates_in_unit_cell()
basis.add_tag(selective_dynamics=[True, True, True])
basis.selective_dynamics[basis.select_index("O")] = [False, False, False]
len_before = len(basis)
sel_dyn_before = np.array(basis.selective_dynamics.list())
self.assertTrue(np.alltrue(np.logical_not(np.alltrue(sel_dyn_before[basis.select_index("O")], axis=1))))
self.assertTrue(np.alltrue(np.alltrue(sel_dyn_before[basis.select_index("Mg")], axis=1)))
basis.set_repeat([3, 3, 2])
sel_dyn_after = np.array(basis.selective_dynamics.list())
len_after = len(basis)
self.assertEqual(basis.get_spacegroup()["Number"], 225)
self.assertEqual(len_before * 18, len_after)
self.assertEqual(len(sel_dyn_before) * 18, len(sel_dyn_after))
self.assertTrue(np.alltrue(np.logical_not(np.alltrue(sel_dyn_after[basis.select_index("O")], axis=1))))
self.assertTrue(np.alltrue(np.alltrue(sel_dyn_after[basis.select_index("Mg")], axis=1)))
basis = basis_Mg + basis_O
basis.add_tag(spin=None)
basis.spin[basis.select_index("Mg")] = 1
basis.spin[basis.select_index("O")] = -1
self.assertTrue(np.array_equal(basis.spin[basis.select_index("Mg")].list(), 1 *
np.ones(len(basis.select_index("Mg")))))
self.assertTrue(np.array_equal(basis.spin[basis.select_index("O")].list(), -1 *
np.ones(len(basis.select_index("O")))))
basis.set_repeat(2)
self.assertTrue(np.array_equal(basis.spin[basis.select_index("Mg")].list(), 1 *
np.ones(len(basis.select_index("Mg")))))
self.assertTrue(np.array_equal(basis.spin[basis.select_index("O")].list(), -1 *
np.ones(len(basis.select_index("O")))))
basis = basis_Mg + basis_O
basis.add_tag(spin=None)
# Indices set as int
Mg_indices = np.array(basis.select_index("Mg"), dtype=int).tolist()
for ind in Mg_indices:
basis.spin[ind] = 1
O_indices = np.array(basis.select_index("O"), dtype=int).tolist()
for ind in O_indices:
basis.spin[ind] = -1
basis.set_repeat(2)
self.assertTrue(np.array_equal(basis.spin[basis.select_index("Mg")].list(), 1 *
np.ones(len(basis.select_index("Mg")))))
self.assertTrue(np.array_equal(basis.spin[basis.select_index("O")].list(), -1 *
np.ones(len(basis.select_index("O")))))
# Indices set as numpy.int
Mg_indices = np.array(basis.select_index("Mg"), dtype=np.int)
for ind in Mg_indices:
basis.spin[ind] = 1
O_indices = np.array(basis.select_index("O"), dtype=np.int)
for ind in O_indices:
basis.spin[ind] = -1
basis.set_repeat(2)
self.assertTrue(np.array_equal(basis.spin[basis.select_index("Mg")].list(), 1 *
np.ones(len(basis.select_index("Mg")))))
self.assertTrue(np.array_equal(basis.spin[basis.select_index("O")].list(), -1 *
np.ones(len(basis.select_index("O")))))
def test_boundary(self):
cell = 2.2 * np.identity(3)
NaCl = Atoms('NaCl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
NaCl.set_repeat([3, 3, 3])
# NaCl.plot3d()
NaCl_bound = NaCl.get_boundary_region(0.2)
# NaCl_bound.plot3d()
def test_get_distance(self):
cell = 2.2 * np.identity(3)
NaCl = Atoms('NaCl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
self.assertAlmostEqual(NaCl.get_distance(0, 1), 2.2*0.5*np.sqrt(3))
self.assertAlmostEqual(NaCl.get_distance(0, [0, 0, 0.5]), 0.5)
self.assertAlmostEqual(NaCl.get_distance([0, 0, 0], [0, 0, 0.5]), 0.5)
def test_get_neighborhood(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=np.identity(3))
neigh = basis.get_neighborhood([0, 0, 0.1])
self.assertEqual(neigh.distances[0], 0.1)
def test_get_neighbors(self):
cell = 2.2 * np.identity(3)
NaCl = Atoms('NaCl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
# NaCl.repeat([3, 3, 3])
# NaCl.positions = [(1,1,1)]
boundary = NaCl.get_boundary_region(3.5)
extended_cell = NaCl + boundary
# extended_cell.plot3d()
nbr_dict = NaCl.get_neighbors(num_neighbors=12, t_vec=True)
basis = Atoms(symbols='FeFe', positions=[3*[0], 3*[1]], cell=2*np.eye(3))
neigh = basis.get_neighbors(include_boundary=False)
self.assertAlmostEqual(neigh.distances[0][0], np.sqrt(3))
basis.set_repeat(2)
self.assertAlmostEqual(neigh.distances[0][0], np.sqrt(3))
# print nbr_dict.distances
# print [set(s) for s in nbr_dict.shells]
def test_center_coordinates(self):
cell = 2.2 * np.identity(3)
NaCl = Atoms('NaCl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
NaCl.set_repeat([3, 3, 3])
NaCl.positions += [2.2, 2.2, 2.2]
NaCl.center_coordinates_in_unit_cell(origin=-0.5)
self.assertTrue(-0.5 <= np.min(NaCl.get_scaled_positions()))
self.assertTrue(np.max(NaCl.get_scaled_positions() < 0.5))
NaCl.center_coordinates_in_unit_cell(origin=0.)
self.assertTrue(0 <= np.min(NaCl.positions))
self.assertTrue(np.max(NaCl.get_scaled_positions() < 1))
@unittest.skip("skip ovito because it is not installed in the test environment")
def test_analyse_ovito_cna_adaptive(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=np.identity(3))
basis.analyse_ovito_cna_adaptive()['CommonNeighborAnalysis.counts.BCC']==2
@unittest.skip("skip ovito because it is not installed in the test environment")
def test_analyse_ovito_centro_symmetry(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=np.identity(3))
self.assertTrue(all(basis.analyse_ovito_centro_symmetry()==np.array([0.75, 0.75])))
@unittest.skip("skip ovito because it is not installed in the test environment")
def test_analyse_ovito_voronoi_volume(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=np.identity(3))
self.assertTrue(all(basis.analyse_ovito_centro_symmetry()==np.array([0.5, 0.5])))
@unittest.skip("skip nglview because it is not installed in the test environment")
def test_plot3d(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=np.identity(3))
view = basis.plot3d()
def test_get_shell_radius(self):
basis = Atoms('FeFe', positions=[3*[0], 3*[1]], cell=2*np.eye(3))
self.assertAlmostEqual(basis.get_shell_radius(), np.mean(list(basis.get_shells().values())))
def test_group_points_by_symmetry(self):
basis = Atoms('FeFe', positions=[3*[0], 3*[1]], cell=2*np.eye(3))
self.assertEqual(len(basis.group_points_by_symmetry([3*[0.5], 3*[1.5]])), 1)
self.assertEqual(len(basis.group_points_by_symmetry([3*[0.5], 3*[1.4]])), 2)
def test_get_equivalent_voronoi_vertices(self):
basis = Atoms('FeFe', positions=[3*[0], 3*[1]], cell=2*np.eye(3))
vert = basis.get_equivalent_voronoi_vertices()
self.assertEqual(len(vert), 1)
self.assertGreater(np.min(np.linalg.norm(vert[0]-basis.positions[0], axis=-1)), 0.5)
self.assertGreater(np.min(np.linalg.norm(vert[0]-basis.positions[1], axis=-1)), 0.5)
def test_get_shells(self):
dim = 3
cell = 2.2 * np.identity(dim)
Al_sc = Atoms('AlAl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
Al_sc.set_repeat([3, 3, 3])
self.assertEqual(np.round(Al_sc.get_shells()[2], 6), 2.2)
def test_get_shell_matrix(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=np.identity(3))
output = basis.get_shell_matrix(shell=1, restraint_matrix=['Fe', 'Fe'])
self.assertIsInstance(output, np.ndarray)
self.assertEqual(np.sum(output), 16)
self.assertTrue(np.all(np.dot(output, output) == np.identity(2)*64))
def test_get_distance_matrix(self):
basis = Atoms('FeFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=np.identity(3))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
output = basis.get_distance_matrix()
self.assertIsInstance(output, np.ndarray)
output = np.rint(output*2/np.sqrt(3))
self.assertTrue(np.all(np.dot(output, output)==np.identity(2)))
self.assertEqual(len(w), 1)
def test_cluster_analysis(self):
import random
cell = 2.2 * np.identity(3)
Al_sc = Atoms(elements=['Al', 'Al'], scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
Al_sc.set_repeat([4, 4, 4])
neighbors = Al_sc.get_neighbors(num_neighbors=100, t_vec=False, exclude_self=True)
c_Zn = 0.1
pse = PeriodicTable()
Zn = pse.element("Zn")
random.seed(123456)
for _ in range(1):
Zn_ind = random.sample(range(len(Al_sc)), int(c_Zn * len(Al_sc)))
# for i_Zn in Zn_ind:
# Al_sc.elements[i_Zn] = Zn
cluster = Al_sc.cluster_analysis(Zn_ind, neighbors)
cluster_len = np.sort([len(v) for k, v in cluster.items()])
# print np.histogram(cluster_len), np.sum(cluster_len), len(Zn_ind)
# for key, value in cluster.items():
# el = pse.Element((key % 100) + 1)
# for i_el in value:
# Al_sc.elements[i_el] = el
# Al_sc.plot3d()
def test_get_bonds(self):
dim = 3
cell = 2.62 * np.identity(dim)
d1, d2 = 0.6, 0.6
H2O = Atoms('H2O', scaled_positions=[(d1, d2, 0), (d1, -d2, 0), (0, 0, 0)], cell=cell)
H2O.set_repeat([1, 1, 3])
# H2O.plot3d(show_bonds=True) #, bond_stretch=2)
# print H2O.get_bonds(radius=2.)[0]
# print np.sum(H2O.get_masses())/H2O.get_volume()
def test_get_symmetry(self):
cell = 2.2 * np.identity(3)
Al = Atoms('AlAl', positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell).repeat(2)
self.assertEqual(len(set(Al.get_symmetry()['equivalent_atoms'])), 1)
self.assertEqual(len(Al.get_symmetry()['translations']), 96)
self.assertEqual(len(Al.get_symmetry()['translations']), len(Al.get_symmetry()['rotations']))
def test_get_voronoi_vertices(self):
cell = 2.2 * np.identity(3)
Al = Atoms('AlAl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
pos, box = Al._get_voronoi_vertices()
self.assertEqual(len(pos), 14)
def test_get_parent_symbols(self):
self.assertTrue(np.array_equal(self.CO2.get_parent_symbols(), ["C", "O", "O"]))
self.assertTrue(np.array_equal(self.CO2.get_parent_symbols(), self.CO2.get_chemical_symbols()))
cell = np.eye(3) * 10.0
pse = PeriodicTable()
pse.add_element("O", "O_up", spin="up")
o_up = pse.element("O_up")
basis = Atoms([o_up], scaled_positions=[[0.27, 0.27, 0.27]], cell=cell)
self.assertTrue(np.array_equal(basis.get_parent_symbols(), ["O"]))
self.assertFalse(np.array_equal(basis.get_parent_symbols(), basis.get_chemical_symbols()))
def test_get_chemical_symbols(self):
self.assertTrue(np.array_equal(self.CO2.get_chemical_symbols(), ["C", "O", "O"]))
cell = np.eye(3) * 10.0
pse = PeriodicTable()
pse.add_element("O", "O_up", spin="up")
o_up = pse.element("O_up")
basis = Atoms([o_up], scaled_positions=[[0.27, 0.27, 0.27]], cell=cell)
self.assertTrue(np.array_equal(basis.get_chemical_symbols(), ["O_up"]))
def test_get_symmetry_dataset(self):
cell = 2.2 * np.identity(3)
Al_sc = Atoms('AlAl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
Al_sc.set_repeat([2, 2, 2])
self.assertEqual(Al_sc.get_symmetry_dataset()['number'], 229)
def test_get_space_group(self):
cell = 2.2 * np.identity(3)
Al_sc = Atoms('AlAl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
self.assertEqual(Al_sc.get_spacegroup()['InternationalTableSymbol'], 'Im-3m')
self.assertEqual(Al_sc.get_spacegroup()['Number'], 229)
cell = 4.2 * (0.5 * np.ones((3, 3)) - 0.5 * np.eye(3))
Al_fcc = Atoms('Al', scaled_positions=[(0, 0, 0)], cell=cell)
self.assertEqual(Al_fcc.get_spacegroup()['InternationalTableSymbol'], 'Fm-3m')
self.assertEqual(Al_fcc.get_spacegroup()['Number'], 225)
a = 3.18
c = 1.623 * a
cell = np.eye(3)
cell[0, 0] = a
cell[2, 2] = c
cell[1, 0] = -a/2.
cell[1, 1] = np.sqrt(3) * a / 2.
pos = np.array([[0., 0., 0.], [1./3., 2./3., 1./2.]])
Mg_hcp = Atoms('Mg2', scaled_positions=pos, cell=cell)
self.assertEqual(Mg_hcp.get_spacegroup()['Number'], 194)
cell = np.eye(3)
cell[0, 0] = a
cell[2, 2] = c
cell[1, 1] = np.sqrt(3) * a
pos = np.array([[0., 0., 0.], [0.5, 0.5, 0.], [0.5, 0.16666667, 0.5], [0., 0.66666667, 0.5]])
Mg_hcp = Atoms('Mg4', scaled_positions=pos, cell=cell)
self.assertEqual(Mg_hcp.get_spacegroup()['Number'], 194)
def test_get_primitive_cell(self):
cell = 2.2 * np.identity(3)
Al_sc = Atoms('AlFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
Al_sc.set_repeat([2, 2, 2])
primitive_cell = Al_sc.get_primitive_cell()
self.assertEqual(primitive_cell.get_spacegroup()['Number'], 221)
def test_get_ir_reciprocal_mesh(self):
cell = 2.2 * np.identity(3)
Al_sc = Atoms('AlAl', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
self.assertEqual(len(Al_sc.get_ir_reciprocal_mesh([3, 3, 3])[0]), 27)
def test_get_number_species_atoms(self):
self.assertEqual(list(self.CO2.get_number_species_atoms().values()), [1, 2])
def test_get_chemical_formula(self):
self.assertEqual(self.CO2.get_chemical_formula(), "CO2")
def test_get_equivalent_atoms(self):
cell = 2.2 * np.identity(3)
Al_sc = Atoms('AlFe', scaled_positions=[(0, 0, 0), (0.5, 0.5, 0.5)], cell=cell)
Al_sc.set_repeat([2, 2, 2])
def test_center(self):
old_pos = self.CO2.positions.copy()
self.CO2.center(vacuum=5)
new_array = old_pos + 5 * np.ones(3)
self.assertTrue(np.array_equal(self.CO2.positions, new_array))
def test_get_positions(self):
basis_Mg = CrystalStructure("Mg", bravais_basis="fcc", lattice_constant=4.2)
self.assertTrue(np.array_equal(basis_Mg.positions, basis_Mg.get_positions()))
def test_get_scaled_positions(self):
basis_Mg = CrystalStructure("Mg", bravais_basis="fcc", lattice_constant=4.2)
basis_Mg.cell += 0.1*np.random.random((3,3))
basis_Mg = basis_Mg.center_coordinates_in_unit_cell()
self.assertTrue(np.allclose(np.dot(np.linalg.inv(basis_Mg.cell).T, basis_Mg.positions.T).T, basis_Mg.get_scaled_positions()))
def test_occupy_lattice(self):
basis_Mg = CrystalStructure("Mg", bravais_basis="fcc", lattice_constant=4.2)
basis_O = CrystalStructure("O", bravais_basis="fcc", lattice_constant=4.2)
basis_O.set_scaled_positions(basis_O.get_scaled_positions()+[0., 0., 0.5])
basis = basis_Mg + basis_O
basis.center_coordinates_in_unit_cell()
orig_basis = basis.copy()
self.assertEqual(basis.get_chemical_formula(), "Mg4O4")
Mg_indices = basis.select_index("Mg")
O_indices = basis.select_index("O")
basis.occupy_lattice(Na=Mg_indices)
self.assertEqual(basis.get_chemical_formula(), "Na4O4")
basis.occupy_lattice(Cl=O_indices)
self.assertEqual(basis.get_chemical_formula(), "Cl4Na4")
self.assertTrue(np.array_equal(basis.select_index("Na"), Mg_indices))
self.assertTrue(np.array_equal(basis.select_index("Cl"), O_indices))
orig_basis.set_repeat([2, 2, 2])
Mg_indices = orig_basis.select_index("Mg")
O_indices = orig_basis.select_index("O")
orig_basis.occupy_lattice(Cl=O_indices, Na=Mg_indices)
self.assertEqual(orig_basis.get_chemical_formula(), "Cl32Na32")
orig_basis.occupy_lattice(H=O_indices[0])
self.assertEqual(orig_basis.get_chemical_formula(), "Cl31HNa32")
def test_get_majority_species(self):
basis = Atoms(symbols=4*['Fe'], positions=np.random.random((4, 3)), cell=np.eye(3))
self.assertEqual(basis.get_majority_species()['count'], 4)
self.assertEqual(basis.get_majority_species()['symbol'], 'Fe')
basis = Atoms(symbols=['Fe', 'Cu', 'Ni', 'Al'], positions=np.random.random((4, 3)), cell=np.eye(3))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
basis.get_majority_species()
self.assertEqual(len(w), 1)
def test_select_index(self):
basis = Atoms(symbols=['Fe', 'Cu', 'Ni', 'Al'], positions=np.random.random((4, 3)), cell=np.eye(3))
self.assertTrue(np.array_equal(basis.select_index("Fe"), [0]))
self.assertTrue(np.array_equal(basis.select_index("Ni"), [2]))
self.assertTrue(np.array_equal(basis.select_index(['Cu', 'Al']), [1, 3]))
Fe = basis.convert_element('Fe')
Ni = basis.convert_element('Ni')
self.assertTrue(np.array_equal(basis.select_index([Fe, Ni]), [0, 2]))
pse = PeriodicTable()
pse.add_element("Ni", "Ni_up", spin=1)
ni_up = pse.element("Ni_up")
basis = Atoms(symbols=['Fe', 'Cu', ni_up, 'Al'], positions=np.random.random((4, 3)), cell=np.eye(3))
self.assertTrue(np.array_equal(basis.select_index("Fe"), [0]))
self.assertTrue(np.array_equal(basis.select_index(ni_up), [2]))
self.assertTrue(np.array_equal(basis.select_index(['Cu', 'Al']), [1, 3]))
Fe = basis.convert_element('Fe')
Ni = basis.convert_element(ni_up)
self.assertTrue(np.array_equal(basis.select_index([Fe, Ni]), [0, 2]))
def test_parent_index(self):
basis_Mg = CrystalStructure("Mg", bravais_basis="fcc", lattice_constant=4.2)
basis_O = CrystalStructure("O", bravais_basis="fcc", lattice_constant=4.2)
basis_O.positions += [0., 0., 0.5]
basis = basis_Mg + basis_O
basis.center_coordinates_in_unit_cell()
basis.set_repeat([2, 2, 2])
o_indices = basis.select_index("O")
pse = PeriodicTable()
pse.add_element("O", "O_up", spin="up")
o_up = pse.element("O_up")
basis[o_indices] = o_up
self.assertTrue(np.array_equal(o_indices, basis.select_index(o_up)))
self.assertEqual(len(basis.select_index("O")), 0)
self.assertTrue(np.array_equal(o_indices, basis.select_parent_index("O")))
def test__eq__(self):
test_basis = self.CO2.copy()
self.assertEqual(test_basis, self.CO2)
test_basis.positions[2] += 0.0
self.assertEqual(test_basis, self.CO2)
self.assertNotEqual(self.C2, self.CO2)
def test__add__(self):
cell = np.eye(3) * 10.0
basis_0 = Atoms(["O"], scaled_positions=[[0.5, 0.5, 0.5]], cell=cell)
basis_1 = Atoms(["H"], scaled_positions=[[0.75, 0.75, 0.75]], cell=cell)
basis_2 = Atoms(["H"], scaled_positions=[[0.25, 0.25, 0.25]], cell=cell)
basis_3 = Atoms(["H", "O", "N"], scaled_positions=[[0.35, 0.35, 0.35], [0., 0., 0.], [0., 0., 0.1]], cell=cell)
pse = PeriodicTable()
pse.add_element("O", "O_up", spin="up")
o_up = pse.element("O_up")
basis_4 = Atoms([o_up], scaled_positions=[[0.27, 0.27, 0.27]], cell=np.eye(3) * 20.0)
b = basis_0 + basis_1
self.assertEqual(b.get_chemical_formula(), "HO")
b = basis_0 + basis_1 + basis_2
self.assertEqual(b.get_chemical_formula(), "H2O")
b += basis_2
self.assertEqual(b.get_chemical_formula(), "H3O")
b = basis_0 + basis_1 + basis_2 + basis_3
self.assertEqual(b.get_chemical_formula(), "H3NO2")
self.assertTrue(np.array_equal(b.get_scaled_positions()[b.select_index("N")], [[0., 0., 0.1]]))
self.assertTrue(np.allclose(b.get_scaled_positions()[b.select_index("H")], [[0.75, 0.75, 0.75], [0.25, 0.25, 0.25],
[0.35, 0.35, 0.35]]))
self.assertTrue(np.allclose(b.get_scaled_positions()[b.select_index("O")], [[0.5, 0.5, 0.5], [0., 0., 0.]]))
b.set_repeat([2, 2, 2])
self.assertEqual(b.get_chemical_formula(), "H24N8O16")
b += basis_4
self.assertEqual(b.get_chemical_formula(), "H24N8O16O_up")
self.assertTrue(np.allclose(b.get_scaled_positions()[b.select_index(o_up)], [[0.27, 0.27, 0.27]]))
COX = self.C2 + Atom("O", position=[0, 0, -2])
COX += Atom("O", position=[0, 0, -4])
COX += COX
n_objects = len(set(COX.get_species_objects()))
n_species = len(set(COX.get_chemical_elements()))
self.assertEqual(n_objects, n_species)
self.assertEqual(n_objects, 2)
self.assertEqual(n_species, 2)
basis_Mg = CrystalStructure("Mg", bravais_basis="fcc", lattice_constant=4.2)
basis_O = CrystalStructure("O", bravais_basis="fcc", lattice_constant=4.2)
# basis_O.set_relative()
basis_O.set_scaled_positions([0., 0., 0.5]+basis_O.get_scaled_positions())
basis = basis_Mg + basis_O
self.assertEqual(len(basis._tag_list), len(basis_Mg._tag_list) + len(basis_O._tag_list))
basis.center_coordinates_in_unit_cell()
self.assertEqual(basis.get_spacegroup()["Number"], 225)
def test__delitem__(self):
cell = np.eye(3) * 10.0
basis_0 = Atoms(["O"], scaled_positions=[[0.5, 0.5, 0.5]], cell=cell)
basis_1 = Atoms(["H"], scaled_positions=[[0.75, 0.75, 0.75]], cell=cell)
basis_2 = Atoms(["H"], scaled_positions=[[0.25, 0.25, 0.25]], cell=cell)
basis_3 = Atoms(["H", "O", "N"], scaled_positions=[[0.35, 0.35, 0.35], [0., 0., 0.], [0., 0., 0.1]], cell=cell)
pse = PeriodicTable()
pse.add_element("O", "O_up", spin="up")
o_up = pse.element("O_up")
basis_4 = Atoms([o_up], scaled_positions=[[0.27, 0.27, 0.27]], cell=cell)
b = basis_0 + basis_1 + basis_2 + basis_3 + basis_4
O_indices = b.select_index("O")
self.assertEqual(len(b), 7)
self.assertEqual(len(b.indices), 7)
self.assertEqual(len(b.species), 4)
b.__delitem__(O_indices[0])
self.assertEqual(b.get_chemical_formula(), "H3NOO_up")
self.assertEqual(len(b), 6)
self.assertEqual(len(b.indices), 6)
self.assertEqual(len(b._tag_list), 6)
self.assertEqual(len(b.species), 4)
O_indices = b.select_index("O")
b.__delitem__(O_indices)
self.assertEqual(b.get_chemical_formula(), "H3NO_up")
self.assertEqual(len(b), 5)
self.assertEqual(len(b.indices), 5)
self.assertEqual(len(b.species), 3)
self.assertEqual(np.max(b.indices), 2)
N_indices = b.select_index("N")
b.__delitem__(N_indices)
self.assertEqual(b.get_chemical_formula(), "H3O_up")
self.assertEqual(len(b), 4)
self.assertEqual(len(b.indices), 4)
self.assertEqual(len(b.species), 2)
self.assertEqual(np.max(b.indices), 1)
O_indices = b.select_index(o_up)
b.__delitem__(O_indices)
self.assertEqual(b.get_chemical_formula(), "H3")
self.assertEqual(len(b), 3)
self.assertEqual(len(b.indices), 3)
self.assertEqual(len(b.species), 1)
self.assertEqual(np.max(b.indices), 0)
def test__setitem__(self):
basis = self.CO2.copy()
basis[0] = 'H'
basis[1] = 'H'
self.assertEqual(basis.get_chemical_formula(), "H2O")
self.assertEqual(len(basis.species), 2)
self.assertEqual(len(basis.get_species_symbols()), 2)
basis = self.CO2.copy()
basis[0] = 'H'
basis[
|
np.int64(0)
|
numpy.int64
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provide functions that operate on projections
"""
import itertools
import warnings
import multiprocessing
import tomopy
import numpy as np
import scipy as sp
import concurrent.futures as cf
from typing import Tuple
from scipy.signal import medfilt
from scipy.signal import medfilt2d
from scipy.ndimage import gaussian_filter
from scipy.ndimage import gaussian_filter1d
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from skimage import exposure
from skimage.transform import probabilistic_hough_line
from skimage.feature import canny
from skimage.feature import register_translation
from sklearn.cluster import KMeans
from tifffile import imread
from lmfit.models import GaussianModel
from lmfit.models import LorentzianModel
from tomopy import minus_log
from tomopy import find_center_pc
from tomoproc.util.npmath import rescale_image
from tomoproc.util.peakfitting import fit_sigmoid
from tomoproc.util.npmath import binded_minus_log
def detect_sample_in_sinogram(
sino: np.ndarray,
kernel_size: int=3,
sigma: int=50,
minimum_distance_to_edge: int=5,
) -> Tuple[int, int]:
"""
Description
-----------
Automatically detect the left and right edge of the sample region
in a sinogram with median and gaussian filtering.
Parameters
----------
sino: np.ndarray
Sinogram for evaluation
kernel_size: int
median filter (quick denoising) kernel size
sigma: int
gaussian filter kernel size
minimum_distance_to_edge: int
minimum number of pixels to the sinogram edge
Returns
-------
(int, int)
left and right edge of the sample region
"""
# use median filter and gaussian filter to locate the sample region
# -- median filter is to counter impulse noise
# -- gaussian filter is for estimating the sample location
prf = np.gradient(
np.sum(
gaussian_filter(
medfilt2d(sino, kernel_size=kernel_size),
sigma=sigma,
),
axis=0,
)
)
return (
max(prf.argmin(), minimum_distance_to_edge),
min(prf.argmax(), sino.shape[1]-minimum_distance_to_edge),
)
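# Hedged usage sketch (not part of the original module): assumes `sino` is a
# 2D sinogram of shape [n_angles, n_columns]; shows how the detected edges
# might be used to crop the sample region before further processing.
def _example_crop_sample_region(sino: np.ndarray) -> np.ndarray:
    left, right = detect_sample_in_sinogram(sino, kernel_size=3, sigma=50)
    # keep only the columns that contain the sample
    return sino[:, left:right]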
def detect_corrupted_proj(
projs: np.ndarray,
omegas: np.ndarray,
threshold: float=0.8,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Description
-----------
Corrupted frames/projections will add a foggy layer (artifact) of random
noise to the final reconstruction. These corrupted frames can be detected
through 180 degree pair-wise checking.
Parameters
----------
projs: np.ndarray
tomography image stack [axis_omega, axis_imgrow, axis_imgcol]
omegas: np.ndarray
angular position vector
threshold: float
Threshold for picking out the outliers
Returns
-------
tuple(np.ndarray, np.ndarray)
idx_BAD idx_GOOD
Return the indices of BAD frames and GOOD frames/projections
"""
# assume equal angular steps; find the index range equal to 180 degrees
dn = int(np.pi/abs(omegas[1] - omegas[0]))
# get the cnts from each 180 pairs
# use the faster version instead
with cf.ProcessPoolExecutor() as e:
_jobs = [
e.submit(
tomopy.find_center_pc,
rescale_image(binded_minus_log(projs[nimg,:,:])),
rescale_image(binded_minus_log(projs[nimg+dn,:,:])),
)
for nimg in range(dn)
]
cnts = [me.result() for me in _jobs]
# 180 -> 360
cnts = np.array(cnts + cnts)
# locate outlier
diff = np.absolute(cnts - medfilt(cnts))/cnts
return np.where(diff>threshold)[0], np.where(diff<=threshold)[0]
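# Hedged usage sketch (assumption, not original code): discard the corrupted
# projections (and their angles) flagged by detect_corrupted_proj before
# reconstruction.
def _example_drop_corrupted(projs: np.ndarray, omegas: np.ndarray):
    idx_bad, idx_good = detect_corrupted_proj(projs, omegas, threshold=0.8)
    return projs[idx_good], omegas[idx_good]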
def guess_slit_box(img: np.ndarray, boost: bool=True) -> dict:
"""
Description
-----------
Auto detect/guess the four blades position (in pixels) for given image
Parameters
----------
img: np.ndarray
2D tomography image with slit box
Returns
-------
dict:
dictionary containing the approximate positions (in pixels) of the four
slit blades
NOTE
----
For images without any slit blades, a random (probably useless) region
will be returned.
Relatively fast:
tested on MacBookPro13,3
395 ms ± 14 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
"""
if boost:
# Contrast stretching
pl, ph = np.percentile(img, (2, 98))
img = exposure.rescale_intensity(img, in_range=(pl, ph))
# equilize hist
img = exposure.equalize_adapthist(img)
# map to log to reveal transition box
img = np.log(medfilt2d(img.astype(float))+1)
# get row and col profile gradient
pdot_col = np.gradient(gaussian_filter1d(np.average(img, axis=0), sigma=11))
pdot_row = np.gradient(gaussian_filter1d(np.average(img, axis=1), sigma=11))
return {
'left': np.argmax(pdot_col),
'right': np.argmin(pdot_col),
'top': np.argmax(pdot_row),
'bot': np.argmin(pdot_row),
}
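# Hedged usage sketch (assumption, not original code): crop a projection image
# to the region enclosed by the guessed slit blades.
def _example_crop_to_slit_box(img: np.ndarray) -> np.ndarray:
    edges = guess_slit_box(img, boost=True)
    return img[edges['top']:edges['bot'], edges['left']:edges['right']]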
def detect_slit_corners(img: np.ndarray, r: float=50) -> list:
"""
Description
-----------
Detect four corners (sub-pixel) formed by the four blade slits commonly
used at 1ID@APS.
Parameters
----------
img: np.ndarray
input image; slit blades must be visible within the image
r: float
domain size; automatically adjusted to avoid out-of-bound
issues
Returns
-------
list[upper_left, lower_left, lower_right, upper_right]
List of the sub-pixel positions of the four corners in the
counter-clockwise order
NOTE
----
The location of the corner is affected by the size of the domain (r). A
consistent value is recommended for quantitative analysis, such as detector
drift correction.
"""
# guess the rough location first
# by default use boosted contrast; if that fails, fall back to the raw image
img = _safe_read_img(img)
try:
edges = guess_slit_box(img, boost=True)
le,re,te,be = edges['left'], edges['right'], edges['top'], edges['bot']
r_row, r_col = min(r, be-te-1), min(r, re-le-1)
safe_domain = lambda row, col: [(max(row - r_row, 0), min(row + r_row + 1, img.shape[0])),
(max(col - r_col, 0), min(col + r_col + 1, img.shape[1])),
]
cnrs = [(te, le), (be, le), (be, re), (te, re)] # (row, col)
for i, cnr in enumerate(cnrs):
rowrange, colrange = safe_domain(*cnr)
domain = img[rowrange[0]:rowrange[1], colrange[0]:colrange[1]]
horizontal_lp = np.average(domain, axis=0)
vertical_lp = np.average(domain, axis=1)
popt, _ = fit_sigmoid(np.arange(len(vertical_lp)), vertical_lp)
_row = popt[0]
popt, _ = fit_sigmoid(np.arange(len(horizontal_lp)), horizontal_lp)
_col = popt[0]
cnrs[i] = (rowrange[0]+_row, colrange[0]+_col)
except Exception:
print("boosted contrast led to an error, using raw image instead")
edges = guess_slit_box(img, boost=False)
le,re,te,be = edges['left'], edges['right'], edges['top'], edges['bot']
r_row, r_col = min(r, be-te-1), min(r, re-le-1)
safe_domain = lambda row, col: [(max(row - r_row, 0), min(row + r_row + 1, img.shape[0])),
(max(col - r_col, 0), min(col + r_col + 1, img.shape[1])),
]
cnrs = [(te, le), (be, le), (be, re), (te, re)] # (row, col)
for i, cnr in enumerate(cnrs):
rowrange, colrange = safe_domain(*cnr)
domain = img[rowrange[0]:rowrange[1], colrange[0]:colrange[1]]
horizontal_lp =
|
np.average(domain, axis=0)
|
numpy.average
|
#!/usr/bin/python
'''
calculates spatial scale of the activity patterns, determines fractures,
the spatial scale of the correlation structure, the eccentricity of the
local correlations, and the dimensionality of the activity patterns.
'''
import numpy as np
import os
import sys
import h5py
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import cv2
from skimage.morphology import skeletonize
from scipy.ndimage import measurements, binary_fill_holes, binary_erosion, binary_dilation,label
from scipy.optimize import curve_fit
from scipy.ndimage.morphology import distance_transform_edt
from scipy.interpolate import interp2d
from analysis.tools import auto_correlation,find_local_maxima,get_point_neighbourhood,\
ellipse_fitting,calc_surrogate_activity_pattern,get_peak_corr_vals,\
dimension_abbott,smooth_map
from network_model.tools import save_activity
def expfct_full(x,tau,a):
return np.exp(-x/tau)*(1.-a)+a
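# Hedged usage sketch (assumption, not original code): fit the exponential
# decay-with-offset model to a 1D correlation profile using the already
# imported curve_fit; `distances` and `corr_profile` are hypothetical arrays.
def _example_fit_expfct(distances, corr_profile):
    (tau, a), _ = curve_fit(expfct_full, distances, corr_profile, p0=(1.0, 0.0))
    return tau, a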
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
sz = 25 #fontsize
lw = 2 #linewidth
fontdict = {'fontsize' : sz}
if __name__=="__main__":
## version to analyse
VERSION = int(sys.argv[1])
file_path = save_activity.global_path + 'data2d/'
image_path = save_activity.global_path + 'image/'
## SCRIPT SETTINGS ##
save_data = True ## save computed data in hdf5 file
frame_no = -1 ## which activity frame to analyse (per event)
## choose which analyses to run
spatial_scale_analysis = True
fracture_analysis = True
elli_analysis = True
long_range_analysis = True
dimensionality_analysis = True
## loading and storing paths
listfiles = os.listdir(file_path)
str_to_search = 'params_new_v{}.0.hdf5'.format(VERSION) # 'corr_matrix'
matched_files = np.array(listfiles)[np.array(['{}'.format(str_to_search) in string for string in listfiles])]
for ifolder in ['activity','envelope','fracture','corr']:
if not os.path.exists(image_path + 'activity_v{}/{}'.format(VERSION,ifolder)):
os.makedirs(image_path + 'activity_v{}/{}'.format(VERSION,ifolder))
'''fracture processing parameters'''
filter_thr = 0
delta = 1
## folder name of generated pdfs
where_to = 'activity_v{}'.format(VERSION)
if save_data:
full_name = file_path + 'fracture_analysis_v{}.hdf5'.format(VERSION)
vals_to_plot = h5py.File(full_name,'a')
for item in matched_files:
network_params = h5py.File(file_path+item,"r")
network_output = h5py.File(file_path+'activity_v{}.0.hdf5'.format(VERSION),"r")
all_keys = np.array(sorted(network_output.keys())).astype(int)
for iidx in all_keys:
try:
iidx = str(iidx)
print('*** {} ***'.format(iidx));sys.stdout.flush()
ecc = network_params[iidx]['ecc'].value
if True:
## activity shape = events x number of frames x N x M
activity = network_output[iidx]['activity'].value
N,M = network_params[iidx]['shape'].value
final_pattern = activity[:,frame_no,:,:]
npatterns = final_pattern.shape[0]
''' correlation over time'''
cc = np.corrcoef(final_pattern.reshape(npatterns,N*M),rowvar=0)
roi = np.sum(np.isfinite(cc),axis=0)>0
roi = roi.reshape(M,N)
bnd,end = 0,N
nid = np.sum(roi)
''' estimate wavelength '''
sigma1 = network_params[iidx]['sigmax'].value
sigma2 = network_params[iidx]['inh_factor'].value*sigma1
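# wavelength_mh below is the wavelength at which the Fourier transform of a
# difference-of-Gaussians (Mexican-hat) kernel with widths sigma1 and sigma2
# peaks: lambda = pi*sqrt((sigma1**2 - sigma2**2)/log(sigma1/sigma2))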
wavelength_mh = np.sqrt( 4*np.pi**2/4.*(sigma1**2-sigma2**2)/np.log(sigma1/sigma2) )
wavelength_pxl = 1./np.sqrt( np.sum((1.*network_params[iidx]['kmax'].value/np.array([N,M]))**2) )
''' gradient of corr patterns '''
normed = (cc - np.nanmean(cc,axis=0)[None,:])/np.nanstd(cc,axis=0)[None,:]
normed = normed.reshape(N*M,N,M)
grad_x = 1-np.nanmean(normed*np.roll(normed,1,axis=1),axis=0)
grad_y = 1-np.nanmean(normed*np.roll(normed,1,axis=2),axis=0)
grad = np.sqrt((grad_x)**2 + (grad_y)**2 )
estimate_wavelength = 1.*wavelength_mh
'''estimate spatial scale of activity patterns'''
if (spatial_scale_analysis or save_data):
try:
estimate_wavelength = vals_to_plot['{}/{}'.format(iidx , 'est_wavelength')].value
wavelength_mh = vals_to_plot['{}/{}'.format(iidx , 'wavelength')].value
except:
idcs = np.random.choice(np.arange(npatterns),size=npatterns,replace=False)
autocorr = auto_correlation.get_autocorr(final_pattern[idcs,:,:],max_lag=N//2,method='wiener_khinchin')
rough_patch_size = int(np.nanmax([wavelength_mh//2,7])) #410microns in data
maxima = np.zeros_like(autocorr,dtype=int)
for i,iautocorr in enumerate(autocorr):
maxima[i,:,:] = find_local_maxima.detect_peaks(iautocorr,rough_patch_size,roi=None)
maxima[:,M//2,N//2] = 0
coord_x,coord_y = np.meshgrid(np.arange(-(N//2),N//2+1,1),np.arange(-(M//2),M//2+1,1))
distance_to_center = np.sqrt((1.*coord_x)**2 + (1.*coord_y)**2)
maxima_sum = np.sum(maxima,axis=0)
distances = distance_to_center[maxima_sum>0]
estimate_wavelength = np.nanmin(distances)
ring = (distance_to_center<(estimate_wavelength*1.3))*(distance_to_center>(estimate_wavelength*0.8))
maxima[:,np.logical_not(ring)] = 0
estimate_wavelength = np.nanmean(distance_to_center[np.sum(maxima,axis=0)>0])
if save_data:
try:
vals_to_plot.create_dataset('{}/{}'.format(iidx , 'wavelength'), data=wavelength_mh)
vals_to_plot.create_dataset('{}/{}'.format(iidx , 'est_wavelength'), data=estimate_wavelength)
except:
dset=vals_to_plot['{}'.format(iidx)]
for jkey in ['wavelength','est_wavelength']:
if jkey in dset.keys():
del dset[jkey]
dset['wavelength'] = wavelength_mh
dset['est_wavelength'] = estimate_wavelength
print('***');sys.stdout.flush()
print('Spatial scale Lambda = {:.2f} (unit pixels)'.format(estimate_wavelength))
print('*** Spatial scale analysis done ***');sys.stdout.flush()
''' same processing as experimental data for finding fractures'''
if fracture_analysis:
print('start fracture_analysis')
grad = cv2.medianBlur(grad.astype('float32'), 3)
grad_with_nan = np.copy(grad)
ynan,xnan = np.where(np.logical_not(np.isfinite(grad)))
for iy,ix in zip(ynan,xnan):
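# fill each NaN with the nan-mean of its 3x3 neighbourhood (periodic boundaries);
# the paired row/column index lists below enumerate all nine neighbourhood cells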
grad[iy,ix] = np.nanmean(grad[[(iy-1)%M,iy,(iy+1)%M]*3,[(ix-1)%N,ix,(ix+1)%N,ix,(ix+1)%N,(ix-1)%N,(ix+1)%N,(ix-1)%N,ix]])
''' histogram spans the full range from a to b (histogram normalisation/stretching)'''
mingrad = np.nanmin(grad)
maxgrad = np.nanmax(grad)
a,b = 0,256
grad_normalised = (grad-mingrad)/(maxgrad-mingrad)*(b-a)+a # stretch to the range [a, b]
clahe = cv2.createCLAHE(clipLimit=20, tileGridSize=(10,10) )
grad_normalised = clahe.apply(grad_normalised.astype('uint8'))
''' apply a highpass filter'''
grad_filter = smooth_map.high_normalize(grad_normalised, mask=None, sigma=15)#, sig_low=2)
''' Thresholding + remove small objects(morphology) '''
furrow = grad_filter > filter_thr #(1-np.exp(-4.))
''' get rid of holes '''
furrow_erosion = np.copy(furrow)
labels,num_features = measurements.label(furrow, np.ones((3,3),dtype=int))
for ifeature in range(1,num_features+1):
furrow_part = np.sum(labels==ifeature)
if furrow_part<3:
labels[labels==ifeature]=0
furrow_dilated = labels>0
furrow_skeleton = skeletonize(furrow_dilated)
furrow_skeleton_mask = furrow_skeleton*np.isfinite(grad_with_nan)
distance_map_fg = distance_transform_edt(np.logical_not(furrow_skeleton_mask))
try:
wavelength_pxl = estimate_wavelength
except:
wavelength_pxl = np.nanmax(distance_map_fg)*2
max_peaks = distance_map_fg>(wavelength_pxl/7.)
'''fracture strength'''
grad[np.logical_not(np.isfinite(grad_with_nan))] = np.nan
dist_furrow = grad[furrow_skeleton_mask]
dist_peaks = grad[max_peaks]
strength_val = np.nanmedian(dist_furrow[np.isfinite(dist_furrow)]) - np.nanmedian(dist_peaks[np.isfinite(dist_peaks)])
''' plotting fracture'''
if True:
fig=plt.figure()
ax = fig.add_subplot(111)
cmap = 'binary'
MU = np.nanmean(grad)
SD = np.nanstd(grad)
im=ax.imshow(grad*estimate_wavelength*10/1000.,interpolation='nearest',cmap=cmap,vmin=0,vmax=0.1)
plt.colorbar(im)
fig.savefig(image_path + '{}/fracture/fract_{}_010.pdf'.format(where_to,iidx),dpi=200,format='pdf')
plt.close(fig)
if save_data:
try:
vals_to_plot.create_dataset('{}/{}'.format(iidx , 'strength_val'), data=strength_val)
except:
dset=vals_to_plot['{}'.format(iidx)]
for jkey in ['strength_val']:
if jkey in dset.keys():
del dset[jkey]
dset['strength_val'] = strength_val
print('***');sys.stdout.flush()
print('Fracture strength = {:.5f} (per Lambda)'.format(strength_val*estimate_wavelength*10/1000.))
print('*** Fracture analysis done ***');sys.stdout.flush()
'''compare ellipse props on fractures from regions away from fractures'''
if elli_analysis:
cc = cc.reshape(M,N,M,N)
local_patch_size = 9
interpolate_by = 3
try:
elli_params = vals_to_plot['{}/{}'.format(iidx , 'elli_params')].value
except:
print('No ellipse_params found. Refit ellipses!');sys.stdout.flush()
if interpolate_by>1:
local_patch_size *= 3
cc2d = np.copy(cc)
roi_float = roi.astype(float)
cc2d[np.logical_not(np.isfinite(cc2d))] = 0
roiy,roix = np.where(roi)
roi_fft = np.pad( np.fft.fftshift(np.fft.fft2(roi_float)),((N,N),(N,N)),'constant')
roi_intp = np.real(np.fft.ifft2(np.fft.fftshift(roi_fft))*9)
roi_intp = roi_intp>0.5
if (final_pattern.shape[1]>50 and estimate_wavelength<16):
local_patch_size = 5*3
corrs = []
for irow in range(M):
if np.sum(roiy==irow)==0:
continue
cc2d_fft = np.pad( np.fft.fftshift(np.fft.fft2(cc2d[irow,:,:,:],axes=(1,2)),axes=(1,2)),((0,0),(N,N),(N,N)), 'constant')
cc2d_intp = np.real(np.fft.ifft2(np.fft.fftshift(cc2d_fft,axes=(1,2)),axes=(1,2))*9)
cc2d_intp[:,np.logical_not(roi_intp)] = np.nan
cc2d_intp = cc2d_intp[roi[irow,:],:,:]
points_of_interest = [roiy[roiy==irow]*3,roix[roiy==irow]*3]
icorrs,convolved = get_point_neighbourhood.get_point_neighbourhood(points_of_interest,cc2d_intp,local_patch_size)
if len(corrs)==0:
corrs = icorrs
else:
corrs = np.concatenate([corrs,icorrs])
corrs = np.array(corrs)
corrs = corrs.reshape(nid,2*local_patch_size+1,2*local_patch_size+1)
else:
cc2d = cc2d[roi,:,:]
cc2d_fft = np.pad( np.fft.fftshift(np.fft.fft2(cc2d,axes=(1,2)),axes=(1,2)),((0,0),(N,N),(N,N)), 'constant')
cc2d_intp = np.real(np.fft.ifft2(np.fft.fftshift(cc2d_fft,axes=(1,2)),axes=(1,2))*9)
cc2d_intp[:,np.logical_not(roi_intp)] = np.nan
points_of_interest = [roiy*3,roix*3]
corrs,convolved = get_point_neighbourhood.get_point_neighbourhood(points_of_interest,cc2d_intp,local_patch_size)
else:
cc2d_intp=cc[roi,:,:]
points_of_interest = roi
corrs,convolved = get_point_neighbourhood.get_point_neighbourhood(points_of_interest,cc2d_intp,local_patch_size)
fig = plt.figure()
for i in range(6):
for j in range(6):
ax = fig.add_subplot(6,6,i*6+j+1)
ax.imshow(corrs[i*6+j+1,:,:],interpolation='nearest',cmap='RdBu_r',vmin=-0.75,vmax=0.75)
fig.savefig(image_path + '{}/fracture/corrs_{}.pdf'.format(where_to,iidx),dpi=200,format='pdf')
plt.close(fig)
p1 = 1
np.random.seed(46756)
part = np.random.choice(np.arange(2),size=nid,replace=True,p=[1-p1,p1]).astype(bool)
threshold_mode = 0.7
ellies_corr,ellies_thr,check_ellc,check_cntc = ellipse_fitting.get_fit_ellipse(corrs[part,:,:].copy(),\
'corr', threshold_mode, full_output=False)
''' check how many local regions were fitted '''
keysc = ellies_corr.keys()
print('Fits in corr={} of total={}'.format(len(keysc),np.sum(part)));sys.stdout.flush()
elli_params = np.empty((4,N,M))*np.nan
yroi,xroi = np.where(roi)
for ikey in sorted(ellies_corr.keys()):
elli_params[0,yroi[int(ikey)],xroi[int(ikey)]] = ellies_corr[ikey][0]/180*np.pi #ori in rad
elli_params[1,yroi[int(ikey)],xroi[int(ikey)]] = ellies_corr[ikey][1] #ecc
elli_params[2,yroi[int(ikey)],xroi[int(ikey)]] = ellies_corr[ikey][2] #a (height)
elli_params[3,yroi[int(ikey)],xroi[int(ikey)]] = ellies_corr[ikey][3] #b (width)
eccs = np.argsort(elli_params[1,roi])
try:
del ellies_corr
except:
pass
mean_ecc =
|
np.nanmean(elli_params[1,bnd:end,bnd:end])
|
numpy.nanmean
|
#!/usr/bin/env python3
import sys, os
from os.path import join as pjoin
import shutil
import signal
import socket
import time
import copy
import random
import math
import json
import logging
import tempfile
import multiprocessing
import traceback
from datetime import datetime, timedelta
from pprint import pprint, pformat
from collections import namedtuple
from rttypes.frame import FrameOfReference
import numpy as np
import numpy.random
import pymongo
from bson.objectid import ObjectId
import h5py
try:
from tqdm import tqdm
except ImportError as e:
# fall back to a no-op wrapper that simply returns the iterable
def tqdm(iterable, *args, **kwargs):
return iterable
import payloadtypes
import defaults
from api_enums import (MESSAGETYPE, STATUS, MLROLE, PROCSTATUS, MCGEOTYPE,
VARTYPE, DBCOLLECTIONS, PARTICLETYPE,
STORAGETYPE)
from commandmenu import CommandMenuBase, menucommand
import parse
import socketio
import dicomutils
from loaders import ArrayLoader
from sparse import SparseMatrixCOO
from utils import load_bin, save_bin, none_or_type, get_resizing_params
import database
from filewriters import RotatingNPFileWriter
import quan_config
import restactions
from beamlist import read_beamlist, ensure_exclusive_setting
import log
logger = log.get_module_logger("client")
LOW_PARTICLES = int(18e6)
HIGH_PARTICLES = int(2e3)
dsaddr = None
def get_simdoc(filter):
"""Multiprocessing worker function"""
database.reinit_dbclient()
return database.db[DBCOLLECTIONS.SIMULATION].find_one(filter)
def sort_beamlets_columnorder(beamdoc):
return sorted(beamdoc['beamlets'],
key=lambda x: x['position'][0]*beamdoc['fmapdims'][0]+x['position'][1])
def validate_rest_response(response):
if response['status'] != STATUS.SUCCESS:
raise RuntimeError('action failed with error: {!s}'.format(response['message']))
return response['content']
def load_bbox(bbox_dict, voxelsize):
"""load bbox coordsys from bbox.json file"""
bbox = payloadtypes.CoordSys()
bbox.spacing = voxelsize
bbox.start = [bbox_dict[k][0] for k in ['x','y','z']]
bbox.size = [int(math.ceil((bbox_dict[k][1]-bbox.start[ii])/voxelsize[ii])) for ii, k in enumerate(['x','y','z'])]
return bbox
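# Hedged usage sketch (assumption, not original code): bbox.json is assumed to
# hold per-axis [min, max] extents in the same units as `voxelsize`, e.g.
# {"x": [-50, 50], "y": [-50, 50], "z": [-20, 20]}.
def _example_load_bbox(path_to_bbox_json, voxelsize=(2.5, 2.5, 2.5)):
    with open(path_to_bbox_json, 'r') as fd:
        bbox_dict = json.load(fd)
    return load_bbox(bbox_dict, list(voxelsize))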
#==================
# Command Line Menu
#==================
class CMenuDebug(CommandMenuBase):
description = 'Client for interacting with distributed Monte Carlo Dose Calculation network'
@staticmethod
def generate_simulation_payload(geometry_file, gps_file, reply_host, reply_port):
"""simulation test payload send directly to computeserver"""
payload = payloadtypes.SimInstruction()
payload.id = str(database.ObjectId())
payload.num_vacant_threads = 2
payload.beam_id = str(database.ObjectId())
payload.subbeam_id = str(database.ObjectId())
payload.files = {
'geometry': socketio.pack_file_text(name='mcgeo.txt', file=geometry_file),
'gps': socketio.pack_file_text(name='gps.mac', file=gps_file),
}
payload.simulations = [
payloadtypes.SimulationConfig.fromdict({'id': str(database.ObjectId()), 'num_runs': 1,
'num_particles': LOW_PARTICLES, 'vartype': VARTYPE.LOW, }),
payloadtypes.SimulationConfig.fromdict({'id': str(database.ObjectId()), 'num_runs': 30,
'num_particles': HIGH_PARTICLES, 'vartype': VARTYPE.HIGH, }),
]
payload.reply_host = reply_host
payload.reply_port = reply_port
return payload
@staticmethod
def submit_request(payload, timeout=10):
starttime = time.perf_counter()
ipidx = 0
while True:
if timeout and timeout > 0 and time.perf_counter()-starttime > timeout:
raise OSError('Timeout reached while trying to submit processing request')
try:
cs_addr = defaults.cs_address[ipidx]
response = payload.send_request(cs_addr, timeout=None, conection_timeout=1)
if response['status'] == STATUS.SUCCESS:
break
except (socket.timeout, ConnectionRefusedError, ConnectionResetError) as err:
logger.debug('timeout while trying to connect to "{}:{}"'.format(*cs_addr))
ipidx = (ipidx+1)%len(defaults.cs_address)
time.sleep(1)
return cs_addr
@menucommand
def test_simulation(self, parser):
"""Send a test simulation processing request"""
parser.add_argument('num', nargs='?', type=int, default=0, help='send a number of tests immediately')
parser.add_argument('--replyhost', '--rh', default='127.0.0.1')
parser.add_argument('--replyport', '--rp', type=int, default=5567)
self.args = parser.parse_args(namespace=self.args)
if self.args.num > 0:
for jobnum in range(self.args.num):
logger.info('submitting simulation request {} of {}'.format(jobnum+1, self.args.num))
CMenuDebug.submit_request( CMenuDebug.generate_simulation_payload('./test/mcgeo.txt', './test/gps.mac',
self.args.replyhost, self.args.replyport), timeout=None )
logger.info('request accepted')
return
while True:
input('press enter to send a task request\n')
CMenuDebug.submit_request( CMenuDebug.generate_simulation_payload('./test/mcgeo.txt', './test/gps.mac',
self.args.replyhost, self.args.replyport), timeout=None )
logger.info('request accepted')
class CMenuDatabase(CommandMenuBase):
description='Apply actions directly on database'
def register_addl_args(self, parser):
parse.register_db_args(parser)
parser.add_argument('--data', '-d', type=str, default="db_data", help="set data root directory")
def run_after_parse(self):
# set global settings
database.init_dbclient(host=self.args.dbhost, port=self.args.dbport,
dbname=self.args.dbname, auth=self.args.dbauth)
database.InitDataStorage(self.args.data)
#====================================
# REUSABLE DB MANIPULATION
#====================================
@menucommand
def resetdb(self, parser):
"""Reset specified database, clearing all data from it"""
dbname = self.args.dbname
client, db = database.dbclient, database.db
logger.info("Resetting database \"{}\"".format(dbname))
available_dbs = client.list_database_names()
if not dbname in available_dbs:
raise pymongo.database.ConfigurationError("Database \"{}\" does not exist. Options are: [{}]".format(dbname, ','.join(available_dbs)))
logger.warning("Are you sure you want to reset the database \"{}\"?".format(dbname))
resp = input("(y/[n]): ")
if resp.lower() in ['y', 'yes']:
logger.warning('Deleting database...')
client.drop_database(dbname)
logger.warning('Done')
else:
logger.warning('aborted')
logger.warning("Would you like to also clear all data from referenced data directory \"{}\"?".format(database.DATASTORE.DATAROOT))
resp = input("(y/[n]): ")
if resp.lower() in ['y', 'yes']:
logger.warning('Deleting referenced data...')
try:
logger.debug('attempting to delete: '+database.DATASTORE.DATAROOT)
shutil.rmtree(database.DATASTORE.DATAROOT)
except Exception as e:
logger.exception('Error while attempting to delete directory tree "{}"'.format(database.DATASTORE.DATAROOT))
logger.warning('Done')
else:
logger.warning('aborted')
@menucommand
def backupdb(self, parser):
"""Copy current state of database to new table"""
parser.add_argument('--backup-name', default='{}_backup'.format(self.args.dbname))
self.args = parser.parse_args(namespace=self.args)
dbclient, db = database.dbclient, database.db
backupdb = dbclient[self.args.backup_name]
if self.args.backup_name in dbclient.list_database_names():
logger.warning("database \"{}\" already exists. Would you like to overwrite?".format(self.args.backup_name))
resp = input("(y/[n]): ")
if resp.lower() in ['y', 'yes']:
logger.warning('Deleting database...')
dbclient.drop_database(self.args.backup_name)
logger.warning('Done')
else:
logger.warning('Could not complete backup!')
return
logger.info('Copying database "{}" to "{}"'.format(self.args.dbname, self.args.backup_name))
for collname in db.list_collection_names():
logger.debug('copying collection "{}"'.format(collname))
backupcoll = backupdb[collname]
backupcoll.insert_many(db[collname].find())
@menucommand
def cleandb(self, parser):
"""Verify all simulation documents for file size/existence. Reset invalid docs for recalculation"""
parser.add_argument('-n', '--dry-run', action='store_true', help='just print invalid entries')
parser.add_argument('-a', '--action', choices=['all', 'corrupt_sims', 'leftovers'], default='all', help='Which action to take (default=all)')
self.args = parser.parse_args(namespace=self.args)
# the order of these cleanup commands matters
if self.args.action in [ 'all', 'leftovers' ]:
database.cleandb_remove_leftover_files(dryrun=self.args.dry_run)
if self.args.action in [ 'all', 'corrupt_sims' ]:
database.cleandb_reset_corrupt_sims(dryrun=self.args.dry_run)
#====================================
# TREATMENT PLANNING FUNCTIONS
#====================================
@menucommand
def generate(self, parser):
""">>Collection of results generation functions"""
self.CMenuResultsGeneration()
class CMenuResultsGeneration(CommandMenuBase):
@staticmethod
def _get_beam_ids(beamlistfile=None, image_id=None, image_uid=None, image_doi=None, geom_id=None):
beam_ids = []
if beamlistfile is not None:
with open(beamlistfile, 'r') as fd:
for beam_id in fd:
beam_ids.append(beam_id.rstrip('\n'))
else:
# use all beams associated to image/geom pair
imagedoc = database.get_image_doc(id= image_id,
uid=image_uid,
doi=image_doi)
geomdoc = database.get_geometry_doc(id=geom_id,
image_id=imagedoc['_id'])
assert str(geomdoc['image_id']) == str(imagedoc['_id'])
beam_ids = next(database.db[DBCOLLECTIONS.BEAMPHOTON].aggregate([
{'$match': {"geom_id": ObjectId(geomdoc['_id'])}},
{'$group': { "_id": '0', "ids": { "$push": "$_id" } }},
]))['ids']
return beam_ids
@menucommand
def dataset(self, parser):
parser.add_argument('--nparticles', type=int, default=None, help='filter only samples for this number of particles')
parser.add_argument('--limit-examples', type=int, default=float('inf'), help='max number of data example pairs to include')
parser.add_argument('--fsize', type=float, default=1, help='max filesize in GB before splitting')
parser.add_argument('--xcontext', type=none_or_type(int), default=20, help='number of slices included to either side of beamlet center (along x-axis)')
parser.add_argument('--zcontext', type=none_or_type(int), default=20, help='number of slices included to either side of beamlet center (along z-axis)')
parser.add_argument('--out', '-o', default=None, help='directory to dump training data')
parser.parse_args(namespace=self.args)
if self.args.out is None:
self.args.out = 'dataset_{!s}'.format(datetime.now().strftime('%F_%T'))
db = database.db
dosefactor = 1e26 # multiply raw MC dose arrays by this value before pre-processing
loadarr = ArrayLoader(reorient=True, context=(self.args.xcontext, 50, self.args.zcontext),
get_geom=True, get_label=True, multiproc=True,dosefactor=dosefactor)
os.makedirs(self.args.out, exist_ok=True)
error_logger = open(pjoin(self.args.out, 'errors.txt'), 'w')
for role in [MLROLE.TRAIN, MLROLE.TEST]:
if role == MLROLE.TEST:
limit_examples = 8000
else:
limit_examples = self.args.limit_examples
outdir = pjoin(self.args.out, role)
os.makedirs(outdir, exist_ok=True)
datafw = RotatingNPFileWriter(pjoin(outdir, role), max_fsize=self.args.fsize, texthead='image_id,beam_id,subbeam_id,sim_id,sample_id')
# RANDOMLY SAMPLE A SUBSET OF SIMULATIONS
beam_ids = [doc['_id'] for doc in db[DBCOLLECTIONS.BEAMPHOTON].find({
'mlrole': role,
# 'date_added': {"$gte": datetime(2020, 1, 5)},
})]
filter = {
"magnetic_field": [0.0, 0.0, 1.5, 'tesla'],
'procstatus.status': PROCSTATUS.FINISHED,
'vartype': VARTYPE.HIGH,
'beam_id': {"$in": beam_ids},
}
if self.args.nparticles:
filter['num_particles'] = int(self.args.nparticles)
try:
nsims = db[DBCOLLECTIONS.SIMULATION].aggregate([
{"$match": filter},
{"$count": 'num_sims'}
]).next()['num_sims']
except StopIteration:
logger.info('no documents matched query')
continue
logger.info('{} matching samples'.format(nsims))
# GET UNIFORM RANDOM SAMPLE OF DOCS
simdocs = random.sample(list(db[DBCOLLECTIONS.SIMULATION].find(filter)), k=min(nsims, limit_examples))
logger.info('randomly sampled {} documents'.format(len(simdocs)))
with multiprocessing.Pool() as pool:
iresults = pool.imap(loadarr, simdocs, chunksize=8)
for ii, (result, simdoc) in enumerate(zip(tqdm(iresults, total=len(simdocs), desc='Constructing {} dataset'.format(role.title())), simdocs)):
if result is None:
raise RuntimeError('Error while loading beamlet dose arrays for simulation "{!s}"'.format(simdoc['_id']))
input_arrs, geom_arr, label_arr = result[0]
# Check for empty dose (bad beamlet specification during random select?)
ctr_idx = np.array(label_arr.shape[0])//2
sumslice = slice(max(0, ctr_idx-5), min(label_arr.shape[2], ctr_idx+5))
volsum = np.sum(label_arr[sumslice, :, sumslice])
if volsum < 1000:
error_logger.write("corrupt dose ({}) on volume: {!s}\n".format(volsum, simdoc['_id']))
error_logger.flush()
continue
try:
assert np.amin(label_arr)>=0.0
for input_arr in input_arrs:
assert np.amin(input_arr)>=0.0
except AssertionError as e:
print(simdoc['_id'], np.amin(label_arr), [np.amin(input_arr) for input_arr in input_arrs])
raise
channel_arrs = []
for input_arr in input_arrs:
channel_arrs.append( np.stack([label_arr, input_arr, geom_arr], axis=-1).astype(np.float32) )
sample_arr = np.stack(channel_arrs, axis=0)
text_out = []
for sampledoc in simdoc['samples']:
text_out.append('{!s},{!s},{!s},{!s},{!s}'.format(
simdoc['image_id'],
simdoc['beam_id'],
simdoc['subbeam_id'],
simdoc['_id'],
sampledoc['_id'],
))
datafw.write(sample_arr, text=text_out)
datafw.save()
with open(pjoin(self.args.out, 'stats.json'), 'w') as fd:
json.dump({'factor': [dosefactor, dosefactor, 1.0]}, fd)
error_logger.close()
@menucommand
def masks(self, parser):
"""Export a file containing mask volumes matching the image coordinate system, respecting the
user-specified voxelsize (if it was manually defined)"""
parser.add_argument('--image_id', type=str, help="image database id")
parser.add_argument('--image_uid', type=str, help="image dicom uid")
parser.add_argument('--image_doi', type=str, help="image doi")
parser.add_argument('--geom_id', type=str, help="geometry database id")
parser.add_argument('--numpy', action='store_true', help='also produce a .npy file for each mask')
parser.add_argument('--out', default='masks.h5', help='file to write mask data')
self.args = parser.parse_args()
imagedoc = database.get_image_doc(id=self.args.image_id,
uid=self.args.image_uid,
doi=self.args.image_doi)
assert imagedoc is not None
assert 'rtstruct' in imagedoc
geomdoc = database.get_geometry_doc(id=self.args.geom_id,
image_id=imagedoc['_id'])
# calculate resizing params
ic = imagedoc['coordsys']
# Get list of structures
existing_structures = {doc['name']: doc['_id'] for doc in imagedoc['structures']}
structure_names = set(existing_structures.keys())
if imagedoc['rtstruct'] is not None:
structure_names.update(dicomutils.get_roi_names(database.dbabspath(imagedoc['rtstruct'])))
# also get any custom masks (directly inserted as voxelized mask arrays)
logger.info("Found structures: {}".format(structure_names))
# generate missing masks
for structure_name in structure_names:
if structure_name in existing_structures:
continue
logger.info('Requesting mask generation for structure "{}"'.format(structure_name))
try:
existing_structures[structure_name] = database.structure_insert(imagedoc['_id'], name=structure_name)
except:
logger.warning("Failed to create mask for structure \"{}\", most likely because it " \
"doesn't contain any boundary coordinates.".format(structure_name))
# add structure masking voxels of low density (air) within body contour
air_struct_name = "T_AIR"
if air_struct_name not in existing_structures:
# force recompute
ctvol, _ = database.get_ctvolume(imagedoc['_id'])
air_mask = np.where(ctvol<0.2, 1, 0).astype(np.int8)
existing_structures[air_struct_name] = database.structure_insert(imagedoc['_id'], air_struct_name, mask=air_mask)
# refresh local copy of imagedoc and fetch mask data from it
imagedoc = database.get_doc(DBCOLLECTIONS.IMAGES, imagedoc['_id'])
# save to file
try:
logger.info('Saving masks for structures: {!s}'.format([s['name'] for s in imagedoc['structures']]))
with h5py.File(self.args.out, 'w') as h5fd:
try:
for ii, structure in enumerate(tqdm(imagedoc['structures'], desc='Saving masks')):
sc = structure['boundbox']
f_mask = database.dbabspath(structure['maskfile'])
mask_arr = np.load(f_mask) # size matches CT
mask_name = structure['name']
# crop to the mask bbox for efficient storage
subslice = get_resizing_params(ic, sc)
cropped_mask = mask_arr[subslice]
assert list(cropped_mask.shape) == sc['size'][::-1]
group = h5fd.create_group(mask_name)
group.attrs['index'] = ii
group.attrs['name'] = mask_name
arrprops = group.create_group('ArrayProps')
arrprops.attrs['crop_size'] = cropped_mask.shape[::-1]
arrprops.attrs['crop_start'] = [sl.start for sl in subslice][::-1]
arrprops.attrs['size'] = ic['size']
group['mask'] = cropped_mask.astype(np.int8)
if self.args.numpy:
fname = pjoin(os.path.dirname(self.args.out), 'npmasks', mask_name+'.npy')
os.makedirs(os.path.dirname(fname), exist_ok=True)
np.save(fname, cropped_mask)
except:
logger.exception('Structure "{}" is missing its mask file'.format(structure['name']))
raise
except:
os.remove(self.args.out)
@menucommand
def fmaps(self, parser):
parser.add_argument('--image_id', type=str, help="image database id")
parser.add_argument('--image_uid', type=str, help="image dicom uid")
parser.add_argument('--image_doi', type=str, help="image doi")
parser.add_argument('--geom_id', type=str, help="geometry database id")
parser.add_argument('--beamlist', type=str, help='file listing all beam ObjectIds to include')
parser.add_argument('--out', '-o', type=str, default=os.path.curdir)
self.args = parser.parse_args(namespace=self.args)
from fmaps import Beam, Fmaps
fmaps = Fmaps()
beam_ids = self._get_beam_ids(beamlistfile=self.args.beamlist,
image_id=self.args.image_id,
image_uid=self.args.image_uid,
image_doi=self.args.image_doi,
geom_id=self.args.geom_id)
for bb, beam_id in enumerate(tqdm(beam_ids, desc='Loading beams')):
# download per-beamlet dose and construct sparse matrix as .mat file
beamdoc = database.db[DBCOLLECTIONS.BEAMPHOTON].find_one({'_id': ObjectId(beam_id)})
fluence_map = np.zeros([beamdoc['fmapdims'][1],beamdoc['fmapdims'][0]])
for ii, beamletdoc in enumerate(beamdoc['beamlets']):
pos = beamletdoc['position']
fluence_map[pos[0], pos[1]] = 1.0
fmaps.addBeam(Beam(fluence_map,
gantry=beamdoc['angle_gantry'],
couch=beamdoc['angle_couch'],
coll=beamdoc['angle_coll'],
iso=[x/10 for x in beamdoc['isocenter']],
sad=beamdoc['sad']/10,
beamlet_size=[x/10 for x in beamdoc['beamletsize']],
beamlet_spacing=[x/10 for x in beamdoc['beamletspacing']],
))
fmaps.generate(self.args.out)
@menucommand
def exportdata(self, parser):
parser.add_argument('--image_id', type=str, help="image database id")
parser.add_argument('--image_uid', type=str, help="image dicom uid")
parser.add_argument('--image_doi', type=str, help="image doi")
parser.add_argument('--geom_id', type=str, help="geometry database id")
parser.add_argument('--beamlist', type=str, help='file listing all beam ObjectIds to include')
parser.add_argument('--nparticles', type=float, help='number of simulation histories')
parser.add_argument('--tag', type=str, help='tag string referencing a set of simulations')
parser.add_argument('--drop_thresh', default=None, type=none_or_type(float), help='drop dose values below this percent of each beamlet\'s max element')
parser.add_argument('--magnetic_field', default=None, type=none_or_type(float), help='magnetic field strength in Z-direction (unit: Tesla)')
parser.add_argument('--name', type=str, default='dose3d.bin', help='name of data file to export for every beamlet')
parser.add_argument('--out', '-o', type=str, default=os.path.curdir)
self.args = parser.parse_args()
assert self.args.drop_thresh is None or self.args.drop_thresh >= 0.0
data_filename = os.path.basename(self.args.name)
# prepare data output
if os.path.isdir(self.args.out):
outfile = pjoin(self.args.out, os.path.splitext(data_filename)[0]+'.h5')
else:
outfile = self.args.out
os.makedirs(os.path.dirname(outfile), exist_ok=True)
assert os.path.isdir(os.path.dirname(outfile))
imagedoc = database.get_image_doc(id=self.args.image_id,
uid=self.args.image_uid,
doi=self.args.image_doi)
geomdoc = database.get_geometry_doc(id=self.args.geom_id,
image_id=imagedoc['_id'])
# calculate resizing params
gc = geomdoc['coordsys']
ic = imagedoc['coordsys']
subslice = get_resizing_params(ic, gc)
logger.debug('embedding data subarray with size {!s} into full array with size {!s} at {!s}'.format(
gc['size'], ic['size'], '[{}]'.format(', '.join(['{}:{}'.format(sl.start,sl.stop) for sl in subslice][::-1]))))
beam_ids = self._get_beam_ids(beamlistfile=self.args.beamlist,
image_id=self.args.image_id,
image_uid=self.args.image_uid,
image_doi=self.args.image_doi,
geom_id=self.args.geom_id)
loadarr = ArrayLoader(multiproc=True, dosefactor=1.0, max_samples=1, data_filename=data_filename)
# download per-beamlet dose and construct sparse matrix as .mat file
sparsemat = SparseMatrixCOO(outfile, drop_thresh=self.args.drop_thresh)
for bb, beam_id in enumerate(tqdm(beam_ids, desc='Processing beams')):
# supplementary information
beamdoc = database.db[DBCOLLECTIONS.BEAMPHOTON].find_one({'_id': ObjectId(beam_id)})
filters = []
beamlet_ids = []
for beamletdoc in sort_beamlets_columnorder(beamdoc):
beamlet_ids.append(beamletdoc['_id'])
filter = {'subbeam_id': ObjectId(beamletdoc['_id']),
'procstatus.status': {'$in': [PROCSTATUS.FINISHED, PROCSTATUS.SKIPPED]}}
if self.args.nparticles:
filter['num_particles'] = int(self.args.nparticles)
if self.args.magnetic_field:
filter['magnetic_field.2'] = self.args.magnetic_field
if self.args.tag:
filter['tag'] = self.args.tag
filters.append(filter)
simdocs = []
for ifilter in filters:
simdoc = get_simdoc(ifilter)
simdocs.append(simdoc)
if not all((doc is not None for doc in simdocs)):
nfinished = 0
for doc in simdocs:
if doc is not None: nfinished += 1
raise RuntimeError('Only {} of {} simulations for beam "{}" have been completed. ' \
'Please wait for the rest to complete and try again.'.format(nfinished, len(simdocs), beam_id) )
# check for requested data file
nwithdata = 0
nskipped = 0
for simdoc in simdocs:
if simdoc['num_particles'] <= 0:
nskipped += 1
elif os.path.isfile(pjoin(database.build_datapath_sample(simdoc['_id'], simdoc['samples'][0]['_id']), data_filename)):
nwithdata += 1
if (nwithdata + nskipped) < len(simdocs):
raise RuntimeError('{} of {} simulations for beam "{}" contain the requested data file: "{}".\n'
'Try again with one of the following data filenames or wait and try later: {!s}'.format(
nwithdata + nskipped, len(simdocs), beam_id, data_filename,
os.listdir(database.build_datapath_sample(simdocs[0]['_id'], simdocs[0]['samples'][0]['_id']))
))
with multiprocessing.Pool() as pool:
def poolmap(chunksize):
def f(*args):
return pool.imap(*args, chunksize=chunksize)
return f
map_funcs = [poolmap(8), poolmap(1), map]
while len(map_funcs):
try:
map_func = map_funcs.pop(0)
iresults = map_func(loadarr, simdocs)
for ii, (result, simdoc) in enumerate(zip(tqdm(iresults, desc='Collecting beamlet data', total=len(simdocs)), simdocs)):
if result is None:
raise RuntimeError('Error while loading beamlet data arrays for simulation "{!s}"'.format(simdoc['_id']))
# load noisy dose volume for one beamlet
dosearr = result[0][0][0]
if np.all(dosearr == 0):
# add empty column
sparsemat.add_column(None)
else:
# resize to match CT coordsys
fulldosearr = np.zeros(ic['size'][::-1])
fulldosearr[subslice] = dosearr
sparsemat.add_column(fulldosearr)
break
except multiprocessing.pool.MaybeEncodingError as err:
logger.warning('Data loading failed, falling back to less efficient method')
assert len(map_funcs)
continue
logger.info('Writing sparse beamlet data matrix to file: "{}"'.format(outfile))
sparsemat.finish()
@menucommand
def export_detection_data(self, parser):
parser.add_argument('--image_id', type=str, help="image database id")
parser.add_argument('--image_uid', type=str, help="image dicom uid")
parser.add_argument('--image_doi', type=str, help="image doi")
parser.add_argument('--geom_id', type=str, help="geometry database id")
parser.add_argument('--beamlist', type=str, help='file listing all beam ObjectIds to include')
parser.add_argument('--nparticles', type=float, help='number of simulation histories')
parser.add_argument('--tag', type=str, help='tag string referencing a set of simulations')
parser.add_argument('--name', type=str, default='detectedevents.pb', help='name of data file to aggregate from every beamlet')
parser.add_argument('--out', '-o', type=str, default=os.path.curdir)
self.args = parser.parse_args()
# You need to copy your protobuf_pb2 file into ./dosecalc/webapi/ for this to work
from protobuf_pb2 import pbDetectedEvents, pbDetectionEvent
# strip any directory names from user-specified protobuf filename
# This is the name of the file we will match against for each of the simulation outputs
data_filename = os.path.basename(self.args.name)
# prepare data output (either .mat or .h5 would be convenient for matlab users)
if os.path.isdir(self.args.out):
outfile = pjoin(self.args.out, os.path.splitext(data_filename)[0]+'.h5')
else:
outfile = self.args.out
os.makedirs(os.path.dirname(outfile), exist_ok=True)
assert os.path.isdir(os.path.dirname(outfile))
# Begin our navigation of the mongodb database
# database entries are called documents (docs). They act like python dictionaries after
# we retrieve them with a .find() or .find_one() command (called inside my get_*_doc() functions)
# for a complete list of the contents of each "doc" type, see the bottom of database.py
# (gen_doc_*() functions define the database document structures)
#
# we essentially navigate the tree-like hierarchy of linked documents by repeatedly searching for the
# child doc that has a matching id, stored in the parent doc
# the tree looks like: image -> geometry -> beam(s) -> beamlet(s) -> simulation(s) -> sample(s)
imagedoc = database.get_image_doc(id=self.args.image_id,
uid=self.args.image_uid,
doi=self.args.image_doi)
geomdoc = database.get_geometry_doc(id=self.args.geom_id,
image_id=imagedoc['_id'])
# this is a convenience function for matching to beam docs by one of the five options.
# match by image_id/uid/doi is simplest (gives all beams for an image)
# the _id is generated when the image is first inserted
# _uid is the Dicom uuid assigned when the image is captured by the scanner
# _doi is the "plan_name" you assign when running simpledose create-plan <doi> <data-dir>
# match by beam_id is also possible for absolute control. --beamlist expects a text file with a
# mongodb-assigned beam_id on each line. you can access the id of any document with doc['_id']
beam_ids = self._get_beam_ids(beamlistfile=self.args.beamlist,
image_id=self.args.image_id,
image_uid=self.args.image_uid,
image_doi=self.args.image_doi,
geom_id=self.args.geom_id)
# Let's pretend that you are simply appending the data from each DetectionEvent to the end of each
# of the following data arrays. You can probably find a way to write a list of structs to h5
# just like you did for the protobuf file, but writing several equal-length (co-registered) arrays
# is much simpler than writing a list of structs with h5.
allGlobalTimes = []
allEventIds = []
allDetectorIds = []
allEnergies = []
allBeams = []
allBeamlets = []
# locate/read per-beamlet data and aggregate into final output file
for bb, beam_id in enumerate(tqdm(beam_ids, desc='Processing beams')):
beamdoc = database.db[DBCOLLECTIONS.BEAMPHOTON].find_one({'_id': ObjectId(beam_id)})
filters = []
beamlet_ids = []
# iterate over the beamlets in this beamdoc, sorted by the beamlet position in fmap
for beamletdoc in sort_beamlets_columnorder(beamdoc):
beamlet_ids.append(beamletdoc['_id'])
# filters are used with .find() and .find_one() to access specific docs from the database
# these filters are saved to a list for now, and used in a multithreaded lookup function later
filter = {'subbeam_id': ObjectId(beamletdoc['_id']),
'procstatus.status': {'$in': [PROCSTATUS.FINISHED, PROCSTATUS.SKIPPED]}}
if self.args.nparticles:
filter['num_particles'] = int(self.args.nparticles)
if self.args.tag:
filter['tag'] = self.args.tag
filters.append(filter)
# call get_simdoc() [at top of file] for each filter, using a multithreaded approach for speed
#with multiprocessing.Pool() as pool:
#simdocs = pool.map(get_simdoc, filters)
#print(list(filters))
#simdocs = map(get_simdoc, filters)
simdocs = []
for ifilter in filters:
simdoc = get_simdoc(ifilter)
simdocs.append(simdoc)
# at this point get_simdoc() has been called for every filter (the sequential loop above replaces
# the commented-out pool.map variant). Now we look at the success/fail of each filter to make sure there are no
# errors, otherwise we quit. Common error here is if some simulation tasks are still running.
if not all((doc is not None for doc in simdocs)):
nfinished = 0
for doc in simdocs:
if doc is not None: nfinished += 1
raise RuntimeError('Only {} of {} simulations for beam "{}" have been completed. ' \
'Please wait for the rest to complete and try again.'.format(nfinished, len(simdocs), beam_id) )
# check for requested data file. We look in the per-simulation data folder in <dbdata> to confirm
# that the requested data file exists for all sims. Error and exit otherwise.
# a common error here is a typo in the filename, so some options are suggested to remind the user
# of the actual files produced by geant4.
nwithdata = 0
nskipped = 0
for simdoc in simdocs:
if simdoc['num_particles'] <= 0:
nskipped += 1
elif os.path.isfile(pjoin(database.build_datapath_sample(simdoc['_id'], simdoc['samples'][0]['_id']), data_filename)):
nwithdata += 1
# if (nwithdata + nskipped) < len(simdocs):
# raise RuntimeError('{} of {} simulations for beam "{}" contain the requested data file: "{}". Try again with one of the following data filenames: {!s}'.format(
# nwithdata + nskipped, len(simdocs), beam_id, data_filename,
# os.listdir(database.build_datapath_sample(simdocs[0]['_id'], simdocs[0]['samples'][0]['_id']))
# ))
# now we know that all data files exist, we just need to read them to memory, aggregate somehow,
# then write the data to the output file. I removed my complex multiprocessing approach to keep
# this easy to understand. You can try to implement multiprocessing later if you want
#print(simdocs)
#print(list(simdocs))
for bblets, simdoc in enumerate(simdocs):
if not simdoc['samples']:
continue
else:
# build the path leading to this sim's data file
sampledoc = simdoc['samples'][0]
sim_data_dir = database.build_datapath_sample(simdoc['_id'], sampledoc['_id'])
data_path = pjoin(sim_data_dir, data_filename)
# read the data from protobuf data file
with open(data_path, 'rb') as fd:
detected_events = pbDetectedEvents()
detected_events.ParseFromString(fd.read())
# merge this sim's data into an aggregate data structure
# (you should decide how to implement this. This is just an example)
# aggregate the data into memory
for event in detected_events.detectionEvent:
allGlobalTimes.append(event.globalTime)
allEventIds.append(event.eventId)
allDetectorIds.append(event.detectorId)
allEnergies.append(event.energy)
allBeams.append(bb)
allBeamlets.append(bblets)
# here's a simple example of writing each of the aggregated lists as a separate "dataset" in h5
# This will make it easy to read all the data into matlab as four separate vectors
with h5py.File(outfile, mode='w') as h5root:
h5root.create_dataset('/globalTimes', data=allGlobalTimes)
h5root.create_dataset('/eventIds', data=allEventIds)
h5root.create_dataset('/detectorIds', data=allDetectorIds)
h5root.create_dataset('/energy', data=allEnergies)
h5root.create_dataset('/beamNo', data=allBeams)
h5root.create_dataset('/beamletNo', data=allBeamlets)
logger.info('Writing detected events to file: "{}"'.format(outfile))
@menucommand
def beamletdose_DL(self, parser):
parser.add_argument('beamlist', type=str, help='file listing all beam ObjectIds to include')
parser.add_argument('--nparticles', type=float, help='number of simulation histories')
parser.add_argument('--drop_thresh', default=None, type=none_or_type(float), help='drop dose values below this percent of each beamlet\'s max element')
parser.add_argument('--magnetic_field', default=1.5, type=float, help='magnetic field strength in Z-direction (unit: Tesla)')
parser.add_argument('--out', '-o', type=str, default=os.path.curdir)
parser.add_argument('--predict', nargs=3, help='--predict <config-file> <weights-file> <stats-file>')
parser.add_argument('--nopredict', nargs=1, help='--nopredict <stats-file>')
parser.add_argument('--zcontext', type=none_or_type(int), default=12, help='number of slices on each side of beamlet center (z-axis) to include in dosecalc')
parser.add_argument('--xcontext', type=none_or_type(int), default=12, help='number of rows/cols on each side of beamlet center (x,y-axes) to include in dosecalc')
parser.add_argument('--make-plots', action='store_true', default=False, help='save debug plots indicating prediction output')
parser.add_argument('--cpu', action='store_true', help='inference on CPU')
self.args = parser.parse_args(namespace=self.args)
assert self.args.predict or self.args.nopredict
assert self.args.drop_thresh is None or self.args.drop_thresh >= 0.0
assert self.args.zcontext is None or self.args.zcontext > 0
assert self.args.xcontext is None or self.args.xcontext > 0
make_plots = self.args.make_plots
# make_plots = True
# prepare data output
if os.path.isdir(self.args.out):
if self.args.predict:
outfile = pjoin(self.args.out, 'beamlet_dose_predicted.h5')
else:
outfile = pjoin(self.args.out, 'beamlet_dose.h5')
else:
outfile = self.args.out
os.makedirs(os.path.dirname(outfile), exist_ok=True)
assert os.path.isdir(os.path.dirname(outfile))
# prepare dose prediction model
model = None
if self.args.predict:
if self.args.cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = ""
config_file, weights_file, stats_file = self.args.predict
# docker volume mounting magic places the mcdose module in this src directory
from mcdose import get_trained_model
mcdoselogger = logging.getLogger('MCDose')
mcdoselogger.addHandler(logging.StreamHandler())
mcdoselogger.setLevel(logging.DEBUG)
model = get_trained_model(
config=config_file,
weights=weights_file,
normstats=stats_file,
)
else:
stats_file = self.args.nopredict[0]
with open(stats_file, 'r') as fd:
normstats = json.load(fd)
if 'factor' not in normstats:
normstats['factor'] = [1.0]*len(normstats['mean'])
with open(self.args.beamlist, 'r') as fd:
beam_ids = [line.rstrip('\n') for line in fd]
loadarr = ArrayLoader(reorient=True, context=(self.args.xcontext, 60, self.args.zcontext),
get_geom=True, get_label=make_plots, reversible=True, multiproc=True,
dosefactor=normstats['factor'][0])
# download per-beamlet dose and construct sparse matrix as .mat file
sparsemat = SparseMatrixCOO(outfile, drop_thresh=self.args.drop_thresh)
for bb, beam_id in enumerate(tqdm(beam_ids, desc='Processing beams')):
# supplementary information
beamdoc = database.db[DBCOLLECTIONS.BEAMPHOTON].find_one({'_id': ObjectId(beam_id)})
geomdoc = database.db[DBCOLLECTIONS.MCGEOM].find_one({'_id': ObjectId(beamdoc['geom_id'])})
arrsize = geomdoc['coordsys']['size']
theta = beamdoc['angle_gantry']
filters = []
beamlet_ids = []
for beamletdoc in sort_beamlets_columnorder(beamdoc):
beamlet_ids.append(beamletdoc['_id'])
filter = {'subbeam_id': ObjectId(beamletdoc['_id']), 'magnetic_field.2': self.args.magnetic_field}
if self.args.nparticles:
filter['num_particles'] = int(self.args.nparticles)
filters.append(filter)
with multiprocessing.Pool() as pool:
simdocs = pool.map(get_simdoc, filters)
with multiprocessing.Pool() as pool:
iresults = pool.imap(loadarr, simdocs, chunksize=8)
# iresults = (loadarr(simdoc) for simdoc in simdocs)
for ii, (result, simdoc) in enumerate(zip(tqdm(iresults, desc='Collecting dose data', total=len(simdocs)), simdocs)):
if result is None:
raise RuntimeError('Error while loading beamlet dose arrays for simulation "{!s}"'.format(simdoc['_id']))
if model is None:
# load noisy dose volume for one beamlet
rotdosearr, unprocessor = result[0][0][0], result[1]
# force all dose to go through rotations to match aliasing effects (for now)
dosearr = unprocessor(rotdosearr)
dosearr[dosearr<0.0] = 0.0 # enforce realistic dose (aliasing and prediction may contain neg. artifacts)
else:
# predict clean dose from noisy dose
arrs, unprocessor = result
dosearr, geomarr = arrs[0][0], arrs[1]
# predict, rotate back (already rotated by loadarr)
inputs = (np.stack((dosearr, geomarr), axis=-1)[None,...]).astype(np.float32)
rotpredarr = model(inputs).numpy()[0,...,0]  # drop "example" and "channel" axes
predarr = unprocessor(rotpredarr)
predarr[predarr<0.0] = 0.0 # enforce realistic dose (aliasing and prediction may contain neg. artifacts)
if make_plots and ii < 10:
gtrutharr = arrs[2] # drop channel axis
import matplotlib.pyplot as plt
from mcdose.visualize import create_volume_dose_figure, save_figure_array
figimg = create_volume_dose_figure(
np.stack([
np.stack([
gtrutharr[sliceidx],
dosearr [sliceidx],
predarr [sliceidx],
geomarr [sliceidx],
predarr[sliceidx] - gtrutharr[sliceidx],
], axis=0) for sliceidx in range(inputs.shape[1])
], axis=0),
dpi=200,
col_labels=['ground truth', 'input', 'predict', 'geom', 'predict - truth'],
own_scale=False
)
fig_outdir = os.path.splitext(outfile)[0]+'_figs'
os.makedirs(fig_outdir, exist_ok=True)
save_figure_array(figimg, pjoin(fig_outdir, 'beam{:04d}_blt{:05d}.png'.format(bb, ii)))
# replace noisy dose with predicted dose in sparse matrix
dosearr = predarr
sparsemat.add_column(dosearr)
logger.info('Writing sparse beamlet dose matrix to file: "{}"'.format(outfile))
sparsemat.finish()
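# --- illustrative sketch only (not the project's SparseMatrixCOO) ----------------
# The SparseMatrixCOO helper used above is project-specific; the sketch below shows
# the general pattern it is assumed to implement: collect flattened per-beamlet dose
# columns, zero out entries below a drop threshold, and assemble a sparse column
# matrix at the end. All names and the relative-threshold rule are assumptions.
def _assemble_sparse_dose_matrix(columns, drop_thresh=None):
    import numpy as np
    import scipy.sparse as sp
    cols = []
    for dose in columns:
        col = np.asarray(dose, dtype=np.float32).ravel()
        if drop_thresh is not None and col.max() > 0:
            # assumed semantics: drop values below drop_thresh * column maximum
            col = np.where(col >= drop_thresh * col.max(), col, 0.0)
        cols.append(sp.csc_matrix(col.reshape(-1, 1)))
    return sp.hstack(cols, format='csc')
# ----------------------------------------------------------------------------------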
@menucommand
def finaldose(self, parser):
"""sum the beamlet-dose volumes for the specified beams and save to file"""
parser.add_argument('--image_id', type=str, help="image database id")
parser.add_argument('--image_uid', type=str, help="image dicom uid")
parser.add_argument('--image_doi', type=str, help="image doi")
parser.add_argument('--geom_id', type=str, help="geometry database id")
parser.add_argument('--beamlist', type=str, help='file listing all beam ObjectIds to include')
parser.add_argument('--nparticles', type=float, help='number of simulation histories')
parser.add_argument('--magnetic_field', default=None, type=none_or_type(float), help='magnetic field strength in Z-direction (unit: Tesla)')
parser.add_argument('--out', '-o', type=str, default=os.path.curdir)
self.args = parser.parse_args(namespace=self.args)
outdir = self.args.out
imagedoc = database.get_image_doc(id=self.args.image_id,
uid=self.args.image_uid,
doi=self.args.image_doi)
geomdoc = database.get_geometry_doc(id=self.args.geom_id,
image_id=imagedoc['_id'])
beam_ids = self._get_beam_ids(beamlistfile=self.args.beamlist,
image_id=self.args.image_id,
image_uid=self.args.image_uid,
image_doi=self.args.image_doi,
geom_id=self.args.geom_id)
# calculate resizing params
gc = geomdoc['coordsys']
ic = imagedoc['coordsys']
subslice = get_resizing_params(ic, gc)
logger.debug('embedding dose subarray with size {!s} into full array with size {!s} at {!s}'.format(
gc['size'], ic['size'], '[{}]'.format(', '.join(['{}:{}'.format(sl.start,sl.stop) for sl in subslice][::-1]))))
# get simulation samples one by one
subarrsize = gc['size']
sumarrs = {('finaldose', 'dosefile'): np.zeros(subarrsize[::-1]),
('density', 'densfile'): np.zeros(subarrsize[::-1]), }
for bb, beam_id in enumerate(tqdm(beam_ids, desc='Processing beams')):
# download per-beamlet dose and sum together
beamdoc = database.get_doc(DBCOLLECTIONS.BEAMPHOTON, beam_id)
assert ObjectId(beamdoc['geom_id']) == ObjectId(geomdoc['_id'])
nbeamletdocs = len(beamdoc['beamlets'])
for ii, beamletdoc in enumerate(tqdm(beamdoc['beamlets'], desc='Summing beamlets')):
filter = {
'subbeam_id': ObjectId(beamletdoc['_id']),
}
if self.args.nparticles:
filter['num_particles'] = int(self.args.nparticles)
if self.args.magnetic_field:
filter['magnetic_field.2'] = self.args.magnetic_field
simdoc = database.db[DBCOLLECTIONS.SIMULATION].find_one(filter)
for key in sumarrs.keys():
arrlabel, arrtype = key
if arrtype == 'densfile' and ii>0:
continue
datafile = database.dbabspath(simdoc['samples'][0][arrtype])
with open(datafile, 'rb') as fd:
buf = fd.read()
dtype = 'f4' if arrtype == 'densfile' else 'f8'
arr = np.frombuffer(buf, dtype).reshape(subarrsize[::-1])
sumarrs[key] += arr
for key, sumarr in sumarrs.items():
arrlabel, arrtype = key
fullarr = np.zeros(ic['size'][::-1])
fullarr[subslice] = sumarr
sumarrs[key] = fullarr
os.makedirs(outdir, exist_ok=True)
for (arrlabel, arrtype), sumarr in sumarrs.items():
np.save(pjoin(outdir, '{}.npy'.format(arrlabel)), sumarr)
save_bin(pjoin(outdir, '{}.raw'.format(arrlabel)), sumarr)
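# --- illustrative sketch only (not the project's get_resizing_params) -------------
# Shows one way the embedding slices used above could be computed from the two
# coordsys dicts ({'start': [x,y,z], 'spacing': [dx,dy,dz], 'size': [nx,ny,nz]}),
# assuming both grids are axis-aligned and share the same voxel spacing.
def _embedding_slices(full_cs, sub_cs):
    slices = []
    for ax in range(3):
        offset = int(round((sub_cs['start'][ax] - full_cs['start'][ax]) / full_cs['spacing'][ax]))
        slices.append(slice(offset, offset + sub_cs['size'][ax]))
    return tuple(slices[::-1])  # arrays here are indexed (z, y, x)
# usage sketch: subslice = _embedding_slices(imagedoc['coordsys'], geomdoc['coordsys'])
#               fullarr[subslice] = sumarr
# ----------------------------------------------------------------------------------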
#TODO convert rest to db api
@menucommand
def quan_mcconfig(self, parser):
raise NotImplementedError("need to convert from rest api to db api")
parser.add_argument('beam_id', help='Beam ObjectId, or file with beam id on each line')
parser.add_argument('--out', '-o', type=str, default=os.path.curdir)
self.args = parser.parse_args(namespace=self.args)
beam_ids = []
if os.path.isfile(self.args.beam_id):
with open(self.args.beam_id, 'r') as fd:
for beam_id in fd:
beam_ids.append(beam_id.rstrip())
else:
beam_ids.append(self.args.beam_id)
for beam_id in beam_ids:
outdir = pjoin(self.args.out, beam_id)
os.makedirs(outdir, exist_ok=True)
# download per-beamlet dose and sum together
p = payloadtypes.RESTReqBeamPhotonGet()
p.beam_id = beam_id
p.recursive = False
beamdoc = validate_rest_response(p.send_request(dsaddr))
beamlets = [blt['position'] for blt in beamdoc['beamlets']]
beamlets.sort(key=lambda p: p[1]*1000+p[0])
# define beams and control points
cp = quan_config.ControlPoint()
cp.gantry_rot = beamdoc['angle_gantry']
cp.mu = 1.0 # replace with 1/#cp
cp.sad = beamdoc['sad']
cp.iso = beamdoc['isocenter']
# cp.xjaw_pos = tuple([d*(beamdoc['fmapdims'][0]/2)*beamdoc['beamletsize'][0] for d in [-1.0, 1.0]])
# cp.yjaw_pos = tuple([d*(beamdoc['fmapdims'][1]/2)*beamdoc['beamletsize'][1] for d in [-1.0, 1.0]])
# split into multiple control points to fill holes in target projection
leaf_edge_seqs = quan_config.get_leaf_edges(beamlets, beamdoc['fmapdims'], beamdoc['beamletsize'])
cps = []
for id, seq in enumerate(leaf_edge_seqs):
cp_copy = copy.deepcopy(cp)
cp_copy.id = id+1
cp_copy.leaf_edges = seq
cp_copy.xjaw_pos, cp_copy.yjaw_pos = quan_config.get_jaw_positions(seq, beamdoc['fmapdims'], beamdoc['beamletsize'])
cp_copy.mu = 1/len(leaf_edge_seqs)
cps.append(cp_copy)
mlcbeams = [quan_config.MLCBeam()]
mlcbeams[-1].control_points = cps
quan_config.generate_mlcdef(pjoin(outdir, 'mlcdef_{!s}.txt'.format(beam_id)), beamdoc['fmapdims'], beamdoc['beamletsize'])
quan_config.generate_rtplan(outdir, mlcbeams, fsuffix=beam_id)
#====================================
# SINGLE-USE FUNCTIONS
#====================================
@menucommand
def userfunc(self, parser):
""">> Collection of single-use user functions for brute forcing database change"""
self.CMenuUserFunctions()
class CMenuUserFunctions(CommandMenuBase):
@menucommand
def update_nruns(self, parser):
db = database.db
beamdocs = db[DBCOLLECTIONS.BEAMPHOTON].find({'mlrole': 'test'})
for beam in beamdocs:
for beamlet in beam['beamlets']:
for simulation in beamlet['simulations']:
result = db[DBCOLLECTIONS.SIMULATION].update_one({'_id': simulation}, update={'$set': {'num_runs': 1}})
if not result.matched_count:
raise RuntimeError('Failed to modify simulation "{!s}"'.format(simulation))
@menucommand
def add_sim_tasks(self, parser):
"""Add new sims to all beams matching MLROLE"""
parser.add_argument('--role', required=True, choices=[MLROLE.TRAIN, MLROLE.TEST], type=str, help='Filter for beams to which to add sims')
parser.add_argument('--nparticles', required=True, type=int, nargs="+", help='Number of particles to simulate (may supply multiple)')
parser.add_argument('--vartype', choices=[VARTYPE.HIGH, VARTYPE.LOW], default='highvar', help='type of sample')
parser.add_argument('--nsamples', type=int, default=1, help='number of simulations to run')
parser.parse_args(namespace=self.args)
beamids = database.db[DBCOLLECTIONS.BEAMPHOTON].aggregate([
{'$match': {'mlrole': self.args.role}},
{'$project': {'_id': True}},
{'$group': {'_id': None, 'ids': {'$addToSet': '$_id'}}}
]).next()['ids']
print('Adding simulation tasks for {} beams'.format(len(beamids)))
for nparticles in self.args.nparticles:
print('Adding for {} histories'.format(nparticles))
for beamid in beamids:
database.add_sims_to_beam(beam_id=beamid, vartype=self.args.vartype, num_runs=self.args.nsamples,
num_particles=nparticles)
@menucommand
def test_code(self, parser):
image_id = "5d9bd5ec4f71a0917827897e"
geom_id = "5d9bd5f04f71a09178278980"
out = "/media/hdd1/g4sim/beamlet_dose/HN010/1.5T/10e6/test"
# check volumes
ctvolume, _ = database.get_ctvolume(image_id=image_id)
np.save(pjoin(out, "ctvolume.npy"), ctvolume)
geomvolume, _ = database.get_geometry_volume(geom_id=geom_id)
np.save(pjoin(out, "geomvolume.npy"), geomvolume)
# check mcgeom
import geometry
mcgeom_out = pjoin(out, "mcgeom.txt")
mcgeomvol_out = pjoin(out, "mcgeomvolume.npy")
if not os.path.exists(mcgeom_out):
geometry.generate_geometry(mcgeom_out, geomvolume, (2.5, 2.5, 2.5))
if not os.path.exists(mcgeomvol_out):
with open(mcgeom_out) as fd:
size = [int(x) for x in fd.readline().strip('\n').split(' ')]
voxelsize = [float(x) for x in fd.readline().strip('\n').split(' ')]
start = [float(x) for x in fd.readline().strip('\n').split(' ')]
mcgeomvolume = np.zeros(size[::-1]).ravel()
for ii, line in enumerate(fd):
mcgeomvolume[ii] = float(line.strip('\n').split(' ')[0])
mcgeomvolume = mcgeomvolume.reshape(size[::-1])
np.save(mcgeomvol_out, mcgeomvolume)
@menucommand
def diff_beamlets(self, parser):
parser.add_argument('beamlists', nargs=2, type=str)
self.args = parser.parse_args(namespace=self.args)
beamlists = []
for beamlist_file in self.args.beamlists:
with open(beamlist_file, 'r') as fd:
beamlist=[]
for line in fd:
beamlist.append(ObjectId(line.strip('\n')))
beamlists.append(beamlist)
for beama, beamb in zip(*beamlists):
beamdoca = database.db[DBCOLLECTIONS.BEAMPHOTON].find_one({'_id': beama})
beamdocb = database.db[DBCOLLECTIONS.BEAMPHOTON].find_one({'_id': beamb})
assert beamdoca and beamdocb
# extract beamlets
def extract_beamlets(beamdoc):
beamlets = []
for blt in beamdoc['beamlets']:
beamlets.append(blt['position'])
return beamlets
beamletsa = extract_beamlets(beamdoca)
beamletsb = extract_beamlets(beamdocb)
assert database.diff_fluence_arrays(beamletsa, beamletsb, verbose=True)
@menucommand
def test_new_raytrace(self, parser):
# get ctdoc, geomdoc, structure_id to test
imdoc = database.db[DBCOLLECTIONS.IMAGES].find_one({'doi': "HN010"})
geomdoc = database.db[DBCOLLECTIONS.MCGEOM].find_one({'image_id': imdoc['_id']})
beamlists = {
'P_PTV_5400': [ObjectId(x) for x in [
'5e165b29f7aee215815be409',
'5e165b29f7aee215815be532',
'5e165b2af7aee215815be641',
'5e165b2af7aee215815be7bd',
'5e165b2af7aee215815be93e',
'5e165b2af7aee215815bea38',
'5e165b2af7aee215815beb80',
]],
'P_PTV_5940': [ObjectId(x) for x in [
'5e165c05f7aee215815c088c',
'5e165c05f7aee215815c0a96',
'5e165c06f7aee215815c0d2d',
'5e165c06f7aee215815c0f3c',
'5e165c06f7aee215815c10ea',
'5e165c07f7aee215815c134d',
'5e165c07f7aee215815c15ba',
]],
}
for ptvname, beamlist in beamlists.items():
beamdocs = list(database.db[DBCOLLECTIONS.BEAMPHOTON].find({'_id': {"$in": beamlist}}))
structdoc = None
for struct in imdoc['structures']:
if struct['name'] == ptvname:
structdoc = struct
assert len(beamdocs)
for x in (imdoc, geomdoc, structdoc, beamdocs[0]):
assert '_id' in x
# calculate new raytrace result
import geometry
for bb, beamdoc in enumerate(beamdocs):
mask = database.get_structure_mask(imdoc['_id'], structdoc['_id']) # full sized mask
active_beamlets, fmap = geometry.get_active_beamlets(
mask=mask,
angle_gantry=beamdoc['angle_gantry'],
angle_couch=beamdoc['angle_couch'],
angle_coll=beamdoc['angle_coll'],
iso=beamdoc['isocenter'],
start=imdoc['coordsys']['start'],
spacing=imdoc['coordsys']['spacing'],
fmapdims=beamdoc['fmapdims'],
beamletspacing=beamdoc['beamletspacing'],
beamletsize=beamdoc['beamletsize'],
sad=beamdoc['sad'],
)
# extract old raytrace result
old_active_beamlets = []
for blt in beamdoc['beamlets']:
old_active_beamlets.append(blt['position'])
database.diff_fluence_arrays(active_beamlets, old_active_beamlets)
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.subplots(2,2)
fmapnew = database.make_fluence_array(active_beamlets, beamdoc['fmapdims'])
ax[0,0].imshow(fmapnew)
ax[0,1].imshow(fmap)
fmapold = database.make_fluence_array(old_active_beamlets, beamdoc['fmapdims'])
ax[1,0].imshow(fmapold)
im = ax[1,1].imshow(fmapnew-fmapold, cmap='RdBu')
plt.colorbar(im)
fig.savefig('test_{}_{}.png'.format(ptvname, bb))
@menucommand
def patchbeamlets(self, parser):
"""resolve differences between existing beams and replacement fluence maps by deleting unnecessary
beamlets and adding newly introduced beamlets
"""
# for struct in database.db[DBCOLLECTIONS.IMAGES].find_one({"_id": ObjectId("5d9bd5ec4f71a0917827897e")})['structures']:
# print('{:30s} {!s:50s}'.format(struct['name'], struct['_id']))
# beamdocs = database.db[DBCOLLECTIONS.BEAMPHOTON].find({"geom_id": ObjectId("5e16564100ad46279500ed4b")})
# for beam in beamdocs:
# print(beam['_id'], len(beam['beamlets']))
# return
beams = [
{
"fmaps_folder": "/media/hdd1/dosecalc_debug/ryan_fmaps/HN010/fluence_maps_5400/",
'magnetic_field': [0,0,0.0,'tesla'],
'beam_ids': {
"5e165b5df7aee215815bf62e": "fmap-000000.raw",
"5e165b5df7aee215815bf757": "fmap-000001.raw",
"5e165b5ef7aee215815bf866": "fmap-000002.raw",
"5e165b5ef7aee215815bf9e2": "fmap-000003.raw",
"5e165b5ef7aee215815bfb63": "fmap-000004.raw",
"5e165b5ff7aee215815bfc5d": "fmap-000005.raw",
"5e165b5ff7aee215815bfda5": "fmap-000006.raw",
}
}, {
"fmaps_folder": "/media/hdd1/dosecalc_debug/ryan_fmaps/HN010/fluence_maps_5400/",
'magnetic_field': [0,0,1.5,'tesla'],
'beam_ids': {
"5e165b29f7aee215815be409": "fmap-000000.raw",
"5e165b29f7aee215815be532": "fmap-000001.raw",
"5e165b2af7aee215815be641": "fmap-000002.raw",
"5e165b2af7aee215815be7bd": "fmap-000003.raw",
"5e165b2af7aee215815be93e": "fmap-000004.raw",
"5e165b2af7aee215815bea38": "fmap-000005.raw",
"5e165b2af7aee215815beb80": "fmap-000006.raw",
}
}, {
"fmaps_folder": "/media/hdd1/dosecalc_debug/ryan_fmaps/HN010/fluence_maps_5940/",
'magnetic_field': [0,0,0.0,'tesla'],
'beam_ids': {
"5e165c24f7aee215815c2639": "fmap-000000.raw",
"5e165c25f7aee215815c2843": "fmap-000001.raw",
"5e165c25f7aee215815c2ada": "fmap-000002.raw",
"5e165c25f7aee215815c2ce9": "fmap-000003.raw",
"5e165c25f7aee215815c2e97": "fmap-000004.raw",
"5e165c26f7aee215815c30fa": "fmap-000005.raw",
"5e165c26f7aee215815c3367": "fmap-000006.raw",
}
}, {
"fmaps_folder": "/media/hdd1/dosecalc_debug/ryan_fmaps/HN010/fluence_maps_5940/",
'magnetic_field': [0,0,1.5,'tesla'],
'beam_ids': {
"5e165c05f7aee215815c088c": "fmap-000000.raw",
"5e165c05f7aee215815c0a96": "fmap-000001.raw",
"5e165c06f7aee215815c0d2d": "fmap-000002.raw",
"5e165c06f7aee215815c0f3c": "fmap-000003.raw",
"5e165c06f7aee215815c10ea": "fmap-000004.raw",
"5e165c07f7aee215815c134d": "fmap-000005.raw",
"5e165c07f7aee215815c15ba": "fmap-000006.raw",
}
}
]
for beam in beams:
for bb, (beam_id, fmapfile) in enumerate( beam['beam_ids'].items() ):
fmaps_folder = beam['fmaps_folder']
fmapfile = pjoin(fmaps_folder, fmapfile)
fmap = np.squeeze(load_bin(fmapfile, (1, 40, 40)))
beamdoc = database.db[DBCOLLECTIONS.BEAMPHOTON].find_one({'_id': ObjectId(beam_id)})
print(beam_id, len(beamdoc['beamlets']), np.count_nonzero(fmap))
# create fmap from database
fmap_db = np.zeros((40,40))
for blt in beamdoc['beamlets']:
y, x = blt['position']
fmap_db[y, x] = 1.0
save_bin(pjoin(fmaps_folder, 'fmap_from_db-{:06d}.raw'.format(bb)), fmap_db)
# delete unnecessary beamlets (present in the database but absent from the replacement fluence map)
for blt in beamdoc['beamlets']:
    y, x = blt['position']
    if fmap[y, x] <= 0.0:
        print('deleting beamlet: [x={}, y={}]'.format(x, y))
        database.subbeam_delete(beam_id=beam_id, subbeam_id=blt['_id'])
# add new beamlets
positions_yx = []
for y in range(fmap.shape[0]):
for x in range(fmap.shape[1]):
if fmap[y, x] > 0 and fmap_db[y, x] <= 0:
print('adding beamlet: [x={}, y={}]'.format(x, y))
positions_yx.append((y, x))
# populate new beamlets with simulation specs
subbeam_ids = database.subbeam_insert(beam_id=beam_id, positions=positions_yx)
for subbeam_id in subbeam_ids:
sim_id = database.simulation_insert(beam_id=beam_id,
subbeam_id=subbeam_id,
vartype=VARTYPE.LOW,
num_particles=1e5,
magnetic_field=beam['magnetic_field'],
num_runs=1)
@menucommand
def insert_sims_by_geometry(self, parser):
"""Insert a set of simulation requests for all beamlets assigned to a geometry"""
parser.add_argument("geom_id", type=str)
self.args = parser.parse_args(namespace=self.args)
geomdoc = database.get_doc(DBCOLLECTIONS.MCGEOM, self.args.geom_id)
if geomdoc is None:
raise RuntimeError("Couldn't find geometry \"{}\"".format(self.args.geom_id))
# simdocs = database.db[DBCOLLECTIONS.SIMULATION].find({
# '$and': [
# {'geom_id': ObjectId(geomdoc['_id'])},
# {'num_particles': {'$lt': 20000}},
# {'date_added': {'$gte': datetime.today()-timedelta(hours=1)}},
# ]
# })
# for simdoc in simdocs:
# database.simulation_delete(simdoc['_id'])
beams = [
{
"fmaps_folder": "/media/hdd1/dosecalc_debug/ryan_fmaps/HN011/fluence_maps/",
'magnetic_field': [0,0,1.5,'tesla'],
'beam_ids': {
"5e15407dc9c073745a01cb7c": "fmap-000000.raw",
"5e15407dc9c073745a01cc75": "fmap-000001.raw",
"5e15407ec9c073745a01cd97": "fmap-000002.raw",
"5e15407ec9c073745a01ced3": "fmap-000003.raw",
"5e15407fc9c073745a01cfe6": "fmap-000004.raw",
"5e15407fc9c073745a01d0f4": "fmap-000005.raw",
"5e154080c9c073745a01d229": "fmap-000006.raw",
}
}, {
"fmaps_folder": "/media/hdd1/dosecalc_debug/ryan_fmaps/HN010/fluence_maps_5400/",
'magnetic_field': [0,0,1.5,'tesla'],
'beam_ids': {
"5e165b29f7aee215815be409": "fmap-000000.raw",
"5e165b29f7aee215815be532": "fmap-000001.raw",
"5e165b2af7aee215815be641": "fmap-000002.raw",
"5e165b2af7aee215815be7bd": "fmap-000003.raw",
"5e165b2af7aee215815be93e": "fmap-000004.raw",
"5e165b2af7aee215815bea38": "fmap-000005.raw",
"5e165b2af7aee215815beb80": "fmap-000006.raw",
}
}, {
"fmaps_folder": "/media/hdd1/dosecalc_debug/ryan_fmaps/HN010/fluence_maps_5940/",
'magnetic_field': [0,0,1.5,'tesla'],
'beam_ids': {
"5e165c05f7aee215815c088c": "fmap-000000.raw",
"5e165c05f7aee215815c0a96": "fmap-000001.raw",
"5e165c06f7aee215815c0d2d": "fmap-000002.raw",
"5e165c06f7aee215815c0f3c": "fmap-000003.raw",
"5e165c06f7aee215815c10ea": "fmap-000004.raw",
"5e165c07f7aee215815c134d": "fmap-000005.raw",
"5e165c07f7aee215815c15ba": "fmap-000006.raw",
}
}
]
for beam in beams:
for beam_id in beam['beam_ids'].keys():
# beamdocs = database.db[DBCOLLECTIONS.BEAMPHOTON].find({'geom_id': ObjectId(self.args.geom_id)})
beamdoc = database.get_doc(DBCOLLECTIONS.BEAMPHOTON, beam_id)
for subbeam in beamdoc['beamlets']:
for num_particles in [500, 1000, 2000, 5000]:
database.simulation_insert(beam_id=beamdoc['_id'],
subbeam_id=subbeam['_id'],
vartype=VARTYPE.LOW if num_particles>=1e5 else VARTYPE.HIGH,
num_particles=num_particles,
magnetic_field=[0,0,1.5,'tesla'],
num_runs=1)
@menucommand
def relocate_density_files(self, parser):
"""iterate through all beamlets and relocate density file to mcgeometry folder where it can be shared
to save on disk space """
for geomdoc in database.db[DBCOLLECTIONS.MCGEOM].find():
geom_id = geomdoc['_id']
densfile_path = pjoin(database.build_datapath_geom(geom_id), 'InputDensity.bin')
print("Shared density path is \"{}\"".format(densfile_path))
densfile_exists = os.path.isfile(densfile_path)
for simdoc in database.db[DBCOLLECTIONS.SIMULATION].find({'geom_id': geom_id}):
for sampledoc in simdoc['samples']:
oldfile = database.dbabspath(sampledoc['densfile'])
if not densfile_exists:
# copy this density file to shared location
try:
    shutil.copy2(oldfile, densfile_path)
    densfile_exists = True
    print('copied file from "{}" to "{}"'.format(oldfile, densfile_path))
except Exception as e:
    print(e)
# redirect path to shared file (convert to relative path if necessary)
newfile_rel = database.dbrelpath(densfile_path)
if sampledoc['densfile'] != newfile_rel:
res = database.db[DBCOLLECTIONS.SIMULATION].update_many(
filter={
'_id': simdoc['_id'],
'samples._id': sampledoc['_id'],
},
update={
'$set': {'samples.$.densfile': newfile_rel}
}
)
print('updated sample "{}" densfile path to "{}"'.format(sampledoc['_id'], newfile_rel))
# delete redundant file (don't delete relocated/shared file)
if oldfile != densfile_path:
    try:
        os.remove(oldfile)
        print('deleted redundant densfile "{}"'.format(oldfile))
    except OSError:
        print('failed to delete file: "{}"'.format(oldfile))
# delete additional large/wasteful files
for fname in ('run_log.txt', ):
    filepath = pjoin(database.build_datapath_simulation(simdoc['_id']), fname)
    try:
        os.remove(filepath)
        print('deleted unnecessary file "{}"'.format(filepath))
    except Exception as e:
        print('failed to delete file: "{}"'.format(filepath))
# TODO
@menucommand
def remove_duplicate_beams(self, parser):
raise NotImplementedError()
repmap = namedtuple("repmap", ("old", "new"))
replace_map = [
repmap("<KEY>", "<KEY>"),
repmap("5dd5c9974b45cc99f1aa834b", "<KEY>"),
repmap("<KEY>", "<KEY>"),
repmap("5dd5c9984b45cc99f1aa85a9", "<KEY>"),
repmap("<KEY>", "<KEY>"),
repmap("5dd5c9984b45cc99f1aa87ca", "5dd5ca4a4b45cc99f1aab6e1"),
repmap("5dd5c9994b45cc99f1aa88ff", "<KEY>"),
]
for rep in replace_map:
# get old beamdoc
# for each beamlet in oldbeamdoc
# match to correct beamlet in newbeamdoc (by position)
# insert simulation objects into newbeamdoc array, and change beam_id and subbeam_id for these simulations
# move simulation data from oldbeamdoc to newbeamdoc folder (optional)
# delete old beams docs
pass
@menucommand
def insert_new_training_data(self, parser):
"""Added on 12jan2020 to create new geometry for all images/geoms designated for training/validation
Since original geometry had issues with flipped z-axis and swapped beamlet position x and y indices"""
# for all HN data associated with train/validate, add new geometry objects and re-generate random assortment
# of beams, beamlets, and simulations associated with those new geometries
image_uids = []
for imdoc in database.db[DBCOLLECTIONS.IMAGES].find({'doi': {'$not': {"$in": ['HN010', 'HN011']}}}):
print(imdoc['doi'], imdoc['_id'])
image_uids.append(imdoc['_id'])
print()
gfd = open('corrupt_geoms.txt', 'w')
for image_id in image_uids:
print("Image: {}".format(image_id))
# cleanup half-completed geoms
for geomdoc in database.db[DBCOLLECTIONS.MCGEOM].find({
'image_id': image_id,
'date_added': {'$gte': datetime.today()-timedelta(hours=2)},
}):
print('deleting geom: "{}"'.format(geomdoc['_id']))
database.geometry_delete(geomdoc['_id'])
# add new geoms
for oldgeomdoc in list(database.db[DBCOLLECTIONS.MCGEOM].find({'image_id': image_id})):
try:
nbeams = next(database.db[DBCOLLECTIONS.BEAMPHOTON].aggregate([
{'$match': {'geom_id': oldgeomdoc['_id']}},
{'$count': 'nbeams'}
]))['nbeams']
if nbeams is None or nbeams<=0:
raise StopIteration
except StopIteration as e:
print("Geometry \"{}\" is empty, deletion is possible but not performed now".format(oldgeomdoc['_id']))
# database.geometry_delete(oldgeomdoc['_id'])
continue
geom_id = database.geometry_insert(image_id=image_id,
**oldgeomdoc['coordsys'],
)
geomdoc = database.get_doc(DBCOLLECTIONS.MCGEOM, geom_id)
database.db[DBCOLLECTIONS.MCGEOM].update_one(
filter={'_id': oldgeomdoc['_id']},
update={"$set": {'procstatus.message': 'Corrupt: flipped sim mcgeom'}}
)
gfd.write('{!s}\n'.format(oldgeomdoc['_id']))
# create new beams
for beamdoc in database.db[DBCOLLECTIONS.BEAMPHOTON].find(
{'geom_id': oldgeomdoc['_id']}
):
if beamdoc is not None:
structure_id = beamdoc['structure_id']
print(" Structure id: {}".format(structure_id))
break
assert structure_id is not None
structuredoc = next((x for x in database.db[DBCOLLECTIONS.IMAGES].find_one({'_id': image_id})['structures'] if str(x['_id'])==str(structure_id)))
actual_isocenter = structuredoc['centroid']
iso_shift = (10, 10, 20) # allowable shift from centroid [units: mm]
for angle in
|
np.random.uniform(0, 2*math.pi, size=40)
|
numpy.random.uniform
|
import numpy as np
import matplotlib.pyplot as plt
import os
from utils_examples import generate_synthetic_measure
path = os.getcwd() + "/output/"
if not os.path.isdir(path):
os.mkdir(path)
if not os.path.isdir(path + "/paper/"):
os.mkdir(path + "/paper/")
if not os.path.isdir(path + "/cvrate/"):
os.mkdir(path + "/cvrate/")
rc = {"pdf.fonttype": 42, 'text.usetex': True, 'text.latex.preview': True,
'text.latex.preamble': [r'\usepackage{amsmath}',
r'\usepackage{amssymb}']}
plt.rcParams.update(rc)
def load_wot_data():
import wot
import pandas as pd
gene_set_scores = pd.read_csv('data/gene_set_scores.csv', index_col=0)
proliferation = gene_set_scores['Cell.cycle']
apoptosis = gene_set_scores['Apoptosis']
# apply logistic function to transform to birth rate and death rate
def logistic(x, L, k, x0=0):
f = L / (1 +
|
np.exp(-k * (x - x0))
|
numpy.exp
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 8 22:10:13 2019
@author: <NAME>
"""
import numpy as np
from .grid_configuration import config
import torch.utils.data
import torch
import pandas as pd
from scipy import signal
# hyper parameters
use_forcast_weather = 1 # whether to use the forecast data, 1 means True, 0 means False
delta_weather = 0
use_filter = 1 # apply a low-pass filter to the obtained dimensionless trend
use_ratio = 1 # dimensionless ratio
use_mean_ratio = 1
use_different_mean_ratio = 0 # each district uses a different ratio (this indicates that each district only uses its own dimensionless trend)
use_CV_ratio = 1 # use the ratio from cross-validation (this indicates that different groups have different dimensionless trends, and the districts within each group use the same dimensionless trend)
use_weather_error_test = 0 # in this case only the test set has weather error
weather_error_test = 0.0
use_weather_error_train = 1 # in this case only the training set has weather error, which helps improve robustness
weather_error_train = 0.05
def ave_ratio (data_origin, use_filter): # the default value of the dimensionless trend. When more accurate expert experience is available, this function can be replaced by the dimensionless trend generated by the expert system.
# calculate the average ratio of all the districts
# inputs: the original data of all districts
# outputs: mean_ratio_all is a (num_districts*num_data) matrix. it is the mean_ratio of all districts
# mean_ratio_all_ave is an array with size of (num_data,). it is the average mean_ratio over all districts
mean_ratio_all = None
for i in range (14):
load = data_origin[i]
load_raw_array = load.iloc[:, 1] # 32616 ratio values
input_load = np.array(load_raw_array)
data_num = np.shape(input_load)[0]
week_num = int(data_num/168) # calculate the number of weeks
# reshape loads to (num of hours in one week) * (num of weeks)
delet_ID = np.arange(week_num*168, data_num)
input_load_del = np.delete( input_load, delet_ID, 0) # keep only whole weeks of data
input_load_week = input_load_del.reshape(168, week_num) # 168(num of hours in one week) * num of weeks
# calculate the average ratio in one week
input_load_week_mean = np.mean(input_load_week, axis=1)
print('original:',np.mean(input_load_week_mean))
#print('original:',np.max(input_load_week_mean)-np.min(input_load_week_mean))
if use_filter == True:
# low-pass filtering
b, a = signal.butter(8, 0.2, 'lowpass')
filter_input_load_week_mean = signal.filtfilt(b, a, input_load_week_mean)
# rescale back to the pre-filtering range of the ratio, since filtering shrinks the scale of the ratio
filter_input_load_week_mean = (filter_input_load_week_mean-np.min(filter_input_load_week_mean)) / (np.max(filter_input_load_week_mean)-np.min(filter_input_load_week_mean))
input_load_week_mean = filter_input_load_week_mean * (np.max(input_load_week_mean)-np.min(input_load_week_mean)) + np.min(input_load_week_mean)
print('filtered:',np.mean(input_load_week_mean))
# generate the average ratio for the length of data_num
mean_ratio = None
for i in range (week_num+1):
if mean_ratio is None:
mean_ratio = input_load_week_mean
else:
mean_ratio = np.hstack((mean_ratio, input_load_week_mean))
delet_ID = np.arange(data_num, np.shape(mean_ratio)[0])
mean_ratio = np.delete( mean_ratio, delet_ID, 0).reshape(1,-1)
# save the mean_ratio of all districts
if mean_ratio_all is None: # mean_ratio_all is the mean_ratio of all the districts
mean_ratio_all = mean_ratio
else:
mean_ratio_all = np.vstack((mean_ratio_all, mean_ratio))
mean_ratio_all_ave = np.mean(np.delete(mean_ratio_all,[10,13],0), axis=0) # mean_ratio_all_ave is the average of mean_ratio_all over 14 districts
#np.savetxt('load_results.csv', np.array(mean_ratio_all).T, delimiter=',')
mean_ratio_group1 = (np.sum(np.delete(mean_ratio_all,[10,13],0), axis=0)-mean_ratio_all[4,:]-mean_ratio_all[3,:]-mean_ratio_all[0,:])/9
print('the sum of filtered:',np.mean(np.sum(np.delete(mean_ratio_all,[10,13],0), axis=0)))
mean_ratio_group2 = (np.sum(np.delete(mean_ratio_all,[10,13],0), axis=0)-mean_ratio_all[5,:]-mean_ratio_all[7,:]-mean_ratio_all[2,:])/9
mean_ratio_group3 = (np.sum(np.delete(mean_ratio_all,[10,13],0), axis=0)-mean_ratio_all[11,:]-mean_ratio_all[8,:]-mean_ratio_all[6,:])/9
mean_ratio_group4 = (np.sum(np.delete(mean_ratio_all,[10,13],0), axis=0)-mean_ratio_all[12,:]-mean_ratio_all[1,:]-mean_ratio_all[9,:])/9
mean_ratio_group = np.vstack(( np.vstack(( np.vstack(( mean_ratio_group1,mean_ratio_group2 )),mean_ratio_group3 )),mean_ratio_group4 ))
print('the group of filtered:',np.mean(mean_ratio_group, axis=1))
#np.savetxt('ratio_group.csv', np.array(mean_ratio_group).T, delimiter=',')
return mean_ratio_all, mean_ratio_all_ave, mean_ratio_group # all have length 32616; in real prediction the output lacks the first day (32592 points), so the first 24 entries must be removed for prediction
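# --- illustrative sketch only (not called by this script) -------------------------
# Self-contained example of the smoothing idea used in ave_ratio above on synthetic
# data: average a load series into a 168-hour weekly profile, low-pass filter it with
# the same Butterworth settings, and rescale back to the original range.
def _weekly_profile_demo():
    import numpy as np
    from scipy import signal
    hours = np.arange(168 * 8)
    load = 1.0 + 0.3 * np.sin(2 * np.pi * hours / 24) + 0.05 * np.random.randn(hours.size)
    weekly = load.reshape(-1, 168).mean(axis=0)          # hour-of-week profile
    b, a = signal.butter(8, 0.2, 'lowpass')
    smooth = signal.filtfilt(b, a, weekly)
    smooth = (smooth - np.min(smooth)) / (np.max(smooth) - np.min(smooth))
    return smooth * (np.max(weekly) - np.min(weekly)) + np.min(weekly)
# ----------------------------------------------------------------------------------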
# load data
data_origin = []
data_list = ['CY','HD','FT','SJS','PG','YZ','CP',
'MTG','FS','DX','HR','MY','SY','YQ'] # net data is missing; PG weather data starts from June 2010 and has too many gaps
for i in range (14):
if use_ratio == True:
name = config.data_path + '/data_day_'+ data_list[i] +'.csv'
else:
name = config.data_path + '/real_data_day_'+ data_list[i] +'.csv'
#name = 'E:/Research CYT/grid/enlstm_code/ratio/data/data_day_'+ data_list[i] +'.csv'
df = pd.read_csv(name)
data_origin.append(df)
#print(i)
print('shape of df',np.shape(np.array(df)))
# normalize the load separately for each district's data to generate load_normal
load_mean = np.zeros((14, 4)) # the columns correspond to load + weather + wind speed
load_mean_save = np.zeros((14, 1))
load_std = np.zeros((14, 4))
load_std_save = np.zeros((14, 1))
load_normal = [] # the normalized load and weather, with shape x*1, already concatenated into one column
# generate the mean_ratio_all
if use_mean_ratio == True:
mean_ratio_all, mean_ratio_all_ave, mean_ratio_group = ave_ratio (data_origin, use_filter)
print ('shape of mean_ratio_all is:', np.shape(mean_ratio_all))
print ('shape of mean_ratio_all_ave is:', np.shape(mean_ratio_all_ave))
#np.savetxt('test.csv', mean_ratio_group.T, delimiter=',')
# generate the basic data: generate the whole dataset and normalize a part of it
# generate the load_normal, include load, weather(tem, rhu, wind)
for i in range (14):
load = data_origin[i]
load_raw_array = load.iloc[:, 1:2]
if use_mean_ratio == True:
if use_different_mean_ratio == True:
load_raw_array = load_raw_array - mean_ratio_all[i,:].reshape(-1,1)
elif use_CV_ratio == True:
load_raw_array = load_raw_array - mean_ratio_group[config.test_set_ID,:].reshape(-1,1)
else:
load_raw_array = load_raw_array - mean_ratio_all_ave.reshape(-1,1)
weather_raw_array = load.iloc[:, 2:5]
# calculate the change of weather
if delta_weather == True:
weather_raw_array =
|
np.array(weather_raw_array)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
<NAME>
Computational Biologist
Target Sciences
GSK
<EMAIL>
"""
import numpy as np
import matplotlib.pyplot as plt
import dataclasses
from scipy import stats
def get_unique_pcuts(P, max_cuts=1000):
uP = np.unique(P)[::-1]
uP = np.insert(uP, 0, uP[0] + (uP[0]-uP[1])) if uP.size > 1 else np.insert(uP, 0, 1.01*uP[0])
if uP.size > max_cuts:
return uP[np.linspace(0, uP.size-1, max_cuts, dtype='int64')]
else:
return uP
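# Quick illustrative check (not part of the original module): cutoffs come back in
# descending order with a leading value above max(P), so the strictest threshold
# classifies nothing as positive.
def _demo_get_unique_pcuts():
    cuts = get_unique_pcuts(np.array([0.2, 0.8, 0.8, 0.5]))
    assert cuts[0] > 0.8 and np.all(np.diff(cuts) < 0)
    return cuts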
def get_priority_cutoff_metadata(stat_cut, pp_min_frac=0.1, xx_min_frac=0.01):
ap = stat_cut.select('ap',[])[0]
pp_min = int(np.ceil(ap*pp_min_frac))
xx_min = int(np.ceil(ap*xx_min_frac))
is_qualified_cutoff = np.logical_and((stat_cut.matrix[np.in1d(stat_cut.rowlabels, ['tp', 'fp', 'tn', 'fn']),:] >= xx_min).all(0).reshape(-1),
(stat_cut.matrix[stat_cut.rowlabels=='pp',:] >= pp_min).reshape(-1))
if (~is_qualified_cutoff).all():
is_qualified_cutoff[:] = True # if no qualified cutoffs, resort to all cutoffs
mcc = stat_cut.select('mcc',[])
ppv = stat_cut.select('ppv',[])
mcc_max = mcc[is_qualified_cutoff].max()
ppv_max = ppv[is_qualified_cutoff].max()
mcc_idx = np.where(mcc >= mcc_max - 0.01*np.abs(mcc_max))[0][-1]
ppv_idx = np.where(ppv >= ppv_max - 0.01*np.abs(ppv_max))[0][-1]
p50_idx = np.argmin((stat_cut.select('p',[])-0.5)**2)
ppe_idx = np.argmin((stat_cut.select('pp',[])-ap)**2)
stat_cut.columnmeta['mcc_cutoff'] = np.arange(stat_cut.shape[1], dtype='int64') == mcc_idx
stat_cut.columnmeta['ppv_cutoff'] = np.arange(stat_cut.shape[1], dtype='int64') == ppv_idx
stat_cut.columnmeta['p50_cutoff'] = np.arange(stat_cut.shape[1], dtype='int64') == p50_idx
stat_cut.columnmeta['ppe_cutoff'] = np.arange(stat_cut.shape[1], dtype='int64') == ppe_idx
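# Worked micro-example (illustrative only) of the Matthews correlation coefficient
# maximized above: mcc = (tp*tn - fp*fn) / sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)).
def _mcc_demo(tp=8.0, fp=2.0, tn=7.0, fn=3.0):
    return (tp*tn - fp*fn) / np.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))  # ~0.50 for the defaults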
def get_classifier_performance_stats(Y, P, uP=1000, classifier_stats='all', plot_curves=True, get_priority_cutoffs=True, pp_min_frac=0.1, xx_min_frac=0.01):
if type(uP) == int:
uP = get_unique_pcuts(P=P, max_cuts=uP).reshape(-1,1)
elif len(uP.shape) == 1:
uP = uP.reshape(-1,1)
if type(classifier_stats) == str:
classifier_stats = np.array(['p', 'n', 'ap', 'an', 'pp', 'pn', 'tp', 'fp', 'tn', 'fn', 'tpr', 'fpr', 'auroc', 'fnr', 'tnr',
'mcr', 'acc', 'fdr', 'ppv', 'auprc', 'fomr', 'npv', 'plr', 'nlr', 'dor', 'drr', 'darr',
'mrr', 'marr', 'f1s', 'mcc', 'fnlp'], dtype='object')
n = np.float64(Y.size) + 0.2
ap = Y.sum().astype('float64') + 0.1
an = (~Y).sum().astype('float64') + 0.1
pp = (P >= uP).sum(1).astype('float64') + 0.1
pn = (P < uP).sum(1).astype('float64') + 0.1
tp = np.logical_and(P >= uP, Y).sum(1).astype('float64') + 0.05 # if count is 5, then this introduces 1% error
fp = np.logical_and(P >= uP, ~Y).sum(1).astype('float64') + 0.05 # so don't take seriously any cut-off where
tn = np.logical_and(P < uP, ~Y).sum(1).astype('float64') + 0.05 # any count is less than 5
fn =
|
np.logical_and(P < uP, Y)
|
numpy.logical_and
|
# Read 2 3D-coordinates from .trr trajectory files
# input: tpr_filename, the filename of .tpr file in Gromacs
# trr_filename, the filename of .trr file in Gromacs
# select_atoms_filename#, the filename including a command-line
# for keeping trajectory of the selected atoms
# output: coordinates#, xyz position of atoms, (#frame x #atoms x 3)
# unit_cells, box dimension (#frame x (x,y,z))
# Example: coordinates1, coordinates2, unit_cells = read_trr_3d_select2('topol.tpr','traj.trr','b.select','peo.select')
def read_trr_3d_select2(tpr_filename, trr_filename, select_atoms_filename1, select_atoms_filename2, mode):
print("io.read_trr_3d_select2:")
# import
import MDAnalysis
import numpy as np
# check the arg, mode
outmode = np.zeros(3,dtype=bool) # use true and false in python as 1 and 0
if 'pos' in mode:
outmode[0] = True
if 'vel' in mode:
outmode[1] = True
if 'forc' in mode:
outmode[2] = True
if not np.any(outmode): # if all is false
raise ValueError(" wrong arg mode {}".format(mode))
ndata = sum(bool(x) for x in outmode)
print(" output data #sets = {} by your mode setting {} ".format(ndata,mode))
# read a line of select command-line for MDAnalysis
select_command = []
for select_atoms_filename in [select_atoms_filename1, select_atoms_filename2]:
if select_atoms_filename is not None:
try:
open_file = open(select_atoms_filename, 'r')
except IOError:
raise IOError(" problem with opening ",select_atoms_filename)
select_command_temp = open_file.readline().strip()
open_file.close()
print(" select written in {}: {}".format(select_atoms_filename,select_command_temp))
select_command.append(select_command_temp)
else:
raise ValueError(" wrong select atom files {}".format(select_atoms_filename))
# Read trajectory using MDAnalysis
u = MDAnalysis.Universe(tpr_filename,trr_filename)
n_frames = len(u.trajectory)
# obtain a set of atom index
n_atoms = []
atoms = []
for iselect in select_command:
list_atoms = u.select_atoms(iselect).indices
print(" You selected {} atoms for selection {}".format(len(list_atoms),iselect))
if len(list_atoms) == 0:
raise ValueError(" No atom is selected. {} may be wrong in grammer.".format(iselect))
atoms.append(list_atoms)
n_atoms.append(len(list_atoms))
print(" selected total #atoms: {}".format(n_atoms))
# initialize variables
if ndata > 1:
data1 = np.zeros((ndata, n_frames, n_atoms[0], 3))
data2 = np.zeros((ndata, n_frames, n_atoms[1], 3))
else:
data1 = np.zeros((n_frames, n_atoms[0], 3))
data2 = np.zeros((n_frames, n_atoms[1], 3))
unit_cells = np.zeros((n_frames, 6))
# read trajectory
print(" starting reading trajectory...")
i_frame = 0
mod_frame = process_init()
for ts in u.trajectory:
try:
if ndata == 1:
if outmode[0]:
tmp = np.array(ts._pos)
if outmode[1]:
tmp = np.array(ts._velocities)
if outmode[2]:
tmp = np.array(ts._forces)
data1[i_frame, :, :] = tmp[atoms[0]]
data2[i_frame, :, :] = tmp[atoms[1]]
else:
dataset = 0
if outmode[0]:
tmp = np.array(ts._pos)
data1[dataset, i_frame, :, :] = tmp[atoms[0]]
data2[dataset, i_frame, :, :] = tmp[atoms[1]]
dataset = dataset + 1
if outmode[1]:
tmp = np.array(ts._velocities)
data1[dataset, i_frame, :, :] = tmp[atoms[0]]
data2[dataset, i_frame, :, :] = tmp[atoms[1]]
dataset = dataset + 1
if outmode[2]:
tmp = np.array(ts._forces)
data1[dataset, i_frame, :, :] = tmp[atoms[0]]
data2[dataset, i_frame, :, :] = tmp[atoms[1]]
dataset = dataset + 1
if dataset != ndata:
raise ValueError(" weird number of data set reading trajectory. {} {}".format(dataset,ndata))
unit_cells[i_frame, :] = ts._unitcell
except IndexError:
raise ValueError(" There are more coordinates to be read than indicated in the header.")
i_frame += 1
mod_frame = process_print(i_frame,n_frames,mod_frame)
# check consistency; final i_frame should be the same as # frames
if i_frame != n_frames:
print(" actual nframes {} in trajectory != the length claimed in header of trajectory {}".format(i_frame, n_frames))
print(" saving trajectory is problem (due to limit of disk quota). Size of your data will be {} by force".format(i_frame))
print(" # frames will be extracted from trajectory = {} (excludes t=0)".format(i_frame-1))
# box info
if all(unit_cells[0,:] == unit_cells[1,:]):
print(" The system may be in NVT ensemble")
else:
# for gromacs (tpr, trr files)
# unit_cells = [length_x, length_y, length_z, angles, ...]
if 'trr' in trr_filename and 'tpr' in tpr_filename:
if unit_cells[0][0] == unit_cells[1][0] and unit_cells[0][1] == unit_cells[1][1]:
print(" may be in NPAT ensemble")
else:
print(" may be in NPT ensemble")
# for openmm (pdb, dcd files)
# unit_cells = [length_x, alpha angle, length_y, beta angle, theta angle, length_z]
if 'dcd' in trr_filename and 'pdb' in tpr_filename:
if unit_cells[0][0] == unit_cells[1][0] and unit_cells[0][2] == unit_cells[1][2]:
print(" may be in NPAT ensemble")
else:
print(" may be in NPT ensemble")
if ndata == 1:
#return data1[0:i_frame-1], data2[0:i_frame-1], unit_cells[0:i_frame-1]
return data1[1:i_frame], data2[1:i_frame], unit_cells[1:i_frame]
else:
return data1[:,1:i_frame,:,:], data2[:,1:i_frame,:,:], unit_cells[1:i_frame]
# Read 3D-coordinates from .trr trajectory files
# input: tpr_filename, the filename of .tpr file in Gromacs
# trr_filename, the filename of .trr file in Gromacs
# select_atoms_filename, the filename including a command-line
# for keeping trajectory of the selected atoms
# output: coordinates, xyz position of atoms, (#frame x #atoms x 3)
# unit_cells, box dimension (#frame x (x,y,z))
# Example: coordinates, unit_cells = read_trr_3d_select1('topol.tpr','traj.trr','b.select')
def read_trr_3d_select1(tpr_filename, trr_filename, select_atoms_filename=None, mode='pos'):
print("read_trr_3d_select1:")
# import
import MDAnalysis
import numpy as np
# check the arg, mode
outmode = np.zeros(3,dtype=bool) # use true and false in python as 1 and 0
if 'pos' in mode:
outmode[0] = True
if 'vel' in mode:
outmode[1] = True
if 'forc' in mode:
outmode[2] = True
if not np.any(outmode): # if all is false
raise ValueError(" wrong arg mode {}".format(mode))
ndata = sum(bool(x) for x in outmode)
print(" output data #sets = {} by your mode setting {} ".format(ndata,mode))
# read a line of select command-line for MDAnalysis
select_command = []
if select_atoms_filename is not None:
try:
open_file = open(select_atoms_filename, 'r')
except IOError:
raise IOError(" problem with opening ",select_atoms_filename)
select_command_temp = open_file.readline().strip()
open_file.close()
print(" select written in {}: {}".format(select_atoms_filename,select_command_temp))
select_command.append(select_command_temp)
#else:
# raise ValueError(" wrong select atom files {}".format(select_atoms_filename))
# Read trajectory using MDAnalysis
u = MDAnalysis.Universe(tpr_filename,trr_filename)
n_frames = len(u.trajectory)
# obtain a set of atom index
n_atoms = []
atoms = []
if select_atoms_filename is not None:
for iselect in select_command:
list_atoms = u.select_atoms(iselect).indices
if len(list_atoms) == 0:
raise ValueError(" No atom is selected. {} may be wrong in grammer.".format(iselect))
else:
list_atoms = u.select_atoms("all").indices
atoms.append(list_atoms)
n_atoms.append(len(list_atoms))
print(" selected total #atoms: {}".format(n_atoms))
# initialize variables
if ndata > 1:
data1 = np.zeros((ndata, n_frames, n_atoms[0], 3))
else:
data1 = np.zeros((n_frames, n_atoms[0], 3))
unit_cells = np.zeros((n_frames, 6))
# read trajectory
print(" starting reading trajectory...")
i_frame = 0
mod_frame = process_init()
for ts in u.trajectory:
try:
if ndata == 1:
if outmode[0]:
tmp = np.array(ts._pos)
if outmode[1]:
tmp = np.array(ts._velocities)
if outmode[2]:
tmp = np.array(ts._forces)
data1[i_frame, :, :] = tmp[atoms[0]]
else:
dataset = 0
if outmode[0]:
tmp = np.array(ts._pos)
data1[dataset, i_frame, :, :] = tmp[atoms[0]]
dataset = dataset + 1
if outmode[1]:
tmp = np.array(ts._velocities)
data1[dataset, i_frame, :, :] = tmp[atoms[0]]
dataset = dataset + 1
if outmode[2]:
tmp = np.array(ts._forces)
data1[dataset, i_frame, :, :] = tmp[atoms[0]]
dataset = dataset + 1
if dataset != ndata:
raise ValueError(" weird number of data set reading trajectory. {} {}".format(dataset,ndata))
unit_cells[i_frame, :] = ts._unitcell
except IndexError:
raise ValueError(" There are more coordinates to be read than indicated in the header.")
i_frame += 1
mod_frame = process_print(i_frame,n_frames,mod_frame)
# check consistency; final i_frame should be the same as # frames
if i_frame != n_frames:
print(" actual nframes {} in trajectory != the length claimed in header of trajectory {}".format(i_frame, n_frames))
print(" saving trajectory is problem (due to limit of disk quota). Size of your data will be {} by force".format(i_frame))
print("# frames will be extracted from trajectory = {} (excludes t=0)".format(i_frame-1))
# box info
if all(unit_cells[0,:] == unit_cells[1,:]):
print("The system may be in NVT ensemble")
else:
# for gromacs (tpr, trr files)
# unit_cells = [length_x, length_y, length_z, angles, ...]
if 'trr' in trr_filename and 'tpr' in tpr_filename:
if unit_cells[0][0] == unit_cells[1][0] and unit_cells[0][1] == unit_cells[1][1]:
print("may be in NPAT ensemble")
else:
print("may be in NPT ensemble")
# for openmm (pdb, dcd files)
# unit_cells = [length_x, alpha angle, length_y, beta angle, theta angle, length_z]
if 'dcd' in trr_filename and 'pdb' in tpr_filename:
if unit_cells[0][0] == unit_cells[1][0] and unit_cells[0][2] == unit_cells[1][2]:
print("may be in NPAT ensemble")
else:
print("may be in NPT ensemble")
if ndata == 1:
return data1[1:i_frame], unit_cells[1:i_frame] #return data1[0:i_frame-1], data2[0:i_frame-1], unit_cells[0:i_frame-1]
else:
return data1[:,1:i_frame,:,:], unit_cells[1:i_frame] # return data1[0]:pos, data1[1]:force if your mode is "pos force"
# Read some 3D-coordinates from .trr trajectory files
# input: tpr_filename, the filename of .tpr file in Gromacs
# trr_filename, the filename of .trr file in Gromacs
# select_atoms_filename#, the filename including a command-line
# for keeping trajectory of the selected atoms
# output: coordinates#, xyz position of atoms, (#frame x #atoms x 3)
# unit_cells, box dimension (#frame x (x,y,z))
# Example: data1, data2, unit_cells = read_trr_3d_selects('topol.tpr','traj.trr','b.select','pos')
def read_trr_3d_selects(tpr_filename, trr_filename, select_filename, mode):
print("io.read_trr_3d_selects: ######## INCOMPLETE coding #### ")
# import
import MDAnalysis
import numpy as np
# check the arg, mode
outmode = np.zeros(3,dtype=bool) # use true and false in python as 1 and 0
if 'pos' in mode:
outmode[0] = True
if 'vel' in mode:
outmode[1] = True
if 'forc' in mode:
outmode[2] = True
if not np.any(outmode): # if all is false
raise ValueError(" wrong arg mode {}".format(mode))
ndata = sum(bool(x) for x in outmode)
print(" output data #sets = {} by your mode setting {} ".format(ndata,mode))
# read a line of select command-line for MDAnalysis
select_command = []
if select_filename is not None:
try:
open_file = open(select_filename, 'r')
except IOError:
raise IOError(" problem with opening ",select_atoms_filename)
for line in oepn_file:
iline = line.strip()
# line is blank or comment line
if not iline or iline.startswith('#') or iline.startswith('@'):
continue
print(" select written in {}: {}".format(select_filename,iline))
select_command.append(iline)
n_select = len(select_command)
open_file.close()
else:
raise ValueError(" wrong select atom files {}".format(select_atoms_filename))
# Read trajectory using MDAnalysis
u = MDAnalysis.Universe(tpr_filename,trr_filename)
n_frames = len(u.trajectory)
# obtain a set of atom index
n_atoms = []
atoms = []
for iselect in select_command:
list_atoms = u.select_atoms(iselect).indices
if len(list_atoms) == 0:
raise ValueError(" No atom is selected. {} may be wrong in grammer.".format(iselect))
atoms.append(list_atoms)
n_atoms.append(len(list_atoms))
print(" selected total #atoms: {}".format(n_atoms))
######################################## followings are not complete
# initialize variables with zeros
if ndata > 1:
data1 = np.zeros((ndata, n_frames, n_atoms[0], 3))
data2 = np.zeros((ndata, n_frames, n_atoms[1], 3))
else:
data1 = np.zeros((n_frames, n_atoms[0], 3))
data2 = np.zeros((n_frames, n_atoms[1], 3))
unit_cells =
|
np.zeros((n_frames, 6))
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_EnsembleFlexProbs [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_EnsembleFlexProbs&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerEnsembleFP).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from collections import namedtuple
import numpy as np
from numpy import arange, array, ones, diff, abs, log, exp, sqrt, r_
from numpy import sum as npsum, min as npmin, max as npmax
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, bar, legend, xlim, ylim, scatter, ylabel, \
xlabel, title, xticks, yticks
import matplotlib.dates as mdates
plt.style.use('seaborn')
np.seterr(all='ignore')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import save_plot, struct_to_dict, date_mtop
from intersect_matlab import intersect
from HistogramFP import HistogramFP
from RollPrices2YieldToMat import RollPrices2YieldToMat
from EffectiveScenarios import EffectiveScenarios
from ConditionalFP import ConditionalFP
from Stats import Stats
from ColorCodedFP import ColorCodedFP
# -
# ## Upload data
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_Stocks'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_Stocks'), squeeze_me=True)
SPX = struct_to_dict(db['SPX'])
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_VIX'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_VIX'), squeeze_me=True)
VIX = struct_to_dict(db['VIX'])
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_SwapCurve'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_SwapCurve'), squeeze_me=True)
DF_Rolling = struct_to_dict(db['DF_Rolling'])
# -
# ## Recover the invariants and the time series of the conditioning variables
# +
# invariants (S&P500 returns)
epsi = diff(log(SPX.Price_close))
# CONDITIONING VARIABLES
# 1) VIX (VIX.value)
# 2) 5years Swap Zero Rate
ZeroRates,_ = RollPrices2YieldToMat(DF_Rolling.TimeToMat, DF_Rolling.Prices)
zr5 = ZeroRates[DF_Rolling.TimeToMat == 5,:]
# merging datasets
date,_,_ = intersect(intersect(SPX.Date[1:], VIX.Date), DF_Rolling.Dates)
_, i_spx,_ = intersect(SPX.Date[1:], date)
_, i_vix,_ = intersect(VIX.Date, date)
_, i_zr,_ = intersect(DF_Rolling.Dates, date)
epsi = epsi[i_spx].reshape(1,-1)
z1 = VIX.value[i_vix].reshape(1,-1)
z2 = zr5[0,i_zr].reshape(1,-1)
t_ = len(date)
# -
# ## Compute the Flexible Probabilities conditioning on each of the two factors
# +
alpha = 0.3
# prior
lam = log(2) / 1080
prior = exp(-lam*abs(arange(t_, 0, -1))).reshape(1,-1)
prior = prior / npsum(prior)
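# sanity check (illustrative, added here): with lam = log(2)/1080 the prior weight
# halves every 1080 observations, i.e. exp(-lam*1080) equals 0.5 up to rounding
assert abs(exp(-lam*1080) - 0.5) < 1e-12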
# flex. probs conditioned on VIX (z1)
VIXcond = namedtuple('conditioner', ['Series', 'TargetValue', 'Leeway'])
VIXcond.Series = z1
VIXcond.TargetValue = np.atleast_2d(z1[0,-1])
VIXcond.Leeway = alpha
p1 = ConditionalFP(VIXcond, prior)
# flex. probs conditioned on the swap rate (z2)
ZRcond = namedtuple('conditioner', ['Series', 'TargetValue', 'Leeway'])
ZRcond.Series = z2
ZRcond.TargetValue = np.atleast_2d(z2[[0],[-1]])
ZRcond.Leeway = alpha
p2 = ConditionalFP(ZRcond, prior)
# -
# ## Compute the respective Effective Number of Scenarios and the diversity indicator
# +
# effective number of scenarios
typ = namedtuple('type','Entropy')
typ.Entropy = 'Exp'
ens1 = EffectiveScenarios(p1, typ)
ens2 = EffectiveScenarios(p2, typ)
# diversity indicator
rho2_12 = npsum(sqrt(p1*p2)) # overlap: Bhattacharyya coefficient
dd12 = sqrt(1 - rho2_12) # Hellinger distance
d1 = dd12 # Diversity
d2 = d1
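# toy check (illustrative, not part of the original exercise): for two probability
# vectors the Bhattacharyya coefficient is sum(sqrt(p*q)) and the Hellinger distance
# sqrt(1 - BC); identical vectors give BC = 1 and distance 0
_p_toy = array([0.25, 0.25, 0.5])
_q_toy = array([0.5, 0.25, 0.25])
_bc_toy = npsum(sqrt(_p_toy*_q_toy))   # ~0.96
_hellinger_toy = sqrt(1 - _bc_toy)     # ~0.21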
# -
# ## Weights of the Flexible Probabilities Ensemble Posterior
weights = r_[ens1*d1, ens2*d2]
weights = weights / npsum(weights)
# ## Optimal set of Flex. Probs as log-mixture
opt_p = exp(weights[0]*log(p1) + weights[1]*log(p2))
opt_p = opt_p /
|
npsum(opt_p)
|
numpy.sum
|
# -*- coding: utf-8 -*-
"""
======
lockin
======
This module contains classes and functions for performing digital lock-in
amplifier data analysis.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from scipy import signal, optimize
import matplotlib as mpl
import matplotlib.pyplot as plt
import h5py
import pandas as pd
import sigutils
from scipy.signal.signaltools import _centered
from kpfm.util import next_fast_len
class LockIn(object):
"""A basic digital lock-in amplifier.
Run an input signal x through a digital lock-in amplifier.
A finite impulse response (FIR) lock-in filter can be provided by
`lock` or `lock2`, or a custom FIR filter can be used by directly
calling `run`. After generating the complex lock-in output, the lock-in
can be phased by running `phase`, or `autophase`.
After phasing, the lock-in output channels are X, the in-phase channel and
Y, the out-of-phase channel.
Parameters
----------
t: array_like
Time array
x: array_like
Input signal array
fs: float
Sampling rate
Example
-------
>>> fs = 1000.0
>>> t = np.arange(1000)/fs
>>> A = 1 - 0.1 * t
>>> f = 80 + 0.1 * t
>>> x = A * np.sin(np.cumsum(f)*2*np.pi/fs)
>>> li = LockIn(t, x, fs)
We process the data with a 20 Hz bandwidth lock-in amplifier filter.
>>> li.lock(bw=20.0)
Response:
f mag dB
0.000 1.000 0.000
10.000 0.996 -0.035
20.000 0.500 -6.022
40.025 0.000 -91.020
80.051 0.000 -113.516
500.000 0.000 -204.987
The lock-in amplifier automatically infers the reference frequency.
The printed response shows the lock-in amplifier gain at different
frequencies. For the output to be valid the gain at the reference frequency
must be very small (-60 dB or smaller).
We phase the lock-in amplifier output, and then have the lock-in variables
available for use.
>>> li.phase()
>>> li('t') # Shortcut for accessing masked version of the signal.
"""
def __init__(self, t, x, fs=None):
self.t = t
self.x = x
if fs is not None:
self.fs = fs
else:
self.fs = 1/np.mean(np.gradient(t))
self.f0_est = freq_from_fft(self.x, self.fs)
@classmethod
def from_x(Cls, x, fs, t0=0):
"""Generate the time array internally."""
t = t0 + np.arange(x.size) / fs
return Cls(t, x, fs)
def __call__(self, key):
"""Shorthand for validly masked section of any data array."""
return getattr(self, key)[self.m]
def __repr__(self):
f0 = getattr(self, 'f0', self.f0_est)
return "LockIn(f0={})".format(f0)
def run(self, f0=None, fir=None):
"""Run the lock-in amplifier at reference frequency ``f0``,
using the finite impulse response filter ``fir``.
"""
if f0 is None:
self.f0 = f0 = self.f0_est
else:
self.f0 = f0
if fir is not None:
self.fir = fir
self.z = z = signal.fftconvolve(self.x * np.exp(-2j*np.pi*f0*self.t),
2*self.fir,
"same")
n_fir = self.fir.size
indices = np.arange(self.t.size)
# Valid region mask
# This is borrowed explicitly from scipy.signal.sigtools.fftconvolve
self.m = m = np.zeros_like(self.t, dtype=bool)
mask_indices = _centered(indices, self.t.size - n_fir + 1)
if n_fir % 2 == 0:
mask_indices += 1
self.m[mask_indices] = True
self.A = abs(self.z)
self.phi = np.angle(self.z)
def lock(self, bw=None, f0=None, bw_ratio=0.5, coeff_ratio=9., coeffs=None,
window='blackman'):
"""Standard, windowed finite impulse response filter."""
t = self.t
fs = self.fs
if f0 is None:
self.f0 = f0 = self.f0_est
else:
self.f0 = f0
if bw is None:
if bw_ratio > 1:
raise ValueError("Bandwidth ratio 'bw_ratio' must be < 1 (bw_ratio={}".format(bw_ratio))
bw = bw_ratio * f0 / (self.fs/2)
else:
bw = bw / (self.fs/2)
if coeffs is None:
coeffs = round(coeff_ratio / bw, 0)
if coeffs > self.x.size:
raise ValueError(
"""No valid output when 'coeffs' > t.size (coeffs: {}, t.size: {}).
Reduce coeffs by increasing bw, bw_ratio, or decreasing coeff_ratio,
or provide more data.""".format(coeffs, t.size))
self.fir = b = signal.firwin(coeffs, bw, window=window)
w, rep = signal.freqz(b, worN=np.pi*np.array([0., bw/2, bw, f0/self.fs, f0/(self.fs/2.), 1.]))
print("Response:")
_print_magnitude_data(w, rep, fs)
self.run(f0=f0)
def lock2(self, f0=None, fp_ratio=0.1, fc_ratio=0.4, coeff_ratio=8,
fp=None, fc=None, coeffs=None, window='blackman',
print_response=True):
t = self.t
fs = self.fs
if f0 is None:
self.f0 = f0 = self.f0_est
else:
self.f0 = f0
if fp is None:
fp = fp_ratio * f0
if fc is None:
fc = fc_ratio * f0
self.fir = b = lock2(f0, fp, fc, fs, coeff_ratio, coeffs, window,
print_response=print_response)
if coeffs > self.x.size:
raise ValueError(
"""No valid output when 'coeffs' > t.size (coeffs: {}, t.size: {}).
Reduce coeffs by increasing bw, bw_ratio, decreasing coeff_ratio,
or provide more data.""".format(coeffs, t.size))
self.run(f0=f0)
def lock_butter(self, N, f3dB, t_exclude=0, f0=None, print_response=True):
"""Butterworth filter the lock-in amplifier output"""
t = self.t
fs = self.fs
nyq = fs / 2.
f3dB = f3dB / nyq
self.iir = ba = signal.iirfilter(N, f3dB, btype='low')
if f0 is None:
self.f0 = f0 = self.f0_est
self.z = z = signal.lfilter(self.iir[0], self.iir[1], self.z)
# TODO: Fix accounting on final / initial point
m = self.m
self.m = self.m & (t >= (t[m][0] + t_exclude)) & (t < (t[m][-1] - t_exclude))
self.A = abs(self.z)
self.phi = np.angle(self.z)
if print_response:
w, rep = signal.freqz(self.iir[0], self.iir[1],
worN=np.pi*np.array([0., f3dB/2, f3dB,
0.5*f0/nyq, f0/nyq, 1.]))
print("Response:")
_print_magnitude_data(w, rep, fs)
def _output_df_X_Y(self):
"""Helper function for outputting frequency shift
and lock-in X, Y channels after phasing."""
self.df = np.gradient(self.dphi) * self.fs / (2*np.pi)
self.Z = np.exp(-1j*self.phi_fit) * self.z
self.X = self.Z.real
self.Y = self.Z.imag
def manual_phase(self, phi0, f0corr=None):
"Manually phase the lock-in output with phase phi0 (in radians)."
self.phi0 = phi0
if f0corr is not None:
self.f0corr = f0corr
delta_f0 = f0corr - self.f0
else:
self.f0corr = self.f0
delta_f0 = 0.0
self.phi_fit = self.t * delta_f0 * 2 * np.pi + self.phi0
self.dphi = np.unwrap(((self.phi - self.phi_fit + np.pi) % (2*np.pi))
- np.pi)
self._output_df_X_Y()
def autophase(self, ti=None, tf=None, unwrap=False, x0=[0., 0.], adjust_f0=True):
t = self.t
m = self.m
z = self.z
if unwrap:
phi = np.unwrap(self.phi)
else:
phi = self.phi
if ti is None and tf is None:
mask = m
elif ti is not None and tf is None:
mask = m & (t >= ti)
elif ti is None and tf is not None:
mask = m & (t < tf)
else:
mask = m & (t >= ti) & (t < tf)
self.mb = mb = auto_phase(t[mask], phi[mask], x0, adjust_f0=adjust_f0)
self.phi0 = mb[-1]
self.phi_fit = np.polyval(mb, t)
self.dphi = np.unwrap((
(self.phi - self.phi_fit + np.pi) % (2*np.pi)) - np.pi)
if adjust_f0:
self.f0corr = self.f0 + mb[0] / (2*np.pi)
else:
self.f0corr = self.f0
self._output_df_X_Y()
def phase(self, ti=None, tf=None, weight=True, adjust_f0=True):
t = self.t
m = self.m
z = self.z
poly_order = int(adjust_f0)
if ti is None and tf is None:
mask = m
elif ti is not None and tf is None:
mask = m & (t >= ti)
elif ti is None and tf is not None:
mask = m & (t < tf)
else:
mask = m & (t >= ti) & (t < tf)
phi = np.unwrap(self.phi[mask])
std = np.std(self.phi[mask])
phi_norm = phi / std
try:
if weight:
A = abs(z[mask]) / np.std(abs(z[mask]))
self.mb = mb = np.polyfit(t[mask], phi_norm, poly_order, w=A) * std
else:
self.mb = mb = np.polyfit(t[mask], phi_norm, poly_order) * std
except TypeError:
print(t)
print(ti)
print(tf)
raise
self.phi_fit = np.polyval(mb, t)
self.dphi = np.unwrap(((self.phi - self.phi_fit + np.pi) % (2*np.pi))
- np.pi)
self.phi0 = mb[-1]
if adjust_f0:
self.f0corr = self.f0 + mb[0] / (2*np.pi)
else:
self.f0corr = self.f0
self._output_df_X_Y()
def decimate(self, factor=None):
if factor is None:
factor = int(self.fs//self.f0)
self.dec_t = self.t[self.m][::factor]
self.dec_phi = self.dphi[self.m][::factor]
self.dec_A = self.A[self.m][::factor]
self.dec_df = self.df[self.m][::factor]
self.dec_f0 = self.f0
self.dec_fs = self.fs/factor
self.dec_z = self.z[self.m][::factor]
def phase_dec(self, ti=None, tf=None, weight=True):
t = self.dec_t
m = np.ones_like(self.dec_z, dtype=bool)
z = self.dec_z
if ti is None and tf is None:
mask = m
elif ti is not None and tf is None:
mask = m & (t >= ti)
elif ti is None and tf is not None:
mask = m & (t < tf)
else:
mask = m & (t >= ti) & (t < tf)
phi = np.unwrap(np.angle(z))
std = np.std(phi[mask])
phi_norm = phi / std
try:
if weight:
A = abs(z[mask]) / np.std(abs(z[mask]))
self.mb = mb = np.polyfit(t[mask], phi_norm[mask], 1, w=A) * std
else:
self.mb = mb = np.polyfit(t[mask], phi_norm[mask], 1) * std
except TypeError:
print(t)
print(ti)
print(tf)
raise
phi_fit = np.polyval(mb, t)
dphi = np.unwrap(((phi - phi_fit + np.pi) % (2*np.pi)) - np.pi)
df = np.gradient(dphi) * self.dec_fs / (2*np.pi)
self.f0_dec_direct = self.f0 + mb[0] / (2*np.pi)
def absolute_phase(self, mask, guess=0.0):
"""Perform a curve fit """
phi = self.phi[mask] + self.t[mask]*2*np.pi*self.f0corr
popt, pcov = curve_fit(lambda phi, phi0:
self.A[mask]*
|
np.cos(phi+phi0)
|
numpy.cos
|
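# Hedged sketch (not the class's exact filter design): the core lock-in step used
# by LockIn.run above -- mix the signal with a complex reference at f0, then
# low-pass filter. The firwin parameters below are illustrative assumptions.
import numpy as np
from scipy import signal

fs = 1000.0
t = np.arange(4000) / fs
f0 = 80.0
x = 0.5 * np.cos(2 * np.pi * f0 * t + 0.3)   # test tone: amplitude 0.5, phase 0.3 rad

fir = signal.firwin(401, 20.0 / (fs / 2), window="blackman")
z = signal.fftconvolve(x * np.exp(-2j * np.pi * f0 * t), 2 * fir, mode="same")

interior = slice(500, -500)                  # avoid filter edge effects
print(np.median(np.abs(z[interior])))        # ~0.5 (amplitude)
print(np.median(np.angle(z[interior])))      # ~0.3 (phase, rad)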
import numpy as np
from base_class import TabularRLModel
from schedules import LinearSchedule, ExponentialSchedule
from gym.spaces import Tuple, Discrete
import cloudpickle as pickle
remove = ['hvalues', 'qvalues', 'policy']
class QTabularRLModel(TabularRLModel):
def __init__(
self,
policy,
env,
gamma=0.99,
learning_rate=1e-2,
buffer_size=None,
exploration_type='linear',
exploration_frac=None,
exploration_ep=250,
exploration_initial_eps=1.,
exploration_final_eps=0.05,
double_q=False,
policy_kwargs=None,
seed=None,
intent=False
):
super(QTabularRLModel, self).__init__(
policy,
env,
gamma,
learning_rate,
buffer_size,
exploration_type,
exploration_frac,
exploration_ep,
exploration_initial_eps,
exploration_final_eps,
double_q,
policy_kwargs,
seed,
intent
)
self._aliases()
def _aliases(self):
self.qvalues = self.policy.qvalues
if self.policy.intent:
self.hvalues = self.policy.hvalues
def learn(self, total_timesteps=None, total_episodes=None, log_interval=100, ckpt_interval=100, ckpt_path=None):
last_100rewards = np.zeros(100)
last_100rewards[:] = np.NaN
if total_timesteps and total_episodes:
raise ValueError("Only one of total_timesteps or total_episodes can be specified")
if ckpt_path is None:
print('Checkpoint path is not provided, no intermediate models will be saved')
loop_type = 'episode' if total_episodes else 'timesteps'
loop_var = total_timesteps if total_timesteps is not None else total_episodes
# if self.exploration_frac is None:
# self.exploration = LinearSchedule(frac=self.exploration_ep,
# initial=self.exploration_initial_eps,
# final=self.exploration_final_eps)
# else:
# self.exploration = LinearSchedule(frac=self.exploration_frac * loop_var,
# initial=self.exploration_initial_eps,
# final=self.exploration_final_eps)
if self.exploration_type == 'linear':
self.exploration = LinearSchedule(
frac=self.exploration_frac * loop_var,
initial=self.exploration_initial_eps,
final=self.exploration_final_eps)
elif self.exploration_type == 'exponential':
self.exploration = ExponentialSchedule(
frac=self.exploration_frac,
initial=self.exploration_initial_eps,
final=self.exploration_final_eps)
train = True
done = False
step = 0
ep_reward = 0
obs = self.env.reset()
while train:
if loop_type == 'episode':
update_eps = self.exploration.value(self.ep_done)
if loop_type == 'timesteps':
update_eps = self.exploration.value(self.elapsed_steps)
if np.random.random_sample() > update_eps:
action, value = self.policy.predict(obs, deterministic=True)
else:
action, value = self.policy.predict(obs, deterministic=False)
next_obs, reward, done, info = self.env.step(action)
# print(step, next_obs, self.qvalues[next_obs])
# argmax_a = np.argmax(self.qvalues[next_obs])
# argmax_a, _ = self.policy.predict(obs, deterministic=True)
argmax_a = np.argmax(self.qvalues[next_obs])
if isinstance(self.observation_space, Tuple):
# print(obs, action)
expected_reward = reward + self.gamma*self.qvalues[next_obs + (argmax_a,)]*(1-int(done))-self.qvalues[obs + (action,)]
self.qvalues[obs + (action,)] += self.learning_rate * expected_reward
if self.policy.intent:
intent_update = np.zeros(self.qvalues.shape)
intent_update[obs + (action,)] += 1
expected_intent = intent_update + self.gamma * self.hvalues[next_obs + (argmax_a,)] * (1-int(done)) - self.hvalues[obs + (action,)]
self.hvalues[obs + (action,)] = self.hvalues[obs + (action,)] + self.learning_rate * expected_intent
if isinstance(self.observation_space, Discrete):
expected_reward = reward + self.gamma*np.max(self.qvalues[next_obs])*(1-int(done))-self.qvalues[obs, action]
self.qvalues[obs, action] += self.learning_rate * expected_reward
if self.policy.intent:
intent_update =
|
np.zeros(self.qvalues.shape)
|
numpy.zeros
|
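# Minimal sketch of the tabular Q-learning update applied above, on an invented
# 2-state / 2-action toy transition (illustration only, not the class API).
import numpy as np

Q = np.zeros((2, 2))
gamma, lr = 0.99, 0.1

s, a, r, s_next, done = 0, 1, 1.0, 1, False   # one hypothetical transition
td_target = r + gamma * np.max(Q[s_next]) * (1 - int(done))
Q[s, a] += lr * (td_target - Q[s, a])
print(Q)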
"""
This is the Signal Processor that extracts data from images from the fluoro-sequencer microscope.
Nomenclature
Field
One position of the X/Y stage
Channel
One wavelength of measured light
Cycle
One chemical cycle (Pre, Mock or Edman)
Anomaly
An area of an image that has a problem (dust, etc)
Raw image
Unmodified images
Balance image
A raw image scaled to compensate for uneven illumination
Aligned field stack
The stage is not perfect and may not return to exactly the same position
each cycle, a computational alignment correction finds the optimal X/Y translation.
Border
The extra dead space that is added to an aligned field stack to ensure
that all images are visible.
Composite image
When 1+ of the channels/cycles for a field are stacked
Background removed image
When an image has the background subtracted. This may
result in negative values so be careful when displaying.
Fiducial images
Any image (may just be a transform of raw data) that is intended only
to enhance the alignment algorithm.
Peak
A spot found that, presumably, is generated by a single molecule.
Radmat (aka "Radiometry Matrix")
A matrix such that each row is a peak and each column is a measurement of brightness
(using area-under-the-curve of the peak) for each channel/cycle.
Sometimes stored flatten (n_cycles * n_channel)
cy_ims
A set of images through all cycles for one field/channel.
chcy_ims
A set of images for all channel/cycles for one field.
roi
A Region Of Interest
"""
import numpy as np
import scipy
import cv2
from scipy.stats import norm
import math
import pandas as pd
from scipy import ndimage
from munch import Munch
from plaster.tools.utils.utils import safe_list_get, is_power_of_2
from plaster.tools.zap import zap
from plaster.tools.image import imops
from plaster.tools.image.coord import XY, YX, WH, HW, ROI, Rect
from plaster.tools.schema import check
from plaster.run.sigproc_v1.sigproc_v1_result import SigprocV1Result
def _flatten(s):
return np.reshape(s, (s.shape[0], s.shape[1] * s.shape[2]))
def _quality(im):
"""
Measure the quality of an image by spatial low-pass filter.
High quality images are one where there is very little low-frequency
(but above DC) bands.
"""
a = np.copy(im)
a -= np.mean(a)
power = np.abs(np.fft.fftshift(np.fft.fft2(a)))
power[power == 0] = 1
cen = YX(power.shape) / 2
dim_half = 3
dim = HW(dim_half * 2 + 1, dim_half * 2 + 1)
roi = ROI(cen, dim, center=True)
im = power[roi]
eigen = imops.eigen_moments(im)
score = power.sum() / np.sqrt(eigen.sum())
return score
def _find_anomalies(cy_ims, iqr_bounds):
"""
Given a cy stack of images for a field, find the anomalies.
Arguments:
cy_ims: array (n_cycles, height, width)
iqr_bounds: The inter-quartile-range to use on the distribution to
things outside of the normal range.
Returns:
List of bad rects for each cycle (List[n_cycles] of lists)
"""
import skimage # Defer slow imports
import skimage.transform # Defer slow imports
from scipy.stats import iqr # Defer slow imports
def slice_im_into_squares(im, sub_dim):
# Crazy numpy kung-foo. By reshaping the image into a four-dimensional
# array I can then use the np.mean on the inner dimensions.
r, c = im.shape
assert r == c and r % sub_dim == 0
im = im.reshape(r // sub_dim, sub_dim, r // sub_dim, sub_dim)
# At this point, im is now 4-dimensional like: 256, 2, 256, 2
# But we want the 2, 2 next to each other for simplicity so swap the inner axes
im = im.swapaxes(1, 2) # Now its shape is: 256, 256, 2, 2.
return im
def dens_squares(frame, subdim):
assert frame.ndim == 2
squares = slice_im_into_squares(frame, subdim)
# squares is like: 256, 256, 2, 2. So we need the mean of the last two axes
return np.mean(squares, axis=(2, 3))
assert cy_ims.ndim == 3 and cy_ims.shape[1] == cy_ims.shape[2]
subdim = 4
n_cycles = cy_ims.shape[0]
dens_per_cycle = np.array(
[dens_squares(cy_ims[cy], subdim) for cy in range(n_cycles)]
)
# Use IQR to set the threshold of what will be considered an anomaly across all cycles
den_threshold = np.median(dens_per_cycle) + scipy.stats.iqr(
dens_per_cycle, rng=(100 - iqr_bounds, iqr_bounds)
)
# FIND the bad rects for each cycle
bad_rects_by_cycle = []
bad_by_cycle = []
mask_by_cycle = []
for cy in range(n_cycles):
bad_rects = []
im = cy_ims[cy]
dens = dens_per_cycle[cy]
bad_mask = np.zeros_like(dens)
bad_mask[dens > den_threshold] = 1
# EXPAND the bad areas by erosion and dilate.
# Erosion gets rid of the single-pixel hits and dilation expands the bad areas
kernel = np.ones((3, 3), np.uint8)
mask = cv2.erode(bad_mask, kernel, iterations=1)
mask = cv2.dilate(mask, kernel, iterations=3)
scale = im.shape[0] // mask.shape[0]
full_size_mask = skimage.transform.rescale(
mask, scale=scale, multichannel=False, mode="constant", anti_aliasing=False
).astype(bool)
# FIND rect contours of bad areas
contours, hierarchy = cv2.findContours(
full_size_mask.astype("uint8"), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
)
for cnt in contours:
bad_rects += [cv2.boundingRect(cnt)]
bad_rects_by_cycle += [bad_rects]
bad_by_cycle += [bad_mask]
mask_by_cycle += [mask]
return bad_rects_by_cycle, dens_per_cycle, bad_by_cycle, mask_by_cycle
def _mask_anomalies(cy_ims, bad_rects_by_cycle):
"""
Given a cycle stack of images and the list of bad rects,
fill all these rects with background noise so that the aligner
won't be confused by those anomalies.
Arguments:
cy_ims: array (n_cycles, height, width)
bad_rects_by_cycle: List of bad rects for each cycle
Returns:
A copy of cy_ims with the bad rects masked with noise
"""
assert cy_ims.ndim == 3 and cy_ims.shape[1] == cy_ims.shape[2]
n_cycles, _, _ = cy_ims.shape
masked_ims = np.zeros_like(cy_ims)
for cy in range(n_cycles):
src_im = cy_ims[cy]
bad_rects = bad_rects_by_cycle[cy]
# MAKE a mask_im with 0 inside bad rects, 1 otherwise
mask_im = np.ones_like(src_im)
for rect in bad_rects:
imops.fill(
mask_im, loc=XY(rect[0], rect[1]), dim=WH(rect[2], rect[3]), val=0
)
# FIND the characteristics of a normal distribution that fits the
# data that is not masked out (that is, we don't want the anomalies
# in this distribution). If src_im is entirely masked, mean=std=0.
# TASK: This could be accelerated by subsampling.
mean = std = 0
if np.any(mask_im):
mean, std = norm.fit(src_im[mask_im > 0])
bg_noise = norm.rvs(loc=mean, scale=std, size=src_im.shape, random_state=None)
masked_ims[cy] = np.where(mask_im < 1, bg_noise, src_im)
return masked_ims
def _roi_from_edges(b, t, l, r):
return ROI(loc=YX(b, l), dim=HW(t - b, r - l))
def _hat_masks(hat_rad, brim_rad):
"""
Returns hat and brim boolean masks. brim_rad is from the center, _not_ in addition to hat_rad
"""
mea = 2 * brim_rad + 1
hat = imops.generate_circle_mask(hat_rad, mea)
brim = imops.generate_circle_mask(brim_rad, mea)
brim = brim & ~hat
return hat, brim
def _remove_nans_from_radiometry(signal, noise, localbg):
assert signal.shape == noise.shape
flat_sig = _flatten(signal)
flat_noi = _flatten(noise)
flat_lbg = _flatten(localbg)
nan_rows = np.any(
np.isnan(flat_sig) | np.isnan(flat_noi) | np.isnan(flat_lbg), axis=1
)
rows = np.argwhere(nan_rows)
signal[rows, :, :] = 0.0
noise[rows, :, :] = 0.0
localbg[rows, :, :] = 0.0
def _radiometry(im, loc, dim, bg_bias, localbg_mask):
"""
Arguments:
im: 2D image (some field, channel, cycle)
loc: the (y,x) coordinates of the peak
dim: the integer dimensions of the mask
bg_bias: the background bias to remove
Returns:
signal, noise (or NAN if something goes wrong). Both are always non-negative
        localbg: The median of a surrounding area
    After a careful analysis considering three types of radiometers, the Kernel Method was chosen.
The others considered were:
* Hat method where the signal is the sum(hat_pixel - brim_median).
            * This is fast but scores poorly compared to the Kernel Method,
              and it cannot estimate noise.
* Fitted method where a 2D Gaussian is fit to the data.
* This method is very expensive, fails to converge in various edge cases,
and doesn't score any better than the Kernel Method.
* Kernel method (chosen) where a center-of-mass calculation is used
to generate a unit-area Gaussian Kernel which is then used to
weigh the peak data. Noise can be estimated by squaring that mask and
then computing residuals.
"""
assert localbg_mask.shape[0] == dim[0] and localbg_mask.shape[1] == dim[1]
roi = ROI(loc, dim, center=True)
nans = (np.nan, np.nan, np.nan)
# REJECT if too near edges
if (
roi[0].start < 0
or roi[1].start < 0
or roi[0].stop >= im.shape[0]
or roi[1].stop >= im.shape[1]
):
return nans
peak_im = im[roi[0], roi[1]]
localbg = np.median(peak_im[localbg_mask])
# CENTER by finding the Center Of Mass (COM)
positive = np.where(peak_im > 0, peak_im, 0.0)
if positive.sum() == 0.0:
# Avoid COM warning from library by testing for all zeros
return nans
com = ndimage.measurements.center_of_mass(positive)
offset_y = com[0] - int(peak_im.shape[0] / 2)
offset_x = com[1] - int(peak_im.shape[1] / 2)
if not (-2 <= offset_y <= 2 and -2 <= offset_x <= 2):
        # Data is so poor that the center-of-mass is outside of reasonable bounds
return nans
# REMOVE the background
peak_im = (peak_im - bg_bias).clip(min=0)
kernel = imops.generate_gauss_kernel(1.0, offset_x, offset_y, mea=peak_im.shape[0])
kernel_squared = kernel * kernel
kernel_squared_sum = kernel_squared.sum()
# WEIGH the data with the kernel and then normalize by the kernel_squared_sum to estimate signal
weighted = kernel * peak_im
signal = weighted.sum() / kernel_squared_sum
# COMPUTE the noise by examining the residuals
residuals = peak_im - signal * kernel
var_residuals = np.var(residuals)
noise = np.sqrt(var_residuals / kernel_squared_sum)
assert noise >= 0.0
return max(0, signal), noise, localbg
def _step_1_measure_quality(raw_chcy_ims, sigproc_params):
n_outchannels, n_inchannels, n_cycles, dim = sigproc_params.channels_cycles_dim
quality = np.zeros((n_inchannels, n_cycles))
for cy in range(n_cycles):
for ch in range(n_inchannels):
quality[ch, cy] = _quality(raw_chcy_ims[ch, cy])
return quality
def _step_2a_mask_anomalies(raw_chcy_ims, sigproc_params):
"""
Find anomalies and fill them with background noise so that
they won't fool the aligner.
Arguments:
raw_chcy_ims: (n_channel, n_cycles, dim, dim)
sigproc_params: SigprocParams
Returns:
raw_mask_rects: a list of bad rect lists (channel, cycle)
i.e. for each channel there is a list inside of which is a list per cycle
inside of which is a list of bad rects
anomaly_removed_chcy_ims: Copies of the raw_chcy_ims with the bad areas
filled in with background-like noise so that the aligner won't get confused.
"""
n_outchannels, n_inchannels, n_cycles, dim = sigproc_params.channels_cycles_dim
anom_iqr_cutoff = sigproc_params.anomaly_iqr_cutoff
raw_mask_rects = [None] * n_inchannels
dst_chcy_ims = np.copy(raw_chcy_ims)
if anom_iqr_cutoff is not None:
for ch in range(n_inchannels):
raw_mask_rects[ch], _, _, _ = _find_anomalies(
raw_chcy_ims[ch, :], anom_iqr_cutoff
)
dst_chcy_ims[ch, :] = _mask_anomalies(
raw_chcy_ims[ch, :], raw_mask_rects[ch]
)
return raw_mask_rects, dst_chcy_ims
def _step_2b_find_bg_median(chcy_ims, sigproc_params):
"""
Computes the median of the image on each channel/cycle
Returns:
medians_by_ch_cy: A matrix of median values
"""
n_outchannels, n_inchannels, n_cycles, dim = sigproc_params.channels_cycles_dim
medians_by_ch_cy = np.zeros((n_inchannels, n_cycles))
for cy in range(n_cycles):
for ch in range(n_inchannels):
medians_by_ch_cy[ch, cy] = np.median(chcy_ims[ch, cy])
return medians_by_ch_cy
def _step_2c_composite_channels(chcy_ims, medians_by_ch_cy, sigproc_params):
"""
Merges specified channels for each cycle after background subtraction.
The channel list is sometimes partial because there are some
experiments where data was recorded in a channel that is bad.
Returns:
composite cy_ims (channels merged) after background subtraction
"""
n_outchannels, n_inchannels, n_cycles, dim = sigproc_params.channels_cycles_dim
dst_cy_ims = np.zeros((n_cycles, dim, dim))
for cy in range(n_cycles):
for inch in range(n_inchannels):
if inch in sigproc_params.channel_indices_for_alignment:
imops.accum_inplace(
dst_cy_ims[cy],
(chcy_ims[inch, cy] - medians_by_ch_cy[inch, cy]).clip(min=0),
)
return dst_cy_ims
def _step_2_align(raw_chcy_ims, sigproc_params):
"""
Each cycle the stage moves, but it is not perfectly accurate when it returns
to the same field.
The stage does _not_ move between channels, therefore
the channels for each field over each cycle can be merged to improve alignment.
Returns:
A DataFrame of all the results, most importantly the shift_y, shift_x
needed to each cycle to align the images.
ch_merged_cy_ims
"""
n_outchannels, n_inchannels, n_cycles, dim = sigproc_params.channels_cycles_dim
raw_mask_rects, anomaly_removed_ims = _step_2a_mask_anomalies(
raw_chcy_ims, sigproc_params
)
medians_by_ch_cy = _step_2b_find_bg_median(raw_chcy_ims, sigproc_params)
ch_merged_cy_ims = _step_2c_composite_channels(
anomaly_removed_ims, medians_by_ch_cy, sigproc_params
)
# GENERATE fiducial_ims
kernel = imops.generate_gauss_kernel(1.0)
kernel = kernel - kernel.mean() # Eliminate DC bias
fiducial_ims = np.array(
[imops.convolve(im.clip(min=0), kernel) for im in ch_merged_cy_ims]
)
alignment_offsets = imops.align(fiducial_ims)
ch_out_to_in = sigproc_params.output_channel_to_input_channel
field_df = pd.DataFrame(
[
dict(
cycle_i=cy,
shift_y=off[0],
shift_x=off[1],
channel_i=outch,
bg_median=medians_by_ch_cy[ch_out_to_in(outch), cy],
n_mask_rects=len(raw_mask_rects[ch_out_to_in(outch)][cy]),
mask_area=sum(
[
rect[2] * rect[3]
for rect in raw_mask_rects[ch_out_to_in(outch)][cy]
]
),
)
for outch in range(n_outchannels)
for cy, off in zip(range(n_cycles), alignment_offsets)
]
)
return field_df, ch_merged_cy_ims, raw_mask_rects
def _step_3_composite_aligned_images(
field_df, ch_merged_cy_ims, raw_chcy_ims, sigproc_params
):
"""
Generate aligned images and composites
"""
n_outchannels, n_inchannels, n_cycles, dim = sigproc_params.channels_cycles_dim
# Note offsets are the same for each channel, and we only want one set of
# offsets because we're aligning channel-merged images.
offsets = [
XY(row.shift_x, row.shift_y)
for row in field_df[field_df.channel_i == 0]
.set_index("cycle_i")
.sort_index()[["shift_y", "shift_x"]]
.itertuples()
]
# Needs to be a list of Coords
median_by_ch_cy = (
field_df.set_index(["channel_i", "cycle_i"])
.sort_index()
.bg_median.values.reshape((n_outchannels, n_cycles))
)
chcy_composite_im, border_size = imops.composite(
ch_merged_cy_ims,
offsets,
start_accum=sigproc_params.peak_find_start,
limit_accum=sigproc_params.peak_find_n_cycles,
)
# GENERATE aligned images in the new coordinate system
aligned_dim = HW(chcy_composite_im.shape)
aligned_ims = np.zeros((n_outchannels, n_cycles, aligned_dim.h, aligned_dim.w,))
aligned_raw_chcy_ims = np.zeros_like(aligned_ims)
border = YX(border_size, border_size)
for outch in range(n_outchannels):
inch = sigproc_params.output_channel_to_input_channel(outch)
for cy, offset in zip(range(n_cycles), offsets):
imops.accum_inplace(
aligned_raw_chcy_ims[outch, cy],
src=raw_chcy_ims[inch, cy],
loc=border - offset,
)
imops.accum_inplace(
aligned_ims[outch, cy],
src=(raw_chcy_ims[inch, cy] - median_by_ch_cy[outch, cy]).clip(min=0),
loc=border - offset,
)
# BLACK out the borders by clipping in only pixels that are in every cycle
l = border_size - field_df.shift_x.min()
r = aligned_dim.w - border_size - field_df.shift_x.max()
b = border_size - field_df.shift_y.min()
t = aligned_dim.h - border_size - field_df.shift_y.max()
roi = _roi_from_edges(b, t, l, r)
aligned_roi_rect = Rect(b, t, l, r)
aligned_composite_chcy_im = np.zeros(aligned_dim)
aligned_composite_chcy_im[roi] = chcy_composite_im[roi]
med =
|
np.median(chcy_composite_im[roi])
|
numpy.median
|
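# Hedged sketch of the kernel-method radiometry described in _radiometry above:
# weigh a peak image with a unit-area Gaussian kernel and normalize by the sum of
# the squared kernel. The kernel built here is a plain NumPy stand-in for
# imops.generate_gauss_kernel, assumed only for illustration.
import numpy as np

mea = 11
yy, xx = np.mgrid[0:mea, 0:mea] - mea // 2
kernel = np.exp(-(xx**2 + yy**2) / 2.0)
kernel /= kernel.sum()                                    # unit-area kernel

true_signal = 1000.0
peak_im = true_signal * kernel + np.random.normal(0.0, 1.0, size=(mea, mea))

kernel_squared_sum = (kernel * kernel).sum()
signal = (kernel * peak_im).sum() / kernel_squared_sum    # estimate of true_signal
residuals = peak_im - signal * kernel
noise = np.sqrt(np.var(residuals) / kernel_squared_sum)
print(signal, noise)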
import pandas as pd
import numpy as np
import os
from .constants import random_index
from importlib import resources
import io
def total(matrix, num_of_params):
    '''total function sums the columns of the matrix and returns a 1d-array
Args:
matrix: It is the pairwise comparison matrix after taking user input
num_of_params: Number of factors taken for comparison'''
tot = np.full((num_of_params), 0, dtype=float)
for i in range(num_of_params):
for j in range(num_of_params):
tot[i] = tot[i] + matrix[j, i]
return(tot)
def normalization(sum_of_column, matrix, num_of_params):
    '''normalization function normalizes the matrix using the output from the total function and returns the normalized matrix
    Args:
        sum_of_column: It is the sum of each column of the pairwise comparison matrix, i.e. the output from the total function
        matrix: It is the pairwise comparison matrix after taking user input
num_of_params: Number of factors taken for comparison'''
norm =
|
np.full((num_of_params, num_of_params), 1, dtype=float)
|
numpy.full
|
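# Short usage sketch for the AHP helpers above, assuming the usual normalization
# (each entry divided by its column sum). The 3x3 matrix values are illustrative.
import numpy as np

m = np.array([[1.0, 3.0, 0.5],
              [1/3., 1.0, 0.25],
              [2.0, 4.0, 1.0]])
col_sums = m.sum(axis=0)          # what total() computes, vectorized
norm = m / col_sums               # assumed behaviour of normalization()
weights = norm.mean(axis=1)       # typical AHP priority vector
print(weights)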
# Adapted from http://www.3dkingdoms.com/weekly/weekly.php?a=3
# Input:
# The Axis-Aligned bounding box is defined by B1 and B2
# B1:[x,y,z], the smallest values of X, Y, Z
# B2:[x,y,z], the largest values of X, Y, Z
# L1: [x,y,z], point 1 on the line
# L2: [x,y,z], point 2 on the line
#
# Output:
# return True if line (L1, L2) intersects with the box (B1, B2)
import numpy as np
class Point:
def __init__(self, x,y,z):
self.x=np.array(x)
self.y=np.array(y)
self.z=
|
np.array(z)
|
numpy.array
|
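# Hedged alternative sketch: a slab-method segment/AABB intersection test.
# This is not the 3dkingdoms side-test port the module presumably implements,
# just a compact NumPy illustration of the same geometric question.
import numpy as np

def segment_intersects_aabb(L1, L2, B1, B2):
    L1, L2, B1, B2 = (np.asarray(v, dtype=float) for v in (L1, L2, B1, B2))
    d = L2 - L1
    tmin, tmax = 0.0, 1.0
    for i in range(3):
        if np.isclose(d[i], 0.0):
            if L1[i] < B1[i] or L1[i] > B2[i]:
                return False
        else:
            t1, t2 = sorted(((B1[i] - L1[i]) / d[i], (B2[i] - L1[i]) / d[i]))
            tmin, tmax = max(tmin, t1), min(tmax, t2)
            if tmin > tmax:
                return False
    return True

print(segment_intersects_aabb([0, 0, 0], [2, 2, 2], [1, 1, 1], [3, 3, 3]))  # True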
import numpy as np
import matplotlib.pyplot as plt
import itertools
import copy
from os import path, getcwd, makedirs
from plotter_params import plot_setup
from qutip import sigmax, sigmay, sigmaz, qeye, basis, Qobj, tensor, cnot, gate_expand_1toN, gate_expand_2toN
""" Testing whether the figure of merit F ~ Tr[U(rho_k)W_k']Tr[E(rho_k)W_k'] preserves
the ordering of circuits (i.e. if one circuit implements a higher-fidelity unitary than another, is this reflected in the values for the figure of merit?).
"""
def get_pauli_basis(nqubits):
iters = [''.join(i) for i in itertools.product('0123', repeat=nqubits)]
p_ops = {'0': qeye(2), '1': sigmax(), '2': sigmay(), '3': sigmaz()}
basis = []
for item in iters:
_ops = []
for k in item:
_ops.append(p_ops[k])
basis.append(tensor(_ops))
return basis
def get_state_basis(nqubits):
iters = [''.join(i) for i in itertools.product('0123', repeat=nqubits)]
# s_ops = {'0': Qobj([[1, 0], [0, 0]]),
# '1': Qobj([[0, 0], [0, 1]]),
# '2': Qobj([[0.5, 0.5], [0.5, 0.5]]),
# '3': Qobj([[0.5, -1j*0.5], [1j*0.5, 0.5]])}
a = np.sqrt(1)
c = np.sqrt(0)
e = np.sqrt(0.5)
g = 1j*np.sqrt(0.5)
b, d, f, h = [np.sqrt(1 - np.abs(i)**2) for i in (a, c, e, g)]
# h = 1j*h
# A = Qobj([[1.0, 0.0]])
# B = Qobj([[0.57714519003, 0.81664155516]])
# C = Qobj([[0.57714519003, 0.471320746 + 0.66690343j]])
# D = Qobj([[0.57714519003, 0.471320746 - 0.66690343j]])
#
# A, B, C, D = [_op/(_op*_op.dag()).tr() for _op in (A, B, C, D)]
s_ops = {'0': Qobj([[a*np.conj(a), a*np.conj(b)], [np.conj(a)*b, b*np.conj(b)]]),
'1': Qobj([[c*np.conj(c), c*np.conj(d)], [np.conj(c)*d, d*np.conj(d)]]),
'2': Qobj([[e*np.conj(e), e*np.conj(f)], [np.conj(e)*f, f*np.conj(f)]]),
'3': Qobj([[g*np.conj(g), g*np.conj(h)], [np.conj(g)*h, h*np.conj(h)]])}
# s_ops = {'0': A.dag()*A,
# '1': B.dag()*B,
# '2': C.dag()*C,
# '3': D.dag()*D
# }
basis = []
for item in iters:
_ops = []
for k in item:
_ops.append(s_ops[k])
basis.append(tensor(_ops))
return basis
def fidelity(U, V, nqubits):
basis = get_pauli_basis(nqubits)
d = 2**nqubits
sum = 0
for op in basis:
sum += (U*op*U.dag()*V*op*V.dag()).tr()
return np.abs(sum/d**3)
def figure_of_merit(U, V, nqubits):
basis = get_state_basis(nqubits)
d = 2**nqubits
sum = 0
for op in basis:
sum += (U*op*U.dag()*V*op*V.dag()).tr()
return np.abs(sum/d**2)
def generate_u3(theta, phi, lam):
u_00 = np.cos(theta/2)
u_01 = -np.exp(1j*lam)*np.sin(theta/2)
u_10 = np.exp(1j*phi)*np.sin(theta/2)
u_11 =
|
np.exp(1j*(lam + phi))
|
numpy.exp
|
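# Hedged sketch of the standard U3(theta, phi, lam) parameterization that
# generate_u3 above builds element by element, with a unitarity check; plain
# NumPy only, no qutip objects.
import numpy as np

def u3_matrix(theta, phi, lam):
    return np.array([
        [np.cos(theta / 2),                    -np.exp(1j * lam) * np.sin(theta / 2)],
        [np.exp(1j * phi) * np.sin(theta / 2),  np.exp(1j * (lam + phi)) * np.cos(theta / 2)],
    ])

U = u3_matrix(0.3, 0.7, 1.1)
print(np.allclose(U.conj().T @ U, np.eye(2)))   # True: U3 is unitary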
import os
import gdal
import numpy as np
import warnings
def main(input_folder, output_folder, Date):
# Do not show warnings
warnings.filterwarnings('ignore')
import pyWAPOR.ETLook as ETLook
import pyWAPOR.Functions.Processing_Functions as PF
import pyWAPOR.ETLook.outputs as out
# Define Date string
Date_str = "%d%02d%02d" %(Date.year, Date.month, Date.day)
# Input folder Date
input_folder_date = os.path.join(input_folder, Date_str)
############################ Define inputs ################################
#input_files
ALBEDO_filename = os.path.join(input_folder_date, "ALBEDO_%s.tif" %Date_str)
NDVI_filename = os.path.join(input_folder_date, "NDVI_%s.tif" %Date_str)
LST_filename = os.path.join(input_folder_date, "LST_%s.tif" %Date_str)
Time_filename = os.path.join(input_folder_date, "Time_%s.tif" %Date_str)
Lat_filename = os.path.join(input_folder_date, "Lat_%s.tif" %Date_str)
Lon_filename = os.path.join(input_folder_date, "Lon_%s.tif" %Date_str)
DEM_filename = os.path.join(input_folder_date, "DEM.tif")
Slope_filename = os.path.join(input_folder_date, "Slope.tif")
Aspect_filename = os.path.join(input_folder_date, "Aspect.tif")
LandMask_filename = os.path.join(input_folder_date, "LandMask.tif")
Bulk_filename =os.path.join(input_folder_date, "Bulk_Stomatal_resistance.tif")
MaxObs_filename = os.path.join(input_folder_date, "Maximum_Obstacle_Height.tif")
Pair_24_0_filename = os.path.join(input_folder_date, "Pair_24_0_%s.tif" %Date_str)
Pair_inst_0_filename = os.path.join(input_folder_date, "Pair_inst_0_%s.tif" %Date_str)
Pair_inst_filename = os.path.join(input_folder_date, "Pair_inst_%s.tif" %Date_str)
Pre_filename = os.path.join(input_folder_date, "Precipitation_%s.tif" %Date_str)
Hum_24_filename = os.path.join(input_folder_date, "qv_24_%s.tif" %Date_str)
Hum_inst_filename = os.path.join(input_folder_date, "qv_inst_%s.tif" %Date_str)
Tair_24_filename = os.path.join(input_folder_date, "tair_24_%s.tif" %Date_str)
Tair_inst_filename = os.path.join(input_folder_date,"tair_inst_%s.tif" %Date_str)
Tair_amp_filename = os.path.join(input_folder_date, "Tair_amp_%s.tif" %Date_str)
Wind_24_filename = os.path.join(input_folder_date, "wind_24_%s.tif" %Date_str)
Wind_inst_filename = os.path.join(input_folder_date, "wind_inst_%s.tif" %Date_str)
WatCol_inst_filename = os.path.join(input_folder_date, "wv_inst_%s.tif" %Date_str)
Trans_24_filename = os.path.join(input_folder_date, "Trans_24_%s.tif" %Date_str)
############################ Define outputs ###############################
# Output folder Date
output_folder_date = os.path.join(output_folder, Date_str)
if not os.path.exists(output_folder_date):
os.makedirs(output_folder_date)
#output_files
vc_filename = os.path.join(output_folder_date, "vc_%s.tif" %Date_str)
lai_filename = os.path.join(output_folder_date, "LAI_%s.tif" %Date_str)
lai_eff_filename= os.path.join(output_folder_date, "LAI_eff_%s.tif" %Date_str)
sf_soil_filename = os.path.join(output_folder_date, "sf_soil_%s.tif" %Date_str)
lat_filename= os.path.join(output_folder_date, "lat_%s.tif" %Date_str)
slope_filename= os.path.join(output_folder_date, "slope_%s.tif" %Date_str)
aspect_filename = os.path.join(output_folder_date, "aspect_%s.tif" %Date_str)
ra_24_toa_filename = os.path.join(output_folder_date, "ra_24_toa_%s.tif" %Date_str)
ws_filename = os.path.join(output_folder_date, "ws_%s.tif" %Date_str)
diffusion_index_filename = os.path.join(output_folder_date, "diffusion_index_%s.tif" %Date_str)
ra_24_filename = os.path.join(output_folder_date, "ra_24_%s.tif" %Date_str)
stress_rad_filename = os.path.join(output_folder_date, "stress_rad_%s.tif" %Date_str)
p_air_24_filename = os.path.join(output_folder_date, "p_air_24_%s.tif" %Date_str)
vp_24_filename = os.path.join(output_folder_date, "vp_24_%s.tif" %Date_str)
svp_24_filename = os.path.join(output_folder_date, "svp_24_%s.tif" %Date_str)
vpd_24_filename = os.path.join(output_folder_date, "vpd_24_%s.tif" %Date_str)
stress_vpd_filename = os.path.join(output_folder_date, "stress_vpd_%s.tif" %Date_str)
stress_temp_filename = os.path.join(output_folder_date, "stress_temp_%s.tif" %Date_str)
r_canopy_0_filename= os.path.join(output_folder_date, "r_canopy_0_%s.tif" %Date_str)
t_air_k_24_filename = os.path.join(output_folder_date, "t_air_k_24_%s.tif" %Date_str)
l_net_filename = os.path.join(output_folder_date, "l_net_%s.tif" %Date_str)
int_mm_filename = os.path.join(output_folder_date, "int_mm_%s.tif" %Date_str)
lh_24_filename = os.path.join(output_folder_date, "lh_24_%s.tif" %Date_str)
int_wm2_filename = os.path.join(output_folder_date, "int_wm2_%s.tif" %Date_str)
rn_24_filename = os.path.join(output_folder_date, "rn_24_%s.tif" %Date_str)
rn_24_canopy_filename= os.path.join(output_folder_date, "rn_24_canopy_%s.tif" %Date_str)
t_air_k_i_filename = os.path.join(output_folder_date, "t_air_k_i_%s.tif" %Date_str)
vp_i_filename = os.path.join(output_folder_date, "vp_i_%s.tif" %Date_str)
ad_moist_i_filename= os.path.join(output_folder_date, "ad_moist_i_%s.tif" %Date_str)
ad_dry_i_filename = os.path.join(output_folder_date, "ad_dry_i_%s.tif" %Date_str)
ad_i_filename= os.path.join(output_folder_date, "ad_i_%s.tif" %Date_str)
u_b_i_bare_filename= os.path.join(output_folder_date, "u_b_i_bare_%s.tif" %Date_str)
lon_filename= os.path.join(output_folder_date, "lon_%s.tif" %Date_str)
ha_filename= os.path.join(output_folder_date, "ha_%s.tif" %Date_str)
ied_filename= os.path.join(output_folder_date, "ied_%s.tif" %Date_str)
h0_filename = os.path.join(output_folder_date, "h0_%s.tif" %Date_str)
h0ref_filename = os.path.join(output_folder_date, "h0ref_%s.tif" %Date_str)
m_filename = os.path.join(output_folder_date, "m_%s.tif" %Date_str)
rotm_filename = os.path.join(output_folder_date, "rotm_%s.tif" %Date_str)
Tl2_filename = os.path.join(output_folder_date, "Tl2_%s.tif" %Date_str)
B0c_filename = os.path.join(output_folder_date, "B0c_%s.tif" %Date_str)
Bhc_filename = os.path.join(output_folder_date, "Bhc_%s.tif" %Date_str)
Dhc_filename = os.path.join(output_folder_date, "Dhc_%s.tif" %Date_str)
ra_hor_clear_i_filename = os.path.join(output_folder_date, "ra_hor_clear_i_%s.tif" %Date_str)
emiss_atm_i_filename = os.path.join(output_folder_date, "emiss_atm_i_%s.tif" %Date_str)
rn_bare_filename = os.path.join(output_folder_date, "rn_bare_%s.tif" %Date_str)
rn_full_filename= os.path.join(output_folder_date, "rn_full_%s.tif" %Date_str)
u_b_i_full_filename = os.path.join(output_folder_date, "u_b_i_full_%s.tif" %Date_str)
u_star_i_bare_filename = os.path.join(output_folder_date, "u_star_i_bare_%s.tif" %Date_str)
u_star_i_full_filename = os.path.join(output_folder_date, "u_star_i_full_%s.tif" %Date_str)
u_i_soil_filename = os.path.join(output_folder_date, "u_i_soil_%s.tif" %Date_str)
ras_filename = os.path.join(output_folder_date, "ras_%s.tif" %Date_str)
raa_filename = os.path.join(output_folder_date, "raa_%s.tif" %Date_str)
rac_filename= os.path.join(output_folder_date, "rac_%s.tif" %Date_str)
t_max_bare_filename = os.path.join(output_folder_date, "t_max_bare_%s.tif" %Date_str)
t_max_full_filename= os.path.join(output_folder_date, "t_max_full_%s.tif" %Date_str)
w_i_filename = os.path.join(output_folder_date, "w_i_%s.tif" %Date_str)
t_dew_i_filename = os.path.join(output_folder_date, "t_dew_i_%s.tif" %Date_str)
t_wet_i_filename = os.path.join(output_folder_date, "t_wet_i_%s.tif" %Date_str)
t_wet_k_i_filename = os.path.join(output_folder_date, "t_wet_k_i_%s.tif" %Date_str)
lst_max_filename = os.path.join(output_folder_date, "lst_max_%s.tif" %Date_str)
se_root_filename = os.path.join(output_folder_date, "se_root_%s.tif" %Date_str)
stress_moist_filename= os.path.join(output_folder_date, "stress_moist_%s.tif" %Date_str)
r_canopy_0_filename= os.path.join(output_folder_date, "r_canopy_0_%s.tif" %Date_str)
r_canopy_filename= os.path.join(output_folder_date, "r_canopy_%s.tif" %Date_str)
z_obst_filename = os.path.join(output_folder_date, "z_obst_%s.tif" %Date_str)
z_oro_filename = os.path.join(output_folder_date, "z_oro_%s.tif" %Date_str)
z0m_filename = os.path.join(output_folder_date, "z0m_%s.tif" %Date_str)
ra_canopy_init_filename = os.path.join(output_folder_date, "ra_canopy_init_%s.tif" %Date_str)
u_b_24_filename = os.path.join(output_folder_date, "u_b_24_%s.tif" %Date_str)
disp_filename = os.path.join(output_folder_date, "disp_%s.tif" %Date_str)
u_star_24_init_filename = os.path.join(output_folder_date, "u_star_24_init_%s.tif" %Date_str)
ad_dry_24_filename = os.path.join(output_folder_date, "ad_dry_24_%s.tif" %Date_str)
ad_moist_24_filename = os.path.join(output_folder_date, "ad_moist_24_%s.tif" %Date_str)
ad_24_filename = os.path.join(output_folder_date, "ad_24_%s.tif" %Date_str)
psy_24_filename = os.path.join(output_folder_date, "psy_24_%s.tif" %Date_str)
ssvp_24_filename = os.path.join(output_folder_date, "ssvp_24_%s.tif" %Date_str)
t_24_init_filename = os.path.join(output_folder_date, "t_24_init_%s.tif" %Date_str)
h_canopy_24_init_filename= os.path.join(output_folder_date, "h_canopy_24_init_%s.tif" %Date_str)
t_24_filename= os.path.join(output_folder_date, "t_24_%s.tif" %Date_str)
t_24_mm_filename= os.path.join(output_folder_date, "t_24_mm_%s.tif" %Date_str)
sf_soil_filename= os.path.join(output_folder_date, "sf_soil_%s.tif" %Date_str)
rn_24_soil_filename= os.path.join(output_folder_date, "rn_24_soil_%s.tif" %Date_str)
r_soil_filename= os.path.join(output_folder_date, "r_soil_%s.tif" %Date_str)
ra_soil_init_filename= os.path.join(output_folder_date, "ra_soil_init_%s.tif" %Date_str)
u_b_24_filename= os.path.join(output_folder_date, "u_b_24_%s.tif" %Date_str)
u_star_24_soil_init_filename= os.path.join(output_folder_date, "u_star_24_soil_init_%s.tif" %Date_str)
g0_bs_filename= os.path.join(output_folder_date, "g0_bs_%s.tif" %Date_str)
g0_24_filename= os.path.join(output_folder_date, "g0_24_%s.tif" %Date_str)
e_24_init_filename= os.path.join(output_folder_date, "e_24_init_%s.tif" %Date_str)
h_soil_24_init_filename= os.path.join(output_folder_date, "h_soil_24_init_%s.tif" %Date_str)
e_24_filename= os.path.join(output_folder_date, "e_24_%s.tif" %Date_str)
e_24_mm_filename= os.path.join(output_folder_date, "e_24_mm_%s.tif" %Date_str)
et_24_mm_filename= os.path.join(output_folder_date, "et_24_mm_%s.tif" %Date_str)
rn_24_grass_filename= os.path.join(output_folder_date, "rn_24_grass_%s.tif" %Date_str)
et_ref_24_filename= os.path.join(output_folder_date, "et_ref_24_%s.tif" %Date_str)
et_ref_24_mm_filename= os.path.join(output_folder_date, "et_ref_24_mm_%s.tif" %Date_str)
########################## Open input rasters #############################
dest_lst = gdal.Open(LST_filename)
lst = dest_lst.GetRasterBand(1).ReadAsArray()
lst[lst == -9999] = np.nan
dest_albedo = gdal.Open(ALBEDO_filename)
r0 = dest_albedo.GetRasterBand(1).ReadAsArray()
r0[np.isnan(lst)] = np.nan
dest_ndvi = gdal.Open(NDVI_filename)
ndvi = dest_ndvi.GetRasterBand(1).ReadAsArray()
ndvi[np.isnan(lst)] = np.nan
desttime = gdal.Open(Time_filename)
dtime = desttime.GetRasterBand(1).ReadAsArray()
dtime[np.isnan(lst)] = np.nan
dest_lat = gdal.Open(Lat_filename)
lat_deg = dest_lat.GetRasterBand(1).ReadAsArray()
lat_deg[np.isnan(lst)] = np.nan
dest_lon = gdal.Open(Lon_filename)
lon_deg = dest_lon.GetRasterBand(1).ReadAsArray()
lon_deg[np.isnan(lst)] = np.nan
dest_dem = gdal.Open(DEM_filename)
z = dest_dem.GetRasterBand(1).ReadAsArray()
z[np.isnan(lst)] = np.nan
dest_slope = gdal.Open(Slope_filename)
slope_deg = dest_slope.GetRasterBand(1).ReadAsArray()
slope_deg[np.isnan(lst)] = np.nan
dest_aspect = gdal.Open(Aspect_filename)
aspect_deg = dest_aspect.GetRasterBand(1).ReadAsArray()
aspect_deg[np.isnan(lst)] = np.nan
dest_lm = gdal.Open(LandMask_filename)
land_mask = dest_lm.GetRasterBand(1).ReadAsArray()
land_mask[np.isnan(lst)] = np.nan
#dest_bulk = gdal.Open(Bulk_filename)
#bulk = dest_bulk.GetRasterBand(1).ReadAsArray()
dest_maxobs = gdal.Open(MaxObs_filename)
z_obst_max = dest_maxobs.GetRasterBand(1).ReadAsArray()
z_obst_max[np.isnan(lst)] = np.nan
dest_pairsea24 = gdal.Open(Pair_24_0_filename)
p_air_0_24 = dest_pairsea24.GetRasterBand(1).ReadAsArray()
p_air_0_24 = ETLook.meteo.air_pressure_kpa2mbar(p_air_0_24)
p_air_0_24[np.isnan(lst)] = np.nan
dest_pairseainst = gdal.Open(Pair_inst_0_filename)
p_air_0_i = dest_pairseainst.GetRasterBand(1).ReadAsArray()
p_air_0_i = ETLook.meteo.air_pressure_kpa2mbar(p_air_0_i)
p_air_0_i[np.isnan(lst)] = np.nan
dest_pairinst = gdal.Open(Pair_inst_filename)
p_air_i = dest_pairinst.GetRasterBand(1).ReadAsArray()
p_air_i = ETLook.meteo.air_pressure_kpa2mbar(p_air_i)
p_air_i[np.isnan(lst)] = np.nan
dest_precip = gdal.Open(Pre_filename)
P_24 = dest_precip.GetRasterBand(1).ReadAsArray()
P_24[np.isnan(lst)] = np.nan
dest_hum24 = gdal.Open(Hum_24_filename)
qv_24 = dest_hum24.GetRasterBand(1).ReadAsArray()
qv_24[np.isnan(lst)] = np.nan
dest_huminst = gdal.Open(Hum_inst_filename)
qv_i = dest_huminst.GetRasterBand(1).ReadAsArray()
qv_i[
|
np.isnan(lst)
|
numpy.isnan
|
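# The repeated open / ReadAsArray / NaN-mask pattern above could be factored into
# a small helper; this is an illustrative sketch only, not part of pyWAPOR.
import numpy as np
import gdal

def read_masked(filename, invalid_mask=None, nodata=-9999):
    band = gdal.Open(filename).GetRasterBand(1)
    arr = band.ReadAsArray().astype(float)
    arr[arr == nodata] = np.nan
    if invalid_mask is not None:
        arr[invalid_mask] = np.nan
    return arr

# e.g. ndvi = read_masked(NDVI_filename, invalid_mask=np.isnan(lst))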
from control4.core.mdp import MDP
from control4.config import floatX
import numpy as np
def idx2onehot(i,n):
out = np.zeros(n,floatX)
out[i] = 1
return out
class Nav2dState(object):
def __init__(self,pos,targpos,t):
self.pos = pos
self.targpos = targpos
self.t = t
self.path = [pos.copy()]
class Nav2D(MDP):
def __init__(self, halfsize=3.0, obs_efference=True, obs_cur_pos=False, target_mode="four"):
self.halfsize = halfsize
self.thresh_dist = 0.8
self.sample_frac = 0.2
self.t_max = 50
self.viewer = None
self.target_mode = target_mode
self.obs_efference=obs_efference
self.obs_cur_pos=obs_cur_pos
self._obs_dim = 4
if self.target_mode == "four":
self._obs_dim += 4
if self.obs_efference:
self._obs_dim += 2
if self.obs_cur_pos:
self._obs_dim += 2
def call(self, input_arrs):
state = input_arrs["x"]
u = input_arrs["u"]
assert u.shape[0]==1
u = u[0]
u = np.clip(u,-1,1)
ytarg = state.pos + u
halfsize = self.halfsize
components = [ytarg > halfsize, ytarg < -halfsize]
if self.target_mode == "four":
components.append(np.zeros(4,floatX))
ytarg = np.clip(ytarg, -halfsize, halfsize)
state.path.append(ytarg.copy())
state.pos = ytarg
targ_pos = state.targpos
state.t += 1
done = int(state.t == self.t_max or np.square(targ_pos - state.pos).sum() < self.thresh_dist**2)
cost = np.array([done*(state.t - self.t_max)],floatX)
################################
# Observation
if self.obs_efference: components.append(u)
if self.obs_cur_pos: components.append(targ_pos) # XXX
o = np.concatenate(components)
return {
"x" : state,
"o" : o.reshape(1,-1),
"c" : cost.reshape(1,-1),
"done" : done
}
def initialize_mdp_arrays(self):
frac = self.sample_frac
pos = (2*np.random.rand(2)-.1)*self.halfsize*frac
pos = pos.astype(floatX)
halfsize = self.halfsize
if self.target_mode == "four":
targs_42 = np.array([[halfsize,0],[0,halfsize],[-halfsize,0],[0,-halfsize]],floatX)
targidx = np.random.randint(4)
targpos = targs_42[targidx]
elif self.target_mode == "unif":
targpos = np.random.uniform(low=-halfsize,high=halfsize,size=(2,)).astype(floatX)
targidx = None
x_init = Nav2dState(pos, targpos, 0)
if targidx is None:
components = [np.zeros(4,floatX)]
else:
components = [np.zeros(4,floatX),idx2onehot(targidx,4)]
if self.obs_efference: components.append(np.zeros(2,floatX))
if self.obs_cur_pos: components.append(pos)
o_init = np.concatenate(components)
c_init =
|
np.array([0],floatX)
|
numpy.array
|
# pylint: disable=R0201
import json
from unittest.mock import MagicMock
import numpy as np
import pytest
from napari.utils import Colormap
from PartSegCore.image_operations import RadiusType
from PartSegCore.json_hooks import EventedDict, ProfileDict, ProfileEncoder, profile_hook, recursive_update_dict
def test_recursive_update_dict_basic():
dict1 = {"a": 1, "b": 2}
dict2 = {"b": 3, "c": 4}
dict1_copy = dict1.copy()
dict1.update(dict2)
recursive_update_dict(dict1_copy, dict2)
assert dict1 == dict1_copy
def test_recursive_update_dict():
dict1 = {"a": {"k": 1, "l": 2}, "b": {"k": 1, "l": 2}}
dict2 = {"a": {"m": 3, "l": 4}, "b": 3, "c": 4}
recursive_update_dict(dict1, dict2)
assert dict1 == {"a": {"k": 1, "l": 4, "m": 3}, "b": 3, "c": 4}
class TestEventedDict:
def test_simple_add(self):
receiver = MagicMock()
dkt = EventedDict()
dkt.setted.connect(receiver.set)
dkt.deleted.connect(receiver.delete)
dkt["a"] = 1
assert dkt["a"] == 1
assert receiver.set.call_count == 1
assert "'a': 1" in str(dkt)
assert "'a': 1" in repr(dkt)
dkt["a"] = 2
assert dkt["a"] == 2
assert receiver.set.call_count == 2
assert len(dkt) == 1
del dkt["a"]
assert receiver.set.call_count == 2
assert receiver.delete.call_count == 1
assert len(dkt) == 0
def test_simple_add_remove(self):
callback_list = []
def callback_add():
callback_list.append(1)
def callback_delete():
callback_list.append(2)
dkt = EventedDict()
dkt.setted.connect(callback_add)
dkt.deleted.connect(callback_delete)
dkt[1] = 1
dkt[2] = 1
assert len(dkt) == 2
assert callback_list == [1, 1]
del dkt[1]
assert len(dkt) == 1
assert callback_list == [1, 1, 2]
def test_nested_evented(self):
dkt = EventedDict(bar={"foo": {"baz": 1}})
assert isinstance(dkt["bar"], EventedDict)
assert isinstance(dkt["bar"]["foo"], EventedDict)
assert dkt["bar"]["foo"]["baz"] == 1
dkt["baz"] = {"bar": {"foo": 1}}
assert isinstance(dkt["baz"], EventedDict)
assert isinstance(dkt["baz"]["bar"], EventedDict)
assert dkt["baz"]["bar"]["foo"] == 1
def test_serialize(self, tmp_path):
dkt = EventedDict(
**{"a": {"b": {"c": 1, "d": 2, "e": 3}, "f": 1}, "g": {"h": {"i": 1, "j": 2}, "k": [6, 7, 8]}}
)
with (tmp_path / "test_dict.json").open("w") as f_p:
json.dump(dkt, f_p, cls=ProfileEncoder)
with (tmp_path / "test_dict.json").open("r") as f_p:
dkt2 = json.load(f_p, object_hook=profile_hook)
assert isinstance(dkt2, EventedDict)
assert isinstance(dkt2["a"], EventedDict)
assert dkt["g"]["k"] == [6, 7, 8]
def test_signal_names(self):
receiver = MagicMock()
dkt = EventedDict(baz={"foo": 1})
dkt.setted.connect(receiver.set)
dkt.deleted.connect(receiver.deleted)
dkt["foo"] = 1
assert receiver.set.call_count == 1
receiver.set.assert_called_with("foo")
dkt["bar"] = EventedDict()
assert receiver.set.call_count == 2
receiver.set.assert_called_with("bar")
dkt["bar"]["baz"] = 1
assert receiver.set.call_count == 3
receiver.set.assert_called_with("bar.baz")
dkt["baz"]["foo"] = 2
assert receiver.set.call_count == 4
receiver.set.assert_called_with("baz.foo")
del dkt["bar"]["baz"]
assert receiver.deleted.call_count == 1
receiver.deleted.assert_called_with("bar.baz")
del dkt["bar"]
assert receiver.deleted.call_count == 2
receiver.deleted.assert_called_with("bar")
def test_propagate_signal(self):
receiver = MagicMock()
dkt = EventedDict(baz={"foo": 1})
dkt.setted.connect(receiver.set)
dkt.deleted.connect(receiver.deleted)
dkt["baz"].base_key = ""
dkt["baz"]["foo"] = 2
receiver.set.assert_called_with("foo")
receiver.set.assert_called_once()
del dkt["baz"]["foo"]
receiver.deleted.assert_called_with("foo")
receiver.deleted.assert_called_once()
class TestProfileDict:
def test_simple(self):
dkt = ProfileDict()
dkt.set("a.b.c", 1)
dkt.set("a.b.a", 2)
assert dkt.get("a.b.c") == 1
with pytest.raises(KeyError):
dkt.get("a.b.d")
dkt.get("a.b.d", 3)
assert dkt.get("a.b.d") == 3
assert dkt.get("a.b") == {"a": 2, "c": 1, "d": 3}
with pytest.raises(TypeError):
dkt.set("a.b.c.d", 3)
def test_update(self):
dkt = ProfileDict()
dkt.update(a=1, b=2, c=3)
assert dkt.my_dict == {"a": 1, "b": 2, "c": 3}
dkt2 = ProfileDict()
dkt2.update(c=4, d={"a": 2, "e": 7})
assert dkt2.get("d.e") == 7
dkt.update(dkt2)
assert dkt.get("d.e") == 7
assert dkt.get("c") == 4
dkt.update({"g": 1, "h": 4})
assert dkt.get("g") == 1
dkt.update({"w": 1, "z": 4}, w=3)
assert dkt.get("w") == 3
assert dkt.verify_data()
assert dkt.filter_data() == []
dkt.set("e.h.l", {"aaa": 1, "__error__": True})
assert not dkt.verify_data()
assert dkt.filter_data() == ["e.h"]
def test_serialize(self, tmp_path):
dkt = ProfileDict()
dkt.set("a.b.c", 1)
dkt.set("a.b.a", 2)
with open(tmp_path / "test.json", "w") as f_p:
json.dump(dkt, f_p, cls=ProfileEncoder)
with open(tmp_path / "test.json") as f_p:
dkt2 = json.load(f_p, object_hook=profile_hook)
assert dkt.my_dict == dkt2.my_dict
def test_callback(self):
def dummy_call():
receiver.dummy()
receiver = MagicMock()
dkt = ProfileDict()
dkt.connect("", receiver.empty)
dkt.connect("", dummy_call)
dkt.connect("b", receiver.b)
dkt.connect(["d", "c"], receiver.dc)
dkt.set("test.a", 1)
assert receiver.empty.call_count == 1
assert receiver.dummy.call_count == 1
receiver.empty.assert_called_with("a")
receiver.dummy.assert_called_with()
dkt.set("test.a", 1)
assert receiver.empty.call_count == 1
receiver.b.assert_not_called()
dkt.set("test2.a", 1)
assert receiver.empty.call_count == 2
receiver.b.assert_not_called()
dkt.set(["test", "b"], 1)
assert receiver.empty.call_count == 3
assert receiver.b.call_count == 1
dkt.set("test.d.c", 1)
receiver.dc.assert_called_once()
dkt.set("test.a", 2)
assert receiver.empty.call_count == 5
def test_profile_hook_colormap_load(bundle_test_dir):
with open(bundle_test_dir / "view_settings_v0.12.6.json") as f_p:
json.load(f_p, object_hook=profile_hook)
def test_colormap_dump(tmp_path):
cmap_list = [Colormap([(0, 0, 0), (1, 1, 1)]), Colormap([(0, 0, 0), (1, 1, 1)], controls=[0, 1])]
with open(tmp_path / "test.json", "w") as f_p:
json.dump(cmap_list, f_p, cls=ProfileEncoder)
with open(tmp_path / "test.json") as f_p:
cmap_list2 = json.load(f_p, object_hook=profile_hook)
assert np.array_equal(cmap_list[0].colors, cmap_list2[0].colors)
assert
|
np.array_equal(cmap_list[0].controls, cmap_list2[0].controls)
|
numpy.array_equal
|
import numpy as np
import random
import math
import os
class QL(object):
def __init__(self):
self.action_table = [[0] * 3 for _ in range(120)]
self.q_table = np.array([[1] * 120 for _ in range(16)])
self.q0_table = self.q_table
self.p_table = np.array([[1/120] * 120 for _ in range(16)])
self.gen_action_table()
self.beta = 0.6
self.lr = 0.15
self.gamma = 0.80
    # Generate the action table
def gen_action_table(self):
index = 0
for a in range(10, 25, 2):
for b in range(10, 25, 2):
for c in range(10, 25, 2):
for d in range(10, 25, 2):
if a + b + c + d == 70 - 16:
self.action_table[index] = [a, b, c]
index += 1
    # Action selection
def select_action(self, status):
p_select_line = self.p_table[status]
q_select_line = self.q_table[status]
min_q_poss = np.where(q_select_line==np.min(q_select_line))[0]
min_q_pos = random.choice(min_q_poss)
p_select_line[min_q_pos] += self.beta * (1 - p_select_line[min_q_pos])
for i in range(0,120):
if i != min_q_pos:
p_select_line[i] -= self.beta * p_select_line[i]
self.p_table[status] = p_select_line
p_select_cumsum = np.cumsum(p_select_line)
p_select_cumsum = np.insert(p_select_cumsum, 0, 0)
random_float = random.random()
        # Binary search for the action index corresponding to the random float
a = 0
b = 120
while(a != b):
c = math.ceil((a + b)/2)
if random_float >= p_select_cumsum[c]:
a = c
else:
b = c - 1
return a
    # Learn: update the Q table
def learn(self, status, action, reward, status_):
q_predict = self.q_table[status][action]
q_target = reward + self.gamma * min(self.q_table[status_])
self.q_table[status][action] = q_predict + self.lr*(q_target - q_predict)
    # Return the action for the verification (check) stage
def check(self, status):
q_select_line = self.q_table[status]
min_q_poss = np.where(q_select_line==np.min(q_select_line))[0]
min_q_pos = random.choice(min_q_poss)
return min_q_pos
    # Save the Q table and P table
def save_data(self):
np.save("q.npy", self.q_table)
np.save("p.npy", self.p_table)
if(os.path.exists("Q.xls")):
os.remove("Q.xls")
output = open("Q.xls", 'w', encoding='utf-8')
for i in range(len(self.q_table)):
for j in range(len(self.q_table[i])):
output.write(str(self.q_table[i][j]))
output.write('\t')
output.write('\n')
output.close()
print("Have save Q table to file.")
    # Load the Q table and P table
def load_data(self):
if os.path.exists("q.npy"):
try:
self.q_table = np.load("q.npy")
print("已读取q表")
except:
print("读取q表有误,请删除后重试")
else:
print("q表不存在,使用初始q表")
if os.path.exists("p.npy"):
try:
self.p_table =
|
np.load("p.npy")
|
numpy.load
|
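# The manual binary search in select_action above samples an action index from a
# cumulative-probability array; np.searchsorted expresses the same idea compactly.
# Standalone sketch, not a drop-in replacement for the class method.
import numpy as np

p = np.array([0.1, 0.4, 0.3, 0.2])              # action probabilities for one state
cdf = np.cumsum(p)
u = np.random.random()
action = int(np.searchsorted(cdf, u, side="right"))
print(action)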
import numpy as np
from scipy.optimize import brentq
from scipy.integrate import quad
from scipy.stats import norm,nbinom
from scipy.stats import multivariate_normal as mvn
from prime_utils import normal_logpdf
from prime_model import modelPred
import datetime
from dateutil import parser
def logpost(state,params):
"""
Compute log-posterior density values; this function assumes
the likelihood is a product of independent Gaussian distributions
Parameters
----------
state: python list or numpy array
model parameters
params: dictionary
detailed settings for the epidemiological model
Returns
-------
llik: float
natural logarithm of the likelihood density
lpri: float
natural logarithm of the prior density
"""
# parameters
new_cases = params['new_cases']
prior_types = params['prior_types']
prior_info = params['prior_info']
model_type = params['model_type']
error_model_type = params['error_model_type']
error_weight = params['error_weight']
assert(params['days_extra']==0)
# evaluate the model
people_with_symptoms_cdf = modelPred(state,params,is_cdf=True)
people_with_symptoms = np.zeros(people_with_symptoms_cdf.shape[0])
for i in range(1,people_with_symptoms.shape[0]):
people_with_symptoms[i] = people_with_symptoms_cdf[i]-people_with_symptoms_cdf[i-1]
# print(people_with_symptoms_cdf,people_with_symptoms)
# quit()
# log-likelihood
ndays = params['days_since_day0'].shape[0]
llik = 0.0
if error_model_type == "add":
# additive error
err = np.exp(state[-1])
elif error_model_type == "addMult":
# additive & multiplicative error
err = np.exp(state[-2])+np.exp(state[-1])*people_with_symptoms
# apply weighting to error terms if specified
if error_weight is not None:
err *= error_weight
# kchowdh: vectorize log norm pdf
npws = (people_with_symptoms - new_cases)/err
    # sum the standardized Gaussian log-densities, including the log of the error scale
llik = np.sum(norm._logpdf(npws) - np.log(err))
# log-prior
lpri = 0.0
for i in range(state.shape[0]):
if prior_types[i]=='g':
log_pdf_vals = normal_logpdf(state[i],loc=prior_info[i][0],scale=prior_info[i][1])
lpri = lpri+log_pdf_vals
return [llik,lpri]
def logpost_negb(state,params):
"""
Compute log-posterior density values; this function assumes
the likelihood is a product of negative-binomial distributions
Parameters
----------
state: python list or numpy array
model parameters
params: dictionary
detailed settings for the epidemiological model
Returns
-------
llik: float
natural logarithm of the likelihood density
lpri: float
natural logarithm of the prior density
"""
# parameters
new_cases = params['new_cases']
prior_types = params['prior_types']
prior_info = params['prior_info']
model_type = params['model_type']
error_weight = params['error_weight']
assert(params['days_extra']==0)
# compute cases
# people_with_symptoms = modelPred(state,params)
people_with_symptoms_cdf = modelPred(state,params,is_cdf=True)
people_with_symptoms = np.zeros(people_with_symptoms_cdf.shape[0])
for i in range(1,people_with_symptoms.shape[0]):
people_with_symptoms[i] = people_with_symptoms_cdf[i]-people_with_symptoms_cdf[i-1]
# log-likelihood
alpha_ind = 4
if model_type == "twoWave":
alpha_ind = 8
elif model_type == "threeWave":
alpha_ind = 12
alpha = np.exp(state[alpha_ind])
prob = alpha/(alpha+people_with_symptoms)
llkarray=np.array([np.log(1e-10+nbinom._pmf(obs, n=alpha, p=p)) for obs,p in zip(new_cases,prob)])
# apply weighting to error terms if specified
if error_weight is not None:
llkarray += np.log(error_weight)
llik = np.sum(llkarray[1:])
# log-prior
lpri = 0.0
for i in range(state.shape[0]):
if prior_types[i]=='g':
log_pdf_vals = normal_logpdf(state[i],loc=prior_info[i][0],scale=prior_info[i][1])
lpri = lpri+log_pdf_vals
return [llik,lpri]
def logpost_poisson(state,params):
"""
Compute log-posterior density values; this function assumes
the likelihood is a product of poisson distributions
Parameters
----------
state: python list or numpy array
model parameters
params: dictionary
detailed settings for the epidemiological model
Returns
-------
llik: float
natural logarithm of the likelihood density
lpri: float
natural logarithm of the prior density
"""
# parameters
new_cases = params['new_cases']
prior_types = params['prior_types']
prior_info = params['prior_info']
error_weight = params['error_weight']
assert(params['days_extra']==0)
# compute cases
# people_with_symptoms = modelPred(state,params)
people_with_symptoms_cdf = modelPred(state,params,is_cdf=True)
people_with_symptoms = np.zeros(people_with_symptoms_cdf.shape[0])
for i in range(1,people_with_symptoms.shape[0]):
people_with_symptoms[i] = people_with_symptoms_cdf[i]-people_with_symptoms_cdf[i-1]
# log-likelihood
# alpha = np.exp(state[4])
llkarray=np.array([-lbd+k*np.log(lbd+1.e-4) for k,lbd in zip(new_cases,people_with_symptoms)])
# apply weighting to error terms if specified
if error_weight is not None:
llkarray +=
|
np.log(error_weight)
|
numpy.log
|
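The completion here applies numpy.log elementwise to the optional error-weight array. A small sketch of how such a weight enters a log-likelihood sum; the array values are made up for illustration:
import numpy as np
llkarray = np.array([-1.2, -0.8, -2.5])      # per-day log-likelihood terms (illustrative)
error_weight = np.array([1.0, 0.5, 2.0])     # per-day weights (illustrative)
llkarray = llkarray + np.log(error_weight)   # elementwise; log(1.0) == 0 leaves a term unchanged
llik = np.sum(llkarray)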
import numpy as np
from matplotlib import pyplot as plt
class IMU:
def __init__(self, accel_sigma=0.1, gyro_sigma=0.5):
self.accel_sigma = accel_sigma
self.gyro_sigma = gyro_sigma
def measure(self, true_state):
"""true_state is [theta, phi, q, p]"""
theta = true_state[0] + np.random.normal(0, self.accel_sigma)
phi = true_state[1] + np.random.normal(0, self.accel_sigma)
q = true_state[2] + np.random.normal(0, self.gyro_sigma)
p = true_state[3] + np.random.normal(0, self.gyro_sigma)
return
|
np.array([theta,phi,q,p])
|
numpy.array
|
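numpy.array is the target API here. A flat, standalone sketch of the same measurement packing done by IMU.measure, with a made-up true state and the noise levels from the class defaults:
import numpy as np
true_state = [0.1, -0.2, 0.0, 0.05]              # made-up [theta, phi, q, p]
sigmas = np.array([0.1, 0.1, 0.5, 0.5])          # accel/accel/gyro/gyro noise scales
measurement = np.array(true_state) + np.random.normal(0.0, sigmas)
print(measurement.shape)                         # (4,)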
import numpy as np
import pandas as pd
from scipy.optimize import fsolve
def wavenumber(f, h, g=9.81):
""" solves the dispersion relation, returns the wave number k
INPUTS:
f : wave frequency [Hz], scalar or array-like (converted internally to omega = 2*pi*f)
h : water depth [m]
g: gravity [m/s^2]
OUTPUTS:
k: wavenumber
"""
omega = 2*np.pi*f
if hasattr(omega, '__len__'):
k = np.array([fsolve(lambda k: om**2/g - k*np.tanh(k*h), (om**2)/g)[0] for om in omega])
else:
func = lambda k: omega**2/g - k*np.tanh(k*h)
k_guess = (omega**2)/g
k = fsolve(func, k_guess)[0]
return k
# Functions
def elevation2d(a, f, k, eps, t, x=0):
""" wave elevation (eta)
INPUTS:
a : amplitudes, scalar or array-like of dimension nf
f : frequencies, scalar or array-like of dimension nf
k : wavenumbers, scalar or array-like of dimension nf
t : time, scalar or array-like of dimension nt
x : longitudinal position, scalar or array like of dimension (nx)
OUTPUTS:
eta: wave elevation
"""
t = np.atleast_1d(t)
a = np.atleast_1d(a)
f = np.atleast_1d(f)
k = np.atleast_1d(k)
eps = np.atleast_1d(eps)
x = np.atleast_1d(x)
omega = 2*np.pi * f
if len(t)==1:
eta = np.zeros(x.shape)
for ai,oi,ki,ei in zip(a,omega,k,eps):
eta += ai * np.cos(oi*t - ki*x + ei)
elif len(x)==1:
eta = np.zeros(t.shape)
for ai,oi,ki,ei in zip(a,omega,k,eps):
eta += ai * np.cos(oi*t - ki*x + ei)
else:
raise NotImplementedError()
return eta
#
def kinematics2d(a, f, k, eps, h, t, z, x=None, Wheeler=False, eta=None):
"""
2D wave kinematics, longitudinal velocity and acceleration along x
z ^
|
|--> x z=0 (sea level)
-> vel(z,t)
~~~~~~ z=-h (sea bed)
INPUTS:
a : amplitudes, scalar or array-like of dimension (nf)
f : frequencies, scalar or array-like of dimension (nf)
k : wavenumbers, scalar or array-like of dimension (nf)
t : time, scalar or array-like of dimension nt
z : vertical position, scalar or 1d or nd-array-like of dimension(s) (n x ..). NOTE: z=0 sea level, z=-h sea floor
x : longitudinal position, scalar or 1d or nd-array-like of dimension(s) (n x ..)
OUTPUTS:
vel: wave velocity at t,z,x
acc: wave acceleration at t,z,x
"""
t = np.atleast_1d(t)
f = np.atleast_1d(f)
a = np.atleast_1d(a)
eps = np.atleast_1d(eps)
k = np.atleast_1d(k)
z = np.atleast_1d(z)
if x is None:
x=z*0
else:
x = np.asarray(x)
omega = 2 * np.pi * f # angular frequency
if Wheeler:
if eta is None:
raise Exception('Provide wave elevation (eta), scalar, for Wheeler')
# The user needs to provide eta for Wheeler stretching
if len(t)==1:
z = (z-eta)*h/(h+eta)
else:
raise NotImplementedError('Wheeler stretching, need to consider cases where t is function of time')
z = z+h # 0 at sea bed
if len(t)==1:
vel = np.zeros(z.shape)
acc =
|
np.zeros(z.shape)
|
numpy.zeros
|
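numpy.zeros is the target. A short sketch of the preallocate-then-accumulate pattern used in kinematics2d; the single wave component and the cosh factor below are stand-ins, not the real transfer function:
import numpy as np
z = np.linspace(-10.0, 0.0, 5)          # vertical positions (illustrative)
vel = np.zeros(z.shape)                 # accumulators shaped like z, as in kinematics2d
acc = np.zeros(z.shape)
vel += 1.0 * 0.5 * np.cosh(0.1 * z)     # one made-up (amplitude, omega) component
acc += 1.0 * 0.5**2 * np.cosh(0.1 * z)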
import numpy as np
from .preprocessing import no_preproc, split_train_val
__author__ = '<NAME>'
class Dataset(object):
def __init__(self, name, features, labels, indices_train, indices_test, indices_val,
num_classes=None, feature_preproc_fn=no_preproc, class_names=None):
self.name = name
self.features = features
self.labels = labels
self.indices_train = indices_train
self.indices_val = indices_val
self.indices_test = indices_test
self.feature_preproc_fn = feature_preproc_fn
self.num_val = len(self.indices_val)
self.num_test = len(self.indices_test)
self.num_samples = len(features)
self.features_shape = features[0].shape
self.num_features =
|
np.prod(features[0].shape)
|
numpy.prod
|
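numpy.prod is the target. A minimal sketch of deriving the flattened feature count from a sample's shape; the shape is illustrative:
import numpy as np
features_shape = (28, 28, 3)             # e.g. an image-like sample
num_features = np.prod(features_shape)   # 2352; note np.prod(()) == 1.0 for scalar samples
print(int(num_features))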
import numpy as np
import scipy as sp
import random
import warnings
import math
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import KDTree, BallTree
from sklearn.metrics import pairwise_distances
from scipy.cluster.hierarchy import linkage
from scipy.spatial.distance import squareform
TOL = 1e-15
INF = 1e15
### GAMMA LINKAGE
def lazy_intersection(increasing, increasing2, f2) :
# find first occurence of increasing[i] >= f2(increasing2[i])
first = 0
last = len(increasing)-1
if increasing[first] >= f2(increasing2[first]) :
return first, False
if increasing[last] < f2(increasing2[last]) :
return last, True
while first+1 < last :
midpoint = (first + last)//2
if increasing[midpoint] >= f2(increasing2[midpoint]) :
last = midpoint
else:
first = midpoint
return last, False
class MPSpace :
"""Implements a finite metric probability space that can compute \
its kernel density estimates"""
POSSIBLE_KERNELS = {'square', 'triangle', 'epanechnikov'}
def __init__(self, X, metric = 'minkowski', measure = None, leaf_size = 40, p = 2) :
# if metric = 'precomputed' then assumes that X is a distance matrix
# to do: check that input is correct
self.metric = metric
self.p = p
self.size = X.shape[0]
if measure is None :
self.measure = np.full(self.size, 1./self.size)
self.counting_measure = True
self.dimension = X.shape[1]
self.metric = metric
if metric != 'precomputed' :
self.points = X
else :
self.points = np.array(range(self.size))
self.fit_on = None
self.kernel = None
self.fitted_nn = False
self.fitted_density_estimates = False
self.nn_distance = None
self.nn_indices = None
self.square_kernel_estimate = None
self.delta = None
self.kernel_estimate = None
self.maxk = None
self.maxs = None
self.tol = TOL
if metric in KDTree.valid_metrics :
self.tree = KDTree(X, metric=metric, leaf_size=leaf_size, p = p)
elif metric in BallTree.valid_metrics :
self.tree = BallTree(X, metric=metric, leaf_size=leaf_size)
elif metric == 'precomputed':
self.dist_mat = X
else :
raise Exception("Metric given is not supported.")
def fit(self, maxk = None, maxs = 0, kernel = 'square', fit_on = None) :
self.fit_nn(maxk = maxk, maxs = maxs, fit_on = fit_on)
self.fit_density_estimates(kernel = kernel)
def fit_nn(self, maxk, maxs, fit_on) :
# to do: check input
if fit_on == None :
fit_on = range(0,self.size)
# fit_on can be just a number < 1
if isinstance(fit_on, float) and fit_on < 1 :
n_samples = int(self.size * fit_on)
fit_on = random.sample(range(self.size),n_samples)
# or > 1
if isinstance(fit_on, int) and fit_on > 1 :
n_samples = fit_on
fit_on = random.sample(range(self.size),n_samples)
self.fit_on = fit_on
fit_on = self.points[fit_on]
if maxk == None :
maxk = self.size
if maxk == 1 and maxs == 0 :
warnings.warn("Fitting with k = 1 and s = 0.")
if maxk > self.size :
warnings.warn("Trying to fit with k > |data set|. Changing to k = |data set|.")
maxk = self.size
self.maxs = maxs
self.maxk = maxk
if self.metric != 'precomputed' :
# to do: check if dualtree or breadth_first set to False is faster
k_neighbors = self.tree.query(\
fit_on, self.maxk, return_distance = True, sort_results = True,
dualtree = True, breadth_first = True)
k_neighbors = (np.array(k_neighbors[1]),np.array(k_neighbors[0]))
maxs_given_by_maxk = np.min(k_neighbors[1][:,-1])
neighbors = []
nn_distance = []
if maxs < maxs_given_by_maxk :
self.maxs = maxs_given_by_maxk
neighbors = k_neighbors[0]
nn_distance = k_neighbors[1]
else :
s_neighbors = self.tree.query_radius(\
fit_on, maxs, return_distance = True, sort_results = True)
for i in range(len(fit_on)) :
# can this be done more efficiently at a lower level?
if len(k_neighbors[0][i]) >= len(s_neighbors[0][i]) :
neighbors.append(k_neighbors[0][i])
nn_distance.append(k_neighbors[1][i])
else :
neighbors.append(s_neighbors[0][i])
nn_distance.append(s_neighbors[1][i])
else :
warnings.warn("For now, for distance matrix we assume maxk = number of points.")
self.maxk = self.size
neighbors = np.argsort(self.dist_mat)
nn_distance = self.dist_mat[np.arange(len(self.dist_mat)), neighbors.transpose()].transpose()
self.nn_indices = np.array(neighbors)
self.nn_distance = np.array(nn_distance)
self.fitted_nn = True
def fit_density_estimates(self, kernel) :
self.kernel = kernel
self.fitted_density_estimates = True
self.square_kernel_estimate = np.cumsum(self.measure[self.nn_indices], axis = 1)
if kernel == 'square' :
self.kernel_estimate = self.square_kernel_estimate
else :
with np.errstate(divide='ignore'):
inv_width = np.where(self.nn_distance < self.tol, 0, np.divide(1.,self.nn_distance))
if kernel == 'triangle' :
self.delta = np.cumsum(self.measure[self.nn_indices] * self.nn_distance, axis = 1)
self.kernel_estimate = self.square_kernel_estimate - inv_width * self.delta
elif kernel == 'epanechnikov' :
self.delta = np.cumsum(self.measure[self.nn_indices] * np.square(self.nn_distance), axis = 1)
self.kernel_estimate = self.square_kernel_estimate - np.square(inv_width) * self.delta
def kde_at_index_width(self, point_index, neighbor_index, width = None) :
# to do: check input
if width is None :
width = self.nn_distance[point_index][neighbor_index]
if self.kernel == 'square' :
return self.square_kernel_estimate[point_index][neighbor_index]
else :
with np.errstate(divide='ignore'):
inv_width = np.where(width < self.tol, 0, np.divide(1.,width))
if self.kernel == 'triangle' :
return self.square_kernel_estimate[point_index][neighbor_index] -\
inv_width * self.delta[point_index][neighbor_index]
elif self.kernel == 'epanechnikov' :
return self.square_kernel_estimate[point_index][neighbor_index] -\
np.square(inv_width) * self.delta[point_index][neighbor_index]
def kde(self, point_index, width) :
# to do: check input (in particular that the index is in bound)
width = np.array(width)
# find the index (resp. indices) of the neighbor (resp. neighbors)
# whose distance is (left) closest to width (resp. each element of width)
pos = np.searchsorted(self.nn_distance[point_index], width, side = 'right')
pos -= 1
# number of local neighbors of the point
n_neighbors = len(self.nn_distance[point_index])
# check if the k value we computed is exact or only a lower bound
# (in that case, annotate it in the out_of_range list)
if n_neighbors < self.size :
if width.ndim == 1 :
# two conditions needed for out of bound
out_of_range = np.where(pos == n_neighbors-1, True, False)
if self.maxs > self.tol :
out_of_range_ = np.where(width > self.maxs, True, False)
out_of_range = np.logical_and(out_of_range, out_of_range_)
else :
out_of_range = (pos == n_neighbors-1 and self.nn_distance[pos] > self.maxs)
else :
if width.ndim == 1 :
out_of_range = np.full(len(width),False)
else :
out_of_range = False
return self.kde_at_index_width(point_index,pos,width), out_of_range
# to do: use multiple cores when have lots of point indices
def core_scale(self, point_index, gamma) :
"""Given a curve gamma (that takes an r and returns s,t,k) and a
list of (indices of) points in the space, returns the r-time at which
the points are born."""
# point_index can be a list
point_index = np.array(point_index)
if gamma.s_component.is_constant :
#warnings.warn("s component is constant.")
return self.core_scale_constant_s(point_index, gamma)
elif gamma.k_component.is_constant :
#warnings.warn("k component is constant.")
return self.core_scale_varying_s_constant_k(point_index, gamma)
else :
return self.core_scale_varying_s_k(point_index, gamma)
def core_scale_constant_s(self, point_index, gamma) :
s0 = gamma.s_component.func(gamma.minr)
kde_s0 = np.vectorize(lambda i : self.kde(i,s0))
kdes, out_of_range = kde_s0(point_index)
if np.any(out_of_range) :
warnings.warn("Don't have enough neighbors to properly calculate core scale.")
return gamma.k_component.inverse(kdes)
def core_scale_varying_s_constant_k(self, point_index, gamma) :
k0 = gamma.k_component.func(gamma.minr)
if k0 < TOL :
#warnings.warn("k is constant and 0, output is just single-linkage.")
return np.zeros((len(point_index)))
if k0 > 1 :
warnings.warn("The curve doesn't intersect the shadow.")
zero = np.vectorize(lambda x : 0)
return zero(point_index)
if self.kernel == 'square' and self.counting_measure :
# square kernel with counting measure and constant k
i_indices = int(np.ceil(k0 * self.size)) - 1
if i_indices + 1 > self.maxk :
if self.maxk < self.size :
# to do: check that the boundary cases are correct here
out_of_range = np.where((i_indices + 1 >\
np.apply_along_axis(len,-1,self.nn_indices[point_index])) &\
(i_indices + 1 < self.size), True, False)
if np.any(out_of_range) :
warnings.warn("Don't have enough neighbors to properly compute core scale.")
else :
i_indices = []
for p in point_index :
i_indices.append(np.searchsorted(self.kernel_estimate[p],k0, side = 'left'))
i_indices = np.array(i_indices)
if self.maxk < self.size :
out_of_range = np.where((i_indices >=\
np.apply_along_axis(len,-1,self.nn_indices[point_index])) &\
(np.apply_along_axis(len,-1,self.nn_indices[point_index]) < self.size), True, False)
if np.any(out_of_range) :
warnings.warn("Don't have enough neighbors to properly compute core scale.")
if self.kernel == 'square' :
return gamma.s_component.inverse(self.nn_distance[(point_index, i_indices)])
if self.kernel == 'triangle' :
op = lambda p, i : np.divide(self.delta[p,i-1], (self.square_kernel_estimate[p,i-1] - k0))
elif self.kernel == 'epanechnikov' :
op = lambda p, i : np.sqrt(np.divide(self.delta[p,i-1], self.square_kernel_estimate[p,i-1] - k0))
return gamma.s_component.inverse(np.where(i_indices == 0, 0, op(point_index,i_indices)))
def core_scale_varying_s_k(self, point_index, gamma) :
k_s_inv = lambda d : gamma.k_component.func(gamma.s_component.func_inv(d))
i_indices = []
for p in point_index :
i_indices.append(lazy_intersection(self.kernel_estimate[p], self.nn_distance[p], k_s_inv))
i_indices = np.array(i_indices)
out_of_range = i_indices[:,1]
if np.any(out_of_range) :
# to do: better message for second condition
warnings.warn("Don't have enough neighbors to properly compute core scale, or point takes too long to appear.")
i_indices = i_indices[:,0]
if self.kernel == 'square' :
op = lambda p, i : np.where(self.kernel_estimate[p,i-1] >= k_s_inv(self.nn_distance[p,i]),\
gamma.s_component.func(gamma.k_component.func_inv(self.kernel_estimate[p,i-1])),
self.nn_distance[p,i])
return gamma.s_component.inverse(np.where(i_indices == 0, 0, op(point_index,i_indices)))
else :
# to do: set tolerance so user can choose it, and handle nonconvergence in some controlled way
op_ = lambda p, i : sp.optimize.brentq(lambda s : self.kde(p, s)[0] -\
gamma.k_component.func(gamma.s_component.func_inv(s)),
self.nn_distance[p,i-1], self.nn_distance[p,i], disp = True)
op = lambda p, i : 0 if i == 0 else op_(p,i)
return gamma.s_component.inverse(np.array(list(map(op, point_index, i_indices))))
def its_shadow(self, gridk = 1.1, grids = 1, n_bins = 250) :
# to do: check input
if not self.fitted_density_estimates :
raise Exception("Must fit before computing shadow.")
# create grid if not given explicitly
if isinstance(gridk, float) or isinstance(gridk, int) :
n_bins = n_bins
max_grid_k = gridk
gridk = np.array(range(0,n_bins))/(n_bins-1) * max_grid_k
max_grid_s = grids
grids = np.array(range(0,n_bins))/(n_bins-1) * max_grid_s
shadow = np.zeros((len(gridk),len(grids)))
mask = np.full((len(gridk),len(grids)),False)
for i in range(len(self.fit_on)) :
estimates, out_of_bounds = self.kde(i, grids)
k_indices = np.searchsorted(gridk, estimates, side = 'left')
k_indices -= 1
shadow[(k_indices, range(0,len(k_indices)))] += self.measure[i]
for s_index, b in enumerate(out_of_bounds) :
if b :
mask[k_indices[s_index]][s_index] = True
shadow = shadow[::-1].cumsum(axis = 0)[::-1]
normalize_by = np.sum(self.measure[self.fit_on])
shadow /= normalize_by
mask = np.logical_or.accumulate(mask)
return Shadow(gridk, grids, shadow, mask)
def gamma_linkage(self, gamma, consistent = False, intrinsic_dim = 1) :
covariant = gamma.covariant
if self.metric == "precomputed" :
sl_dist = self.dist_mat.copy()
else :
sl_dist = pairwise_distances(self.points, metric = self.metric, p = self.p)
indices = np.arange(self.size)
core_scales = self.core_scale(indices, gamma)
sl_dist = gamma.t_component.inverse(sl_dist)
if not covariant :
sl_dist = np.minimum(sl_dist, core_scales)
sl_dist = np.minimum(sl_dist.T,core_scales).T
sl_dist[sl_dist < TOL] = TOL
sl_dist = np.reciprocal(sl_dist)
else :
sl_dist = np.maximum(sl_dist, core_scales)
sl_dist = np.maximum(sl_dist.T,core_scales).T
sl_dist[sl_dist > INF] = INF
sl = linkage(squareform(sl_dist, checks=False), 'single')
merges = sl[:,0:2].astype(int)
merges_heights = sl[:,2]
if not covariant :
merges_heights = np.reciprocal(merges_heights)
merges_heights[merges_heights >= INF/2] = np.infty
merges_heights[merges_heights <= TOL*2] = 0
ret = HierarchicalClustering(self.points, covariant, core_scales, merges, merges_heights, gamma.minr, gamma.maxr)
if consistent :
d = intrinsic_dim
if d == 1 :
if self.kernel == "square" :
cons = 2
elif self.kernel == "triangle" :
cons = 1
elif self.kernel == "epanechnikov" :
cons = 4/3
else :
if self.kernel == "square" :
cons = (np.pi ** (d/2)) / math.gamma(d/2 + 1)
elif self.kernel == "triangle" :
cons = (2 * np.pi**((d-1)/2))/(math.gamma((d-1)/2) * d * (d+1))
elif self.kernel == "epanechnikov" :
cons = (2 * np.pi**((d-1)/2) * 2)/(math.gamma((d-1)/2) * d * (d+2))
v_s = lambda s : cons * s**d
inverse_rescaling = np.vectorize(lambda r : gamma.k_component.func(r) / v_s(gamma.s_component.func(r)))
# new covariance is False
ret.reindex(inverse_rescaling, 0, np.inf, False)
return ret
def connection_radius(self) :
gamma = Gamma_curve.constant_k_alpha_s_indexed(0)
return self.gamma_linkage(gamma).start_and_end()[1]
class PersCluster:
def __init__(self, identifier=None, child=None, parents=None,
size=2, index=None, min_m=2, max_m=2):
self.id = identifier
self.child = child
self.parents = parents
self.size = size
self.index = index
self.min_m = min_m
self.max_m = max_m
self.score_summands = np.zeros(shape = max_m - min_m + 1, dtype=np.float64)
self.id_after_pruning = identifier
self.score_summand_after_pruning = 0
def reset_after_pruning(self):
self.id_after_pruning = self.id
self.score_summand_after_pruning = 0
def score(self, m):
return np.sum(self.score_summands[m - self.min_m:]) + self.score_summand_after_pruning
def update_score_summands(self, current_index):
if self.size >= self.min_m:
if self.size <= self.max_m:
self.score_summands[self.size - self.min_m] = self.size * np.abs(current_index - self.index)
else:
self.score_summands[self.max_m - self.min_m] += self.size * np.abs(current_index - self.index)
class PCTree:
def __init__(self, pers_clusters, pc_of_points, min_m, max_m, roots):
self.pers_clusters = pers_clusters
self.pc_of_points = pc_of_points
self.min_m = min_m
self.max_m = max_m
self.roots = roots
def point_ancestors_of_x_to_y(self, x, y, dictionary):
dictionary[x] = y
pc = self.pers_clusters[x]
if pc.parents != None:
for parent in pc.parents:
self.point_ancestors_of_x_to_y(parent, y, dictionary)
def optimal_clustering(self, ident, m):
pc = self.pers_clusters[ident]
if pc.parents == None:
if pc.size < m:
return [], 0
if pc.size >= m:
return [pc.id_after_pruning], pc.score(m)
else:
parent_0_id = pc.parents[0]
parent_1_id = pc.parents[1]
parent_0 = self.pers_clusters[parent_0_id]
parent_1 = self.pers_clusters[parent_1_id]
if parent_0.size >= m and parent_1.size >= m:
clustering_0, score_0 = self.optimal_clustering(parent_0_id, m)
clustering_1, score_1 = self.optimal_clustering(parent_1_id, m)
if score_0 + score_1 >= pc.score(m):
return clustering_0 + clustering_1, score_0 + score_1
else:
return [pc.id_after_pruning], pc.score(m)
if parent_0.size < m and parent_1.size >= m:
parent_1.id_after_pruning = pc.id_after_pruning
parent_1.score_summand_after_pruning = pc.score(m)
return self.optimal_clustering(parent_1_id, m)
if parent_1.size < m and parent_0.size >= m:
parent_0.id_after_pruning = pc.id_after_pruning
parent_0.score_summand_after_pruning = pc.score(m)
return self.optimal_clustering(parent_0_id, m)
if parent_0.size < m and parent_1.size < m:
if pc.size < m:
return [], 0
if pc.size >= m:
return [pc.id_after_pruning], pc.score(m)
def measure_based_flattening_PC(self, m, verbose=False,
allow_one_cluster=False):
if m < self.min_m:
raise Exception("m is smaller than min_m!")
if m > self.max_m:
raise Exception("m is larger than max_m!")
pc_of_points = self.pc_of_points.copy()
num_points = pc_of_points.shape[0]
result_ids = []
if allow_one_cluster:
# compute optimal clustering
for root in self.roots:
clustering, score = self.optimal_clustering(root, m)
result_ids += clustering
else:
if len(self.roots) == 1:
root = self.roots[0]
pc = self.pers_clusters[root]
pc.score_summand_after_pruning = -np.inf
#compute optimal clustering
clustering, score = self.optimal_clustering(root, m)
result_ids = clustering
else:
# compute optimal clustering
for root in self.roots:
clustering, score = self.optimal_clustering(root, m)
result_ids += clustering
# if x is a member of the optimal solution,
# and x' is an ancestor of x, then
# the points of x' should be in the same cluster as x
id_to_flat_id = {}
for count, ident in enumerate(result_ids):
self.point_ancestors_of_x_to_y(ident, count, id_to_flat_id)
for p in range(num_points):
if pc_of_points[p] in id_to_flat_id.keys():
pc_of_points[p] = id_to_flat_id[pc_of_points[p]]
else:
pc_of_points[p] = -1
# reset the after-pruning data of all persistent clusters,
# in case we flatten again with a different m
for key in self.pers_clusters.keys():
pc = self.pers_clusters[key]
pc.reset_after_pruning()
if verbose:
return pc_of_points, result_ids
else:
return pc_of_points
class HierarchicalClustering :
"""Implements a hierarchical clustering of a dataset"""
def __init__(self, X, covariant, heights, merges, merges_heights, minr, maxr) :
self.points = X
self.covariant = covariant
#self.dend = dend
self.merges = merges
self.merges_heights = merges_heights
self.heights = heights
self.maxr = maxr
self.minr = minr
def persistence_based_flattening(self, threshold = None, num_clusters = None) :
if threshold == None and num_clusters == None :
raise Exception("Either threshold or num_clusters must be given.")
if threshold != None and num_clusters != None :
warnings.warn("Both threshold and num_clusters given, using threshold.")
elif threshold == None :
bd = self.PD(end="infinity")[0]
pers = np.abs(bd[:,0] - bd[:,1])
if num_clusters + 1 > bd.shape[0] :
threshold = 0
else :
spers = np.sort(pers)
threshold = (spers[-num_clusters] + spers[-(num_clusters+1)])/2
heights = self.heights.copy()
merges_heights = self.merges_heights.copy()
if not self.covariant :
heights = -heights - TOL
merges_heights = -merges_heights
else :
heights = heights - TOL
# for numerical reasons, it may be that a point is merged before it appears,
# we subtract TOL, above, to make sure this doesn't happen
appearances = np.argsort(heights)
uf = UnionFind()
clusters_birth = {}
clusters_died = {}
clusters = []
hind = 0
mind = 0
n_points = heights.shape[0]
while True :
while hind < n_points and heights[appearances[hind]] <= merges_heights[mind] :
uf.find(appearances[hind])
clusters_birth[appearances[hind]] = heights[appearances[hind]]
hind += 1
if hind == n_points :
current_height = np.infty
else :
current_height = heights[appearances[hind]]
while mind < merges_heights.shape[0] and merges_heights[mind] < current_height :
xy = self.merges[mind]
x, y = xy
rx = uf.find(x)
ry = uf.find(y)
if rx not in clusters_died and ry not in clusters_died :
bx = clusters_birth[rx]
by = clusters_birth[ry]
if bx > merges_heights[mind] - threshold or by > merges_heights[mind] - threshold :
del clusters_birth[rx]
del clusters_birth[ry]
uf.union(x,y)
uf.union(x,mind + n_points)
rxy = uf.find(x)
clusters_birth[rxy] = min(bx, by)
else :
# they both must die
if clusters_birth[rx] + threshold <= merges_heights[mind] :
clusters.append(uf.equivalence_class(x))
if clusters_birth[ry] + threshold <= merges_heights[mind] :
clusters.append(uf.equivalence_class(y))
uf.union(x,y)
uf.union(x,mind + n_points)
rxy = uf.find(x)
clusters_died[rxy] = True
elif rx in clusters_died and ry in clusters_died :
# both of them are dead
uf.union(x,y)
uf.union(x,mind + n_points)
rxy = uf.find(x)
clusters_died[rxy] = True
else :
if rx in clusters_died :
x, y = y, x
rx, ry = ry, rx
# ry already died and rx just died
if clusters_birth[rx] + threshold <= merges_heights[mind] :
clusters.append(uf.equivalence_class(x))
uf.union(x,y)
uf.union(x,mind + n_points)
rxy = uf.find(x)
clusters_died[rxy] = True
mind += 1
if mind == merges_heights.shape[0] :
break
death = np.inf
#if self.covariant :
# death = np.inf
#else :
# death = -self.minr
for x in range(n_points) :
rx = uf.find(x)
if rx not in clusters_died :
if clusters_birth[rx] + threshold <= death :
clusters.append(uf.equivalence_class(x))
clusters_died[rx] = True
current_cluster = 0
res = np.full(n_points, -1)
for cl in clusters :
for x in cl :
if x < n_points :
res[x] = current_cluster
current_cluster += 1
return current_cluster, res
def reindex(self, inverse_rescaling, new_min, new_max, new_covariance) :
self.minr = new_min
self.maxr = new_max
self.covariant = new_covariance
self.merges_heights = inverse_rescaling(self.merges_heights)
self.heights = inverse_rescaling(self.heights)
def start_and_end(self) :
#returns the first and last things that happen in the hierarchical clustering
if self.covariant :
return np.min(self.heights), np.max(self.merges_heights)
else :
return np.max(self.heights), np.min(self.merges_heights)
def interleaving_distance(self, hc) :
"""Computes the interleaving distance between self and the given hierarchical clustering.\
Assumes that self and the given hierarchical clustering are defined over the same set."""
heights1 = self.heights
heights2 = hc.heights
merges1 = self.merges
merges2 = hc.merges
merges_heights1 = self.merges_heights.copy()
merges_heights2 = hc.merges_heights.copy()
if not self.covariant :
merges_heights1 = - merges_heights1
merges_heights2 = - merges_heights2
nmerges1 = len(merges1)
nmerges2 = len(merges2)
npoints = len(self.heights)
#to do: fail if different number of points
dist = np.amax(np.abs(heights1 - heights2))
# compute the asymmetric interleaving distance from 1 to 2
# to do: we assume that everything merges together right after the last thing that happens
# maybe this should be done at the level of dendrograms?
i = 0
epsilon1 = dist
uf1 = UnionFind()
uf1_ = UnionFind()
for xy,r,n in zip(merges1, merges_heights1, range(nmerges1)):
x,y = xy
uf1_.union(x,y)
uf1_.union(x,n + npoints)
while i < nmerges2 and merges_heights2[i] < r + epsilon1 :
uf1.union(merges2[i,0], merges2[i,1])
uf1.union(merges2[i,0], i + npoints)
i = i + 1
rx = uf1_.find(x)
ry = uf1_.find(y)
while i < nmerges2 and uf1.find(rx) != uf1.find(ry) :
epsilon1 = merges_heights2[i] - r
uf1.union(merges2[i,0], merges2[i,1])
uf1.union(merges2[i,0], i + npoints)
i = i + 1
i = 0
epsilon2 = epsilon1
uf2 = UnionFind()
uf2_ = UnionFind()
for xy,r,n in zip(merges2, merges_heights2, range(nmerges2)):
x,y = xy
uf2_.union(x,y)
uf2_.union(x,n + npoints)
while i < nmerges1 and merges_heights1[i] < r + epsilon2 :
uf2.union(merges1[i,0], merges1[i,1])
uf2.union(merges1[i,0], i + npoints)
i = i + 1
rx = uf2_.find(x)
ry = uf2_.find(y)
while i < nmerges1 and uf2.find(rx) != uf2.find(ry) :
epsilon2 = merges_heights1[i] - r
uf2.union(merges1[i,0], merges1[i,1])
uf2.union(merges1[i,0], i + npoints)
i = i + 1
return epsilon2
def PD(self, end = None) :
# ti is the terminal index:
# a point in the pd that never dies will have ti as its death index.
heights = self.heights.copy()
merges = self.merges.copy()
merges_heights = self.merges_heights.copy()
covariant = self.covariant
if end == "infinity" :
if covariant :
ti = INF
else :
ti = TOL
else :
if covariant :
ti = self.maxr
else :
ti = self.minr
num_points = heights.shape[0]
num_merges = merges.shape[0]
# initialize persistence diagram
# in the end, pers_diag[i, 0] will be the birth,
# and pers_diag[i, 1] will be the death
# of the point represented by the datapoint i.
# if pers_diag[i, :] = [-1, -1] at the end, we ignore it.
pers_diag = np.empty(shape=(num_points, 2), dtype=np.float64)
pers_diag.fill(-1)
# initialize an array of cluster representatives
# for the cluster i in the stepwise dendrogram,
# cluster_reps[i] is a datapoint in that cluster
cluster_reps = np.empty(shape=num_points + num_merges, dtype=np.int64)
cluster_reps.fill(-1)
# if the dendrogram is contravariant,
# we reindex by taking the reciprocal
if covariant == False:
heights = np.reciprocal(heights)
merges_heights[merges_heights < TOL] = TOL
merges_heights[merges_heights > INF] = INF
merges_heights = np.reciprocal(merges_heights)
if ti <= TOL:
ti = INF
elif ti >= INF :
ti = TOL
else:
ti = np.reciprocal(ti)
for i in range(num_merges):
cluster_0 = merges[i, 0]
cluster_1 = merges[i, 1]
# if both clusters are singletons
if cluster_0 < num_points and cluster_1 < num_points:
height_0 = heights[cluster_0]
height_1 = heights[cluster_1]
current_height = merges_heights[i]
# if cluster_0 was just born, but cluster_1 was already alive
if np.abs(height_0 - current_height) < TOL and np.abs(height_1 - current_height) >= TOL:
pers_diag[cluster_1, :] = [height_1, ti]
cluster_reps[num_points + i] = cluster_1
# if cluster_1 was just born, but cluster_0 was already alive
if np.abs(height_1 - current_height) < TOL and np.abs(height_0 - current_height) >= TOL:
pers_diag[cluster_0, :] = [height_0, ti]
cluster_reps[num_points + i] = cluster_0
# if cluster_0 and cluster_1 were just born
if np.abs(height_0 - current_height) < TOL and np.abs(height_1 - current_height) < TOL:
pers_diag[cluster_0, :] = [height_0, ti]
cluster_reps[num_points + i] = cluster_0
# if cluster_0 and cluster_1 were both already alive
if np.abs(height_0 - current_height) >= TOL and np.abs(height_1 - current_height) >= TOL:
# if cluster_1 is born first
if height_0 >= height_1:
pers_diag[cluster_0, :] = [height_0, current_height]
pers_diag[cluster_1, :] = [height_1, ti]
cluster_reps[num_points + i] = cluster_1
# if cluster_0 is born first
if height_0 < height_1:
pers_diag[cluster_0, :] = [height_0, ti]
pers_diag[cluster_1, :] = [height_1, current_height]
cluster_reps[num_points + i] = cluster_0
# if cluster_0 is a singleton and cluster_1 is not
if cluster_0 < num_points and cluster_1 >= num_points:
height_0 = heights[cluster_0]
rep_1 = cluster_reps[cluster_1]
height_1 = pers_diag[rep_1, 0]
current_height = merges_heights[i]
# if cluster_0 was just born
if np.abs(height_0 - current_height) < TOL:
cluster_reps[num_points + i] = rep_1
# if cluster_0 was already alive
if np.abs(height_0 - current_height) >= TOL:
# the singleton is younger than the cluster
if height_0 >= height_1:
pers_diag[cluster_0, :] = [height_0, current_height]
cluster_reps[num_points + i] = rep_1
# the singleton is older than the cluster
if height_0 < height_1:
pers_diag[cluster_0, :] = [height_0, ti]
pers_diag[rep_1, 1] = current_height
cluster_reps[num_points + i] = cluster_0
# if cluster_1 is a singleton and cluster_0 is not
if cluster_1 < num_points and cluster_0 >= num_points:
height_1 = heights[cluster_1]
rep_0 = cluster_reps[cluster_0]
height_0 = pers_diag[rep_0, 0]
current_height = merges_heights[i]
# if cluster_1 was just born
if np.abs(height_1 - current_height) < TOL:
cluster_reps[num_points + i] = rep_0
# if cluster_1 was already alive
if np.abs(height_1 - current_height) >= TOL:
# the singleton is younger than the cluster
if height_1 >= height_0:
pers_diag[cluster_1, :] = [height_1, current_height]
cluster_reps[num_points + i] = rep_0
# the singleton is older than the cluster
if height_1 < height_0:
pers_diag[cluster_1, :] = [height_1, ti]
pers_diag[rep_0, 1] = current_height
cluster_reps[num_points + i] = cluster_1
# if neither cluster is a singleton
if cluster_0 >= num_points and cluster_1 >= num_points:
rep_0 = cluster_reps[cluster_0]
height_0 = pers_diag[rep_0, 0]
rep_1 = cluster_reps[cluster_1]
height_1 = pers_diag[rep_1, 0]
current_height = merges_heights[i]
# cluster_0 is younger than cluster_1
if height_0 >= height_1:
pers_diag[rep_0, 1] = current_height
cluster_reps[num_points + i] = rep_1
# cluster_1 is younger than cluster_0
if height_0 < height_1:
pers_diag[rep_1, 1] = current_height
cluster_reps[num_points + i] = rep_0
# check if there are points in the dataset
# that never appeared in the dendrogram
appeared = np.zeros(shape=num_points, dtype=np.int64)
for i in range(num_merges):
cluster_0 = merges[i, 0]
cluster_1 = merges[i, 1]
if cluster_0 < num_points:
appeared[cluster_0] = 1
if cluster_1 < num_points:
appeared[cluster_1] = 1
for i in range(num_points):
if appeared[i] == 0:
pers_diag[i, :] = [heights[i], ti]
# remove all rows from the persistence diagram that were never modified
non_empty_indices = []
for i in range(num_points):
if pers_diag[i, 0] > -1:
non_empty_indices.append(i)
trimmed_pers_diag = np.empty(shape=(len(non_empty_indices), 2), dtype=np.float64)
#representatives = np.empty(shape=(len(non_empty_indices), 1), dtype=np.int32)
for i in range(len(non_empty_indices)):
trimmed_pers_diag[i, 0] = pers_diag[non_empty_indices[i], 0]
trimmed_pers_diag[i, 1] = pers_diag[non_empty_indices[i], 1]
if covariant == False:
trimmed_pers_diag[:, [0, 1]] = np.reciprocal(trimmed_pers_diag[:, [0, 1]])
trimmed_pers_diag[trimmed_pers_diag <= TOL*2] = 0
trimmed_pers_diag[trimmed_pers_diag >= INF/2] = np.infty
# set the death of the first born point to -infinity
if covariant == False and end == "infinity" :
#trimmed_pers_diag[np.argmax(trimmed_pers_diag[:,0]),1] = -np.infty
first_birth = np.max(trimmed_pers_diag[:,0])
first_born = np.argwhere(trimmed_pers_diag[:,0] > first_birth - TOL).flatten()
# of the first born, take the last to die
most_persistent = np.argmin(trimmed_pers_diag[first_born,1])
index = first_born[most_persistent]
trimmed_pers_diag[index,1] = -np.infty
non_trivial_points = np.abs(trimmed_pers_diag[:,0] - trimmed_pers_diag[:,1]) > TOL
return trimmed_pers_diag[non_trivial_points], np.array(non_empty_indices)[non_trivial_points]
def measure_based_flattening(self, m):
# compute poset of persistent clusters
X = self.PC(min_m = m, max_m = m)
return X.measure_based_flattening_PC(m = m)
def measure_based_flattening_several_m(self, min_m, max_m):
# compute poset of persistent clusters
X = self.PC(min_m = min_m, max_m = max_m)
# compute all measure-based flattenings
num_points = self.heights.shape[0]
labels = np.empty(shape=(max_m - min_m + 1, num_points), dtype=np.int64)
ids = {}
for m in range(min_m, max_m + 1):
labels_m, ids_m = X.measure_based_flattening_PC(m = m, verbose = True)
labels[m - min_m, :] = labels_m
ids[m] = ids_m
critical_m = [(min_m, len(ids[min_m]))]
for m in range(min_m + 1, max_m + 1):
if len(ids[m-1]) != len(ids[m]):
critical_m.append((m, len(ids[m])))
else:
if set(ids[m-1]) != set(ids[m]):
critical_m.append((m, len(ids[m])))
return labels, critical_m
# Returns the PC of the hierarchical clustering,
# after the measure-based pruning with m=2.
# From this, one can compute the measure-based flattening with any m
# such that min_m <= m <= max_m.
def PC(self, min_m, max_m):
covariant = self.covariant
if covariant :
ti = self.maxr
else :
ti = self.minr
num_points = self.heights.shape[0]
num_merges = self.merges.shape[0]
pc_of_points = np.empty(shape=num_points, dtype=np.int64)
pc_of_points.fill(-1)
X = PCTree(pers_clusters = {},
pc_of_points = pc_of_points,
min_m = min_m, max_m = max_m,
roots = [])
# Initialize an array of cluster identifiers.
# For the cluster i in the stepwise dendrogram,
# cluster_ids[i] will be the identifier of the persistent cluster
# to which i belongs.
cluster_ids = np.empty(shape=num_merges, dtype=np.int64)
cluster_ids.fill(-1)
for i in range(num_merges):
cluster_0 = self.merges[i, 0]
cluster_1 = self.merges[i, 1]
# if both clusters are singletons
if cluster_0 < num_points and cluster_1 < num_points:
# add persistent cluster to the PC
pers_cluster = PersCluster(identifier = i,
index = self.merges_heights[i],
min_m = min_m, max_m = max_m)
X.pers_clusters[i] = pers_cluster
# cluster i in the stepwise dendrogram belongs to
# the persistent cluster i
cluster_ids[i] = i
# both singletons belong to the persistent cluster i
pc_of_points[cluster_0] = i
pc_of_points[cluster_1] = i
# if cluster_0 is not a singleton and cluster_1 is a singleton
if cluster_0 >= num_points and cluster_1 < num_points:
# find the persistent cluster to which cluster_0 belongs
ident = cluster_ids[cluster_0 - num_points]
pc = X.pers_clusters[ident]
current_index = self.merges_heights[i]
# update the score of pc
pc.update_score_summands(current_index)
# update the index where pc was last visited
pc.index = current_index
# pc has increased in size, since cluster_1 was added
pc.size += 1
# cluster_1 belongs to pc
pc_of_points[cluster_1] = ident
# cluster i in the stepwise dendrogram belongs to
# the persistent cluster ident
cluster_ids[i] = ident
# if cluster_1 is not a singleton and cluster_0 is a singleton
if cluster_1 >= num_points and cluster_0 < num_points:
# find the persistent cluster to which cluster_1 belongs
ident = cluster_ids[cluster_1 - num_points]
pc = X.pers_clusters[ident]
current_index = self.merges_heights[i]
# update the score of pc
pc.update_score_summands(current_index)
# update the index where pc was last visited
pc.index = current_index
# pc has increased in size, since cluster_0 was added
pc.size += 1
# cluster_0 belongs to pc
pc_of_points[cluster_0] = ident
# cluster i in the stepwise dendrogram belongs to
# the persistent cluster ident
cluster_ids[i] = ident
# if both clusters are not singletons
if cluster_0 >= num_points and cluster_1 >= num_points:
# find the persistent cluster to which cluster_0 belongs
ident_0 = cluster_ids[cluster_0 - num_points]
pc_0 = X.pers_clusters[ident_0]
# find the persistent cluster to which cluster_1 belongs
ident_1 = cluster_ids[cluster_1 - num_points]
pc_1 = X.pers_clusters[ident_1]
current_index = self.merges_heights[i]
# update the score of pc_0
pc_0.update_score_summands(current_index)
# update the score of pc_1
pc_1.update_score_summands(current_index)
# Since pc_0 and pc_1 have merged,
# they create a child in X
pers_cluster = PersCluster(identifier = i,
parents = [ident_0, ident_1],
size = pc_0.size + pc_1.size,
index = current_index,
min_m = min_m, max_m = max_m)
X.pers_clusters[i] = pers_cluster
pc_0.child = i
pc_1.child = i
# cluster i in the stepwise dendrogram belongs to
# the persistent cluster i
cluster_ids[i] = i
# find the roots of the PC
for ident in X.pers_clusters.keys():
if X.pers_clusters[ident].child == None:
X.roots.append(ident)
# we have to finish computing the scores of root elements
current_index = ti
for root in X.roots:
# find the persistent cluster to which root belongs
pc = X.pers_clusters[root]
# update the score of pc
pc.update_score_summands(current_index)
return X
class Shadow :
# returns an empty shadow
def __init__(self, gridk, grids, matrix, mask) :
self.gridk = gridk
self.grids = grids
self.matrix = matrix
self.mask = mask
### UNION FIND
class UnionFind:
def __str__(self) :
return 'par: ' + str(self.parent) + '\n' +\
'rnk: ' + str(self.rank) + '\n' +\
'siz: ' + str(self.size) + '\n' +\
'rep: ' + str(self.representatives)
def __init__(self):
self.parent = {}
self.rank = {}
self.size = {}
self.representatives = set()
self.next = {}
def __copy__(self):
new_uf = UnionFind()
new_uf.parent = self.parent.copy()
new_uf.rank = self.rank.copy()
new_uf.size = self.size.copy()
new_uf.representatives = self.representatives.copy()
new_uf.next = self.next.copy()
return new_uf
def class_size(self, obj) :
root = self.find(obj)
return self.size[root]
def class_representatives(self) :
return self.representatives
def insert_object(self, obj):
if not obj in self.parent :
self.parent[obj] = obj
self.rank[obj] = 0
self.size[obj] = 1
self.representatives.add(obj)
self.next[obj] = obj
def find(self, obj):
if not obj in self.parent :
self.insert_object(obj)
return obj
if self.parent[obj] != obj :
self.parent[obj] = self.find(self.parent[obj])
return self.parent[obj]
def union(self, obj1, obj2):
root1 = self.find(obj1)
root2 = self.find(obj2)
if root1 == root2 :
return
if self.rank[root1] < self.rank[root2] :
root1, root2 = root2, root1
self.parent[root2] = root1
self.size[root1] = self.size[root1] + self.size[root2]
self.representatives.remove(root2)
self.next[root1], self.next[root2] = self.next[root2], self.next[root1]
if self.rank[root1] == self.rank[root2] :
self.rank[root1] = self.rank[root1] + 1
def equivalence_class(self, obj) :
next_obj = self.next[obj]
cl = [obj]
while next_obj != obj :
cl.append(next_obj)
next_obj = self.next[next_obj]
return cl
### CURVES
def line(slope, intercept, r) :
return slope * r + intercept
def constant(const, r) :
return const
def identity(r) :
return r
def times(alpha,r) :
return alpha * r
class Parametrized_interval :
def __init__(self, dom_min, dom_max, cod_min, cod_max, func, covariant, func_inv = None) :
# to do: check input
self.covariant = covariant
self.dom_min = dom_min
self.dom_max = dom_max
self.cod_min = cod_min
self.cod_max = cod_max
self.func = func
# could be None
self.func_inv = func_inv
if func_inv == None :
self.is_constant = True
else :
self.is_constant = False
def inverse(self, r) :
# to do: we don't assume the curve is invertible
# in the constant case self.cod_min == self.cod_max,
# make sure that the conditions are mutually exclusive
# and their union is everything
condlist = [ r < self.cod_min, r >= self.cod_max ]
if self.covariant :
choicelist = [ self.dom_min, self.dom_max ]
else :
choicelist = [ self.dom_max, self.dom_min ]
#to do: handle non-invertible curves better
if self.func_inv == None :
return np.select(condlist,choicelist, default = 0)
else:
return np.select(condlist,choicelist, default = self.func_inv(r))
def linear(dom_min, dom_max, cod_min, cod_max, slope, intercept, slope_inv = None, intercept_inv = None) :
dom_min = dom_min
dom_max = dom_max
cod_min = cod_min
cod_max = cod_max
func = lambda r : line(slope,intercept, r)
if slope_inv != None :
func_inv = lambda r : line(slope_inv,intercept_inv, r)
else :
func_inv = None
if slope >= 0 :
covariant = True
else :
covariant = False
return Parametrized_interval(dom_min, dom_max, cod_min, cod_max, func, covariant, func_inv = func_inv)
def linear_increasing(dom_min, dom_max, cod_min, cod_max) :
slope = (cod_max - cod_min)/(dom_max - dom_min)
intercept = cod_min - slope * dom_min
slope_inv = 1./slope
intercept_inv = -intercept * slope_inv
return Parametrized_interval.linear(dom_min, dom_max, cod_min, cod_max,\
slope, intercept, slope_inv = slope_inv, intercept_inv = intercept_inv)
def linear_decreasing(dom_min, dom_max, cod_min, cod_max) :
slope = (cod_min - cod_max)/(dom_max - dom_min)
intercept = cod_max - slope * dom_min
slope_inv = 1./slope
intercept_inv = -intercept * slope_inv
return Parametrized_interval.linear(dom_min, dom_max, cod_min, cod_max,\
slope, intercept, slope_inv = slope_inv, intercept_inv = intercept_inv)
def constant(dom_min, dom_max, const, covariant) :
# to do: test
dom_min = dom_min
dom_max = dom_max
cod_min = const
cod_max = const
func = lambda r : constant(const, r)
return Parametrized_interval(dom_min, dom_max, cod_min, cod_max, func, covariant)
def identity() :
dom_min = 0
dom_max = np.infty
cod_min = 0
cod_max = np.infty
func = identity
func_inv = func
return Parametrized_interval(dom_min, dom_max, cod_min, cod_max, func, True, func_inv = func_inv)
def times(alpha) :
dom_min = 0
dom_max = np.infty
cod_min = 0
cod_max = np.infty
func = lambda r : times(alpha, r)
func_inv = lambda r : times(1./alpha,r)
return Parametrized_interval(dom_min, dom_max, cod_min, cod_max, func, True, func_inv = func_inv)
class Gamma_curve :
def __init__(self, s_component, t_component, k_component) :
# to do: actually, it does make sense for both components to be constant
if s_component.is_constant and k_component.is_constant :
raise Exception("Both components shouldn't be constant.")
if s_component.is_constant :
self.covariant = not k_component.covariant
else :
self.covariant = s_component.covariant
self.s_component = s_component
self.t_component = t_component
self.k_component = k_component
#self.k_s_inv = None
# to do: check that domains coincide (and min should be 0)
self.maxr = s_component.dom_max
self.minr = 0
def linear_interpolator_alpha_k_indexed(k, s, alpha = 1) :
k_component = Parametrized_interval.linear_increasing(0,k,0,k)
s_component = Parametrized_interval.linear_decreasing(0,k,0,s)
t_component = Parametrized_interval.linear_decreasing(0,k,0,alpha * s)
return Gamma_curve(s_component,t_component,k_component)
def linear_interpolator_alpha_s_indexed(k, s, alpha = 1) :
k_component = Parametrized_interval.linear_decreasing(0,s,0,k)
s_component = Parametrized_interval.linear_increasing(0,s,0,s)
t_component = Parametrized_interval.linear_increasing(0,s,0,alpha * s)
return Gamma_curve(s_component,t_component,k_component)
def constant_k_alpha_s_indexed(k,alpha = 1, maxs = np.infty) :
k_component = Parametrized_interval.constant(0,maxs,k,False)
s_component = Parametrized_interval.identity()
t_component = Parametrized_interval.times(alpha)
return Gamma_curve(s_component,t_component,k_component)
def constant_s_t_k_indexed(s,t,maxk = np.infty) :
k_component = Parametrized_interval.identity()
s_component = Parametrized_interval.constant(0,maxk,s,False)
t_component = Parametrized_interval.constant(0,maxk,t,False)
return Gamma_curve(s_component,t_component,k_component)
### PLOTTING SHADOWS
def latex_float(f):
# https://stackoverflow.com/a/13490601/2171328
float_str = "{0:.2g}".format(f)
if "e" in float_str:
base, exponent = float_str.split("e")
#return r"{0} \times 10^{{{1}}}".format(base, int(exponent))
return r"{0}e{{{1}}}".format(base, int(exponent))
else:
return float_str
def ticks_in_scientific_notation(tks) :
#return [ "$" + latex_float(t) + "$" for t in tks]
return [ latex_float(t) for t in tks]
def plot_shadow(shadow, n_ticks = 11, n_shadows = 10, maxt = None, gammas = None, n_samples_curve_domain = 10000, h_size = 10, v_size = 5) :
#sns.set(rc={'text.usetex': True})
# set size of final picture
sns.set(rc={'figure.figsize':(h_size,v_size)})
# draw heat map
ax = sns.heatmap(np.flip(shadow.matrix,0), cmap = sns.color_palette("Blues", n_shadows), mask = np.flip(shadow.mask,0), rasterized=True)
ax.set_facecolor('Grey')
old_xticks = ax.get_xticks()
new_xticks = np.linspace(np.min(old_xticks), np.max(old_xticks), n_ticks)
new_xlabels = ticks_in_scientific_notation(np.linspace(shadow.grids[0], shadow.grids[-1], n_ticks))
ax.set_xticks(new_xticks)
ax.set_xticklabels(new_xlabels)
old_yticks = ax.get_yticks()
new_yticks = np.linspace(np.min(old_yticks), np.max(old_yticks), n_ticks)
new_ylabels = ticks_in_scientific_notation(
|
np.linspace(shadow.gridk[-1], shadow.gridk[0], n_ticks)
|
numpy.linspace
|
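numpy.linspace is the target. A small sketch of mapping n_ticks evenly spaced tick labels onto a heatmap axis, in the spirit of plot_shadow; grid size and range are illustrative:
import numpy as np
gridk = np.array(range(0, 250)) / 249 * 1.1                # same grid construction as the shadow
n_ticks = 11
tick_positions = np.linspace(0, 249, n_ticks)              # evenly spaced pixel positions
tick_labels = np.linspace(gridk[-1], gridk[0], n_ticks)    # descending, to match the flipped heatmap
print([f"{t:.2g}" for t in tick_labels])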
import unittest
import numpy as np
from railrl.misc import np_util
from railrl.testing.np_test_case import NPTestCase
class TestNpUtil(NPTestCase):
def test_softmax_1d(self):
values = np.array([1, 2])
denom_1 = np.exp(1) +
|
np.exp(2)
|
numpy.exp
|
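numpy.exp is the target. A minimal sketch of the 1-D softmax the test above checks, using the same two-element input:
import numpy as np
values = np.array([1.0, 2.0])
denom = np.exp(1.0) + np.exp(2.0)
expected = np.array([np.exp(1.0) / denom, np.exp(2.0) / denom])
softmax = np.exp(values) / np.sum(np.exp(values))
assert np.allclose(softmax, expected)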
####
#
# Copyright 2020. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
#
####
## label recognition
## Authors: <NAME> and <NAME>
# Standard packages
import argparse
import glob
import copy
import numpy as np
import re
# Image packages
from PIL import Image # Needed for reading TIF
import scipy.ndimage
from scipy.spatial import Delaunay
from skimage.color import rgb2gray
from skimage.filters import threshold_otsu
import skimage
from skimage.io import imsave
# Text packages
import pytesseract
### Alpha-shape calculation with helper functions ###
def edge_length(point_a, point_b):
"""
point_a and point_b are 2-tuples or lists of length 2,
representing endpoints of an edge. Returns the
Euclidean distance between point_a and point_b.
"""
return np.sqrt((point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2)
def add_alpha_edge(edges, i, j, only_outer=True):
"""
Add a line between the i-th and j-th points,
if not in the list already
"""
if (i, j) in edges or (j, i) in edges:
# already added
assert (j, i) in edges, "Alpha-shape: Can't go twice over the same directed edge"
if only_outer:
# if both neighboring triangles are in shape, it's not a boundary edge
edges.remove((j, i))
return
edges.add((i, j))
def alpha_shape(points, alpha, only_outer=True):
assert points.shape[0] > 3, "Alpha-shape needs at least four points"
tri = Delaunay(points)
edges = set()
for ia, ib, ic in tri.vertices:
pa = points[ia]
pb = points[ib]
pc = points[ic]
a = edge_length(pa, pb)
b = edge_length(pb, pc)
c = edge_length(pc, pa)
s = (a + b + c) / 2.0
area = np.sqrt(s * (s - a) * (s - b) * (s - c)) #overflow potential?
circum_r = a * b * c / (4.0 * area)
if circum_r < alpha:
add_alpha_edge(edges, ia, ib, only_outer)
add_alpha_edge(edges, ib, ic, only_outer)
add_alpha_edge(edges, ic, ia, only_outer)
return edges
### Helper functions ###
def draw_edges(points, edges, image_shape):
edge_imarray = np.zeros(image_shape, dtype=np.uint8)
for i,j in edges:
row_i, col_i = points[i]
row_j, col_j = points[j]
rows, cols = skimage.draw.line(row_i, col_i, row_j, col_j)
# or skimage.draw.line_aa(edge) for anti-aliased line
edge_imarray[rows, cols] = 1
return edge_imarray
def draw_mask(points, edges, image_shape):
# Start with the polygon outlines
mask_imarray = draw_edges(points, edges, image_shape)
# Then fill them in
mask_imarray = scipy.ndimage.binary_fill_holes(mask_imarray)
return mask_imarray
def pad_box(bounding_box, padding, image_shape):
"""
Add padding around given bounding box making sure not to exceed boundaries
of the image. bounding_box is 4-tuple (min_row, min_col, max_row, max_col).
Returns a new bounding_box as a 4-tuple.
"""
(min_row, min_col, max_row, max_col) = bounding_box
min_row = max(min_row - padding, 0)
min_col = max(min_col - padding, 0)
max_row = min(max_row + padding, image_shape[0])
max_col = min(max_col + padding, image_shape[1])
return (min_row, min_col, max_row, max_col)
def is_landscape(min_row, min_col, max_row, max_col):
# Returns true if box is in "landscape" mode (wider than tall), and
# false otherwise.
# Box should be rotated if it is taller than it is wide
height = max_row - min_row
width = max_col - min_col
return width > height
def save_image(filename, bin_array):
"""
Takes a binary array, inverts black/white, then saves to the given filename.
"""
imsave(filename, skimage.img_as_ubyte(np.logical_not(bin_array)), check_contrast=False)
return
### Label extraction function ###
def label_extraction(im, name, kernel_width,
region_size_min, region_size_max,
alpha, box_padding, output_dir,
regexp_filter=None,
save_regions=False, save_all_images=False):
"""
Finds the figure labels in the given image. Returns a list of bounding boxes and text strings of the
identified figure labels. Optionally, will save the images associated with the bounding box and/or
all images associated with intermediate image processing steps.
"""
# Convert image to array
orig_im = np.array(im)
imarray = copy.deepcopy(orig_im)
# If not grayscale, make it grayscale
if len(imarray.shape) != 2:
imarray = rgb2gray(imarray)
# Threshold foreground/background
threshold = threshold_otsu(imarray)
img_bw = imarray < threshold
if save_all_images:
save_image(output_dir + "bw_" + name + ".png", img_bw)
# Fill regions to segment
img_fill = scipy.ndimage.binary_fill_holes(img_bw)
#img_fill = np.array(img_fill, dtype=np.uint8)
if save_all_images:
save_image(output_dir + "fill_" + name + ".png", img_fill)
# Find connected components
# Use skimage instead of opencv for consistency and reduced package requirements
pixels_labeled_by_component = skimage.measure.label(img_fill)
regions_list = skimage.measure.regionprops(pixels_labeled_by_component)
sizes = [x.area for x in regions_list]
nb_components = len(regions_list)
# Filter connected components based on size
label_candidate_pixels = np.zeros((imarray.shape))
for i in range(nb_components):
if sizes[i] <= region_size_max and sizes[i] >= region_size_min:
label_candidate_pixels[pixels_labeled_by_component == i + 1] = 1
if save_all_images:
save_image(output_dir + "candidate_" + name + ".png", label_candidate_pixels)
# Check if any pixels are potentially text
if not label_candidate_pixels.any():
print("No text found in image " + name)
return (None, None)
# Erosion (dilation on inverse image) (dashed-line removal)
kernel = np.ones((1,kernel_width), np.uint8) # horizontal kernel
label_area = skimage.morphology.binary_erosion(label_candidate_pixels, kernel)
kernel = np.ones((kernel_width,1), np.uint8) # vertical kernel
label_area = skimage.morphology.binary_erosion(label_area, kernel)
if save_all_images:
save_image(output_dir + "dashgone_" + name + ".png", label_area)
# Find alpha-shape (non-convex hull)
points = np.transpose(np.where(label_area == 1))
# Alpha-shape needs at least 4 points
if not points.shape[0] > 3:
print("No text found in image " + name)
return (None, None)
try:
edges = alpha_shape(points, alpha=alpha, only_outer=True)
except:
print("Alpha shape failed for " + name)
return (None, None)
# Get mask of alpha-shapes
edges_imarray = draw_edges(points, edges, label_area.shape)
mask_imarray = draw_mask(points, edges, label_area.shape)
if save_all_images:
save_image(output_dir + "alphamask_" + name + ".png", mask_imarray)
save_image(output_dir + "alpha_" + name + ".png", edges_imarray)
# Get bounding boxes
pixels_labeled_by_component = skimage.measure.label(mask_imarray)
regions_list = skimage.measure.regionprops(pixels_labeled_by_component, orig_im)
boxes = [] # list of results
labels = []
for i, region in enumerate(regions_list):
bounding_box = region.bbox # (min_row, min_col, max_row, max_col)
convex_mask = region.convex_image # binary image with same size as bounding_box
# Check if there is any difference between the bounding box and the convex mask
# i.e. noise pixels between the convex mask and the bounding box
#region_image = region.intensity_image # not working as expected, so do it explicitly
if box_padding > 0:
bounding_box = pad_box(bounding_box, box_padding, imarray.shape)
(min_row, min_col, max_row, max_col) = bounding_box
region_image = skimage.img_as_ubyte(orig_im[min_row:max_row, min_col:max_col])
# OCR the region by masking the whole image (OCR works better as full-page than on small image)
# First check if it needs to be rotated
if not is_landscape(min_row, min_col, max_row, max_col):
region_image = skimage.transform.rotate(region_image, 270, resize=True)
# Make blank image the size of the original image
ocr_img = np.array(np.ones(imarray.shape) * np.amax(orig_im), dtype=np.uint8)
# Copy region into blank image
ocr_img[min_row:max_row, min_col:max_col] =
|
np.array(orig_im[min_row:max_row, min_col:max_col], dtype=np.uint8)
|
numpy.array
|
import networkx as nx
import numpy as np
from tqdm import tqdm
from pathlib import Path
from argparse import ArgumentParser
import os
from datetime import datetime
import pickle
from torch.utils.tensorboard import SummaryWriter
import pandas as pd
import operator
import matplotlib.pyplot as plt
def analyze_results(rootdir='results', dstdir='analysis', filepattern='experiment_results.pkl',
tensorboard=False, tb_k_best=1, csv=False, final_adaptive=False, plot=False,
starting_policies_abspath=None, avg=False):
# make directory where to save analysis files - tables, tensorboard etc.
if not os.path.exists(dstdir):
os.makedirs(dstdir)
# load all results files stored in the rootdir
summary = []
for path in tqdm(Path(rootdir).rglob(filepattern), desc='Loading files'):
with open(path, 'rb') as f:
res = pickle.load(f)
summary.append(res)
def str_hparams(hparams_dict):
""" Serialize predefined key-value pairs into a string,
useful to define tensorboard logdirs,
such that configs can be identified and filtered on tensorboard scalars tab
:param hparams_dict: a dictionary of hparam, value pairs.
        :returns s: a string consisting of acronyms of the keys and their corresponding values.
"""
short_keys = {
'policy': 'plc',
# MCCORMIC_CYCLE_SEPARATOR PARAMETERS
'max_per_node': 'mpnd',
'max_per_round': 'mprd',
'criterion': 'crt',
'max_per_root': 'mprt',
'forcecut': 'fct',
# SCIP SEPARATING PARAMETERS
'objparalfac': 'opl',
'dircutoffdistfac': 'dcd',
'efficacyfac': 'eff',
'intsupportfac': 'isp',
'maxcutsroot': 'mcr',
}
s = 'cfg'
for k, sk in short_keys.items():
v = hparams_dict.get(k, None)
if v is not None:
s += '-{}={}'.format(sk, v)
return s
##### PARSING LOG FILES #####
# parse the experiment result files
results = {} # stats of cycle inequalities policies
baselines = {} # stats of some predefined baselines
datasets = {} # metadata for grouping/parsing results
analysis = {} # return dict containing some important info
# statistics are stored in results/baselines dictionaries in the following hierarchy
# results[<dataset str>][<config tuple>][<stat_key str>][<graph_idx int>][<seed int>]
for s in tqdm(summary, desc='Parsing files'):
dataset = s['config']['data_abspath'].split('/')[-1] # the name of the dataset
if dataset not in datasets.keys():
print('Adding dataset ', dataset)
analysis[dataset] = {}
datasets[dataset] = {}
datasets[dataset]['config_keys'] = [k for k in s['config'].keys() if k != 'scip_seed' and k != 'graph_idx' and k != 'sweep_config' and k != 'data_abspath' and k != 'starting_policies_abspath']
# store these two to ensure that all the experiments completed successfully.
datasets[dataset]['scip_seeds'] = set(s['config']['sweep_config']['sweep']['scip_seed']['values'])
datasets[dataset]['graph_idx_range'] = list(range(s['config']['sweep_config']['sweep']['graph_idx']['range']))
datasets[dataset]['missing_experiments'] = []
datasets[dataset]['sweep_config'] = s['config']['sweep_config']
datasets[dataset]['configs'] = {}
datasets[dataset]['experiment'] = s['experiment']
datasets[dataset]['optimal_values'] = {}
datasets[dataset]['baseline'] = {}
datasets[dataset]['max_lp_iterations'] = {graph_idx: {}
for graph_idx in
range(s['config']['sweep_config']['sweep']['graph_idx']['range'])}
datasets[dataset]['best_dualbound'] = {graph_idx: {}
for graph_idx in
range(s['config']['sweep_config']['sweep']['graph_idx']['range'])}
results[dataset] = {}
baselines[dataset] = {}
        # in case some scip seed was missing when the dictionary was created
datasets[dataset]['scip_seeds'].update(s['config']['sweep_config']['sweep']['scip_seed']['values'])
graph_idx = s['config']['graph_idx']
scip_seed = s['config']['scip_seed']
# read and update the instance optimal value (MAXCUT)
if graph_idx not in datasets[dataset]['optimal_values'].keys():
# read the annotated graph and update its optimal value if any
filepath = os.path.join(s['config']['data_abspath'], 'graph_idx_{}.pkl'.format(graph_idx))
# filepath = 'data/barabasi-albert-n50-m10-weights-normal-seed100/graph_idx_0.pkl'
with open(filepath, 'rb') as f:
G, baseline = pickle.load(f)
cut = nx.get_edge_attributes(G, 'cut')
# if len(cut) > 0:
# weight = nx.get_edge_attributes(G, 'weight')
# datasets[dataset]['optimal_values'][graph_idx] = sum([weight[e] for e in G.edges if cut[e]])
# else:
# datasets[dataset]['optimal_values'][graph_idx] = 0 # default
datasets[dataset]['optimal_values'][graph_idx] = baseline['optimal_value'] # cut_selection_dqn experiment file format
datasets[dataset]['baseline'][graph_idx] = baseline # cut_selection_dqn experiment file format
# set baselines policy string for plots legend:
if s['config']['maxcutsroot'] == 2000 and \
s['config']['intsupportfac'] == 0.1 and \
s['config']['efficacyfac'] == 1 and \
s['config']['dircutoffdistfac'] == 0.5 and \
s['config']['objparalfac'] == 0.1 and \
not s['config']['forcecut'] and \
s['config']['policy'] != 'adaptive':
s['config']['policy'] = 'default_cut_selection'
elif s['config']['policy'] == 'baseline' and s['config']['max_per_root'] == 0:
s['config']['policy'] = 'no_cycles'
elif s['config']['policy'] == 'baseline' and s['config']['max_per_round'] > 0:
s['config']['policy'] = 'force{}{}'.format(s['config']['max_per_round'], s['config']['criterion'])
elif s['config']['policy'] == 'baseline' and s['config']['max_per_round'] == -1:
s['config']['policy'] = 'force_all_cycles'
# create a hashable config identifier
config = tuple([s['config'][k] for k in datasets[dataset]['config_keys']])
# create skeleton for storing stats collected from experiments with config
if config not in datasets[dataset]['configs'].keys():
datasets[dataset]['configs'][config] = s['config']
if s['config']['policy'] == 'expert' or (s['config']['policy'] == 'adaptive' and not final_adaptive):
results[dataset][config] = {stat_key: {graph_idx: {}
for graph_idx in range(s['config']['sweep_config']['sweep']['graph_idx']['range'])}
for stat_key in s['stats'].keys()}
else:
baselines[dataset][config] = {stat_key: {graph_idx: {}
for graph_idx in range(s['config']['sweep_config']['sweep']['graph_idx']['range'])}
for stat_key in s['stats'].keys()}
# now store the experiment results in the appropriate dictionary
dictionary = results if s['config']['policy'] == 'expert' or (s['config']['policy'] == 'adaptive' and not final_adaptive) else baselines
for stat_key, value in s['stats'].items():
dictionary[dataset][config][stat_key][graph_idx][scip_seed] = value
##### PROCESSING RESULTS #####
# if an experiment is missing, generate its configuration and append to missing_experiments
# the script will generate a configuration file, and command line to run in order to
# accomplish all the missing experiments
for dataset in datasets.keys():
bsl = baselines[dataset]
res = results[dataset]
max_lp_iterations = datasets[dataset]['max_lp_iterations']
best_dualbound = datasets[dataset]['best_dualbound']
###########################################################################
# 1. find missing experiments, and by the way,
# store the best_dualbound and max_lp_iterations for each graph and seed
###########################################################################
for dictionary in [bsl, res]:
for config, stats in tqdm(dictionary.items(), desc='Detecting missing experiments'):
# compute the integral of dual_bound w.r.t lp_iterations
# report missing seeds/graphs
missing_graph_and_seed = []
dualbounds = stats['dualbound']
lp_iterations = stats['lp_iterations']
for graph_idx in datasets[dataset]['graph_idx_range']:
for scip_seed in datasets[dataset]['scip_seeds']:
if scip_seed not in dualbounds[graph_idx].keys():
if (graph_idx, scip_seed) not in missing_graph_and_seed:
experiment_config = datasets[dataset]['configs'][config].copy()
experiment_config['graph_idx'] = graph_idx
experiment_config['scip_seed'] = scip_seed
datasets[dataset]['missing_experiments'].append(experiment_config)
missing_graph_and_seed.append((graph_idx, scip_seed))
continue
# find the best dualbound achieved and the maximal lp_iterations
max_lp_iterations[graph_idx][scip_seed] = max(max_lp_iterations[graph_idx].get(scip_seed, 0),
lp_iterations[graph_idx][scip_seed][-1])
best_dualbound[graph_idx][scip_seed] = min(best_dualbound[graph_idx].get(scip_seed, 0),
dualbounds[graph_idx][scip_seed][-1])
###############################################################################################
# 2. for each config, graph and seed, compute the dualbound integral w.r.t the lp_iterations.
# then, compute the mean and std across all seeds within graphs,
# and also std of stds across all graphs
###############################################################################################
for dictionary in [bsl, res]:
for config, stats in tqdm(dictionary.items(), desc='Computing dualbound integral'):
dualbounds = stats['dualbound']
lp_iterations = stats['lp_iterations']
all_values = [] # all dualbound integrals to compute overall average
all_stds = [] # all graph-wise integral std to compute std of stds
stats['dualbound_integral'] = {}
stats['cuts_applied'] = {}
stats['cuts_applied_normalized'] = {}
stats['cuts_generated'] = {}
stats['cuts_generated_normalized'] = {}
stats['maxcutsroot'] = {}
for graph_idx in datasets[dataset]['graph_idx_range']:
values = [] # dualbound integrals of the current graph to compute average and std across seeds
stats['dualbound_integral'][graph_idx] = {}
stats['cuts_applied'][graph_idx] = {}
stats['cuts_applied_normalized'][graph_idx] = {}
stats['cuts_generated'][graph_idx] = {}
stats['cuts_generated_normalized'][graph_idx] = {}
stats['maxcutsroot'][graph_idx] = {}
for scip_seed in dualbounds[graph_idx].keys():
# compute some more stats:
cuts_applied = np.array(stats['ncuts_applied'][graph_idx][scip_seed])
cuts_generated = np.array(stats['ncuts'][graph_idx][scip_seed])
cuts_applied[1:] -= cuts_applied[:-1]
cuts_generated[1:] -= cuts_generated[:-1]
hp = datasets[dataset]['configs'][config]
if hp['policy'] == 'adaptive' and final_adaptive:
with open(starting_policies_abspath, 'rb') as f:
sp = pickle.load(f)
freq = hp['policy_update_freq']
n_iter = hp['n_policy_iterations']
maxcutsroot = [sp[it]['maxcutsroot'] for it in range(n_iter) for rnd in range(freq)]
if len(maxcutsroot) > len(cuts_applied):
maxcutsroot = maxcutsroot[:len(cuts_applied)]
else:
maxcutsroot += [2000]*(len(cuts_applied) - len(maxcutsroot))
maxcutsroot = np.array(maxcutsroot)
else:
maxcutsroot = np.array([hp['maxcutsroot']]*len(cuts_applied))
cuts_applied_normalized = cuts_applied / maxcutsroot
cuts_generated_normalized = cuts_generated / maxcutsroot
stats['cuts_applied'][graph_idx][scip_seed] = cuts_applied.tolist()
stats['cuts_generated'][graph_idx][scip_seed] = cuts_generated.tolist()
stats['cuts_applied_normalized'][graph_idx][scip_seed] = cuts_applied_normalized.tolist()
stats['cuts_generated_normalized'][graph_idx][scip_seed] = cuts_generated_normalized.tolist()
stats['maxcutsroot'][graph_idx][scip_seed] = maxcutsroot.tolist()
# the integral support is [0, max_lp_iterations]
# TODO: check if extension is saved to the source object.
support_end = max_lp_iterations[graph_idx][scip_seed]
dualbound = dualbounds[graph_idx][scip_seed]
lp_iter = lp_iterations[graph_idx][scip_seed]
if lp_iter[-1] < support_end:
lp_iter.append(support_end)
# dualbound.append(dualbound[-1])
# extend all stats with their last value (constant extension)
for k, s in stats.items():
if k not in ['dualbound_integral', 'lp_iterations']:
s[graph_idx][scip_seed].append(s[graph_idx][scip_seed][-1])
dualbound = np.array(dualbound)
# compute the lp iterations executed at each round to compute the dualbound_integral by Riemann sum
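                        # i.e. integral ~= dualbound[0]*lp_iter[0] + sum_i dualbound[i]*(lp_iter[i] - lp_iter[i-1]),
                        # a right-endpoint Riemann sum over the support [0, support_end]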
lp_iter_intervals = np.array(lp_iter)
lp_iter_intervals[1:] -= lp_iter_intervals[:-1]
integral = np.sum(dualbound * lp_iter_intervals)
stats['dualbound_integral'][graph_idx][scip_seed] = integral
values.append(integral)
all_values.append(integral)
if len(values) > 0:
# compute the average and std of the integral across seeds, and store in stats
stats['dualbound_integral'][graph_idx]['avg'] = np.mean(values)
stats['dualbound_integral'][graph_idx]['std'] = np.std(values)
all_stds.append(
|
np.std(values)
|
numpy.std
|
import sys, os
cur_file_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(cur_file_path, '..'))
import importlib, time, math, shutil, json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utils.logging import Logger, mkdir
from utils.transforms import rotation_matrix_to_angle_axis, batch_rodrigues
from datasets.amass_utils import CONTACT_INDS
from body_model.utils import SMPL_JOINTS, KEYPT_VERTS, smpl_to_openpose
from fitting.fitting_utils import OP_IGNORE_JOINTS, parse_floor_plane, compute_cam2prior, OP_EDGE_LIST, log_cur_stats
from fitting.fitting_loss import FittingLoss
import gc
LINE_SEARCH = 'strong_wolfe'
J_BODY = len(SMPL_JOINTS) - 1 # no root
CONTACT_THRESH = 0.5
class MotionOptimizer():
''' Fits SMPL shape and motion to observation sequence '''
def __init__(
self,
device,
body_model, # SMPL model to use (its batch_size should be B*T)
num_betas, # beta size in SMPL model
batch_size, # number of sequences to optimize
seq_len, # length of the sequences
observed_modalities, # list of the kinds of observations to use
loss_weights, # dict of weights for each loss term
pose_prior, # VPoser model
motion_prior=None, # humor model
init_motion_prior=None, # dict of GMM params to use for prior on initial motion state
optim_floor=False, # if true, optimize the floor plane along with body motion (need 2d observations)
camera_matrix=None, # camera intrinsics to use for reprojection if applicable
robust_loss_type='none',
robust_tuning_const=4.6851,
joint2d_sigma=100,
stage3_tune_init_state=True,
stage3_tune_init_num_frames=15,
stage3_tune_init_freeze_start=30,
stage3_tune_init_freeze_end=50,
stage3_contact_refine_only=False,
use_chamfer=False,
im_dim=(1080, 1080)): # image dimensions to use for visualization
B, T = batch_size, seq_len
self.batch_size = B
self.seq_len = T
self.body_model = body_model
self.num_betas = num_betas
self.optim_floor = optim_floor
self.stage3_tune_init_state = stage3_tune_init_state
self.stage3_tune_init_num_frames = stage3_tune_init_num_frames
self.stage3_tune_init_freeze_start = stage3_tune_init_freeze_start
self.stage3_tune_init_freeze_end = stage3_tune_init_freeze_end
self.stage3_contact_refine_only = stage3_contact_refine_only
self.im_dim = im_dim
#
# create the optimization variables
#
# number of states to explicitly optimize for
# For first stages this will always be the full sequence
num_state_steps = T
# latent body pose
self.pose_prior = pose_prior
self.latent_pose_dim = self.pose_prior.latentD
self.latent_pose = torch.zeros(
(B, num_state_steps, self.latent_pose_dim)).to(device)
# root (global) transformation
self.trans = torch.zeros((B, num_state_steps, 3)).to(device)
self.root_orient = torch.zeros(
(B, num_state_steps, 3)).to(device) # aa parameterization
self.root_orient[:, :, 0] = np.pi
# body shape
self.betas = torch.zeros(
(B, num_betas)).to(device) # same shape for all steps
self.motion_prior = motion_prior
self.init_motion_prior = init_motion_prior
self.latent_motion = None
if self.motion_prior is not None:
# need latent dynamics sequence as well
self.latent_motion_dim = self.motion_prior.latent_size
self.cond_prior = self.motion_prior.use_conditional_prior
# additional optimization params to set later
self.trans_vel = None
self.root_orient_vel = None
self.joints_vel = None
else:
Logger.log(
'Need the motion prior to use all-implicit parameterization!')
exit()
self.init_fidx = np.zeros(
(B)
) # the frame chosen to use for the initial state (first frame by default)
self.cam_f = self.cam_center = None
if self.optim_floor:
if camera_matrix is None:
Logger.log(
'Must have camera intrinsics (camera_matrix) to optimize the floor plane!'
)
exit()
# NOTE: we assume a static camera, so we optimize the params of the floor plane instead of camera extrinsics
self.floor_plane = torch.zeros(
(B,
3)).to(device) # normal vector (a, b, c) scaled by offset (d)
self.floor_plane[:, 2] = 1.0 # up axis initially
# will not be optimized, extra auxiliary variables which are determined from the floor plane and root orient pose
# we only have one transformation for the chosen "key" frame in the sequence
self.cam2prior_R = torch.eye(3).reshape((1, 3, 3)).expand(
(B, 3, 3)).to(device)
self.cam2prior_t = torch.zeros((B, 3)).to(device)
self.cam2prior_root_height = torch.zeros((B, 1)).to(device)
cam_fx = camera_matrix[:, 0, 0]
cam_fy = camera_matrix[:, 1, 1]
cam_cx = camera_matrix[:, 0, 2]
cam_cy = camera_matrix[:, 1, 2]
# focal length and center are same for all timesteps
self.cam_f = torch.stack([cam_fx, cam_fy], dim=1)
self.cam_center = torch.stack([cam_cx, cam_cy], dim=1)
self.use_camera = self.cam_f is not None and self.cam_center is not None
#
# create the loss function
#
self.smpl2op_map = smpl_to_openpose(body_model.model_type,
use_hands=False,
use_face=False,
use_face_contour=False,
openpose_format='coco25')
self.fitting_loss = FittingLoss(loss_weights,
self.init_motion_prior,
self.smpl2op_map,
OP_IGNORE_JOINTS,
self.cam_f,
self.cam_center,
robust_loss_type,
robust_tuning_const,
joints2d_sigma=joint2d_sigma,
use_chamfer=use_chamfer).to(device)
def initialize(self, observed_data):
if self.optim_floor:
# initialize the floor
# assumes observed floor is (a, b, c, d) where (a, b, c) is the normal and (d) the offset
floor_normals = observed_data['floor_plane'][:, :3]
floor_offsets = observed_data['floor_plane'][:, 3:]
self.floor_plane = floor_normals * floor_offsets
self.floor_plane = self.floor_plane.to(
torch.float).clone().detach()
self.floor_plane.requires_grad = True
# optimizing from 2D data, must initialize cam/body trans
if 'points3d' in observed_data:
# initialize with mean of point cloud
point_seq = observed_data['points3d'] # B x T x N x 3
self.trans = torch.mean(point_seq, dim=2).clone().detach()
elif 'joints2d' in observed_data:
# only have RGB data to use
# use focal length and bone lengths to approximate
# (based on PROX https://github.com/mohamedhassanmus/prox/blob/master/prox/fitting.py)
# get 3D joints mapped to OpenPose
body_pose = self.latent2pose(self.latent_pose)
pred_data, _ = self.smpl_results(self.trans, self.root_orient,
body_pose, self.betas)
joints3d_full = torch.cat(
[pred_data['joints3d'], pred_data['joints3d_extra']],
dim=2)
joints3d_op = joints3d_full[:, :, self.smpl2op_map, :]
# openpose observations
joints2d_obs = observed_data['joints2d'][:, :, :, :2]
joints2d_conf = observed_data['joints2d'][:, :, :, 2]
# find least-occluded 2d frame
num_2d_vis = torch.sum(joints2d_conf > 0.0, dim=2)
best_2d_idx = torch.max(num_2d_vis, dim=1)[1]
# calculate bone lengths and confidence in each bone length
bone3d = []
bone2d = []
conf2d = []
for pair in OP_EDGE_LIST:
diff3d = torch.norm(joints3d_op[:, 0, pair[0], :] -
joints3d_op[:, 0, pair[1], :],
dim=1) # does not change over time
diff2d = torch.norm(joints2d_obs[:, :, pair[0], :] -
joints2d_obs[:, :, pair[1], :],
dim=2)
minconf2d = torch.min(joints2d_conf[:, :, pair[0]],
joints2d_conf[:, :, pair[1]])
bone3d.append(diff3d)
bone2d.append(diff2d)
conf2d.append(minconf2d)
bone3d = torch.stack(bone3d, dim=1)
bone2d = torch.stack(bone2d, dim=2)
bone2d = bone2d[np.arange(self.batch_size), best_2d_idx, :]
conf2d = torch.stack(conf2d, dim=2)
conf2d = conf2d[np.arange(self.batch_size), best_2d_idx, :]
# mean over all
mean_bone3d = torch.mean(bone3d, dim=1)
mean_bone2d = torch.mean(bone2d * (conf2d > 0.0), dim=1)
# approx z based on ratio
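                # pinhole approximation: bone2d ~= f * bone3d / z  =>  z ~= f * (bone3d / bone2d)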
init_z = self.cam_f[:, 0] * (mean_bone3d / mean_bone2d)
self.trans[:, :, 2] = init_z.unsqueeze(1).expand(
(self.batch_size, self.seq_len)).detach()
def run(self,
observed_data,
data_fps=30,
lr=1.0,
num_iter=[30, 70, 70],
lbfgs_max_iter=30,
stages_res_out=None,
fit_gender='neutral'):
if len(num_iter) != 3:
print(
'Must have num iters for 3 stages! But %d stages were given!' %
(len(num_iter)))
exit()
per_stage_outputs = {} # SMPL results after each stage
out_names = [i.split("/")[-1] for i in stages_res_out]
#
# Initialization
#
self.initialize(observed_data)
#
# Stage I: Only global root and orientation
#
Logger.log(
            'Optimizing stage 1 - global root translation and orientation for %d iterations...'
% (num_iter[0]))
self.fitting_loss.set_stage(0)
self.trans.requires_grad = True
self.root_orient.requires_grad = True
self.betas.requires_grad = False
self.latent_pose.requires_grad = False
root_opt_params = [self.trans, self.root_orient]
root_optim = torch.optim.LBFGS(root_opt_params,
max_iter=lbfgs_max_iter,
lr=lr,
line_search_fn=LINE_SEARCH)
for i in range(num_iter[0]):
Logger.log('ITER: %d' % (i))
self.fitting_loss.cur_optim_step = i
stats_dict = None
def closure():
root_optim.zero_grad()
pred_data = dict()
# Use current params to go through SMPL and get joints3d, verts3d, points3d
body_pose = self.latent2pose(self.latent_pose)
pred_data, _ = self.smpl_results(self.trans, self.root_orient,
body_pose, self.betas)
# compute data losses only
loss, stats_dict = self.fitting_loss.root_fit(
observed_data, pred_data)
# log_cur_stats(stats_dict, loss, iter=i)
loss.backward()
return loss
root_optim.step(closure)
print(
f"Stage I: ======= iter {i}: {observed_data['seq_interval'].cpu().numpy().tolist()} {out_names}======="
)
body_pose = self.latent2pose(self.latent_pose)
stage1_pred_data, _ = self.smpl_results(self.trans, self.root_orient,
body_pose, self.betas)
per_stage_outputs['stage1'] = stage1_pred_data
if stages_res_out is not None:
res_betas = self.betas.clone().detach().cpu().numpy()
res_trans = self.trans.clone().detach().cpu().numpy()
res_root_orient = self.root_orient.clone().detach().cpu().numpy()
res_body_pose = body_pose.clone().detach().cpu().numpy()
for bidx, res_out_path in enumerate(stages_res_out):
cur_res_out_path = os.path.join(res_out_path,
'stage1_results.npz')
np.savez(cur_res_out_path,
betas=res_betas[bidx],
trans=res_trans[bidx],
root_orient=res_root_orient[bidx],
pose_body=res_body_pose[bidx])
gc.collect()
#
# Stage II full pose and shape
#
Logger.log(
'Optimizing stage 2 - full shape and pose for %d iterations..' %
(num_iter[1]))
self.fitting_loss.set_stage(1)
self.trans.requires_grad = True
self.root_orient.requires_grad = True
self.betas.requires_grad = True
self.latent_pose.requires_grad = True
smpl_opt_params = [
self.trans, self.root_orient, self.betas, self.latent_pose
]
smpl_optim = torch.optim.LBFGS(smpl_opt_params,
max_iter=lbfgs_max_iter,
lr=lr,
line_search_fn=LINE_SEARCH)
for i in range(num_iter[1]):
Logger.log('ITER: %d' % (i))
def closure():
smpl_optim.zero_grad()
pred_data = dict()
# Use current params to go through SMPL and get joints3d, verts3d, points3d
body_pose = self.latent2pose(self.latent_pose)
pred_data, _ = self.smpl_results(self.trans, self.root_orient,
body_pose, self.betas)
pred_data['latent_pose'] = self.latent_pose
pred_data['betas'] = self.betas
# compute data losses and pose prior
loss, stats_dict = self.fitting_loss.smpl_fit(
observed_data, pred_data, self.seq_len)
# log_cur_stats(stats_dict, loss, iter=i)
loss.backward()
return loss
print(
f"Stage II: ======= iter {i}: {observed_data['seq_interval'].cpu().numpy().tolist()} {out_names}======="
)
smpl_optim.step(closure)
body_pose = self.latent2pose(self.latent_pose)
stage2_pred_data, _ = self.smpl_results(self.trans, self.root_orient,
body_pose, self.betas)
per_stage_outputs['stage2'] = stage2_pred_data
if stages_res_out is not None:
res_betas = self.betas.clone().detach().cpu().numpy()
res_trans = self.trans.clone().detach().cpu().numpy()
res_root_orient = self.root_orient.clone().detach().cpu().numpy()
res_body_pose = body_pose.clone().detach().cpu().numpy()
for bidx, res_out_path in enumerate(stages_res_out):
cur_res_out_path = os.path.join(res_out_path,
'stage2_results.npz')
np.savez(cur_res_out_path,
betas=res_betas[bidx],
trans=res_trans[bidx],
root_orient=res_root_orient[bidx],
pose_body=res_body_pose[bidx])
if self.motion_prior is None:
# No need to continue optimizing
return self.get_optim_result(body_pose), per_stage_outputs
gc.collect()
#
# Stage III full pose and shape with motion prior
#
Logger.log(
'Optimizing stage 3 - shape and pose with motion prior for %d iterations..'
% (num_iter[2]))
self.fitting_loss.set_stage(2)
og_overlap_consist_weight = self.fitting_loss.loss_weights[
'rgb_overlap_consist']
prior_opt_params = []
# initialize latent motion with inference from the current SMPL sequence
cur_body_pose = self.latent2pose(self.latent_pose)
if self.optim_floor:
# initialize camera2prior transformation
init_smpl_data, _ = self.smpl_results(self.trans, self.root_orient,
cur_body_pose, self.betas)
self.cam2prior_R, self.cam2prior_t, self.cam2prior_root_height = compute_cam2prior(
self.floor_plane, self.trans[np.arange(self.batch_size),
self.init_fidx],
self.root_orient[np.arange(self.batch_size), self.init_fidx],
init_smpl_data['joints3d'][np.arange(self.batch_size),
self.init_fidx])
# save stage 2 output in prior frame later (after we get final floor estimate) to compare to
stage2_result_data_dict = {
'trans': self.trans.clone().detach(),
'root_orient': self.root_orient.clone().detach(),
'pose_body': cur_body_pose.clone().detach(),
'betas': self.betas.clone().detach()
}
self.latent_motion = self.infer_latent_motion(self.trans,
self.root_orient,
cur_body_pose,
self.betas,
data_fps).detach()
self.latent_motion.requires_grad = True
# also need additional optim params for additional prior inputs at first frame (to enable rollout)
if self.motion_prior.model_data_config in [
'smpl+joints', 'smpl+joints+contacts'
]:
# initialize from current SMPL sequence
vel_trans = self.trans
vel_root_orient = self.root_orient
if self.optim_floor:
# velocities are always kept in the canonical space since they are only used here for rollout
data_dict = {
'trans': self.trans,
'root_orient': self.root_orient
}
prior_data_dict = self.apply_cam2prior(
data_dict, self.cam2prior_R, self.cam2prior_t,
self.cam2prior_root_height, cur_body_pose, self.betas,
self.init_fidx)
vel_trans = prior_data_dict['trans']
vel_root_orient = prior_data_dict['root_orient']
self.trans_vel, self.joints_vel, self.root_orient_vel = \
self.estimate_velocities(self.trans, self.root_orient, cur_body_pose, self.betas, data_fps)
self.trans_vel = self.trans_vel[:, :1].detach()
self.joints_vel = self.joints_vel[:, :1].detach()
self.root_orient_vel = self.root_orient_vel[:, :1].detach()
self.trans_vel.requires_grad = True
self.joints_vel.requires_grad = True
self.root_orient_vel.requires_grad = True
prior_opt_params = [
self.trans_vel, self.joints_vel, self.root_orient_vel
]
else:
raise NotImplementedError('data return config not supported')
# update SMPL optim variables to be only initial state (initialized to current value)
self.trans = self.trans[:, :1].detach()
self.root_orient = self.root_orient[:, :1].detach()
self.latent_pose = self.latent_pose[:, :1].detach()
self.trans.requires_grad = True
self.root_orient.requires_grad = True
self.latent_pose.requires_grad = True
if self.optim_floor:
self.floor_plane.requires_grad = True
self.betas.requires_grad = True
motion_opt_params = [
self.trans, self.root_orient, self.latent_pose, self.betas
]
motion_opt_params += [self.latent_motion]
motion_opt_params += prior_opt_params
if self.optim_floor:
motion_opt_params += [self.floor_plane]
        # record initialization stats
body_pose = self.latent2pose(self.latent_pose)
rollout_results, cam_rollout_results = self.rollout_latent_motion(
self.trans,
self.root_orient,
body_pose,
self.betas,
prior_opt_params,
self.latent_motion,
fit_gender=fit_gender)
stage3_init_pred_data, _ = self.smpl_results(
cam_rollout_results['trans'].clone().detach(),
cam_rollout_results['root_orient'].clone().detach(),
cam_rollout_results['pose_body'].clone().detach(), self.betas)
if 'contacts' in rollout_results:
stage3_init_pred_data['contacts'] = rollout_results[
'contacts'].clone().detach()
per_stage_outputs['stage3_init'] = stage3_init_pred_data
if stages_res_out is not None:
res_body_pose = cam_rollout_results['pose_body'].clone().detach(
).cpu().numpy()
res_trans = cam_rollout_results['trans'].clone().detach().cpu(
            ).numpy()
res_root_orient = cam_rollout_results['root_orient'].clone(
).detach().cpu().numpy()
res_betas = self.betas.clone().detach().cpu().numpy()
# camera frame
for bidx, res_out_path in enumerate(stages_res_out):
cur_res_out_path = os.path.join(res_out_path,
'stage3_init_results.npz')
save_dict = {
'betas': res_betas[bidx],
'trans': res_trans[bidx],
'root_orient': res_root_orient[bidx],
'pose_body': res_body_pose[bidx]
}
if 'contacts' in rollout_results:
save_dict['contacts'] = rollout_results['contacts'][
bidx].clone().detach().cpu().numpy()
if self.optim_floor:
save_dict['floor_plane'] = self.floor_plane[bidx].clone(
).detach().cpu().numpy()
np.savez(cur_res_out_path, **save_dict)
# prior frame
if self.optim_floor:
res_trans = rollout_results['trans'].clone().detach().cpu(
                ).numpy()
res_root_orient = rollout_results['root_orient'].clone(
).detach().cpu().numpy()
for bidx, res_out_path in enumerate(stages_res_out):
cur_res_out_path = os.path.join(
res_out_path, 'stage3_init_results_prior.npz')
save_dict = {
'betas': res_betas[bidx],
'trans': res_trans[bidx],
'root_orient': res_root_orient[bidx],
'pose_body': res_body_pose[bidx]
}
if 'contacts' in rollout_results:
save_dict['contacts'] = rollout_results['contacts'][
bidx].clone().detach().cpu().numpy()
np.savez(cur_res_out_path, **save_dict)
init_motion_scale = 1.0 # single-step losses must be scaled commensurately with losses summed over whole sequence
motion_optim = torch.optim.LBFGS(motion_opt_params,
max_iter=lbfgs_max_iter,
lr=lr,
line_search_fn=LINE_SEARCH)
motion_optim_curr = motion_optim_refine = None
if self.stage3_tune_init_state:
freeze_optim_params = [self.latent_motion, self.betas]
if self.optim_floor:
freeze_optim_params += [self.floor_plane]
motion_optim_curr = torch.optim.LBFGS(freeze_optim_params,
max_iter=lbfgs_max_iter,
lr=lr,
line_search_fn=LINE_SEARCH)
motion_optim_refine = torch.optim.LBFGS(motion_opt_params,
max_iter=lbfgs_max_iter,
lr=lr,
line_search_fn=LINE_SEARCH)
cur_stage3_nsteps = self.stage3_tune_init_num_frames
saved_contact_height_weight = self.fitting_loss.loss_weights[
'contact_height']
saved_contact_vel_weight = self.fitting_loss.loss_weights[
'contact_vel']
for i in range(num_iter[2]):
if self.stage3_tune_init_state and i >= self.stage3_tune_init_freeze_start and i < self.stage3_tune_init_freeze_end:
# freeze initial state
motion_optim = motion_optim_curr
self.trans.requires_grad = False
self.root_orient.requires_grad = False
self.latent_pose.requires_grad = False
self.trans_vel.requires_grad = False
self.joints_vel.requires_grad = False
self.root_orient_vel.requires_grad = False
if self.stage3_contact_refine_only:
self.fitting_loss.loss_weights['contact_height'] = 0.0
self.fitting_loss.loss_weights['contact_vel'] = 0.0
init_motion_scale = float(
self.seq_len) / self.stage3_tune_init_num_frames
elif self.stage3_tune_init_state and i >= self.stage3_tune_init_freeze_end:
# refine
motion_optim = motion_optim_refine
self.trans.requires_grad = True
self.root_orient.requires_grad = True
self.latent_pose.requires_grad = True
self.trans_vel.requires_grad = True
self.joints_vel.requires_grad = True
self.root_orient_vel.requires_grad = True
self.betas.requires_grad = True
if self.optim_floor:
self.floor_plane.requires_grad = True
if self.stage3_contact_refine_only:
self.fitting_loss.loss_weights[
'contact_height'] = saved_contact_height_weight
self.fitting_loss.loss_weights[
'contact_vel'] = saved_contact_vel_weight
init_motion_scale = float(
self.seq_len) / self.stage3_tune_init_num_frames
Logger.log('ITER: %d' % (i))
def closure():
motion_optim.zero_grad()
cur_body_pose = self.latent2pose(self.latent_pose)
if self.optim_floor:
# update the cam2prior transformation based on current initial state variable and floor values
cam_smpl_data, _ = self.smpl_results(
self.trans, self.root_orient, cur_body_pose,
self.betas)
self.cam2prior_R, self.cam2prior_t, self.cam2prior_root_height = compute_cam2prior(
self.floor_plane, self.trans[:, 0],
self.root_orient[:, 0], cam_smpl_data['joints3d'][:,
0])
pred_data = dict()
# Use current params to go through SMPL and get joints3d, verts3d, points3d
cur_trans = self.trans
cur_root_orient = self.root_orient
cur_betas = self.betas
cur_latent_pose = self.latent_pose
cur_latent_motion = self.latent_motion
cur_cond_prior = None
cur_rollout_joints = None
cur_contacts = cur_contacts_conf = None
cur_cam_trans = cur_cam_root_orient = None
if self.stage3_tune_init_state and i < self.stage3_tune_init_freeze_start:
cur_latent_motion = cur_latent_motion[:, :(
cur_stage3_nsteps - 1)]
# rollout full sequence with current latent dynamics
# rollout_results are in prior space, cam_rollout_results are in camera frame
rollout_results, cam_rollout_results = self.rollout_latent_motion(
cur_trans,
cur_root_orient,
cur_body_pose,
cur_betas,
prior_opt_params,
cur_latent_motion,
return_prior=self.cond_prior,
fit_gender=fit_gender)
cur_trans = rollout_results['trans']
cur_root_orient = rollout_results['root_orient']
cur_body_pose = rollout_results['pose_body']
cur_cam_trans = cam_rollout_results['trans']
cur_cam_root_orient = cam_rollout_results['root_orient']
if self.cond_prior:
cur_cond_prior = rollout_results['cond_prior']
# re-encode entire body pose sequence
cur_latent_pose = self.pose2latent(cur_body_pose)
cur_rollout_joints = rollout_results['joints']
if 'contacts' in rollout_results:
cur_contacts = rollout_results['contacts']
cur_contacts_conf = rollout_results['contacts_conf']
pred_data, _ = self.smpl_results(cur_trans, cur_root_orient,
cur_body_pose, cur_betas)
pred_data['latent_pose'] = cur_latent_pose
pred_data['betas'] = cur_betas
pred_data['latent_motion'] = cur_latent_motion
# info for init state pose prior
pred_data['joints_vel'] = self.joints_vel
pred_data['trans_vel'] = self.trans_vel
pred_data['root_orient_vel'] = self.root_orient_vel
pred_data['joints3d_rollout'] = cur_rollout_joints
if cur_contacts is not None:
pred_data['contacts'] = cur_contacts
if cur_contacts_conf is not None:
pred_data['contacts_conf'] = cur_contacts_conf
cam_pred_data = pred_data
if self.optim_floor:
cam_pred_data, _ = self.smpl_results(
cur_cam_trans, cur_cam_root_orient, cur_body_pose,
cur_betas)
cam_pred_data['latent_pose'] = cur_latent_pose
cam_pred_data['betas'] = cur_betas
cam_pred_data['floor_plane'] = self.floor_plane
loss_nsteps = self.seq_len
loss_obs_data = observed_data
if self.stage3_tune_init_state and i < self.stage3_tune_init_freeze_start:
loss_obs_data = {
k: v[:, :cur_stage3_nsteps]
for k, v in observed_data.items()
if k != 'prev_batch_overlap_res'
}
if 'prev_batch_overlap_res' in observed_data:
loss_obs_data[
'prev_batch_overlap_res'] = observed_data[
'prev_batch_overlap_res']
loss_nsteps = cur_stage3_nsteps
                    # if in the initial stage, don't want to use overlap constraints
self.fitting_loss.loss_weights['rgb_overlap_consist'] = 0.0
# compute data losses, pose & motion prior
loss, stats_dict = self.fitting_loss.motion_fit(
loss_obs_data,
pred_data,
cam_pred_data,
loss_nsteps,
cond_prior=cur_cond_prior,
init_motion_scale=init_motion_scale)
if self.stage3_tune_init_state and i < self.stage3_tune_init_freeze_start:
# change it back
self.fitting_loss.loss_weights[
'rgb_overlap_consist'] = og_overlap_consist_weight
# log_cur_stats(stats_dict, loss, iter=i)
loss.backward()
return loss
print(
f"Stage III: ======= iter {i}: {observed_data['seq_interval'].cpu().numpy().tolist()} {out_names}======="
)
motion_optim.step(closure)
body_pose = self.latent2pose(self.latent_pose)
rollout_joints = rollout_results = None
# rollout and reset self.smpl_params to rolled out results so that get_optim_result works
rollout_results, cam_rollout_results = self.rollout_latent_motion(
self.trans,
self.root_orient,
body_pose,
self.betas,
prior_opt_params,
self.latent_motion,
fit_gender=fit_gender)
body_pose = rollout_results['pose_body']
self.latent_pose = self.pose2latent(body_pose)
self.trans = cam_rollout_results['trans']
self.root_orient = cam_rollout_results['root_orient']
rollout_joints = rollout_results['joints']
stage3_pred_data, _ = self.smpl_results(self.trans, self.root_orient,
body_pose, self.betas)
if rollout_joints is not None:
if self.optim_floor:
stage3_pred_data['prior_joints3d_rollout'] = rollout_joints
else:
stage3_pred_data['joints3d_rollout'] = rollout_joints
if rollout_results is not None and 'contacts' in rollout_results:
stage3_pred_data['contacts'] = rollout_results['contacts']
if self.optim_floor:
stage3_pred_data['prior_trans'] = rollout_results['trans']
stage3_pred_data['prior_root_orient'] = rollout_results[
'root_orient']
per_stage_outputs['stage3'] = stage3_pred_data
final_optim_res = self.get_optim_result(body_pose)
if rollout_results is not None and 'contacts' in rollout_results:
final_optim_res['contacts'] = rollout_results['contacts']
if self.optim_floor:
# go back and also save results from stage 2 using the final optimized floor to transform to prior frame
if stages_res_out is not None:
                # need to recompute the transformation for stage 2 results with the final floor
stg2_cam_smpl_data, _ = self.smpl_results(
stage2_result_data_dict['trans'],
stage2_result_data_dict['root_orient'],
stage2_result_data_dict['pose_body'],
stage2_result_data_dict['betas'])
stg2_cam2prior_R, stg2_cam2prior_t, stg2_cam2prior_root_height = compute_cam2prior(
self.floor_plane, stage2_result_data_dict['trans'][
np.arange(self.batch_size), self.init_fidx],
stage2_result_data_dict['root_orient'][
np.arange(self.batch_size), self.init_fidx],
stg2_cam_smpl_data['joints3d'][np.arange(self.batch_size),
self.init_fidx])
stage2_prior_data_dict = self.apply_cam2prior(
stage2_result_data_dict, stg2_cam2prior_R,
stg2_cam2prior_t, stg2_cam2prior_root_height,
stage2_result_data_dict['pose_body'],
stage2_result_data_dict['betas'], self.init_fidx)
if stages_res_out is not None:
# save stage 2 output in prior frame to compare to
res_betas = self.betas.clone().detach().cpu().numpy()
res_trans = stage2_prior_data_dict['trans'].clone().detach(
).cpu().numpy()
res_root_orient = stage2_prior_data_dict['root_orient'].clone(
).detach().cpu().numpy()
res_body_pose = cur_body_pose.clone().detach().cpu().numpy()
for bidx, res_out_path in enumerate(stages_res_out):
cur_res_out_path = os.path.join(
res_out_path, 'stage2_results_prior.npz')
np.savez(cur_res_out_path,
betas=res_betas[bidx],
trans=res_trans[bidx],
root_orient=res_root_orient[bidx],
pose_body=res_body_pose[bidx])
return final_optim_res, per_stage_outputs
def apply_cam2prior(self,
data_dict,
R,
t,
root_height,
body_pose,
betas,
key_frame_idx,
inverse=False):
'''
        Applies the camera2prior transformation made up of R, t to the data in data_dict and
returns a new dictionary with the transformed data.
Right now supports: trans, root_orient.
NOTE: If the number of timesteps in trans/root_orient is 1, this function assumes they are at key_frame_idx.
        (otherwise the calculation of cur_root_height or trans_offset in the inverse case is not correct)
key_frame_idx : the timestep used to compute cam2prior size (B) tensor
inverse : if true, applies the inverse transformation from prior space to camera
'''
prior_dict = dict()
if 'root_orient' in data_dict:
# B x T x 3
root_orient = data_dict['root_orient']
B, T, _ = root_orient.size()
R_time = R.unsqueeze(1).expand((B, T, 3, 3))
t_time = t.unsqueeze(1).expand((B, T, 3))
root_orient_mat = batch_rodrigues(root_orient.reshape(
(-1, 3))).reshape((B, T, 3, 3))
if inverse:
prior_root_orient_mat = torch.matmul(R_time.transpose(3, 2),
root_orient_mat)
else:
prior_root_orient_mat = torch.matmul(R_time, root_orient_mat)
prior_root_orient = rotation_matrix_to_angle_axis(
prior_root_orient_mat.reshape((B * T, 3, 3))).reshape(
(B, T, 3))
prior_dict['root_orient'] = prior_root_orient
if 'trans' in data_dict and 'root_orient' in data_dict:
# B x T x 3
trans = data_dict['trans']
B, T, _ = trans.size()
R_time = R.unsqueeze(1).expand((B, T, 3, 3))
t_time = t.unsqueeze(1).expand((B, T, 3))
if inverse:
# transform so key frame at origin
if T > 1:
trans_offset = trans[
|
np.arange(B)
|
numpy.arange
|
#!/usr/bin/env python3
# Author: <NAME>
# Contact: <EMAIL>
"""Define common operations for **PETGEM**."""
# ---------------------------------------------------------------
# Load python modules
# ---------------------------------------------------------------
import time
import yaml
import sys
import os
import numpy as np
from functools import wraps
from colorama import Fore
from singleton_decorator import singleton
# ---------------------------------------------------------------
# Load petgem modules (BSC)
# ---------------------------------------------------------------
from .parallel import MPIEnvironment
# ###############################################################
# ################ CLASSES DEFINITION ##################
# ###############################################################
# ---------------------------------------------------------------
# Class Print definition
# ---------------------------------------------------------------
class Print(object):
"""
    This class provides methods for pretty printing.
    :param str text: text to be printed.
:return: None.
:rtype: None.
"""
    # Foreground color options for pretty printing (switch case)
_options = {
1: Fore.BLACK,
2: Fore.BLUE,
3: Fore.CYAN,
4: Fore.GREEN,
5: Fore.LIGHTBLACK_EX,
6: Fore.LIGHTBLUE_EX,
7: Fore.LIGHTCYAN_EX,
8: Fore.LIGHTGREEN_EX,
9: Fore.LIGHTMAGENTA_EX,
10: Fore.LIGHTRED_EX,
11: Fore.LIGHTWHITE_EX,
12: Fore.LIGHTYELLOW_EX,
13: Fore.MAGENTA,
14: Fore.RED,
15: Fore.WHITE,
16: Fore.YELLOW
}
# Constructor
def __init__(self, text, color_code=None):
"""Constructor."""
self._log(text, color_code)
# Logging method
def _log(self, text, color_code=None):
"""
        Configure and print a text.
:param str text: text to be printed.
:param int color_code: text color code.
:return: None.
"""
        # If color_code is None, fall back to the default color code (16)
if color_code is None:
color_code = int(16)
set_color = self._options[color_code]
print(set_color + text)
sys.stdout.flush()
return
@classmethod
def header(self):
"""Print the header.
:param: None.
:return: None.
:rtype: None.
"""
# Specific color code for printing the header
color_code = 5
if( MPIEnvironment().rank == 0 ):
self._log(self, '%'*75, color_code)
self._log(self, '%%%' + ' '*69 + '%%%', color_code)
self._log(self, '%%%'+ 'PETGEM'.center(69) + '%%%', color_code)
self._log(self, '%%%'+ 'Parallel Edge-based Tool for Electromagnetic Modelling'.center(69) + '%%%', color_code)
self._log(self, '%%%' + ' '*69 + '%%%', color_code)
self._log(self, '%'*75, color_code)
self._log(self, '%%%' + ' '*69 + '%%%', color_code)
self._log(self, '%%% (c) <NAME>' +
' '*40 + '%%%', color_code)
self._log(self, '%%% Barcelona Supercomputing Center (BSC-CNS), 2021' +
' '*19 + '%%%', color_code)
self._log(self, '%%%' + ' '*69 + '%%%', color_code)
self._log(self, '%'*75, color_code)
return
@classmethod
def master(self, text, color_code=None):
"""
If the caller is the master process, this method prints a message.
        :param str text: text to be printed.
        :param int color_code: text color code.
:return: None.
:rtype: None.
"""
if( MPIEnvironment().rank == 0 ):
self._log(self, text, color_code)
return
# ---------------------------------------------------------------
# Class InputParameters definition
# ---------------------------------------------------------------
class InputParameters(object):
"""Method to import a yaml parameter file.
:param dict object: user params yaml file.
:return: user parameters as object view.
:rtype: object.
"""
def __init__(self, params, parEnv):
"""Class constructor.
:param str params: yaml parameters file.
:param object parEnv: parallel environment object.
:return: InputParameters object.
:rtype: object
"""
# ---------------------------------------------------------------
# Read the input parameters file
# ---------------------------------------------------------------
with open(params, 'r') as f:
# The FullLoader parameter handles the conversion from YAML
# scalar values to the Python dictionary format
inputs = yaml.safe_load(f)
# Get set of parameters
self.model = inputs['model']
self.run = inputs['run']
self.output = inputs['output']
# ---------------------------------------------------------------
# Check modeling mode
# ---------------------------------------------------------------
if not ('mode' in self.model.keys()):
Print.master(' Modeling mode not provided. Please, verify the parameter file consistency.')
exit(-1)
else:
if not ((self.model.get('mode') == 'csem') or (self.model.get('mode') == 'mt')):
Print.master(' Modeling mode not supported.')
exit(-1)
# ---------------------------------------------------------------
# Check parameters consistency for csem mode
# ---------------------------------------------------------------
if (self.model.get('mode') == 'csem'):
if not ('csem' in self.model.keys()):
Print.master(' csem parameters not provided. Please, verify the parameter file consistency.')
exit(-1)
else:
# Check consistency of csem params
conductivity_from_file, num_polarizations = self.__verify_CSEM_params__(self.model)
# ---------------------------------------------------------------
# Check parameters consistency for mt mode
# ---------------------------------------------------------------
elif (self.model.get('mode') == 'mt'):
if not ('mt' in self.model.keys()):
Print.master(' mt parameters not provided. Please, verify the parameter file consistency.')
exit(-1)
else:
# Check consistency of mt params
conductivity_from_file, num_polarizations = self.__verify_MT_params__(self.model)
# Update number of models, interpolation strategy and
# polarization modes
self.run.update({'conductivity_from_file': conductivity_from_file})
self.run.update({'num_polarizations': num_polarizations})
# ---------------------------------------------------------------
# Check consistency of common parameters
# ---------------------------------------------------------------
# Mesh
if not ('mesh' in self.model.keys()):
Print.master(' mesh parameter not provided. Please, verify the parameter file consistency.')
exit(-1)
# Receivers
if not ('receivers' in self.model.keys()):
Print.master(' receivers parameter not provided. Please, verify the parameter file consistency.')
exit(-1)
# Basis order
if not ('nord' in self.run.keys()):
Print.master(' nord parameter not provided. Please, verify the parameter file consistency.')
exit(-1)
else:
if ((self.run.get('nord') < 1) or (self.run.get('nord') > 6)):
Print.master(' Vector finite element basis order not supported. Please, select a valid order (1,2,3,4,5,6).')
exit(-1)
# Cuda support
if not ('cuda' in self.run.keys()):
self.run.update({'cuda': False})
else:
if not ((self.run.get('cuda') is False) or (self.run.get('cuda') is True)):
                Print.master(' cuda option not supported. Please, select a valid option (True/False).')
exit(-1)
# Output
if not ('vtk' in self.output.keys()):
self.output.update({'vtk': False})
if not ('directory' in self.output.keys()):
Print.master(' output directory parameter not provided. Please, verify the parameter file consistency.')
exit(-1)
else:
if(parEnv.rank == 0):
if not os.path.exists(self.output.get('directory')):
os.mkdir(self.output.get('directory'))
# If not scratch directory, use output directory
if not ('directory_scratch' in self.output.keys()):
self.output.update({'directory_scratch': self.output.get('directory')})
self.output.update({'remove_scratch': False})
else:
if(parEnv.rank == 0):
if not os.path.exists(self.output.get('directory_scratch')):
os.mkdir(self.output.get('directory_scratch'))
self.output.update({'remove_scratch': True})
return
def __verify_CSEM_params__(self, data):
"""Verify consistency of CSEM parameters
:param dict data: csem dictionary
        :return: (conductivity_from_file, num_polarizations) describing the input conductivity model.
        :rtype: tuple
"""
# Get csem parameters
csem_params = data.get('csem')
# One "polarization mode" per csem model
        num_polarizations = int(1)
# Check consistency for csem modeling
# Check sigma consistency
if not ('sigma' in csem_params.keys()):
Print.master(' csem parameters not provided. Please, verify the parameter file consistency.')
exit(-1)
else:
# Get sigma parameters
i_sigma = csem_params.get('sigma')
# Conductivity file
if ('file' in i_sigma.keys()):
# No vectors conductivity
if (('horizontal' in i_sigma.keys()) or ('vertical' in i_sigma.keys())):
Print.master(' sigma parameters invalid. Please, verify the parameter file consistency.')
exit(-1)
else:
conductivity_from_file = True
# Vector conductivity
elif (('horizontal' in i_sigma.keys()) and ('vertical' in i_sigma.keys())):
# No file conductivity
if ('file' in i_sigma.keys()):
Print.master(' sigma parameters invalid. Please, verify the parameter file consistency.')
exit(-1)
else:
conductivity_from_file = False
else:
Print.master(' sigma parameters invalid. Please, verify the parameter file consistency.')
exit(-1)
# Check source consistency
if not ('source' in csem_params.keys()):
Print.master(' source parameters not provided. Please, verify the parameter file consistency.')
exit(-1)
else:
# Get source parameters
i_source = csem_params.get('source')
# Check number of source parameters
if not (len(i_source) == 6):
Print.master(' number of source parameters is not consistent. Please, verify the parameter file consistency.')
exit(-1)
else:
base_params = ['frequency', 'position', 'azimuth', 'dip', 'current', 'length']
for i in
|
np.arange(6)
|
numpy.arange
|
"""
"T#" test cases from https://archimede.dm.uniba.it/~bvpsolvers/testsetbvpsolvers/?page_id=27, [1]_.
"R#" test cases from https://doi.org/10.2514/6.2019-3666, [2]_.
References
----------
.. [1] <NAME> and <NAME>. "A fortran test set for boundary value problem solvers."
AIP Conference Proceedings. 1648(1):020009, 2015.
.. [2] <NAME> and <NAME>. "Numerical Algorithms for Solving Boundary-Value Problemson Reduced
Dimensional Manifolds." AIAA Aviation 2019 Forum. 2019.
"""
import pytest
import itertools
from beluga.numeric.data_classes.Trajectory import Trajectory
from beluga.numeric.bvp_solvers import Shooting
import numpy as np
import copy
from scipy.special import erf
# Test the shooting solver for each algorithm listed below
ALGORITHMS = ['Armijo', 'SLSQP']
EASY = [1]
MEDIUM = [1e-1]
HARD = [1e-2]
VHARD = [1e-3]
tol = 1e-3
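# Shared structure of the tests below (argument names inferred from how the callbacks are used;
# unused positions appear as _/__/___ in the test bodies):
#   odefun(y, _, k)         -> right-hand side of the ODE system, k holds the problem constants
#   odejac(y, _, k)         -> (df_dy, df_dp): Jacobians w.r.t. the state and the parameters
#   bcfun(y0, yf, _, __, k) -> boundary-condition residuals at the initial and final states
# Each test then solves the BVP with:
#   algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
#   algo.set_derivative_jacobian(odejac)
#   sol = algo.solve(solinit)['sol']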
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t1(algorithm, const):
def odefun(y, _, k):
return y[1], y[0] / k[0]
def odejac(_, __, k):
df_dy = np.array([[0, 1], [1 / k[0], 0]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] - 1, yf[0]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 1], [0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = (np.exp(-sol.t / np.sqrt(sol.const)) - np.exp((sol.t - 2) / np.sqrt(sol.const))) / (
1 - np.exp(-2.e0 / np.sqrt(sol.const)))
e2 = (1. / (sol.const ** (1 / 2) * np.exp(sol.t / sol.const ** (1 / 2))) + np.exp(
(sol.t - 2) / sol.const ** (1 / 2)) / sol.const ** (1 / 2)) / (1 / np.exp(2 / sol.const ** (1 / 2)) - 1)
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t2(algorithm, const):
def odefun(y, _, k):
return y[1], y[1] / k[0]
def odejac(_, __, k):
df_dy = np.array([[0, 1], [0, 1 / k[0]]])
df_dp = np.empty((2, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] - 1, yf[0]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 1], [0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = (1.e0 - np.exp((sol.t - 1.e0) / sol.const)) / (1.e0 - np.exp(-1.e0 / sol.const))
e2 = np.exp((sol.t - 1) / sol.const) / (sol.const * (1 / np.exp(1 / sol.const) - 1))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, VHARD))
def test_t3(algorithm, const):
def odefun(y, _, k):
return (2 * y[1], 2 * (-(2 + np.cos(np.pi * y[2])) * y[1] + y[0] - (1 + k[0] * np.pi * np.pi) * np.cos(
np.pi * y[2]) - (2 + np.cos(np.pi * y[2])) * np.pi * np.sin(np.pi * y[2])) / k[0], 2)
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], -(2 * np.cos(np.pi * y[2]) + 4)/k[0],
(2*np.pi**2 * np.sin(np.pi * y[2])**2 + 2 * np.pi*np.sin(np.pi*y[2])*(k[0]*np.pi**2 + 1)
- 2*np.pi**2*np.cos(np.pi*y[2])*(np.cos(np.pi*y[2]) + 2)
+ 2*y[1]*np.pi*np.sin(np.pi*y[2]))/k[0]],
                          [0, 0, 0]], dtype=float)
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 1, yf[0] + 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [-1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2])
e2 = -np.pi * np.sin(np.pi * sol.y[:, 2])
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t4(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * (((1 + k[0]) * y[0] - y[1]) / k[0]), 2
def odejac(_, __, k):
df_dy = np.array([[0, 2, 0], [2 * (1 + k[0]) / k[0], 2 * (-1) / k[0], 0], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, k):
return y0[0] - 1 - np.exp(-2), yf[0] - 1 - np.exp(-2 * (1 + k[0]) / k[0]), y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [-1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.exp(sol.y[:, 2] - 1) + np.exp(-((1 + sol.const[0]) * (1 + sol.y[:, 2]) / sol.const[0]))
e2 = np.exp(sol.y[:, 2] - 1) - (sol.const[0] + 1) / (
sol.const[0] * np.exp((sol.y[:, 2] + 1) * (sol.const[0] + 1) / sol.const[0]))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t5(algorithm, const):
def odefun(y, _, k):
return (2 * y[1], 2 * ((y[0] + y[2] * y[1] - (1 + k[0] * np.pi ** 2) * np.cos(np.pi * y[2])
+ y[2] * np.pi * np.sin(np.pi * y[2])) / k[0]), 2)
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0], [2 / k[0], 2 * y[2] / k[0],
(2 * (y[1] + np.pi * np.sin(np.pi * y[2]) + np.pi * np.sin(np.pi * y[2])
* (k * np.pi ** 2 + 1) + np.pi * np.pi * y[2]
* np.cos(np.pi * y[2]))) / k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 1, yf[0] + 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [-1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2])
e2 = -np.pi * np.sin(np.pi * sol.y[:, 2])
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t6(algorithm, const):
def odefun(y, _, k):
return (2 * y[1], 2 * ((-y[2] * y[1] - k[0] * np.pi ** 2 * np.cos(np.pi * y[2]) - np.pi * y[2] * np.sin(
np.pi * y[2])) / k[0]), 2)
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[0, -2 * y[2] / k[0],
-(2 * (y[1] + np.pi * np.sin(np.pi * y[2]) - k[0] * np.pi ** 3 * np.sin(np.pi * y[2])
+ np.pi ** 2 * y[2] * np.cos(np.pi * y[2]))) / k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 2, yf[0], y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [-1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2]) + erf(sol.y[:, 2] / np.sqrt(2 * sol.const[0])) / erf(1 / np.sqrt(2 * sol.const[0]))
e2 = np.sqrt(2) / (np.sqrt(np.pi) * np.sqrt(sol.const[0]) * np.exp(sol.y[:, 2] ** 2 / (2 * sol.const[0])) * erf(
np.sqrt(2) / (2 * np.sqrt(sol.const[0])))) - np.pi * np.sin(np.pi * sol.y[:, 2])
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t7(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * ((-y[2] * y[1] + y[0] - (1.0e0 + k[0] * np.pi ** 2) * np.cos(np.pi * y[2]) - np.pi *
y[2] * np.sin(np.pi * y[2])) / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], -2 * y[2] / k[0],
-(2 * (y[1] + np.pi * np.sin(np.pi * y[2]) + np.pi ** 2 * y[2] * np.cos(np.pi * y[2])
- np.pi * np.sin(np.pi * y[2]) * (k[0] * np.pi ** 2 + 1))) / k[0]],
[0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 1, yf[0] - 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2]) + sol.y[:, 2] + (
sol.y[:, 2] * erf(sol.y[:, 2] / np.sqrt(2.0e0 * sol.const[0]))
+ np.sqrt(2 * sol.const[0] / np.pi) * np.exp(-sol.y[:, 2] ** 2 / (2 * sol.const[0]))) / (
erf(1.0e0 / np.sqrt(2 * sol.const[0])) + np.sqrt(2.0e0 * sol.const[0] / np.pi)
* np.exp(-1 / (2 * sol.const[0])))
e2 = erf((np.sqrt(2) * sol.y[:, 2]) / (2 * np.sqrt(sol.const[0]))) / (
erf(np.sqrt(2) / (2 * np.sqrt(sol.const[0]))) + (np.sqrt(2) * np.sqrt(sol.const[0])) / (
np.sqrt(np.pi) * np.exp(1 / (2 * sol.const[0])))) - np.pi * np.sin(np.pi * sol.y[:, 2]) + 1
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t8(algorithm, const):
def odefun(y, _, k):
return y[1], (-y[1] / k[0]), 1
def odejac(_, __, k):
df_dy = np.array([[0, 1, 0], [0, -1 / k[0], 0], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] - 1, yf[0] - 2, y0[2]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[1, 0, -1], [2, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = (2 - np.exp(-1 / sol.const[0]) - np.exp(-sol.y[:, 2] / sol.const[0])) / (1 - np.exp(-1 / sol.const[0]))
e2 = -1 / (sol.const[0] * np.exp(sol.y[:, 2] / sol.const[0]) * (1 / np.exp(1 / sol.const[0]) - 1))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t9(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * (-(4 * y[2] * y[1] + 2 * y[0]) / (k[0] + y[2] ** 2)), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[-4 / (y[2] ** 2 + k[0]), -(8 * y[2]) / (y[2] ** 2 + k[0]),
(4 * y[2] * (2 * y[0] + 4 * y[1] * y[2])) / (y[2] ** 2 + k[0]) ** 2
- (8 * y[1]) / (y[2] ** 2 + k[0])], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, k):
return y0[0] - 1 / (1 + k[0]), yf[0] - 1 / (1 + k[0]), y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=2)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0., 1., 2)
# noinspection PyTypeChecker
solinit.y = np.array([[1. / (1. + const), 0., -1.], [1. / (1. + const), 1., 1.]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = 1 / (sol.const[0] + sol.y[:, 2] ** 2)
e2 = -(2 * sol.y[:, 2]) / (sol.y[:, 2] ** 2 + sol.const[0]) ** 2
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t10(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * (-y[2] * y[1] / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0], [0, 2 * (-y[2]) / k[0], 2 * (-y[1] / k[0])], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0], yf[0] - 2, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, -1], [2, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = 1 + erf(sol.y[:, 2] / np.sqrt(2 * sol.const[0])) / erf(1 / np.sqrt(2 * sol.const[0]))
e2 = np.sqrt(2) / (np.sqrt(np.pi) * np.sqrt(sol.const[0]) * np.exp(sol.y[:, 2] ** 2 / (2 * sol.const[0])) * erf(
np.sqrt(2) / (2 * np.sqrt(sol.const[0]))))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t11(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * ((y[0] - k[0] * np.pi ** 2 * np.cos(np.pi * y[2]) - np.cos(np.pi * y[2])) / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], 0, (2 * (np.pi * np.sin(np.pi * y[2])
+ k[0] * np.pi ** 3 * np.sin(np.pi * y[2]))) / k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 1, yf[0] + 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [-1, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2])
e2 = -np.pi * np.sin(np.pi * sol.y[:, 2])
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t12(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * ((y[0] - k[0] * np.pi ** 2 * np.cos(np.pi * y[2]) - np.cos(np.pi * y[2])) / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], 0, (2 * (np.pi * np.sin(np.pi * y[2]) + k[0] * np.pi ** 3 * np.sin(np.pi * y[2])))
/ k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] + 1, yf[0], y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[-1, 0, -1], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2]) + np.exp(-(1 - sol.y[:, 2]) / np.sqrt(sol.const[0]))
e2 = np.exp((sol.y[:, 2] - 1) / np.sqrt(sol.const[0])) / np.sqrt(sol.const[0]) - np.pi * np.sin(np.pi * sol.y[:, 2])
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t13(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * ((y[0] - k[0] * np.pi ** 2 * np.cos(np.pi * y[2]) - np.cos(np.pi * y[2])) / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], 0, (2 * (np.pi * np.sin(np.pi * y[2]) + k[0] * np.pi ** 3 * np.sin(np.pi * y[2])))
/ k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0], yf[0] + 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=2)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, -1], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.cos(np.pi * sol.y[:, 2]) + np.exp(-(1 + sol.y[:, 2]) / np.sqrt(sol.const[0]))
e2 = -np.pi * np.sin(np.pi * sol.y[:, 2]) - 1 / (
np.sqrt(sol.const[0]) * np.exp((sol.y[:, 2] + 1) / np.sqrt(sol.const[0])))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t14(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * ((y[0] - k[0] * np.pi ** 2 * np.cos(np.pi * y[2]) - np.cos(np.pi * y[2])) / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0],
[2 / k[0], 0, (2 * (np.pi * np.sin(np.pi * y[2]) + k[0] * np.pi ** 3 * np.sin(np.pi * y[2])))
/ k[0]], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0], yf[0], y0[2]+1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm, num_arcs=4)
algo.set_derivative_jacobian(odejac)
sol = Trajectory()
sol.t = np.linspace(0, 1, 2)
sol.y = np.array([[0, 0, -1], [0, 0, 1]])
sol.const = np.array([const])
# noinspection PyTypeChecker
cc = np.linspace(const * 10, const, 10)
for c in cc:
sol = copy.deepcopy(sol)
sol.const = np.array([c])
sol = algo.solve(sol)['sol']
e1 = np.cos(np.pi * sol.y[:, 2]) + np.exp(-(1 + sol.y[:, 2]) / np.sqrt(sol.const[0])) + np.exp(
-(1 - sol.y[:, 2]) / np.sqrt(sol.const[0]))
e2 = np.exp((sol.y[:, 2] - 1) / np.sqrt(sol.const[0])) / np.sqrt(sol.const[0]) - np.pi * np.sin(
np.pi * sol.y[:, 2]) - 1 / (np.sqrt(sol.const[0]) * np.exp((sol.y[:, 2] + 1) / np.sqrt(sol.const[0])))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t15(algorithm, const):
def odefun(y, _, k):
return 2 * y[1], 2 * (y[2] * y[0] / k[0]), 2
def odejac(y, _, k):
df_dy = np.array([[0, 2, 0], [2 * (y[2] / k[0]), 0, 2 * (y[0] / k[0])], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0] - 1, yf[0] - 1, y0[2] + 1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[1, 0, -1], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
assert sol.converged is True
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t16(algorithm, const):
def odefun(y, _, k):
return 1 * y[1], 1 * (-y[0] * np.pi ** 2 / (4 * k[0])), 1
def odejac(_, __, k):
df_dy = np.array([[0, 1, 0], [-np.pi**2 / (4 * k[0]), 0, 0], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, k):
return y0[0], yf[0] - np.sin(np.pi / (2 * np.sqrt(k[0]))), y0[2]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, 0], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.sin(np.pi * sol.y[:, 2] / (2 * np.sqrt(sol.const[0])))
e2 = (np.pi * np.cos((np.pi * sol.y[:, 2]) / (2 * np.sqrt(sol.const[0])))) / (2 * np.sqrt(sol.const[0]))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, VHARD))
def test_t17(algorithm, const):
def odefun(y, _, k):
return 0.2 * y[1], 0.2 * (-3 * k[0] * y[0] / (k[0] + y[2] ** 2) ** 2), 0.2
def odejac(y, _, k):
df_dy = np.array([[0, 0.2, 0],
[-(3 * k[0]) / (5 * (y[2] ** 2 + k[0]) ** 2), 0, (12 * k[0] * y[0] * y[2])
/ (5 * (y[2] ** 2 + k[0]) ** 3)], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, k):
return y0[0] + 0.1 / np.sqrt(k[0] + 0.01), yf[0] - 0.1 / np.sqrt(k[0] + 0.01), y0[2] + 0.1
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, 0], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = sol.y[:, 2]/np.sqrt(sol.const[0] + sol.y[:, 2] ** 2)
e2 = 1 / np.sqrt(sol.y[:, 2] ** 2 + sol.const[0]) - sol.y[:, 2] ** 2 / (sol.y[:, 2] ** 2 + sol.const[0]) ** (3 / 2)
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, HARD))
def test_t18(algorithm, const):
def odefun(y, _, k):
return y[1], (-y[1] / k[0]), 1
def odejac(_, __, k):
df_dy = np.array([[0, 1, 0], [0, -1 / k[0], 0], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, k):
return y0[0] - 1, yf[0] - np.exp(-1 / k[0]), y0[2]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, 0], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
e1 = np.exp(-sol.y[:, 2] / sol.const[0])
e2 = -1 / (sol.const[0] * np.exp(sol.y[:, 2] / sol.const[0]))
assert all(e1 - sol.y[:, 0] < tol)
assert all(e2 - sol.y[:, 1] < tol)
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, VHARD))
def test_t19(algorithm, const):
def odefun(y, _, k):
return y[1], (-y[1] / k[0]), 1
def odejac(_, __, k):
df_dy = np.array([[0, 1, 0], [0, -1 / k[0], 0], [0, 0, 0]])
df_dp = np.empty((3, 0))
return df_dy, df_dp
def bcfun(y0, yf, _, __, ___):
return y0[0], yf[0], y0[2]
algo = Shooting(odefun, None, bcfun, algorithm=algorithm)
algo.set_derivative_jacobian(odejac)
solinit = Trajectory()
solinit.t = np.linspace(0, 1, 2)
solinit.y = np.array([[0, 0, 0], [0, 0, 1]])
solinit.const = np.array([const])
sol = algo.solve(solinit)['sol']
assert sol.converged
@pytest.mark.parametrize("algorithm, const", itertools.product(ALGORITHMS, MEDIUM))
def test_t21(algorithm, const):
def odefun(y, _, k):
return y[1], (y[0] * (1 + y[0]) - np.exp(-2 * y[2] / np.sqrt(k[0]))) / k[0], 1
def odejac(y, _, k):
df_dy = np.array([[0, 1, 0],
[(2*y[0] + 1) / k[0], 0, (2 * np.exp(-(2 * y[2]) / np.sqrt(k[0]))) / k[0] ** (3 / 2)],
[0, 0, 0]])
        df_dp = np.empty((3, 0))  # completion; api: numpy.empty

import torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import Sampler
import const
# 0, batch * 1, batch * 2 ...
class BatchIntervalSampler(Sampler):
def __init__(self, data_length, batch_size):
        # make data_length evenly divisible by batch_size
if data_length % batch_size != 0:
data_length = data_length - (data_length % batch_size)
self.indices =[]
# print(data_length)
batch_group_interval = int(data_length / batch_size)
for group_idx in range(batch_group_interval):
for local_idx in range(batch_size):
self.indices.append(group_idx + local_idx * batch_group_interval)
# print('sampler init', self.indices)
def __iter__(self):
return iter(self.indices)
def __len__(self):
return len(self.indices)
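# Illustrative sketch (not part of the original module): with data_length=6 and
# batch_size=2, batch_group_interval is 3 and the sampler yields indices
# 0, 3, 1, 4, 2, 5, so each batch pairs samples that are one interval apart:
#   loader = DataLoader(dataset, batch_size=2, sampler=BatchIntervalSampler(6, 2))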
def record_net_data_stats(label_temp, data_idx_map):
net_class_count = {}
net_data_count= {}
for net_i, dataidx in data_idx_map.items():
unq, unq_cnt = np.unique(label_temp[dataidx], return_counts=True)
tmp = {unq[i]: unq_cnt[i] for i in range(len(unq))}
net_class_count[net_i] = tmp
net_data_count[net_i] = len(dataidx)
print('Data statistics: %s' % str(net_class_count))
return net_class_count, net_data_count
def GetCanDataset(total_edge, fold_num, packet_num, csv_path, txt_path):
csv = pd.read_csv(csv_path)
txt = open(txt_path, "r")
lines = txt.read().splitlines()
idx = 0
datum = []
label_temp = []
# [cur_idx ~ cur_idx + packet_num)
while idx + packet_num - 1 < len(csv) // 2:
line = lines[idx + packet_num - 1]
if not line:
break
if line.split(' ')[1] == 'R':
datum.append((idx, 1))
label_temp.append(1)
else:
datum.append((idx, 0))
label_temp.append(0)
idx += 1
if (idx % 1000000 == 0):
print(idx)
fold_length = int(len(label_temp) / 5)
train_datum = []
train_label_temp = []
for i in range(5):
if i != fold_num:
train_datum += datum[i*fold_length:(i+1)*fold_length]
train_label_temp += label_temp[i*fold_length:(i+1)*fold_length]
else:
test_datum = datum[i*fold_length:(i+1)*fold_length]
N = len(train_label_temp)
train_label_temp = np.array(train_label_temp)
proportions = np.random.dirichlet(np.repeat(1, total_edge))
proportions = np.cumsum(proportions)
idx_batch = [[] for _ in range(total_edge)]
data_idx_map = {}
prev = 0.0
for j in range(total_edge):
idx_batch[j] = [idx for idx in range(int(prev * N), int(proportions[j] * N))]
prev = proportions[j]
data_idx_map[j] = idx_batch[j]
_, net_data_count = record_net_data_stats(train_label_temp, data_idx_map)
return CanDataset(csv, train_datum, packet_num), data_idx_map, net_data_count, CanDataset(csv, test_datum, packet_num, False)
class CanDataset(Dataset):
def __init__(self, csv, datum, packet_num, is_train=True):
self.csv = csv
self.datum = datum
self.packet_num = packet_num
if is_train:
self.idx_map = []
else:
self.idx_map = [idx for idx in range(len(self.datum))]
def __len__(self):
return len(self.idx_map) - self.packet_num + 1
def set_idx_map(self, data_idx_map):
self.idx_map = data_idx_map
def __getitem__(self, idx):
# [cur_idx ~ cur_idx + packet_num)
start_i = self.datum[self.idx_map[idx]][0]
is_regular = self.datum[self.idx_map[idx]][1]
packet = np.zeros((const.CAN_DATA_LEN * self.packet_num))
for next_i in range(self.packet_num):
data_len = self.csv.iloc[start_i + next_i, 1]
for j in range(data_len):
data_value = int(self.csv.iloc[start_i + next_i, 2 + j], 16) / 255.0
packet[j + const.CAN_DATA_LEN * next_i] = data_value
return torch.from_numpy(packet).float(), is_regular
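# Illustrative usage (sketch; the file paths and edge index are assumptions):
#   train_set, idx_map, counts, test_set = GetCanDataset(
#       total_edge=5, fold_num=0, packet_num=3,
#       csv_path='can_data.csv', txt_path='can_labels.txt')
#   train_set.set_idx_map(idx_map[0])  # restrict this edge to its Dirichlet shard
#   loader = DataLoader(train_set, batch_size=64,
#                       sampler=BatchIntervalSampler(len(train_set), 64))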
def GetCanDatasetCNN(total_edge, fold_num, csv_path, txt_path):
csv = pd.read_csv(csv_path)
txt = open(txt_path, "r")
lines = txt.read().splitlines()
idx = 0
datum = []
label_temp = []
while idx < len(csv) // 2:
line = lines[idx]
if not line:
break
if line.split(' ')[1] == 'R':
datum.append((idx, 1))
label_temp.append(1)
else:
datum.append((idx, 0))
label_temp.append(0)
idx += 1
if (idx % 1000000 == 0):
print(idx)
fold_length = int(len(label_temp) / 5)
train_datum = []
train_label_temp = []
for i in range(5):
if i != fold_num:
train_datum += datum[i*fold_length:(i+1)*fold_length]
train_label_temp += label_temp[i*fold_length:(i+1)*fold_length]
else:
test_datum = datum[i*fold_length:(i+1)*fold_length]
N = len(train_label_temp)
train_label_temp = np.array(train_label_temp)
proportions = np.random.dirichlet(np.repeat(1, total_edge))
proportions = np.cumsum(proportions)
idx_batch = [[] for _ in range(total_edge)]
data_idx_map = {}
prev = 0.0
for j in range(total_edge):
idx_batch[j] = [idx for idx in range(int(prev * N), int(proportions[j] * N))]
prev = proportions[j]
data_idx_map[j] = idx_batch[j]
_, net_data_count = record_net_data_stats(train_label_temp, data_idx_map)
return CanDatasetCNN(csv, train_datum), data_idx_map, net_data_count, CanDatasetCNN(csv, test_datum, False)
class CanDatasetCNN(Dataset):
def __init__(self, csv, datum, is_train=True):
self.csv = csv
self.datum = datum
if is_train:
self.idx_map = []
else:
self.idx_map = [idx for idx in range(len(self.datum))]
def __len__(self):
return len(self.idx_map)
def set_idx_map(self, data_idx_map):
self.idx_map = data_idx_map
def __getitem__(self, idx):
start_i = self.datum[self.idx_map[idx]][0]
is_regular = self.datum[self.idx_map[idx]][1]
        packet = np.zeros((1, const.CNN_FRAME_LEN, const.CNN_FRAME_LEN))  # completion; api: numpy.zeros

# Copyright 2019 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted by <NAME> in November 2019 from this Colab notebook:
# https://colab.research.google.com/github/google-research/bert/blob/master/predicting_movie_reviews_with_bert_on_tf_hub.ipynb.
# Changes include:
#  - reading our stressor data and parsing it properly
#  - reconfiguring the last layer to include N neurons corresponding to N categories
#  - correcting the probability output so that it follows a proper [0, 1] pattern
#  - better analysis with a confusion matrix
#  - exporting to pb format for the TensorFlow Serving API
import os
os.environ['LD_LIBRARY_PATH'] = '/usr/local/cuda-10.0/lib64'
import sys
print(sys.executable)
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
import matplotlib.pyplot as plt
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import f1_score,confusion_matrix,classification_report,accuracy_score
import logging
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_colwidth', 1000)
config = tf.ConfigProto()
#config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
#config.gpu_options.visible_device_list="0"
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
import bert
from bert import run_classifier_with_tfhub
from bert import optimization
from bert import tokenization
from bert import modeling
import numpy as np
############ Utils functions ##################
def create_examples_prediction(df):
"""Creates examples for the training and dev sets."""
examples = []
for index, row in df.iterrows():
#labels = row[LABEL_HOT_VECTOR].strip('][').split(', ')
#labels = [float(x) for x in labels]
labels = list(row[label_list_text])
examples.append(labels)
return pd.DataFrame(examples)
def f(x):
    n = 2  # index of the second probability to get labeled
index = np.argsort(x.values.flatten().tolist())[-n:][0]
print(f"index is {index}")
label = label_list_text[index]
print(f"label is {label}")
return label
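# Note (added, illustrative): for probabilities [0.1, 0.7, 0.2], np.argsort gives
# [0, 2, 1], so [-n:][0] with n=2 picks index 2, i.e. f returns the label of the
# second-highest probability.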
final_columns = ["sOrder","Input.text","is_stressor","is_stressor_conf","top_label","second_label","Branch", "Above SD-THRESHOLD","SD-THRESHOLD","SD","Other","Everyday Decision Making","Work","Social Relationships","Financial Problem","Health, Fatigue, or Physical Pain","Emotional Turmoil","Family Issues","School","avg_severity","median_severity","SD_severity","Votes","Source"]
def get_test_experiment_df(test):
test_predictions = [x[0]['probabilities'] for x in zip(getListPrediction(in_sentences=list(test[DATA_COLUMN])))]
    test_live_labels = np.array(test_predictions)  # completion; api: numpy.array

# -*- coding: utf-8 -*-
"""
biosppy.signals.tools
---------------------
This module provides various signal analysis methods in the time and
frequency domains.
:copyright: (c) 2015-2018 by Instituto de Telecomunicacoes
:license: BSD 3-clause, see LICENSE for more details.
"""
# Imports
# compat
from __future__ import absolute_import, division, print_function
from six.moves import range
import six
# 3rd party
import numpy as np
import scipy.signal as ss
from scipy import interpolate, optimize
from scipy.stats import stats
# local
from biosppy import utils
def _norm_freq(frequency=None, sampling_rate=1000.):
"""Normalize frequency to Nyquist Frequency (Fs/2).
Parameters
----------
frequency : int, float, list, array
Frequencies to normalize.
sampling_rate : int, float, optional
Sampling frequency (Hz).
Returns
-------
wn : float, array
Normalized frequencies.
"""
# check inputs
if frequency is None:
raise TypeError("Please specify a frequency to normalize.")
# convert inputs to correct representation
try:
frequency = float(frequency)
except TypeError:
# maybe frequency is a list or array
frequency = np.array(frequency, dtype='float')
Fs = float(sampling_rate)
wn = 2. * frequency / Fs
return wn
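# Example (sketch): _norm_freq(frequency=100., sampling_rate=1000.) returns 0.2,
# i.e. 100 Hz expressed as a fraction of the 500 Hz Nyquist frequency.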
def _filter_init(b, a, alpha=1.):
"""Get an initial filter state that corresponds to the steady-state
of the step response.
Parameters
----------
b : array
Numerator coefficients.
a : array
Denominator coefficients.
alpha : float, optional
Scaling factor.
Returns
-------
zi : array
Initial filter state.
"""
zi = alpha * ss.lfilter_zi(b, a)
return zi
def _filter_signal(b, a, signal, zi=None, check_phase=True, **kwargs):
"""Filter a signal with given coefficients.
Parameters
----------
b : array
Numerator coefficients.
a : array
Denominator coefficients.
signal : array
Signal to filter.
zi : array, optional
Initial filter state.
check_phase : bool, optional
If True, use the forward-backward technique.
``**kwargs`` : dict, optional
Additional keyword arguments are passed to the underlying filtering
function.
Returns
-------
filtered : array
Filtered signal.
zf : array
Final filter state.
Notes
-----
* If check_phase is True, zi cannot be set.
"""
# check inputs
if check_phase and zi is not None:
raise ValueError(
"Incompatible arguments: initial filter state cannot be set when \
check_phase is True.")
if zi is None:
zf = None
if check_phase:
filtered = ss.filtfilt(b, a, signal, **kwargs)
else:
filtered = ss.lfilter(b, a, signal, **kwargs)
else:
filtered, zf = ss.lfilter(b, a, signal, zi=zi, **kwargs)
return filtered, zf
def _filter_resp(b, a, sampling_rate=1000., nfreqs=4096):
"""Compute the filter frequency response.
Parameters
----------
b : array
Numerator coefficients.
a : array
Denominator coefficients.
sampling_rate : int, float, optional
Sampling frequency (Hz).
nfreqs : int, optional
Number of frequency points to compute.
Returns
-------
freqs : array
Array of frequencies (Hz) at which the response was computed.
resp : array
Frequency response.
"""
w, resp = ss.freqz(b, a, nfreqs)
# convert frequencies
freqs = w * sampling_rate / (2. * np.pi)
return freqs, resp
def _get_window(kernel, size, **kwargs):
"""Return a window with the specified parameters.
Parameters
----------
kernel : str
Type of window to create.
size : int
Size of the window.
``**kwargs`` : dict, optional
Additional keyword arguments are passed to the underlying
scipy.signal.windows function.
Returns
-------
window : array
Created window.
"""
# mimics scipy.signal.get_window
if kernel in ['blackman', 'black', 'blk']:
winfunc = ss.blackman
elif kernel in ['triangle', 'triang', 'tri']:
winfunc = ss.triang
elif kernel in ['hamming', 'hamm', 'ham']:
winfunc = ss.hamming
elif kernel in ['bartlett', 'bart', 'brt']:
winfunc = ss.bartlett
elif kernel in ['hanning', 'hann', 'han']:
winfunc = ss.hann
elif kernel in ['blackmanharris', 'blackharr', 'bkh']:
winfunc = ss.blackmanharris
elif kernel in ['parzen', 'parz', 'par']:
winfunc = ss.parzen
elif kernel in ['bohman', 'bman', 'bmn']:
winfunc = ss.bohman
elif kernel in ['nuttall', 'nutl', 'nut']:
winfunc = ss.nuttall
elif kernel in ['barthann', 'brthan', 'bth']:
winfunc = ss.barthann
elif kernel in ['flattop', 'flat', 'flt']:
winfunc = ss.flattop
elif kernel in ['kaiser', 'ksr']:
winfunc = ss.kaiser
elif kernel in ['gaussian', 'gauss', 'gss']:
winfunc = ss.gaussian
elif kernel in ['general gaussian', 'general_gaussian', 'general gauss',
'general_gauss', 'ggs']:
winfunc = ss.general_gaussian
elif kernel in ['boxcar', 'box', 'ones', 'rect', 'rectangular']:
winfunc = ss.boxcar
elif kernel in ['slepian', 'slep', 'optimal', 'dpss', 'dss']:
winfunc = ss.slepian
elif kernel in ['cosine', 'halfcosine']:
winfunc = ss.cosine
elif kernel in ['chebwin', 'cheb']:
winfunc = ss.chebwin
else:
raise ValueError("Unknown window type.")
try:
window = winfunc(size, **kwargs)
except TypeError as e:
raise TypeError("Invalid window arguments: %s." % e)
return window
def get_filter(ftype='FIR',
band='lowpass',
order=None,
frequency=None,
sampling_rate=1000., **kwargs):
"""Compute digital (FIR or IIR) filter coefficients with the given
parameters.
Parameters
----------
ftype : str
Filter type:
* Finite Impulse Response filter ('FIR');
* Butterworth filter ('butter');
* Chebyshev filters ('cheby1', 'cheby2');
* Elliptic filter ('ellip');
* Bessel filter ('bessel').
band : str
Band type:
* Low-pass filter ('lowpass');
* High-pass filter ('highpass');
* Band-pass filter ('bandpass');
* Band-stop filter ('bandstop').
order : int
Order of the filter.
frequency : int, float, list, array
Cutoff frequencies; format depends on type of band:
* 'lowpass' or 'highpass': single frequency;
* 'bandpass' or 'bandstop': pair of frequencies.
sampling_rate : int, float, optional
Sampling frequency (Hz).
``**kwargs`` : dict, optional
Additional keyword arguments are passed to the underlying
scipy.signal function.
Returns
-------
b : array
Numerator coefficients.
a : array
Denominator coefficients.
See Also:
scipy.signal
"""
# check inputs
if order is None:
raise TypeError("Please specify the filter order.")
if frequency is None:
raise TypeError("Please specify the cutoff frequency.")
if band not in ['lowpass', 'highpass', 'bandpass', 'bandstop']:
raise ValueError(
"Unknown filter type '%r'; choose 'lowpass', 'highpass', \
'bandpass', or 'bandstop'."
% band)
# convert frequencies
frequency = _norm_freq(frequency, sampling_rate)
# get coeffs
b, a = [], []
if ftype == 'FIR':
# FIR filter
if order % 2 == 0:
order += 1
a = np.array([1])
if band in ['lowpass', 'bandstop']:
b = ss.firwin(numtaps=order,
cutoff=frequency,
pass_zero=True, **kwargs)
elif band in ['highpass', 'bandpass']:
b = ss.firwin(numtaps=order,
cutoff=frequency,
pass_zero=False, **kwargs)
elif ftype == 'butter':
# Butterworth filter
b, a = ss.butter(N=order,
Wn=frequency,
btype=band,
analog=False,
output='ba', **kwargs)
elif ftype == 'cheby1':
# Chebyshev type I filter
b, a = ss.cheby1(N=order,
Wn=frequency,
btype=band,
analog=False,
output='ba', **kwargs)
elif ftype == 'cheby2':
# chebyshev type II filter
b, a = ss.cheby2(N=order,
Wn=frequency,
btype=band,
analog=False,
output='ba', **kwargs)
elif ftype == 'ellip':
# Elliptic filter
b, a = ss.ellip(N=order,
Wn=frequency,
btype=band,
analog=False,
output='ba', **kwargs)
elif ftype == 'bessel':
# Bessel filter
b, a = ss.bessel(N=order,
Wn=frequency,
btype=band,
analog=False,
output='ba', **kwargs)
return utils.ReturnTuple((b, a), ('b', 'a'))
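# Illustrative usage (sketch): 4th-order Butterworth band-pass between 3 and 45 Hz
# for a signal sampled at 1000 Hz.
#   b, a = get_filter(ftype='butter', band='bandpass', order=4,
#                     frequency=[3, 45], sampling_rate=1000.)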
def filter_signal(signal=None,
ftype='FIR',
band='lowpass',
order=None,
frequency=None,
sampling_rate=1000., **kwargs):
"""Filter a signal according to the given parameters.
Parameters
----------
signal : array
Signal to filter.
ftype : str
Filter type:
* Finite Impulse Response filter ('FIR');
* Butterworth filter ('butter');
* Chebyshev filters ('cheby1', 'cheby2');
* Elliptic filter ('ellip');
* Bessel filter ('bessel').
band : str
Band type:
* Low-pass filter ('lowpass');
* High-pass filter ('highpass');
* Band-pass filter ('bandpass');
* Band-stop filter ('bandstop').
order : int
Order of the filter.
frequency : int, float, list, array
Cutoff frequencies; format depends on type of band:
        * 'lowpass' or 'highpass': single frequency;
* 'bandpass' or 'bandstop': pair of frequencies.
sampling_rate : int, float, optional
Sampling frequency (Hz).
``**kwargs`` : dict, optional
Additional keyword arguments are passed to the underlying
scipy.signal function.
Returns
-------
signal : array
Filtered signal.
sampling_rate : float
Sampling frequency (Hz).
params : dict
Filter parameters.
Notes
-----
* Uses a forward-backward filter implementation. Therefore, the combined
filter has linear phase.
"""
# check inputs
if signal is None:
raise TypeError("Please specify a signal to filter.")
# get filter
b, a = get_filter(ftype=ftype,
order=order,
frequency=frequency,
sampling_rate=sampling_rate,
band=band, **kwargs)
# filter
filtered, _ = _filter_signal(b, a, signal, check_phase=True)
# output
params = {
'ftype': ftype,
'order': order,
'frequency': frequency,
'band': band,
}
params.update(kwargs)
args = (filtered, sampling_rate, params)
names = ('signal', 'sampling_rate', 'params')
return utils.ReturnTuple(args, names)
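# Illustrative usage (sketch, assuming `raw` is a 1-D numpy array sampled at 1000 Hz):
#   filtered, fs, params = filter_signal(signal=raw, ftype='FIR', band='lowpass',
#                                        order=64, frequency=40., sampling_rate=1000.)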
class OnlineFilter(object):
"""Online filtering.
Parameters
----------
b : array
Numerator coefficients.
a : array
Denominator coefficients.
"""
def __init__(self, b=None, a=None):
# check inputs
if b is None:
raise TypeError('Please specify the numerator coefficients.')
if a is None:
raise TypeError('Please specify the denominator coefficients.')
# self things
self.b = b
self.a = a
# reset
self.reset()
def reset(self):
"""Reset the filter state."""
self.zi = None
def filter(self, signal=None):
"""Filter a signal segment.
Parameters
----------
signal : array
Signal segment to filter.
Returns
-------
filtered : array
Filtered signal segment.
"""
# check input
if signal is None:
raise TypeError('Please specify the input signal.')
if self.zi is None:
self.zi = signal[0] * ss.lfilter_zi(self.b, self.a)
filtered, self.zi = ss.lfilter(self.b, self.a, signal, zi=self.zi)
return utils.ReturnTuple((filtered, ), ('filtered', ))
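# Illustrative usage (sketch): process a long recording chunk by chunk while the
# filter state is carried between calls; `chunks` is an assumed iterable of arrays.
#   b, a = get_filter(ftype='butter', band='lowpass', order=4,
#                     frequency=40., sampling_rate=1000.)
#   olf = OnlineFilter(b=b, a=a)
#   pieces = [olf.filter(signal=chunk)[0] for chunk in chunks]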
def smoother(signal=None, kernel='boxzen', size=10, mirror=True, **kwargs):
"""Smooth a signal using an N-point moving average [MAvg]_ filter.
This implementation uses the convolution of a filter kernel with the input
signal to compute the smoothed signal [Smit97]_.
    Available kernels: median, boxzen, boxcar, triang, blackman, hamming, hann,
bartlett, flattop, parzen, bohman, blackmanharris, nuttall, barthann,
kaiser (needs beta), gaussian (needs std), general_gaussian (needs power,
width), slepian (needs width), chebwin (needs attenuation).
Parameters
----------
signal : array
Signal to smooth.
kernel : str, array, optional
Type of kernel to use; if array, use directly as the kernel.
size : int, optional
Size of the kernel; ignored if kernel is an array.
mirror : bool, optional
If True, signal edges are extended to avoid boundary effects.
``**kwargs`` : dict, optional
Additional keyword arguments are passed to the underlying
scipy.signal.windows function.
Returns
-------
signal : array
Smoothed signal.
params : dict
Smoother parameters.
Notes
-----
* When the kernel is 'median', mirror is ignored.
References
----------
.. [MAvg] Wikipedia, "Moving Average",
http://en.wikipedia.org/wiki/Moving_average
.. [Smit97] <NAME>, "Moving Average Filters - Implementation by
Convolution", http://www.dspguide.com/ch15/1.htm, 1997
"""
# check inputs
if signal is None:
raise TypeError("Please specify a signal to smooth.")
length = len(signal)
if isinstance(kernel, six.string_types):
# check length
if size > length:
size = length - 1
if size < 1:
size = 1
if kernel == 'boxzen':
# hybrid method
# 1st pass - boxcar kernel
aux, _ = smoother(signal,
kernel='boxcar',
size=size,
mirror=mirror)
# 2nd pass - parzen kernel
smoothed, _ = smoother(aux,
kernel='parzen',
size=size,
mirror=mirror)
params = {'kernel': kernel, 'size': size, 'mirror': mirror}
args = (smoothed, params)
names = ('signal', 'params')
return utils.ReturnTuple(args, names)
elif kernel == 'median':
# median filter
if size % 2 == 0:
raise ValueError(
"When the kernel is 'median', size must be odd.")
smoothed = ss.medfilt(signal, kernel_size=size)
params = {'kernel': kernel, 'size': size, 'mirror': mirror}
args = (smoothed, params)
names = ('signal', 'params')
return utils.ReturnTuple(args, names)
else:
win = _get_window(kernel, size, **kwargs)
elif isinstance(kernel, np.ndarray):
win = kernel
size = len(win)
# check length
if size > length:
raise ValueError("Kernel size is bigger than signal length.")
if size < 1:
raise ValueError("Kernel size is smaller than 1.")
else:
raise TypeError("Unknown kernel type.")
# convolve
w = win / win.sum()
if mirror:
aux = np.concatenate(
(signal[0] * np.ones(size), signal, signal[-1] * np.ones(size)))
smoothed = np.convolve(w, aux, mode='same')
smoothed = smoothed[size:-size]
else:
smoothed = np.convolve(w, signal, mode='same')
# output
params = {'kernel': kernel, 'size': size, 'mirror': mirror}
params.update(kwargs)
args = (smoothed, params)
names = ('signal', 'params')
return utils.ReturnTuple(args, names)
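# Illustrative usage (sketch, `raw` assumed): 'boxzen' smoothing runs a boxcar pass
# followed by a parzen pass of the same size.
#   smoothed, params = smoother(signal=raw, kernel='boxzen', size=25)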
def analytic_signal(signal=None, N=None):
"""Compute analytic signal, using the Hilbert Transform.
Parameters
----------
signal : array
Input signal.
N : int, optional
Number of Fourier components; default is `len(signal)`.
Returns
-------
amplitude : array
Amplitude envelope of the analytic signal.
phase : array
        Instantaneous phase component of the analytic signal.
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
# hilbert transform
asig = ss.hilbert(signal, N=N)
# amplitude envelope
amp = np.absolute(asig)
# instantaneous
phase = np.angle(asig)
return utils.ReturnTuple((amp, phase), ('amplitude', 'phase'))
def phase_locking(signal1=None, signal2=None, N=None):
"""Compute the Phase-Locking Factor (PLF) between two signals.
Parameters
----------
signal1 : array
First input signal.
signal2 : array
Second input signal.
N : int, optional
Number of Fourier components.
Returns
-------
plf : float
The PLF between the two signals.
"""
# check inputs
if signal1 is None:
raise TypeError("Please specify the first input signal.")
if signal2 is None:
raise TypeError("Please specify the second input signal.")
if len(signal1) != len(signal2):
raise ValueError("The input signals must have the same length.")
# compute analytic signal
asig1 = ss.hilbert(signal1, N=N)
phase1 = np.angle(asig1)
asig2 = ss.hilbert(signal2, N=N)
phase2 = np.angle(asig2)
# compute PLF
plf = np.absolute(np.mean(np.exp(1j * (phase1 - phase2))))
return utils.ReturnTuple((plf,), ('plf',))
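# Example (sketch, `t` is an assumed time axis): two sinusoids with a constant
# phase offset give a PLF of approximately 1, since the PLF measures how
# consistent the instantaneous phase difference is over time.
#   plf, = phase_locking(signal1=np.sin(t), signal2=np.sin(t + 0.5))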
def power_spectrum(signal=None,
sampling_rate=1000.,
pad=None,
pow2=False,
decibel=True):
"""Compute the power spectrum of a signal (one-sided).
Parameters
----------
signal : array
Input signal.
sampling_rate : int, float, optional
Sampling frequency (Hz).
pad : int, optional
Padding for the Fourier Transform (number of zeros added);
        defaults to no padding.
pow2 : bool, optional
If True, rounds the number of points `N = len(signal) + pad` to the
nearest power of 2 greater than N.
decibel : bool, optional
If True, returns the power in decibels.
Returns
-------
freqs : array
Array of frequencies (Hz) at which the power was computed.
power : array
Power spectrum.
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
npoints = len(signal)
if pad is not None:
if pad >= 0:
npoints += pad
else:
raise ValueError("Padding must be a positive integer.")
# power of 2
if pow2:
        npoints = int(2 ** np.ceil(np.log2(npoints)))  # keep an integer FFT length
Nyq = float(sampling_rate) / 2
hpoints = npoints // 2
freqs = np.linspace(0, Nyq, hpoints)
power = np.abs(np.fft.fft(signal, npoints)) / npoints
# one-sided
power = power[:hpoints]
power[1:] *= 2
power = np.power(power, 2)
if decibel:
power = 10. * np.log10(power)
return utils.ReturnTuple((freqs, power), ('freqs', 'power'))
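# Illustrative usage (sketch): one-sided spectrum of a 5 Hz sine sampled at 100 Hz;
# the peak of `power` is expected near freqs == 5.
#   t = np.arange(0, 10, 1 / 100.)
#   freqs, power = power_spectrum(signal=np.sin(2 * np.pi * 5 * t),
#                                 sampling_rate=100., decibel=False)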
def welch_spectrum(signal=None,
sampling_rate=1000.,
size=None,
overlap=None,
window='hanning',
window_kwargs=None,
pad=None,
decibel=True):
"""Compute the power spectrum of a signal using Welch's method (one-sided).
Parameters
----------
signal : array
Input signal.
sampling_rate : int, float, optional
Sampling frequency (Hz).
size : int, optional
Number of points in each Welch segment;
defaults to the equivalent of 1 second;
ignored when 'window' is an array.
overlap : int, optional
Number of points to overlap between segments; defaults to `size / 2`.
window : str, array, optional
Type of window to use.
window_kwargs : dict, optional
Additional keyword arguments to pass on window creation; ignored if
'window' is an array.
pad : int, optional
Padding for the Fourier Transform (number of zeros added);
defaults to no padding.
decibel : bool, optional
If True, returns the power in decibels.
Returns
-------
freqs : array
Array of frequencies (Hz) at which the power was computed.
power : array
Power spectrum.
Notes
-----
* Detrends each Welch segment by removing the mean.
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
length = len(signal)
sampling_rate = float(sampling_rate)
if size is None:
size = int(sampling_rate)
if window_kwargs is None:
window_kwargs = {}
if isinstance(window, six.string_types):
win = _get_window(window, size, **window_kwargs)
elif isinstance(window, np.ndarray):
win = window
size = len(win)
if size > length:
raise ValueError('Segment size must be smaller than signal length.')
if overlap is None:
overlap = size // 2
elif overlap > size:
raise ValueError('Overlap must be smaller than segment size.')
nfft = size
if pad is not None:
if pad >= 0:
nfft += pad
else:
raise ValueError("Padding must be a positive integer.")
freqs, power = ss.welch(
signal,
fs=sampling_rate,
window=win,
nperseg=size,
noverlap=overlap,
nfft=nfft,
detrend='constant',
return_onesided=True,
scaling='spectrum',
)
# compensate one-sided
power *= 2
if decibel:
power = 10. * np.log10(power)
return utils.ReturnTuple((freqs, power), ('freqs', 'power'))
def band_power(freqs=None, power=None, frequency=None, decibel=True):
    """Compute the average power in a frequency band.
Parameters
----------
freqs : array
Array of frequencies (Hz) at which the power was computed.
power : array
Input power spectrum.
frequency : list, array
Pair of frequencies defining the band.
decibel : bool, optional
If True, input power is in decibels.
Returns
-------
avg_power : float
The average power in the band.
"""
# check inputs
if freqs is None:
raise TypeError("Please specify the 'freqs' array.")
if power is None:
raise TypeError("Please specify the input power spectrum.")
if len(freqs) != len(power):
raise ValueError(
"The input 'freqs' and 'power' arrays must have the same length.")
if frequency is None:
raise TypeError("Please specify the band frequencies.")
try:
f1, f2 = frequency
except ValueError:
raise ValueError("Input 'frequency' must be a pair of frequencies.")
# make frequencies sane
if f1 > f2:
f1, f2 = f2, f1
if f1 < freqs[0]:
f1 = freqs[0]
if f2 > freqs[-1]:
f2 = freqs[-1]
# average
sel = np.nonzero(np.logical_and(f1 <= freqs, freqs <= f2))[0]
if decibel:
aux = 10 ** (power / 10.)
avg = np.mean(aux[sel])
avg = 10. * np.log10(avg)
else:
avg = np.mean(power[sel])
return utils.ReturnTuple((avg,), ('avg_power',))
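# Illustrative usage (sketch): average power in the 8-13 Hz band of a previously
# computed (decibel) spectrum.
#   avg, = band_power(freqs=freqs, power=power, frequency=[8, 13], decibel=True)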
def signal_stats(signal=None):
"""Compute various metrics describing the signal.
Parameters
----------
signal : array
Input signal.
Returns
-------
mean : float
Mean of the signal.
median : float
Median of the signal.
min : float
Minimum signal value.
max : float
Maximum signal value.
max_amp : float
Maximum absolute signal amplitude, in relation to the mean.
var : float
Signal variance (unbiased).
std_dev : float
Standard signal deviation (unbiased).
abs_dev : float
Mean absolute signal deviation around the median.
kurtosis : float
Signal kurtosis (unbiased).
    skewness : float
Signal skewness (unbiased).
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
# ensure numpy
signal = np.array(signal)
# mean
mean = np.mean(signal)
# median
median = np.median(signal)
# min
minVal = np.min(signal)
# max
maxVal = np.max(signal)
# maximum amplitude
maxAmp = np.abs(signal - mean).max()
# variance
sigma2 = signal.var(ddof=1)
# standard deviation
sigma = signal.std(ddof=1)
# absolute deviation
ad = np.mean(np.abs(signal - median))
# kurtosis
kurt = stats.kurtosis(signal, bias=False)
    # skewness
skew = stats.skew(signal, bias=False)
# output
args = (mean, median, minVal, maxVal, maxAmp, sigma2, sigma, ad, kurt, skew)
names = ('mean', 'median', 'min', 'max', 'max_amp', 'var', 'std_dev',
'abs_dev', 'kurtosis', 'skewness')
return utils.ReturnTuple(args, names)
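# Illustrative usage (sketch, `raw` assumed): the ReturnTuple unpacks positionally,
# in the order listed in `names`.
#   mean, median, sig_min, sig_max, max_amp, var, std, ad, kurt, skew = signal_stats(raw)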
def normalize(signal=None, ddof=1):
"""Normalize a signal to zero mean and unitary standard deviation.
Parameters
----------
signal : array
Input signal.
ddof : int, optional
Delta degrees of freedom for standard deviation computation;
the divisor is `N - ddof`, where `N` is the number of elements;
default is one.
Returns
-------
signal : array
Normalized signal.
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
# ensure numpy
signal = np.array(signal)
normalized = signal - signal.mean()
normalized /= normalized.std(ddof=ddof)
return utils.ReturnTuple((normalized,), ('signal',))
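# Example (sketch): normalize(signal=[1., 2., 3.]) returns approximately
# [-1., 0., 1.], i.e. zero mean and unit (unbiased) standard deviation.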
def zero_cross(signal=None, detrend=False):
"""Locate the indices where the signal crosses zero.
Parameters
----------
signal : array
Input signal.
detrend : bool, optional
If True, remove signal mean before computation.
Returns
-------
zeros : array
Indices of zero crossings.
Notes
-----
* When the signal crosses zero between samples, the first index
is returned.
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
if detrend:
signal = signal - np.mean(signal)
# zeros
df = np.diff(np.sign(signal))
zeros = np.nonzero(np.abs(df) > 0)[0]
return utils.ReturnTuple((zeros,), ('zeros',))
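# Example (sketch): zero_cross(signal=np.array([1., -1., -2., 3.])) returns
# zeros == [0, 2], the index just before each sign change.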
def find_extrema(signal=None, mode='both'):
"""Locate local extrema points in a signal.
Based on Fermat's Theorem [Ferm]_.
Parameters
----------
signal : array
Input signal.
mode : str, optional
Whether to find maxima ('max'), minima ('min'), or both ('both').
Returns
-------
extrema : array
        Indices of the extrema points.
values : array
Signal values at the extrema points.
References
----------
.. [Ferm] Wikipedia, "Fermat's theorem (stationary points)",
https://en.wikipedia.org/wiki/Fermat%27s_theorem_(stationary_points)
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
if mode not in ['max', 'min', 'both']:
        raise ValueError("Unknown mode %r." % mode)
aux = np.diff(np.sign(np.diff(signal)))
if mode == 'both':
aux = np.abs(aux)
extrema = np.nonzero(aux > 0)[0] + 1
elif mode == 'max':
        extrema = np.nonzero(aux < 0)  # completion; api: numpy.nonzero