prompt (string, lengths 15 to 655k) | completion (string, lengths 3 to 32.4k) | api (string, lengths 8 to 52) |
---|---|---|
#! /usr/bin/env python3
#
# Copyright 2019 California Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ISOFIT: Imaging Spectrometer Optimal FITting
# Author: <NAME>, <EMAIL>
#
from scipy.linalg import inv
from isofit.core.instrument import Instrument
from spectral.io import envi
from scipy.spatial import KDTree
import numpy as np
import logging
import time
import matplotlib
import pylab as plt
from isofit.configs import configs
import ray
import atexit
plt.switch_backend("Agg")
def _write_bil_chunk(dat: np.array, outfile: str, line: int, shape: tuple, dtype: str = 'float32') -> None:
"""
Write a chunk of data to a binary, BIL formatted data cube.
Args:
dat: data to write
outfile: output file to write to
line: line of the output file to write to
shape: shape of the output file
dtype: output data type
Returns:
None
"""
    with open(outfile, 'rb+') as out:
        out.seek(line * shape[1] * shape[2] * np.dtype(dtype).itemsize)
        out.write(dat.astype(dtype).tobytes())
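# Illustrative sketch (not part of the original module; path and shape below are hypothetical):
# for a BIL cube of shape (lines, bands, samples), line L starts at byte offset
# L * bands * samples * itemsize, which is exactly the offset _write_bil_chunk seeks to.
def _example_write_bil_chunk():
    import os
    import tempfile
    shape = (10, 3, 5)  # (lines, bands, samples)
    path = os.path.join(tempfile.mkdtemp(), 'example_bil')
    with open(path, 'wb') as f:
        f.write(np.zeros(shape, dtype=np.float32).tobytes())  # pre-allocate the cube
    chunk = np.ones((1, shape[1], shape[2]), dtype=np.float32)
    _write_bil_chunk(chunk, path, 4, shape)  # overwrite line 4 with ones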
@ray.remote
def _run_chunk(start_line: int, stop_line: int, reference_radiance_file: str, reference_reflectance_file: str,
reference_uncertainty_file: str, reference_locations_file: str, input_radiance_file: str,
input_locations_file: str, segmentation_file: str, isofit_config: str, output_reflectance_file: str,
output_uncertainty_file: str, radiance_factors: np.array, nneighbors: int,
nodata_value: float, loglevel: str, logfile: str) -> None:
"""
    Run the empirical line estimation over a chunk of lines from the input radiance image.

    Args:
start_line: line to start empirical line run at
stop_line: line to stop empirical line run at
reference_radiance_file: source file for radiance (interpolation built from this)
reference_reflectance_file: source file for reflectance (interpolation built from this)
reference_uncertainty_file: source file for uncertainty (interpolation built from this)
reference_locations_file: source file for file locations (lon, lat, elev), (interpolation built from this)
input_radiance_file: input radiance file (interpolate over this)
input_locations_file: input location file (interpolate over this)
segmentation_file: input file noting the per-pixel segmentation used
isofit_config: path to isofit configuration JSON file
output_reflectance_file: location to write output reflectance to
output_uncertainty_file: location to write output uncertainty to
radiance_factors: radiance adjustment factors
nneighbors: number of neighbors to use for interpolation
nodata_value: nodata value of input and output
loglevel: logging level
logfile: logging file
Returns:
None
"""
logging.basicConfig(format='%(message)s', level=loglevel, filename=logfile)
# Load reference images
reference_radiance_img = envi.open(reference_radiance_file + '.hdr', reference_radiance_file)
reference_reflectance_img = envi.open(reference_reflectance_file + '.hdr', reference_reflectance_file)
reference_uncertainty_img = envi.open(reference_uncertainty_file + '.hdr', reference_uncertainty_file)
reference_locations_img = envi.open(reference_locations_file + '.hdr', reference_locations_file)
n_reference_lines, n_radiance_bands, n_reference_columns = [int(reference_radiance_img.metadata[n])
for n in ('lines', 'bands', 'samples')]
n_reference_uncertainty_bands = int(reference_uncertainty_img.metadata['bands'])
# Load input images
input_radiance_img = envi.open(input_radiance_file + '.hdr', input_radiance_file)
n_input_lines, n_input_bands, n_input_samples = [int(input_radiance_img.metadata[n])
for n in ('lines', 'bands', 'samples')]
input_locations_img = envi.open(input_locations_file + '.hdr', input_locations_file)
n_location_bands = int(input_locations_img.metadata['bands'])
# Load output images
output_reflectance_img = envi.open(output_reflectance_file + '.hdr', output_reflectance_file)
output_uncertainty_img = envi.open(output_uncertainty_file + '.hdr', output_uncertainty_file)
n_output_reflectance_bands = int(output_reflectance_img.metadata['bands'])
n_output_uncertainty_bands = int(output_uncertainty_img.metadata['bands'])
# Load reference data
reference_locations_mm = reference_locations_img.open_memmap(interleave='bip', writable=False)
reference_locations = np.array(reference_locations_mm[:, :, :]).reshape((n_reference_lines, n_location_bands))
reference_radiance_mm = reference_radiance_img.open_memmap(interleave='bip', writable=False)
reference_radiance = np.array(reference_radiance_mm[:, :, :]).reshape((n_reference_lines, n_radiance_bands))
reference_reflectance_mm = reference_reflectance_img.open_memmap(interleave='bip', writable=False)
reference_reflectance = np.array(reference_reflectance_mm[:, :, :]).reshape((n_reference_lines, n_radiance_bands))
reference_uncertainty_mm = reference_uncertainty_img.open_memmap(interleave='bip', writable=False)
reference_uncertainty = np.array(reference_uncertainty_mm[:, :, :]).reshape((n_reference_lines,
n_reference_uncertainty_bands))
reference_uncertainty = reference_uncertainty[:, :n_radiance_bands].reshape((n_reference_lines, n_radiance_bands))
# Load segmentation data
if segmentation_file:
segmentation_img = envi.open(segmentation_file + '.hdr', segmentation_file)
segmentation_img = segmentation_img.read_band(0)
else:
segmentation_img = None
# Prepare instrument model, if available
if isofit_config is not None:
config = configs.create_new_config(isofit_config)
instrument = Instrument(config)
logging.info('Loading instrument')
else:
instrument = None
# Load radiance factors
if radiance_factors is None:
radiance_adjustment = np.ones(n_radiance_bands, )
else:
radiance_adjustment =
| np.loadtxt(radiance_factors) | numpy.loadtxt |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 2018
@author: jsaavedr
Description: A list of functions to create tfrecords
"""
import os
import random
import sys
import numpy
import numpy as np
import tensorflow as tf
from skimage.draw import line_aa
from skimage.transform import resize
from sklearn.utils import shuffle
import json
# %%
from configuration_sketch import data_path, CLASSES_COUNT, TEST_EXAMPLES_PER_CLASS, TRAIN_EXAMPLES_PER_CLASS, \
IMAGE_DIMENSIONS, DRAW_IMAGE_LIMIT
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# %%
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# %%
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
# creating tfrecords
def createTFRecordFromList(images, labels, tfr_filename):
h = IMAGE_DIMENSIONS[0]
w = IMAGE_DIMENSIONS[1]
writer = tf.python_io.TFRecordWriter(tfr_filename)
assert len(images) == len(labels)
mean_image = np.zeros([h, w], dtype=np.float32)
for i in range(len(images)):
if i % 500 == 0:
print("---{}".format(i))
image = images[i].astype(np.uint8)
img_raw = image.tostring()
# image = processFun(image, (w, h))
# create a feature
feature = {'train/label': _int64_feature(labels[i]),
'train/image': _bytes_feature(img_raw)}
        # create an example protocol buffer
example = tf.train.Example(features=tf.train.Features(feature=feature))
        # serialize to string and write to the file
writer.write(example.SerializeToString())
mean_image = mean_image + image / len(images)
    # mean_image is accumulated above and returned to the caller (not written to the tfrecord)
writer.close()
sys.stdout.flush()
return mean_image
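# Read-back sketch (assumes the same TF 1.x API used above; the tfrecord path is hypothetical):
# shows how the 'train/label' / 'train/image' features written above can be decoded again.
def _example_read_tfrecord(tfr_filename):
    h, w = IMAGE_DIMENSIONS[0], IMAGE_DIMENSIONS[1]
    for record in tf.python_io.tf_record_iterator(tfr_filename):
        example = tf.train.Example()
        example.ParseFromString(record)
        label = example.features.feature['train/label'].int64_list.value[0]
        img_raw = example.features.feature['train/image'].bytes_list.value[0]
        image = np.frombuffer(img_raw, dtype=np.uint8).reshape(h, w)
        yield image, label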
def createImage(points):
x_points = []
y_points = []
target_size = 256
object_size = 200
# reading all points
for stroke in points:
x_points = x_points + stroke[0]
y_points = y_points + stroke[1]
# min max for each axis
min_x = min(x_points)
max_x = max(x_points)
min_y = min(y_points)
max_y = max(y_points)
im_width = np.int(max_x - min_x + 1)
im_height = np.int(max_y - min_y + 1)
if im_width > im_height:
resize_factor = np.true_divide(object_size, im_width)
else:
resize_factor = np.true_divide(object_size, im_height)
t_width = np.int(im_width * resize_factor)
t_height = np.int(im_height * resize_factor)
center_x = np.int(sum(x_points) / len(x_points))
center_y = np.int(sum(y_points) / len(y_points))
center_x = np.int(t_width * 0.5)
center_y = np.int(t_height * 0.5)
t_center_x = np.int(target_size * 0.5)
t_center_y = np.int(target_size * 0.5)
offset_x = t_center_x - center_x
offset_y = t_center_y - center_y
blank_image = np.zeros((target_size, target_size), np.uint8)
blank_image[:, :] = 0
# cv2.circle(blank_image, (), 1, 1, 8)
for stroke in points:
xa = -1
ya = -1
for p in zip(stroke[0], stroke[1]):
x = np.int(
| np.true_divide(p[0] - min_x, im_width) | numpy.true_divide |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Torch interface"""
import functools
import numpy as np
import pytest
torch = pytest.importorskip("torch")
import pennylane as qml
from pennylane.gradients import finite_diff, param_shift
from pennylane.interfaces.batch import execute
class TestTorchExecuteUnitTests:
"""Unit tests for torch execution"""
def test_jacobian_options(self, mocker, tol):
"""Test setting jacobian options"""
spy = mocker.spy(qml.gradients, "param_shift")
a = torch.tensor([0.1, 0.2], requires_grad=True)
dev = qml.device("default.qubit", wires=1)
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
res = execute(
[tape],
dev,
gradient_fn=param_shift,
gradient_kwargs={"shifts": [(np.pi / 4,)] * 2},
interface="torch",
)[0]
res.backward()
for args in spy.call_args_list:
assert args[1]["shift"] == [(np.pi / 4,)] * 2
def test_incorrect_mode(self):
"""Test that an error is raised if a gradient transform
is used with mode=forward"""
a = torch.tensor([0.1, 0.2], requires_grad=True)
dev = qml.device("default.qubit", wires=1)
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
with pytest.raises(
ValueError, match="Gradient transforms cannot be used with mode='forward'"
):
execute([tape], dev, gradient_fn=param_shift, mode="forward", interface="torch")[0]
def test_forward_mode_reuse_state(self, mocker):
"""Test that forward mode uses the `device.execute_and_gradients` pathway
while reusing the quantum state."""
dev = qml.device("default.qubit", wires=1)
spy = mocker.spy(dev, "execute_and_gradients")
a = torch.tensor([0.1, 0.2], requires_grad=True)
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
res = execute(
[tape],
dev,
gradient_fn="device",
gradient_kwargs={"method": "adjoint_jacobian", "use_device_state": True},
interface="torch",
)[0]
# adjoint method only performs a single device execution, but gets both result and gradient
assert dev.num_executions == 1
spy.assert_called()
def test_forward_mode(self, mocker):
"""Test that forward mode uses the `device.execute_and_gradients` pathway"""
dev = qml.device("default.qubit", wires=1)
spy = mocker.spy(dev, "execute_and_gradients")
a = torch.tensor([0.1, 0.2], requires_grad=True)
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
res = execute(
[tape],
dev,
gradient_fn="device",
gradient_kwargs={"method": "adjoint_jacobian"},
interface="torch",
)[0]
# two device executions; one for the value, one for the Jacobian
assert dev.num_executions == 2
spy.assert_called()
def test_backward_mode(self, mocker):
"""Test that backward mode uses the `device.batch_execute` and `device.gradients` pathway"""
dev = qml.device("default.qubit", wires=1)
spy_execute = mocker.spy(qml.devices.DefaultQubit, "batch_execute")
spy_gradients = mocker.spy(qml.devices.DefaultQubit, "gradients")
a = torch.tensor([0.1, 0.2], requires_grad=True)
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.expval(qml.PauliZ(0))
res = execute(
[tape],
dev,
gradient_fn="device",
mode="backward",
gradient_kwargs={"method": "adjoint_jacobian"},
interface="torch",
)[0]
assert dev.num_executions == 1
spy_execute.assert_called()
spy_gradients.assert_not_called()
res.backward()
spy_gradients.assert_called()
class TestCaching:
"""Test for caching behaviour"""
def test_cache_maxsize(self, mocker):
"""Test the cachesize property of the cache"""
dev = qml.device("default.qubit", wires=1)
spy = mocker.spy(qml.interfaces.batch, "cache_execute")
def cost(a, cachesize):
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.probs(wires=0)
return execute(
[tape], dev, gradient_fn=param_shift, cachesize=cachesize, interface="torch"
)[0][0, 0]
params = torch.tensor([0.1, 0.2], requires_grad=True)
res = cost(params, cachesize=2)
res.backward()
cache = spy.call_args[0][1]
assert cache.maxsize == 2
assert cache.currsize == 2
assert len(cache) == 2
def test_custom_cache(self, mocker):
"""Test the use of a custom cache object"""
dev = qml.device("default.qubit", wires=1)
spy = mocker.spy(qml.interfaces.batch, "cache_execute")
def cost(a, cache):
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.probs(wires=0)
return execute([tape], dev, gradient_fn=param_shift, cache=cache, interface="torch")[0][
0, 0
]
custom_cache = {}
params = torch.tensor([0.1, 0.2], requires_grad=True)
res = cost(params, cache=custom_cache)
res.backward()
cache = spy.call_args[0][1]
assert cache is custom_cache
def test_caching_param_shift(self, tol):
"""Test that, with the parameter-shift transform,
Torch always uses the optimum number of evals when computing the Jacobian."""
dev = qml.device("default.qubit", wires=1)
def cost(a, cache):
with qml.tape.JacobianTape() as tape:
qml.RY(a[0], wires=0)
qml.RX(a[1], wires=0)
qml.probs(wires=0)
return execute([tape], dev, gradient_fn=param_shift, cache=cache, interface="torch")[0][
0, 0
]
# Without caching, 5 evaluations are required to compute
# the Jacobian: 1 (forward pass) + (2 shifts * 2 params)
params = torch.tensor([0.1, 0.2], requires_grad=True)
torch.autograd.functional.jacobian(lambda p: cost(p, cache=None), params)
assert dev.num_executions == 5
        # With caching, the Jacobian still requires 5 evaluations:
        # 1 (forward pass) + (2 shifts * 2 params)
dev._num_executions = 0
torch.autograd.functional.jacobian(lambda p: cost(p, cache=True), params)
assert dev.num_executions == 5
@pytest.mark.parametrize("num_params", [2, 3])
def test_caching_param_shift_hessian(self, num_params, tol):
"""Test that, with the parameter-shift transform,
caching reduces the number of evaluations to their optimum
when computing Hessians."""
dev = qml.device("default.qubit", wires=2)
params = torch.tensor(np.arange(1, num_params + 1) / 10, requires_grad=True)
N = len(params)
def cost(x, cache):
with qml.tape.JacobianTape() as tape:
qml.RX(x[0], wires=[0])
qml.RY(x[1], wires=[1])
for i in range(2, num_params):
qml.RZ(x[i], wires=[i % 2])
qml.CNOT(wires=[0, 1])
qml.var(qml.PauliZ(0) @ qml.PauliX(1))
return execute(
[tape], dev, gradient_fn=param_shift, cache=cache, interface="torch", max_diff=2
)[0]
# No caching: number of executions is not ideal
hess1 = torch.autograd.functional.hessian(lambda x: cost(x, cache=None), params)
if num_params == 2:
# compare to theoretical result
x, y, *_ = params.detach()
expected = torch.tensor(
[
[2 * np.cos(2 * x) * np.sin(y) ** 2, np.sin(2 * x) * np.sin(2 * y)],
[np.sin(2 * x) * np.sin(2 * y), -2 * np.cos(x) ** 2 * np.cos(2 * y)],
]
)
assert
| np.allclose(expected, hess1, atol=tol, rtol=0) | numpy.allclose |
import numpy as np
from scipy.stats import multivariate_normal
class FeatStat:
""" cardinality, mean and observed variance of a set (has __add__())
NOTE: we use the observed variance, not the unbiased estimate (see
    discussion of the ddof parameter in np.cov)
>>> a_set = range(10)
>>> b_set = range(15)
>>> a = FeatStat.from_iter(a_set)
>>> b = FeatStat.from_iter(b_set)
>>> a + b
FeatStat(n=25, mu=[6.], cov=[[16.]])
>>> # validation, explicitly compute via original set
>>> FeatStat.from_iter(list(a_set) + list(b_set))
FeatStat(n=25, mu=[6.], cov=[[16.]])
>>> # test multi dim
>>> np.random.seed(1)
>>> a_array = np.random.rand(2, 10)
>>> b_array = np.random.rand(2, 10)
>>> a = FeatStat.from_array(a_array)
>>> b = FeatStat.from_array(b_array)
>>> a + b == FeatStat.from_array(np.hstack((a_array, b_array)))
True
"""
@property
def d(self):
return self.mu.size
@property
def cov_det(self):
if self.__cov_det is None:
self.__cov_det = np.linalg.det(self.cov)
return self.__cov_det
@property
def cov_inv(self):
""" inverse of covariance, defaults to pseudoinverse if singular
"""
if self.__cov_inv is None:
try:
self.__cov_inv = np.linalg.inv(self.cov)
            except np.linalg.LinAlgError:
self.__cov_inv = np.linalg.pinv(self.cov)
return self.__cov_inv
@property
def outer_mu(self):
if self._outer_mu is None:
self._outer_mu = np.outer(self.mu, self.mu)
return self._outer_mu
def __init__(self, n, mu, cov, _fast=False, outer_mu=None):
# defaults to 'empty' set of features
self.n = n
self.mu = mu
self.cov = cov
self.__cov_det = None
self.__cov_inv = None
self._outer_mu = outer_mu
if _fast:
return
self.n = int(self.n)
self.mu =
| np.atleast_1d(self.mu) | numpy.atleast_1d |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2022/1/7 4:33 PM
# @Author: zhoumengjie
# @File : jisilu.py
import base64
import logging
import math
import os
import shutil
import time
from datetime import date, datetime, timedelta
from functools import cmp_to_key
import pandas as pd
from wxcloudrun.bond.BondBuilder import BondInfo, BondData, ForceBondInfo
from wxcloudrun.bond.BondBuilder import BondPage
from wxcloudrun.bond.BondBuilder import CompanyInfo
from wxcloudrun.bond.BondUtils import Crawler, format_func
from wxcloudrun.bond import PageTemplate as pt
from wxcloudrun.bond.ImageConverter import ImageConverter
import numpy as np
from wxcloudrun.bond.PageTemplate import PROJECT_DIR
from wxcloudrun.common import mdmaker, preview, pdfutils, shellclient, fingerprinter, akclient
from wxcloudrun.common.chart import ChartClient
# from db.dbclient import SqliteClient
# from db.dbmodels import BondMarketSummaryModel, ApplyBondInfoModel, StockMarketSummaryModel
log = logging.getLogger('log')
crawler = Crawler()
# sqlclient = SqliteClient()
tushare = ChartClient()
briefs = []
filter_concept_names = ['同花顺漂亮100', '转融券标的', '融资融券', '融资标的股',
'融券标的股', '标普道琼斯A股', '长三角一体化', '年报预增',
'沪股通', '深股通', '机构重仓', '北京国资改革', '创业板重组松绑',
'核准制次新股', '新股与次新股', '股权转让', '兜底增持', '科创次新股',
'举牌', '高送转预期']
# trade_open = tushare.is_trade_open()
def build_bond():
rows = crawler.query_apply_list()
if not rows or len(rows) == 0:
return None
bond_page = BondPage()
apply_bonds = []
next_bonds = []
ipo_bonds = []
prepare_bonds = []
applying_bonds = []
today_bonds = []
draw_bonds = []
pass_bonds = []
today = date.today()
tomorrow = (today + timedelta(days=1))
yesterday = (today + timedelta(days=-1))
next_trade_open_day = tushare.next_trade_day(tomorrow)
last_trade_open_day = tushare.last_trade_day(yesterday)
next_open_date = next_trade_open_day.strftime('%Y-%m-%d')
last_open_date = last_trade_open_day.strftime('%Y-%m-%d')
today_str = today.strftime('%Y-%m-%d')
for row in rows:
if row['cb_type'] != '可转债':
continue
if row['apply_date'] == last_open_date:
draw_bonds.append(BondInfo(row))
if row['ap_flag'] == 'D' and row['list_date'] == today_str:
today_bond = BondInfo(row)
today_bonds.append(today_bond)
            # no continue needed here
if row['ap_flag'] == 'B' and row['apply_date'] == next_open_date:
apply_bond = BondInfo(row)
apply_bonds.append(apply_bond)
continue
if row['ap_flag'] == 'E' and row['list_date'] == next_open_date:
prepare_bond = BondInfo(row)
prepare_bonds.append(prepare_bond)
continue
if row['ap_flag'] == 'B':
applying_bond = BondInfo(row)
applying_bonds.append(applying_bond)
continue
if row['ap_flag'] == 'N' and row['progress_nm'] == '证监会核准/同意注册':
next_bond = BondInfo(row)
next_bonds.append(next_bond)
continue
if row['ap_flag'] == 'N' and row['progress_nm'] == '发审委通过':
pass_bond = BondInfo(row)
pass_bonds.append(pass_bond)
continue
if row['ap_flag'] == 'C' or row['ap_flag'] == 'E':
ipo_bond = BondInfo(row)
ipo_bonds.append(ipo_bond)
continue
bond_page.apply_bonds = apply_bonds
bond_page.next_bonds = next_bonds
bond_page.ipo_bonds = ipo_bonds
bond_page.prepare_bonds = prepare_bonds
bond_page.applying_bonds = applying_bonds
bond_page.today_bonds = today_bonds
bond_page.draw_bonds = draw_bonds
bond_page.pass_bonds = pass_bonds
return bond_page
def build_company_brief(stock_code):
suff_code = crawler.query_stock_suff(stock_code)
client = ChartClient(False)
company = client.get_company_info(stock_code + "." + suff_code)
if company is None:
log.info("no company data")
return None
return company
def build_company(stock_code):
data = crawler.query_company(stock_code)
company = CompanyInfo(data)
if company is None:
log.info("no company data")
return None
return company
def build_similar_bonds(industry_code):
datas = crawler.query_industry_list(industry_code)
if datas is None:
log.info("no similar bonds")
return None
similar_bonds = []
for data in datas:
cell = data['cell']
similar_bonds.append(BondData(cell))
return similar_bonds
def do_generate_similar(bond:BondData):
    line = '**' + bond.bond_name + '**' + '(' + bond.stock_name + '):' + bond.grade + '评级' + ',现价是' + format_func(bond.price) + ',转股价值是' + format_func(bond.convert_value) + ',溢价率是' + format_func(bond.premium_rt) + '%' + '\n'
return line
def get_main_concept(stock_code:str) -> str:
df = tushare.concept_detail(stock_code)
concept_names = df['concept_name'].values.tolist()
names = filter(lambda name: name not in filter_concept_names, concept_names)
return '、'.join(names)
def do_generate_prepare_document(prepare: BondInfo, buffers: list, add_finger_print=False, default_estimate_rt=None):
converter = ImageConverter(prepare.bond_code, prepare.bond_name)
doms = converter.save(add_finger_print=add_finger_print)
log.info("save prepare bond image...")
title = pt.CHAPTER_PREPARE_TITLE.replace('{bond_name}', prepare.bond_name) \
.replace("{stock_code}", prepare.stock_code) \
.replace('{bond_code}', prepare.bond_code)
    # look up bonds in the same industry
industry_code = doms[1]
industry_text = doms[2]
similar_bonds = build_similar_bonds(industry_code)
log.info('query prepare similar bonds...')
if len(similar_bonds) == 0:
similar_lines = []
        # default premium rate of 30%
estimate_rt = 30.0 if prepare.bond_code not in default_estimate_rt.keys() else round(default_estimate_rt[prepare.bond_code] / round(float(prepare.pma_rt), 2) - 1, 2) * 100
premium_rt = 100.00 - round(float(prepare.pma_rt), 2)
estimate_rt_all = round(estimate_rt / 100, 2) + 1
estimate_amount = round(estimate_rt_all, 2) * round(float(prepare.pma_rt), 2)
else:
estimate_amount, \
estimate_rt, \
estimate_rt_all, \
premium_rt, \
similar_lines = build_estimate_similar(prepare, similar_bonds, default_estimate_rt)
suff = crawler.query_stock_suff(prepare.stock_code)
# chart = ChartClient()
start_date = str(prepare.apply_date).replace('-', '')
end_date = str(prepare.list_date).replace('-', '')
chart_data = tushare.get_daily_image(prepare.stock_code + '.' + suff.upper(), prepare.stock_name, start_date, end_date, add_finger_print)
overview = pt.CHAPTER_PREPARE_OVERVIEW\
.replace('{bond_code}', prepare.bond_code) \
.replace('{pic_base64}', (doms[0]).decode()) \
.replace('{bond_name}', prepare.bond_name) \
.replace('{list_date}', str(prepare.list_date).replace('2022-', ''))\
.replace('{apply_date}', str(prepare.apply_date).replace('2022-', ''))\
.replace('{amount}', format_func(prepare.amount)) \
.replace('{online_amount}', format_func(prepare.online_amount))\
.replace('{grade}', prepare.grade) \
.replace("{valid_apply}", prepare.valid_apply)\
.replace('{ration_rt}', format_func(round(float(prepare.ration_rt), 2))) \
.replace('{lucky_draw_rt}', prepare.lucky_draw_rt) \
.replace('{pma_rt}', format_func(prepare.pma_rt)) \
.replace('{estimate_rt_all}', format_func(round(estimate_rt_all, 2))) \
.replace('{estimate_rt}', format_func(round(estimate_rt, 0)))\
.replace('{estimate_amount}', format_func(round(estimate_amount, 2)))\
.replace('{stock_old_price}', format_func(chart_data[0]))\
.replace('{status}', '涨' if chart_data[0] < chart_data[1] else '跌')\
.replace('{stock_now_price}', format_func(chart_data[1]))\
.replace('{stock_code}', prepare.stock_code)\
.replace('{stock_pic_base64}', (chart_data[2]).decode())\
.replace('{before_benefit}', doms[4])\
.replace('{after_benefit}', doms[5])\
.replace('{down_rate}', doms[6])\
.replace('{redeem_rate}', doms[7])\
.replace('{resale_rate}', doms[8])
    # insert new data
# if trade_open:
# model = ApplyBondInfoModel(bond_code=prepare.bond_code, bond_name=prepare.bond_name,
# stock_code=prepare.stock_code, stock_name=prepare.stock_name,
# apply_date=prepare.apply_date, grade=prepare.grade, amount=prepare.amount,
# industry_text=industry_text, list_date=prepare.list_date,
# valid_apply=round(float(prepare.valid_apply), 3), lucky_draw_rt=round(float(prepare.lucky_draw_rt), 3))
# sqlclient.update_or_insert_apply_bond_info(model)
    # brief commentary
today = date.today()
tomorrow = (today + timedelta(days=1))
tomorrow_trade_open = tushare.is_trade_open(tomorrow)
next_trade_open_day = tushare.next_trade_day(tomorrow)
briefs.append(pt.CHAPTER_BRIEF_PREPARE_TEXT
.replace('{date}', '明日' if tomorrow_trade_open else next_trade_open_day.strftime('%m%d'))
.replace('{bond_name}', prepare.bond_name)
.replace('{estimate_amount}', format_func(round(estimate_amount, 2))))
company_info = build_company_brief(prepare.stock_code)
log.info("query prepare company info...")
if company_info is None:
return None
    # company introduction removed for now
# .replace('{gsjj}', company_info['introduction'])\
company = pt.CHAPTER_PREPARE_COMPANY\
.replace('{sshy}', industry_text)\
.replace('{zyyw}', company_info['main_business'])\
.replace('{hxgn}', get_main_concept(prepare.stock_code))
buffers.append(title)
buffers.append(overview)
buffers.append(pt.CHAPTER_PREPARE_TIPS)
buffers.append(company)
if len(similar_lines) != 0:
buffers.extend(do_generate_apply_company_basic_document(prepare, False))
buffers.append(pt.CHAPTER_SIMILAR_TEXT)
for line in similar_lines:
buffers.append(line)
buffers.append('\n')
buffers.append(pt.CHAPTER_NEXT)
return buffers
def do_generate_apply_company_basic_document(apply: BondInfo, is_show: bool) -> list:
buffers = []
if is_show:
basic_data = crawler.query_stock_basic_info(apply.stock_code)
list_timestamp = basic_data.get('listTime', 0)
list_date = time.strftime("%Y-%m-%d", time.localtime(list_timestamp / 1000))
share_total = round(basic_data.get('share_total', 0)/100000000, 2)
share_liq = round(basic_data.get('share_liq', 0)/100000000, 2)
total_assets = round(basic_data.get('total_mv', 0), 2)
pe = basic_data['pepbMap']['pe']
pe_content = basic_data['pepbMap']['peContent']
pb = basic_data['pepbMap']['pb']
pb_content = basic_data['pepbMap']['pbContent']
        # fundamentals
buffers.append(
pt.CHAPTER_COMPANY_BASIC_SUMMARY
.replace('{stock_name}', apply.stock_name)
.replace('{list_date}', list_date)
.replace('{total_share}', format_func(share_total))
.replace('{total_assets}', format_func(total_assets))
.replace('{pe}', format_func(pe))
.replace('{pe_content}', pe_content.replace('\n', '').replace('😊', '').replace('😰', '').replace('行业:', '').replace('历史:', ''))
.replace('{pb}', format_func(pb))
.replace('{pb_content}', pb_content.replace('\n', '').replace('行业:', '').replace('历史:', ''))
)
    # earnings
notice_list = crawler.query_stock_notice(apply.stock_code)
if len(notice_list) > 0:
buffers.append(pt.CHAPTER_COMPANY_BASIC_ACHIEVEMENT)
for data in notice_list:
type = data['type']
            # 0: disclosure expected
if type == 0:
continue
            # 2: earnings pre-announcement
if type == 2:
buffers.append('<font color="#1E90FF">' + '**' + data['noticeName'] + '预告:' + data['content'] + '**' + '</font>\n')
break
            # 1: earnings report
if type == 1:
buffers.append('<font color="#1E90FF">' + '**' + data['noticeName'] + ':' + data['content'] + '**' + '</font>\n')
break
if is_show:
        # highlights
positive_list = basic_data['comment_new']['positive_new']
if len(positive_list) > 0:
buffers.append(pt.CHAPTER_COMPANY_BASIC_POSITIVE)
for i, data in enumerate(positive_list):
buffers.append(str(i+1) + '.' + data['value'] + '\n')
        # risk points
negative_list = basic_data['comment_new']['unpositive_new']
if len(negative_list) > 0:
buffers.append(pt.CHAPTER_COMPANY_BASIC_NEGATIVE)
for i, data in enumerate(negative_list):
buffers.append(str(i+1) + '.' + data['value'] + '\n')
return buffers
def do_generate_apply_document(apply: BondInfo, buffers: list, add_finger_print=False, default_estimate_rt=None, owner_apply_rate: dict = None):
converter = ImageConverter(apply.bond_code, apply.bond_name)
doms = converter.save(add_finger_print=add_finger_print)
log.info("save apply bond image...")
title = pt.TITLE.replace('{bond_name}', apply.bond_name)\
.replace("{stock_code}", apply.stock_code)\
.replace('{bond_code}', apply.bond_code)
overview = pt.CHAPTER_OVERVIEW\
.replace('{bond_code}', apply.bond_code)\
.replace('{pic_base64}', (doms[0]).decode())\
.replace('{grade}', apply.grade)\
.replace('{scale}', '小' if apply.amount < 10 else ('还行' if apply.amount < 30 else '大'))\
.replace('{amount}', format_func(apply.amount))\
.replace('{stock_name}', apply.stock_name)\
.replace('{price}', format_func(apply.price))\
.replace('{convert_price}', format_func(apply.convert_price))\
.replace('{pma_rt}', format_func(apply.pma_rt))\
.replace('{purpose}', str(doms[3]).replace('\r', '、'))\
.replace('{before_benefit}', doms[4])\
.replace('{after_benefit}', doms[5])\
.replace('{down_rate}', doms[6])\
.replace('{redeem_rate}', doms[7])\
.replace('{resale_rate}', doms[8])
    # look up bonds in the same industry
industry_code = doms[1]
industry_text = doms[2]
similar_bonds = build_similar_bonds(industry_code)
log.info('query apply similar bonds...')
if len(similar_bonds) == 0:
similar_lines = []
        # default premium rate of 30%
estimate_rt = 30.0 if apply.bond_code not in default_estimate_rt.keys() else default_estimate_rt[apply.bond_code]
premium_rt = 100.00 - round(float(apply.pma_rt), 2)
estimate_rt_all = round(estimate_rt / 100, 2) + 1
estimate_amount = round(estimate_rt_all, 2) * round(float(apply.pma_rt), 2)
else:
estimate_amount, \
estimate_rt, \
estimate_rt_all, \
premium_rt, \
similar_lines = build_estimate_similar(apply, similar_bonds, default_estimate_rt)
    # assume 75% of the issue is subscribed by existing shareholders by default
pre_apply_rt = owner_apply_rate.get(apply.bond_code, 0.75)
owner_apply_amount = apply.amount * (1 - pre_apply_rt)
owner_apply_amount_w = owner_apply_amount * 10000
estimate_lucky_rt = owner_apply_amount_w/1100/1000
if estimate_lucky_rt > 0.5:
lucky_rate = '高'
elif estimate_lucky_rt > 0.1:
lucky_rate = '尚可'
else:
lucky_rate = '低'
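    # Worked example of the estimate above (illustrative numbers only): with amount = 10
    # (hundred-million yuan) and pre_apply_rt = 0.75, owner_apply_amount = 2.5, i.e.
    # 25,000 (ten-thousand yuan), so estimate_lucky_rt = 25000 / 1100 / 1000 ≈ 0.0227,
    # which falls below 0.1 and is therefore labelled '低' (low).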
company_info = build_company(apply.stock_code)
log.info("query apply company info...")
if company_info is None:
return None
brief_company_info = build_company_brief(apply.stock_code)
company = pt.CHAPTER_COMPANY_TEXT\
.replace('{gsmc}', company_info.gsmc)\
.replace('{sszjhhy}', industry_text)\
.replace('{hxgn}', get_main_concept(apply.stock_code))\
.replace('{jyfw}', str(brief_company_info['main_business']).replace(',', ',').replace('.', '。'))
summary = pt.CHAPTER_SUMMARY\
.replace('{premium_rt}', format_func(round(premium_rt, 2)))\
.replace('{grade}', apply.grade)\
.replace('{estimate_rt}', format_func(round(estimate_rt, 0)))\
.replace('{convert_value}', format_func(round(float(apply.pma_rt), 2)))\
.replace('{estimate_rt_all}', format_func(round(estimate_rt_all, 2)))\
.replace('{estimate_amount}', format_func(round(estimate_amount, 0)))\
.replace('{owner_apply_rate}', format_func(round(pre_apply_rt * 100, 0)))\
.replace('{owner_apply_amount}', format_func(round(owner_apply_amount, 2)))\
.replace('{owner_apply_amount_w}', format_func(round(owner_apply_amount_w, 0)))\
.replace('{estimate_lucky_rt}', format_func(round(estimate_lucky_rt, 3)))\
.replace('{lucky_rate}', lucky_rate)
    # insert new data
# if trade_open:
# model = ApplyBondInfoModel(bond_code=apply.bond_code, bond_name=apply.bond_name,
# stock_code=apply.stock_code, stock_name=apply.stock_name,
# apply_date=apply.apply_date, grade=apply.grade, amount=apply.amount,
# industry_text=industry_text)
# sqlclient.update_or_insert_apply_bond_info(model)
    # brief commentary
today = date.today()
tomorrow = (today + timedelta(days=1))
next_trade_open_day = tushare.next_trade_day(tomorrow)
tomorrow_trade_open = tushare.is_trade_open(tomorrow)
briefs.append(pt.CHAPTER_BRIEF_APPLY_TEXT
.replace('{date}', '明日' if tomorrow_trade_open else next_trade_open_day.strftime('%m%d'))
.replace('{bond_name}', apply.bond_name)
.replace('{estimate_amount}', format_func(round(estimate_amount, 0))))
buffers.append(title)
buffers.append(overview)
buffers.append(company)
if len(similar_lines) != 0:
        # company fundamentals
buffers.extend(do_generate_apply_company_basic_document(apply, True))
buffers.append(pt.CHAPTER_SIMILAR_TEXT)
for line in similar_lines:
buffers.append(line)
buffers.append(summary)
buffers.append(pt.CHAPTER_NEXT)
return buffers
def do_compare(bond1:BondData, bond2:BondData) -> int:
grade_1 = bond1.grade
grade_2 = bond2.grade
score_1 = pt.BOND_GRADE_SCORE_MAP.get(grade_1)
score_2 = pt.BOND_GRADE_SCORE_MAP.get(grade_2)
if score_1 > score_2:
return -1
elif score_1 < score_2:
return 1
else:
return 0
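# Usage note (illustrative): do_compare is written for functools.cmp_to_key, e.g.
#     sorted(similar_bonds, key=cmp_to_key(do_compare))
# which sorts bonds so that higher-grade bonds (higher BOND_GRADE_SCORE_MAP score) come first.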
def build_estimate_similar(apply, similar_bonds, default_estimate_rt=None, similar_rank=5):
    # maximum premium rate
premium_rts = []
grade_premium_rts = []
for similar in similar_bonds:
if similar.grade == apply.grade:
grade_premium_rts.append(similar.premium_rt)
premium_rts.append(similar.premium_rt)
if apply.bond_code in default_estimate_rt.keys():
estimate_rt = round(default_estimate_rt[apply.bond_code] / round(float(apply.pma_rt), 2) - 1, 2) * 100
log.info("query similar bond, assign_premium rate=" + str(estimate_rt))
elif len(grade_premium_rts) >= 3:
estimate_rt = np.mean(grade_premium_rts)
log.info("query similar bond, grade_premium_rts len=" + str(len(grade_premium_rts)) + '; avg=' + str(np.mean(grade_premium_rts)) + "; middle=" + str(np.median(grade_premium_rts)) + "; max=" + str(np.max(grade_premium_rts)) + "; min=" + str(np.min(grade_premium_rts)))
else:
estimate_rt = np.mean(premium_rts)
log.info("query similar bond, premium_rts len=" + str(len(premium_rts)) + '; avg=' + str(np.mean(premium_rts)) + "; middle=" + str(np.median(premium_rts)) + "; max=" + str(np.max(premium_rts)) + "; min=" + str(
| np.min(premium_rts) | numpy.min |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pymrt.base: generic basic utilities.
"""
# ======================================================================
# :: Future Imports
from __future__ import (
division, absolute_import, print_function, unicode_literals)
# ======================================================================
# :: Python Standard Library Imports
import os # Miscellaneous operating system interfaces
import sys # System-specific parameters and functions
import math # Mathematical functions
import itertools # Functions creating iterators for efficient looping
import functools # Higher-order functions and operations on callable objects
import subprocess # Subprocess management
import fractions # Rational numbers
import csv # CSV File Reading and Writing [CSV: Comma-Separated Values]
import inspect # Inspect live objects
import stat # Interpreting stat() results
import doctest # Test interactive Python examples
import shlex # Simple lexical analysis
import warnings # Warning control
# :: External Imports
import numpy as np # NumPy (multidimensional numerical arrays library)
import scipy as sp # SciPy (signal and image processing library)
# :: External Imports Submodules
import scipy.optimize # SciPy: Optimization Algorithms
import scipy.stats # SciPy: Statistical functions
import scipy.signal # SciPy: Signal Processing
from numpy.fft import fftshift, ifftshift
from scipy.fftpack import fftn, ifftn
# :: Local Imports
from pymrt import VERB_LVL, D_VERB_LVL #, VERB_LVL_NAMES
from pymrt.base import elapsed, print_elapsed
from pymrt import msg, dbg
# ======================================================================
# :: Custom defined constants
# ======================================================================
# :: Default values usable in functions.
COMMENT_TOKEN = '#'
CSV_DELIMITER = '\t'
PNG_INTERVAL = (0.0, 255.0)
EXT = {
'plot': 'png',
'nii': 'nii',
'niz': 'nii.gz',
'text': 'txt',
'tab': 'csv',
'data': 'json',
}
D_TAB_SIZE = 8
# ======================================================================
def _is_hidden(filepath):
"""
Heuristic to determine hidden files.
Args:
filepath (str): the input file path.
Returns:
is_hidden (bool): True if is hidden, False otherwise.
Notes:
Only works with UNIX-like files, relying on prepended '.'.
"""
# if sys.version_info[0] > 2:
# filepath = filepath.encode('utf-8')
# filepath = filepath.decode('utf-8')
return os.path.basename(filepath).startswith('.')
# ======================================================================
def _is_special(stats_mode):
"""
Heuristic to determine non-standard files.
    Args:
        stats_mode (int): the file mode bits, as given by `os.stat(...).st_mode`.
    Returns:
        is_special (bool): True if the file is special, False otherwise.
Notes:
Its working relies on Python stat module implementation.
"""
is_special = not stat.S_ISREG(stats_mode) and \
not stat.S_ISDIR(stats_mode) and \
not stat.S_ISLNK(stats_mode)
return is_special
# ======================================================================
def auto_repeat(
obj,
n,
force=False,
check=False):
"""
Automatically repeat the specified object n times.
If the object is not iterable, a tuple with the specified size is returned.
If the object is iterable, the object is left untouched.
Args:
obj: The object to operate with.
n (int): The length of the output object.
force (bool): Force the repetition, even if the object is iterable.
check (bool): Ensure that the object has length n.
    Returns:
        val (tuple): obj repeated n times (or obj unchanged, if it is
            iterable and force is False).
    Raises:
        AssertionError: If check is True and the object does not have length n.
Examples:
>>> auto_repeat(1, 3)
(1, 1, 1)
>>> auto_repeat([1], 3)
[1]
>>> auto_repeat([1, 3], 2)
[1, 3]
>>> auto_repeat([1, 3], 2, True)
([1, 3], [1, 3])
>>> auto_repeat([1, 2, 3], 2, True, True)
([1, 2, 3], [1, 2, 3])
>>> auto_repeat([1, 2, 3], 2, False, True)
Traceback (most recent call last):
...
AssertionError
"""
try:
iter(obj)
except TypeError:
force = True
finally:
if force:
obj = (obj,) * n
if check:
assert (len(obj) == n)
return obj
def max_iter_len(items):
"""
Determine the maximum length of an item within a collection of items.
Args:
items (iterable): The collection of items to inspect.
Returns:
        num (int): The maximum length among the iterable items of the
            collection (at least 1; non-iterable items are ignored).
"""
num = 1
for val in items:
try:
iter(val)
except TypeError:
pass
else:
num = max(len(val), num)
return num
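# Quick illustrative check of max_iter_len (not in the original module): non-iterable
# items are ignored, so the 3-element tuple below dominates.
#     >>> max_iter_len([1, (1, 2, 3), 'ab'])
#     3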
# ======================================================================
def is_prime(num):
"""
Determine if num is a prime number.
A prime number is only divisible by 1 and itself.
    0 and 1 are considered special cases; in this implementation they are
    considered primes.
    It is implemented by directly testing for possible factors.
    Args:
        num (int): The number to check for primality.
            Negative numbers are checked via their absolute value.
    Returns:
        is_prime (bool): True if the number is prime, False otherwise.
Examples:
>>> is_prime(100)
False
>>> is_prime(101)
True
>>> is_prime(-100)
False
>>> is_prime(-101)
True
>>> is_prime(2 ** 17)
False
>>> is_prime(17 * 19)
False
>>> is_prime(2 ** 17 - 1)
True
>>> is_prime(0)
True
>>> is_prime(1)
True
"""
num = abs(num)
if num % 2 == 0 and num > 2:
return False
for i in range(3, int(num ** 0.5) + 1, 2):
if num % i == 0:
return False
return True
# # alternate implementation
# is_divisible = num == 1 or num != 2 and num % 2 == 0
# i = 3
# while not is_divisible and i * i < num:
# is_divisible = num % i == 0
# # only odd factors needs to be tested
# i += 2
# return not is_divisible
# ======================================================================
def primes_in_range(
stop,
start=2):
"""
Calculate the prime numbers in the range.
Args:
stop (int): The final value of the range.
This value is excluded.
If stop < start the values are switched.
start (int): The initial value of the range.
This value is included.
If start > stop the values are switched.
Yields:
num (int): The next prime number.
Examples:
>>> list(primes_in_range(50))
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
>>> list(primes_in_range(101, 150))
[101, 103, 107, 109, 113, 127, 131, 137, 139, 149]
>>> list(primes_in_range(1000, 1050))
[1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049]
>>> list(primes_in_range(1050, 1000))
[1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049]
"""
if start > stop:
start, stop = stop, start
if start % 2 == 0:
if start == 2:
yield start
start += 1
for num in range(start, stop, 2):
if is_prime(num):
yield num
# ======================================================================
def get_primes(num=2):
"""
Calculate prime numbers.
Args:
num (int): The initial value
Yields:
num (int): The next prime number.
Examples:
>>> n = 15
>>> primes = get_primes()
>>> [next(primes) for i in range(n)] # first n prime numbers
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
>>> n = 10
>>> primes = get_primes(101)
>>> [next(primes) for i in range(n)] # first n primes larger than 1000
[101, 103, 107, 109, 113, 127, 131, 137, 139, 149]
>>> n = 10
>>> primes = get_primes(1000)
>>> [next(primes) for i in range(n)] # first n primes larger than 1000
[1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061]
"""
while num <= 2:
if is_prime(num):
yield num
num += 1
if num % 2 == 0:
num += 1
while True:
if is_prime(num):
yield num
num += 2
# ======================================================================
def factorize(num):
"""
Find all factors of a number.
Args:
num (int): The number to factorize.
Returns:
numbers (list[int]): The factors of number.
Example:
>>> n = 100
>>> f = factorize(n)
>>> print(f)
[2, 2, 5, 5]
>>> n == np.prod(f)
True
>>> n= 1234567890
>>> f = factorize(n)
>>> print(f)
[2, 3, 3, 5, 3607, 3803]
"""
factors = []
primes = get_primes()
prime = next(primes)
while prime * prime <= num:
while num % prime == 0:
num //= prime
factors.append(prime)
prime = next(primes)
if num > 1:
factors.append(num)
return factors
# =====================================================================
def optimal_ratio(
num,
condition=None):
"""
Find the optimal ratio for arranging elements into a matrix.
Args:
num (int): The number of elements to arrange.
condition (callable): The optimality condition to use.
This is passed as the `key` argument of `sorted`.
Returns:
num1 (int): The first number (num1 > num2).
num2 (int): The second number (num2 < num1).
Examples:
>>> n1, n2 = 40, 48
>>> [optimal_ratio(i) for i in range(n1, n2)]
[(8, 5), (41, 1), (7, 6), (43, 1), (11, 4), (9, 5), (23, 2), (47, 1)]
>>> [optimal_ratio(i, max) for i in range(n1, n2)]
[(8, 5), (41, 1), (7, 6), (43, 1), (11, 4), (9, 5), (23, 2), (47, 1)]
>>> [optimal_ratio(i, min) for i in range(n1, n2)]
[(20, 2), (41, 1), (21, 2), (43, 1), (22, 2), (15, 3), (23, 2), (47, 1)]
"""
ratios = []
if is_prime(num):
return num, 1
else:
for i in range(2, int(num ** 0.5) + 1):
if num % i == 0:
ratios.append((num // i, i))
return sorted(ratios, key=condition)[0]
# =====================================================================
def gcd(*nums):
"""
Find the greatest common divisor (GCD) of a list of numbers.
Args:
*nums (tuple[int]): The input numbers.
Returns:
gcd_val (int): The value of the greatest common divisor (GCD).
Examples:
>>> gcd(12, 24, 18)
6
>>> gcd(12, 24, 18, 42, 600, 66, 666, 768)
6
>>> gcd(12, 24, 18, 42, 600, 66, 666, 768, 101)
1
>>> gcd(12, 24, 18, 3)
3
"""
gcd_val = nums[0]
for num in nums[1:]:
gcd_val = math.gcd(gcd_val, num)
return gcd_val
# ======================================================================
def lcm(*nums):
"""
Find the least common multiple (LCM) of a list of numbers.
Args:
        *nums (tuple[int]): The input numbers.
    Returns:
        lcm_val (int): The value of the least common multiple (LCM).
Examples:
>>> lcm(2, 3, 4)
12
>>> lcm(9, 8)
72
>>> lcm(12, 23, 34, 45, 56)
985320
"""
lcm_val = nums[0]
for num in nums[1:]:
        lcm_val = lcm_val * num // math.gcd(lcm_val, num)
return lcm_val
# ======================================================================
def merge_dicts(*dicts):
"""
Merge dictionaries into a new dict (new keys overwrite the old ones).
Args:
dicts (tuple[dict]): Dictionaries to be merged together.
Returns:
merged (dict): The merged dict (new keys overwrite the old ones).
Examples:
>>> d1 = {1: 2, 3: 4, 5: 6}
>>> d2 = {2: 1, 4: 3, 6: 5}
>>> d3 = {1: 1, 3: 3, 6: 5}
>>> dd = merge_dicts(d1, d2)
>>> print(tuple(sorted(dd.items())))
((1, 2), (2, 1), (3, 4), (4, 3), (5, 6), (6, 5))
>>> dd = merge_dicts(d1, d3)
>>> print(tuple(sorted(dd.items())))
((1, 1), (3, 3), (5, 6), (6, 5))
"""
merged = {}
for item in dicts:
merged.update(item)
return merged
# ======================================================================
def accumulate(
items,
func=lambda x, y: x + y):
"""
Cumulatively apply the specified function to the elements of the list.
Args:
items (iterable): The items to process.
func (callable): func(x,y) -> z
The function applied cumulatively to the first n items of the list.
Defaults to cumulative sum.
Returns:
lst (list): The cumulative list.
See Also:
itertools.accumulate.
Examples:
>>> accumulate(list(range(5)))
[0, 1, 3, 6, 10]
>>> accumulate(list(range(5)), lambda x, y: (x + 1) * y)
[0, 1, 4, 15, 64]
>>> accumulate([1, 2, 3, 4, 5, 6, 7, 8], lambda x, y: x * y)
[1, 2, 6, 24, 120, 720, 5040, 40320]
"""
return [
functools.reduce(func, list(items)[:idx + 1])
for idx in range(len(items))]
# ======================================================================
def multi_replace(
text,
replaces):
"""
Perform multiple replacements in a string.
Args:
text (str): The input string.
replaces (tuple[str,str]): The listing of the replacements.
Format: ((<old>, <new>), ...).
Returns:
text (str): The string after the performed replacements.
Examples:
>>> multi_replace('python.best', (('thon', 'mrt'), ('est', 'ase')))
'pymrt.base'
>>> multi_replace('x-x-x-x', (('x', 'est'), ('est', 'test')))
'test-test-test-test'
>>> multi_replace('x-x-', (('-x-', '.test'),))
'x.test'
"""
return functools.reduce(lambda s, r: s.replace(*r), replaces, text)
# ======================================================================
def common_substr_2(
seq1,
seq2,
sorting=None):
"""
Find the longest common consecutive subsequence(s).
This version works for two iterables.
This is known as the `longest common substring` problem, or LCS for short.
Args:
seq1 (iterable): The first input sequence.
Must be of the same type as seq2.
seq2 (iterable): The second input sequence.
Must be of the same type as seq1.
sorting (callable): Sorting function passed to 'sorted' via `key` arg.
Returns:
commons (list[iterable]): The longest common subsequence(s).
Examples:
>>> common_substr_2('academy', 'abracadabra')
['acad']
>>> common_substr_2('los angeles', 'lossless')
['los', 'les']
>>> common_substr_2('los angeles', 'lossless', lambda x: x)
['les', 'los']
>>> common_substr_2((1, 2, 3, 4, 5), (0, 1, 2))
[(1, 2)]
"""
# note: [[0] * (len(seq2) + 1)] * (len(seq1) + 1) will not work!
counter = [[0 for j in range(len(seq2) + 1)] for i in range(len(seq1) + 1)]
longest = 0
commons = []
for i, item in enumerate(seq1):
for j, jtem in enumerate(seq2):
if item == jtem:
tmp = counter[i][j] + 1
counter[i + 1][j + 1] = tmp
if tmp > longest:
commons = []
longest = tmp
commons.append(seq1[i - tmp + 1:i + 1])
elif tmp == longest:
commons.append(seq1[i - tmp + 1:i + 1])
if sorting is None:
return commons
else:
return sorted(commons, key=sorting)
# ======================================================================
def common_substr(
seqs,
sorting=None):
"""
Find the longest common consecutive subsequence(s).
This version works for an iterable of iterables.
This is known as the `longest common substring` problem, or LCS for short.
Args:
seqs (iterable[iterable]): The input sequences.
All the items must be of the same type.
sorting (callable): Sorting function passed to 'sorted' via `key` arg.
Returns:
commons (list[iterable]): The longest common subsequence(s).
Examples:
>>> common_substr(['academy', 'abracadabra', 'cadet'])
['cad']
>>> common_substr(['los angeles', 'lossless', 'les alos'])
['los', 'les']
>>> common_substr(['los angeles', 'lossless', 'les alos', 'losles'])
['los', 'les']
>>> common_substr(['los angeles', 'lossless', 'dolos'])
['los']
>>> common_substr([(1, 2, 3, 4, 5), (1, 2, 3), (0, 1, 2)])
[(1, 2)]
"""
commons = [seqs[0]]
for text in seqs[1:]:
tmps = []
for common in commons:
tmp = common_substr_2(common, text, sorting)
if len(tmps) == 0 or len(tmp[0]) == len(tmps[0]):
tmps.extend(common_substr_2(common, text, sorting))
commons = tmps
return commons
# ======================================================================
def set_keyword_parameters(
func,
values):
"""
Set keyword parameters of a function to specific or default values.
Args:
func (callable): The function to be inspected.
values (dict): The (key, value) pairs to set.
If a value is None, it will be replaced by the default value.
To use the names defined locally, use: `locals()`.
Results:
kw_params (dict): A dictionary of the keyword parameters to set.
See Also:
inspect.getargspec, locals, globals.
"""
# todo: refactor to get rid of deprecated getargspec
inspected = inspect.getargspec(func)
defaults = dict(
zip(reversed(inspected.args), reversed(inspected.defaults)))
kw_params = {}
for key in inspected.args:
if key in values:
kw_params[key] = values[key]
elif key in defaults:
kw_params[key] = defaults[key]
return kw_params
# ======================================================================
def mdot(*arrs):
"""
Cumulative application of multiple `numpy.dot` operation.
Args:
*arrs (tuple[ndarray]): Tuple of input arrays.
Returns:
arr (np.ndarray): The result of the tensor product.
Examples:
        >>> mdot(np.array([[1, 2], [3, 4]]), np.eye(2, dtype=int), np.eye(2, dtype=int))
        array([[1, 2],
               [3, 4]])
"""
arr = arrs[0]
for item in arrs[1:]:
arr = np.dot(arr, item)
return arr
# ======================================================================
def ndot(
arr,
dim=-1,
step=1):
"""
Cumulative application of `numpy.dot` operation over a given axis.
Args:
        arr (np.ndarray): The input array.
        dim (int): The axis along which to operate.
        step (int): The step of the iteration over the given axis.
Returns:
prod (np.ndarray): The result of the tensor product.
Examples:
        >>> arr = np.stack([np.eye(2, dtype=int)] * 3, axis=-1)
        >>> ndot(arr, dim=-1)
        array([[1, 0],
               [0, 1]])
"""
if dim < 0:
dim += arr.ndim
start = 0 if step > 0 else arr.shape[dim] - 1
stop = arr.shape[dim] if step > 0 else -1
    prod = arr[
        tuple(slice(None) if j != dim else start for j in range(arr.ndim))]
    for i in range(start, stop, step)[1:]:
        indexes = tuple(slice(None) if j != dim else i for j in range(arr.ndim))
        prod = np.dot(prod, arr[indexes])
return prod
def commutator(a, b):
"""
Calculate the commutator of two arrays: [A,B] = AB - BA
Args:
a (np.ndarray): The first operand
b (np.ndarray): The second operand
Returns:
c (np.ndarray): The operation result
"""
return a.dot(b) - b.dot(a)
def anticommutator(a, b):
"""
    Calculate the anticommutator of two arrays: {A,B} = AB + BA
Args:
a (np.ndarray): The first operand
b (np.ndarray): The second operand
Returns:
c (np.ndarray): The operation result
"""
return a.dot(b) + b.dot(a)
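# ======================================================================
# Illustrative numeric check of the two definitions above (a minimal sketch, not part of
# the original module): for the Pauli-like matrices below the commutator is non-zero,
# while the anticommutator vanishes.
def _example_commutators():
    sx = np.array([[0, 1], [1, 0]])
    sz = np.array([[1, 0], [0, -1]])
    assert np.array_equal(commutator(sx, sz), 2 * sx.dot(sz))
    assert np.array_equal(anticommutator(sx, sz), np.zeros((2, 2)))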
# ======================================================================
def walk2(
base,
follow_links=False,
follow_mounts=False,
allow_special=False,
allow_hidden=True,
max_depth=-1,
on_error=None):
"""
Recursively walk through sub paths of a base directory
Args:
base (str): directory where to operate
follow_links (bool): follow links during recursion
follow_mounts (bool): follow mount points during recursion
allow_special (bool): include special files
allow_hidden (bool): include hidden files
max_depth (int): maximum depth to reach. Negative for unlimited
on_error (callable): function to call on error
    Yields:
path (str): path to the next object
stats (stat_result): structure containing file stats information
"""
# def _or_not_and(flag, check):
# return flag or not flag and check
def _or_not_and_not(flag, check):
return flag or not flag and not check
try:
for name in os.listdir(base):
path = os.path.join(base, name)
stats = os.stat(path)
mode = stats.st_mode
# for some reasons, stat.S_ISLINK and os.path.islink results differ
allow = \
_or_not_and_not(follow_links, os.path.islink(path)) and \
_or_not_and_not(follow_mounts, os.path.ismount(path)) and \
_or_not_and_not(allow_special, _is_special(mode)) and \
_or_not_and_not(allow_hidden, _is_hidden(path))
if allow:
yield path, stats
if os.path.isdir(path):
if max_depth != 0:
next_level = walk2(
path, follow_links, follow_mounts,
allow_special, allow_hidden, max_depth - 1,
on_error)
for next_path, next_stats in next_level:
yield next_path, next_stats
except OSError as error:
if on_error is not None:
on_error(error)
return
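# Usage sketch (illustrative; the starting directory is arbitrary): iterate regular,
# non-hidden entries below the current directory, at most one level deep.
#     for path, stats in walk2('.', allow_hidden=False, max_depth=1):
#         print(path, stats.st_size)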
# ======================================================================
def which(args):
"""
Determine the full path of an executable, if possible.
It mimics the behavior of the POSIX command `which`.
Args:
args (str|list[str]): Command to execute as a list of tokens.
Optionally can accept a string which will be tokenized.
Returns:
args (list[str]): Command to execute as a list of tokens.
The first item of the list is the full path of the executable.
If the executable is not found in path, returns the first token of
the input.
Other items are identical to input, if the input was a str list.
Otherwise it will be the tokenized version of the passed string,
except for the first token.
is_valid (bool): True if path of executable is found, False otherwise.
"""
def is_executable(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
# ensure args in the correct format
try:
args = shlex.split(args)
except AttributeError:
pass
cmd = os.path.expanduser(args[0])
dirpath, filename = os.path.split(cmd)
if dirpath:
is_valid = is_executable(cmd)
else:
is_valid = False
for dirpath in os.environ['PATH'].split(os.pathsep):
dirpath = dirpath.strip('"')
tmp = os.path.join(dirpath, cmd)
is_valid = is_executable(tmp)
if is_valid:
cmd = tmp
break
return [cmd] + args[1:], is_valid
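# Usage sketch (POSIX systems, assuming `ls` is available on $PATH):
#     args, is_valid = which('ls -la')
#     # args[0] becomes the full path of `ls` and is_valid is True on typical systems.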
# ======================================================================
def execute(
args,
in_pipe=None,
mode='call',
timeout=None,
encoding='utf-8',
log=None,
dry=False,
verbose=D_VERB_LVL):
"""
Execute command and retrieve/print output at the end of execution.
Args:
args (str|list[str]): Command to execute as a list of tokens.
Optionally can accept a string.
in_pipe (str|None): Input data to be used as stdin of the process.
mode (str): Set the execution mode (affects the return values).
Allowed modes:
- 'spawn': Spawn a new process. stdout and stderr will be lost.
- 'call': Call new process and wait for execution.
Once completed, obtain the return code, stdout, and stderr.
- 'flush': Call new process and get stdout+stderr immediately.
Once completed, obtain the return code.
            Unfortunately, there is no easy way to obtain stderr separately
            in this mode, since it is redirected to stdout.
timeout (float): Timeout of the process in seconds.
encoding (str): The encoding to use.
log (str): The template filename to be used for logs.
If None, no logs are produced.
dry (bool): Print rather than execute the command (dry run).
verbose (int): Set level of verbosity.
Returns:
ret_code (int|None): if mode not `spawn`, return code of the process.
p_stdout (str|None): if mode not `spawn`, the stdout of the process.
p_stderr (str|None): if mode is `call`, the stderr of the process.
"""
ret_code, p_stdout, p_stderr = None, None, None
args, is_valid = which(args)
if is_valid:
msg('{} {}'.format('$$' if dry else '>>', ' '.join(args)),
verbose, D_VERB_LVL if dry else VERB_LVL['medium'])
else:
        msg('W: `{}` is not available in $PATH.'.format(args[0]))
if not dry and is_valid:
if in_pipe is not None:
msg('< {}'.format(in_pipe),
verbose, VERB_LVL['highest'])
proc = subprocess.Popen(
args,
stdin=subprocess.PIPE if in_pipe and not mode == 'flush' else None,
stdout=subprocess.PIPE if mode != 'spawn' else None,
stderr=subprocess.PIPE if mode == 'call' else subprocess.STDOUT,
shell=False)
        # handle stdout and stderr
if mode == 'flush' and not in_pipe:
p_stdout = ''
while proc.poll() is None:
out_buff = proc.stdout.readline().decode(encoding)
p_stdout += out_buff
msg(out_buff, fmt='', end='')
sys.stdout.flush()
ret_code = proc.wait()
elif mode == 'call':
            try:
                p_stdout, p_stderr = proc.communicate(
                    in_pipe.encode(encoding) if in_pipe else None,
                    timeout=timeout)
            except subprocess.TimeoutExpired:
                proc.kill()
                p_stdout, p_stderr = proc.communicate()
p_stdout = p_stdout.decode(encoding)
p_stderr = p_stderr.decode(encoding)
if p_stdout:
msg(p_stdout, verbose, VERB_LVL['high'], fmt='')
if p_stderr:
msg(p_stderr, verbose, VERB_LVL['high'], fmt='')
ret_code = proc.wait()
else:
proc.kill()
msg('E: mode `{}` and `in_pipe` not supported.'.format(mode))
if log:
name = os.path.basename(args[0])
pid = proc.pid
for stream, source in ((p_stdout, 'out'), (p_stderr, 'err')):
if stream:
log_filepath = log.format_map(locals())
with open(log_filepath, 'wb') as fileobj:
fileobj.write(stream.encode(encoding))
return ret_code, p_stdout, p_stderr
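# Usage sketch (not part of the original module; the echoed text is an
# arbitrary example): run a command in 'call' mode and inspect its output.
def _example_execute():
    ret_code, out, err = execute('echo hello', mode='call')
    if ret_code == 0:
        print('stdout was:', out.strip())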
# ======================================================================
def grouping(
items,
num_elems):
"""
Generate a list of lists from a source list and grouping specifications
Args:
items (iterable): The source list.
num_elems (iterable[int]): number of elements that each group contains.
Returns:
groups (list[list]): Grouped elements from the source list.
Examples:
>>> l = list(range(10))
>>> grouping(l, 4)
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
>>> grouping(l, (2, 3))
[[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]]
>>> grouping(l, (2, 4, 1))
[[0, 1], [2, 3, 4, 5], [6], [7, 8, 9]]
>>> grouping(l, (2, 4, 1, 20))
[[0, 1], [2, 3, 4, 5], [6], [7, 8, 9]]
"""
if isinstance(num_elems, int):
num_elems = auto_repeat(num_elems, len(items) // num_elems)
group, groups = [], []
j = 0
count = num_elems[j] if j < len(num_elems) else len(items) + 1
for i, item in enumerate(items):
if i >= count:
loop = True
while loop:
groups.append(group)
group = []
j += 1
add = num_elems[j] if j < len(num_elems) else len(items) + 1
if add < 0:
add = len(items) + 1
count += add
if add == 0:
loop = True
else:
loop = False
group.append(item)
groups.append(group)
return groups
# ======================================================================
def realpath(path):
"""
Get the expanded absolute path from its short or relative counterpart.
Args:
path (str): The path to expand.
Returns:
new_path (str): the expanded path.
Raises:
        OSError: if the expanded path does not exist.
"""
new_path = os.path.abspath(os.path.realpath(os.path.expanduser(path)))
if not os.path.exists(new_path):
        raise OSError('Path does not exist: {}'.format(new_path))
return new_path
# ======================================================================
def listdir(
path,
file_ext='',
pattern=slice(None, None, None),
full_path=True,
verbose=D_VERB_LVL):
"""
Retrieve a sorted list of files matching specified extension and pattern.
Args:
path (str): Path to search.
file_ext (str|None): File extension. Empty string for all files.
None for directories.
pattern (slice): Selection pattern (assuming alphabetical ordering).
full_path (bool): Include the full path.
verbose (int): Set level of verbosity.
Returns:
list[str]: List of file names/paths
"""
if file_ext is None:
msg('Scanning for DIRS on:\n{}'.format(path),
verbose, VERB_LVL['debug'])
filepaths = [
os.path.join(path, filename) if full_path else filename
for filename in os.listdir(path)
if os.path.isdir(os.path.join(path, filename))]
else:
msg('Scanning for `{}` on:\n{}'.format(file_ext, path),
verbose, VERB_LVL['debug'])
# extracts only those ending with specific file_ext
filepaths = [
os.path.join(path, filename) if full_path else filename
for filename in os.listdir(path)
if filename.lower().endswith(file_ext.lower())]
return sorted(filepaths)[pattern]
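# Usage sketch (not part of the original module; extension and slice are
# placeholders): list the first three '.txt' files of the current directory.
def _example_listdir():
    return listdir('.', file_ext='.txt', pattern=slice(0, 3))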
# ======================================================================
def add_extsep(ext):
"""
    Add an extsep char to a filename extension, if it does not have one.
Args:
ext (str): Filename extension to which the dot has to be added.
Returns:
ext (str): Filename extension with a prepending dot.
Examples:
>>> add_extsep('txt')
'.txt'
>>> add_extsep('.txt')
'.txt'
>>> add_extsep('')
'.'
"""
if not ext:
ext = ''
if not ext.startswith(os.path.extsep):
ext = os.path.extsep + ext
return ext
# ======================================================================
def change_ext(
filepath,
new_ext,
old_ext=None,
case_sensitive=False):
"""
Substitute the old extension with a new one in a filepath.
Args:
filepath (str): Input filepath.
new_ext (str): The new extension (with or without the dot).
old_ext (str|None): The old extension (with or without the dot).
If None, it will be obtained from os.path.splitext.
case_sensitive (bool): Case-sensitive match of old extension.
If old_ext is None or empty, it has no effect.
Returns:
filepath (str): Output filepath
Examples:
>>> change_ext('test.txt', 'dat', 'txt')
'test.dat'
>>> change_ext('test.txt', '.dat', 'txt')
'test.dat'
>>> change_ext('test.txt', '.dat', '.txt')
'test.dat'
>>> change_ext('test.txt', 'dat', '.txt')
'test.dat'
>>> change_ext('test.txt', 'dat', 'TXT', False)
'test.dat'
>>> change_ext('test.txt', 'dat', 'TXT', True)
'test.txt.dat'
>>> change_ext('test.tar.gz', 'tgz')
'test.tar.tgz'
>>> change_ext('test.tar.gz', 'tgz', 'tar.gz')
'test.tgz'
>>> change_ext('test.tar', 'gz', '')
'test.tar.gz'
>>> change_ext('test.tar', 'gz', None)
'test.gz'
"""
if old_ext is None:
filepath, old_ext = os.path.splitext(filepath)
else:
old_ext = add_extsep(old_ext)
if not case_sensitive:
true_old_ext = filepath.lower().endswith(old_ext.lower())
else:
true_old_ext = filepath.endswith(old_ext)
if true_old_ext:
filepath = filepath[:-len(old_ext)]
if new_ext:
filepath += add_extsep(new_ext)
return filepath
# ======================================================================
def compact_num_str(
val,
max_lim=D_TAB_SIZE - 1):
"""
Convert a number into the most informative string within specified limit.
Args:
val (int|float): The number to be converted to string.
max_lim (int): The maximum number of characters allowed for the string.
Returns:
val_str (str): The string with the formatted number.
Examples:
>>> compact_num_str(100.0, 3)
'100'
>>> compact_num_str(100.042, 6)
'100.04'
>>> compact_num_str(100.042, 9)
'100.04200'
"""
try:
# this is to simplify formatting (and accepting even strings)
val = float(val)
# helpers
extra_char_in_exp = 5
extra_char_in_dec = 2
extra_char_in_sign = 1
        # 'order' of zero is 1 for our purposes, because it needs 1 char
order = np.log10(abs(val)) if abs(val) > 0.0 else 1
# adjust limit for sign
limit = max_lim - extra_char_in_sign if val < 0.0 else max_lim
# perform the conversion
if order > float(limit) or order < -float(extra_char_in_exp - 1):
limit -= extra_char_in_exp + 1
val_str = '{:.{size}e}'.format(val, size=limit)
elif -float(extra_char_in_exp - 1) <= order < 0.0:
limit -= extra_char_in_dec
val_str = '{:.{size}f}'.format(val, size=limit)
elif val % 1.0 == 0.0:
# currently, no distinction between int and float is made
limit = 0
val_str = '{:.{size}f}'.format(val, size=limit)
else:
limit -= (extra_char_in_dec + int(order))
if limit < 0:
limit = 0
val_str = '{:.{size}f}'.format(val, size=limit)
except (TypeError, ValueError):
warnings.warn('Could not convert value `{}` to float'.format(val))
val_str = 'NaN'
return val_str
# ======================================================================
def has_decorator(
text,
pre_decor='"',
post_decor='"'):
"""
Determine if a string is delimited by some characters (decorators).
Args:
text (str): The text input string.
pre_decor (str): initial string decorator.
post_decor (str): final string decorator.
Returns:
has_decorator (bool): True if text is delimited by the specified chars.
Examples:
>>> has_decorator('"test"')
True
>>> has_decorator('"test')
False
>>> has_decorator('<test>', '<', '>')
True
"""
return text.startswith(pre_decor) and text.endswith(post_decor)
# ======================================================================
def strip_decorator(
text,
pre_decor='"',
post_decor='"'):
"""
Strip initial and final character sequences (decorators) from a string.
Args:
text (str): The text input string.
pre_decor (str): initial string decorator.
post_decor (str): final string decorator.
Returns:
text (str): the text without the specified decorators.
Examples:
>>> strip_decorator('"test"')
'test'
>>> strip_decorator('"test')
'test'
>>> strip_decorator('<test>', '<', '>')
'test'
"""
begin = len(pre_decor) if text.startswith(pre_decor) else None
end = -len(post_decor) if text.endswith(post_decor) else None
return text[begin:end]
# ======================================================================
def auto_convert(
text,
pre_decor=None,
post_decor=None):
"""
Convert value to numeric if possible, or strip delimiters from string.
Args:
text (str): The text input string.
pre_decor (str): initial string decorator.
post_decor (str): final string decorator.
Returns:
val (int|float|complex): The numeric value of the string.
Examples:
>>> auto_convert('<100>', '<', '>')
100
>>> auto_convert('<100.0>', '<', '>')
100.0
>>> auto_convert('100.0+50j')
(100+50j)
>>> auto_convert('1e3')
1000.0
"""
if pre_decor and post_decor and \
has_decorator(text, pre_decor, post_decor):
text = strip_decorator(text, pre_decor, post_decor)
try:
val = int(text)
except (TypeError, ValueError):
try:
val = float(text)
except (TypeError, ValueError):
try:
val = complex(text)
except (TypeError, ValueError):
val = text
return val
# ======================================================================
def is_number(var):
"""
Determine if a variable contains a number.
Args:
var (str): The variable to test.
Returns:
result (bool): True if the values can be converted, False otherwise.
Examples:
>>> is_number('<100.0>')
False
>>> is_number('100.0+50j')
True
>>> is_number('1e3')
True
"""
try:
complex(var)
except (TypeError, ValueError):
result = False
else:
result = True
return result
# ======================================================================
def guess_decimals(
val,
n_max=16,
base=10,
fp=16):
"""
Guess the number of decimals in a given float number.
Args:
        val (int|float): The number whose decimals are to be guessed.
n_max (int): Maximum number of guessed decimals.
base (int): The base used for the number representation.
fp (int): The floating point maximum precision.
            Numbers beyond this precision are approximated by the underlying platform.
The default value corresponds to the limit of the IEEE-754 floating
point arithmetic, i.e. 53 bits of precision: log10(2 ** 53) = 16
approximately. This value should not be changed unless the
underlying platform follows a different floating point arithmetic.
Returns:
prec (int): the guessed number of decimals.
Examples:
>>> guess_decimals(10)
0
>>> guess_decimals(1)
0
>>> guess_decimals(0.1)
1
>>> guess_decimals(0.01)
2
>>> guess_decimals(0.000001)
6
>>> guess_decimals(-0.72)
2
>>> guess_decimals(0.9567)
4
>>> guess_decimals(0.12345678)
8
>>> guess_decimals(0.9999999999999)
13
>>> guess_decimals(0.1234567890123456)
16
>>> guess_decimals(0.9999999999999999)
16
>>> guess_decimals(0.1234567890123456, 6)
6
>>> guess_decimals(0.54235, 10)
5
>>> guess_decimals(0x654321 / 0x10000, 16, 16)
4
"""
offset = 2
prec = 0
tol = 10 ** -fp
x = (val - int(val)) * base
while base - abs(x) > tol and abs(x % tol) < tol < abs(x) and prec < n_max:
x = (x - int(x)) * base
tol = 10 ** -(fp - prec - offset)
prec += 1
return prec
# ======================================================================
def significant_figures(
val,
num):
"""
Format a number with the correct number of significant figures.
Args:
val (str|float|int): The numeric value to be correctly formatted.
num (str|int): The number of significant figures to be displayed.
Returns:
val (str): String containing the properly formatted number.
Examples:
>>> significant_figures(1.2345, 1)
'1'
>>> significant_figures(1.2345, 4)
'1.234'
>>> significant_figures(1.234e3, 2)
'1.2e+03'
>>> significant_figures(-1.234e3, 3)
'-1.23e+03'
>>> significant_figures(12345678, 4)
'1.235e+07'
See Also:
The 'decimal' Python standard module.
"""
val = float(val)
num = int(num)
order = int(np.floor(np.log10(abs(val)))) if abs(val) != 0.0 else 0
dec = num - order - 1 # if abs(order) < abs(num) else 0
typ = 'f' if order < num else 'g'
prec = dec if order < num else num
# print('val={}, num={}, ord={}, dec={}, typ={}, prec={}'.format(
# val, num, order, dec, typ, prec)) # DEBUG
val = '{:.{prec}{typ}}'.format(round(val, dec), prec=prec, typ=typ)
return val
# ======================================================================
def format_value_error(
val,
err,
num=2):
"""
Write correct value/error pairs.
Args:
val (str|float|int): The numeric value to be correctly formatted.
err (str|float|int): The numeric error to be correctly formatted.
num (str|int): The precision to be used for the error (usually 1 or 2).
Returns:
val_str (str): The string with the correctly formatted numeric value.
err_str (str): The string with the correctly formatted numeric error.
Examples:
>>> format_value_error(1234.5, 6.7)
('1234.5', '6.7')
>>> format_value_error(123.45, 6.7, 1)
('123', '7')
>>> format_value_error(12345.6, 7.89, 2)
('12345.6', '7.9')
>>> format_value_error(12345.6, 78.9, 2)
('12346', '79')
"""
val = float(val)
err = float(err)
num = int(num)
val_order = np.ceil(np.log10(np.abs(val))) if val != 0 else 0
    err_order = np.ceil(np.log10(np.abs(err))) if err != 0 else 0
try:
val_str = significant_figures(val, val_order - err_order + num)
err_str = significant_figures(err, num)
except ValueError:
val_str = str(val)
err_str = str(err)
return val_str, err_str
# ======================================================================
def str2dict(
in_str,
entry_sep=',',
key_val_sep='=',
pre_decor='{',
post_decor='}',
strip_key_str=None,
strip_val_str=None,
convert=True):
"""
Convert a string to a dictionary.
Args:
in_str (str): The input string.
entry_sep (str): The entry separator.
key_val_sep (str): The key-value separator.
pre_decor (str): initial decorator (to be removed before parsing).
post_decor (str): final decorator (to be removed before parsing).
strip_key_str (str): Chars to be stripped from both ends of the key.
If None, whitespaces are stripped. Empty string for no stripping.
strip_val_str (str): Chars to be stripped from both ends of the value.
If None, whitespaces are stripped. Empty string for no stripping.
convert (bool): Enable automatic conversion of string to numeric.
Returns:
out_dict (dict): The output dictionary generated from the string.
Examples:
>>> d = str2dict('{a=10,b=20,c=test}')
>>> for k in sorted(d.keys()): print(k, ':', d[k]) # display dict
a : 10
b : 20
c : test
See Also:
dict2str
"""
if has_decorator(in_str, pre_decor, post_decor):
in_str = strip_decorator(in_str, pre_decor, post_decor)
entries = in_str.split(entry_sep)
out_dict = {}
for entry in entries:
# fetch entry
key_val = entry.split(key_val_sep)
# parse entry
if len(key_val) == 1:
key, val = key_val[0], None
elif len(key_val) == 2:
key, val = key_val
val = val.strip(strip_val_str)
elif len(key_val) > 2:
key, val = key_val[0], key_val[1:]
val = [tmp_val.strip(strip_val_str) for tmp_val in val]
else:
key = None
# strip dict key
key = key.strip(strip_key_str)
# add to dictionary
if key:
if convert:
val = auto_convert(val)
out_dict[key] = val
return out_dict
# ======================================================================
def dict2str(
in_dict,
entry_sep=',',
key_val_sep='=',
pre_decor='{',
post_decor='}',
strip_key_str=None,
strip_val_str=None,
sorting=None):
"""
Convert a dictionary to a string.
Args:
in_dict (dict): The input dictionary.
entry_sep (str): The entry separator.
key_val_sep (str): The key-value separator.
pre_decor (str): initial decorator (to be appended to the output).
post_decor (str): final decorator (to be appended to the output).
strip_key_str (str): Chars to be stripped from both ends of the key.
If None, whitespaces are stripped. Empty string for no stripping.
strip_val_str (str): Chars to be stripped from both ends of the value.
If None, whitespaces are stripped. Empty string for no stripping.
sorting (callable): Sorting function passed to 'sorted' via `key` arg.
Used for sorting the dictionary keys.
Returns:
out_str (str): The output string generated from the dictionary.
Examples:
>>> dict2str({'a': 10, 'b': 20, 'c': 'test'})
'{a=10,b=20,c=test}'
See Also:
str2dict
"""
keys = sorted(in_dict.keys(), key=sorting)
out_list = []
for key in keys:
key = key.strip(strip_key_str)
val = str(in_dict[key]).strip(strip_val_str)
out_list.append(key_val_sep.join([key, val]))
out_str = pre_decor + entry_sep.join(out_list) + post_decor
return out_str
# ======================================================================
def string_between(
text,
begin_str,
end_str,
incl_begin=False,
incl_end=False,
greedy=True):
"""
Isolate the string contained between two tokens
Args:
text (str): String to parse
begin_str (str): Token at the beginning
end_str (str): Token at the ending
incl_begin (bool): Include 'begin_string' in the result
incl_end (bool): Include 'end_str' in the result.
greedy (bool): Output the largest possible string.
Returns:
text (str): The string contained between the specified tokens (if any)
Examples:
>>> string_between('roses are red violets are blue', 'ses', 'lets')
' are red vio'
>>> string_between('roses are red, or not?', 'a', 'd')
're re'
>>> string_between('roses are red, or not?', ' ', ' ')
'are red, or'
>>> string_between('roses are red, or not?', ' ', ' ', greedy=False)
'are'
>>> string_between('roses are red, or not?', 'r', 'r')
'oses are red, o'
>>> string_between('roses are red, or not?', 'r', 'r', greedy=False)
'oses a'
>>> string_between('roses are red, or not?', 'r', 's', True, False)
'rose'
>>> string_between('roses are red violets are blue', 'x', 'y')
''
"""
incl_begin = len(begin_str) if not incl_begin else 0
incl_end = len(end_str) if incl_end else 0
if begin_str in text and end_str in text:
if greedy:
begin = text.find(begin_str) + incl_begin
end = text.rfind(end_str) + incl_end
else:
begin = text.find(begin_str) + incl_begin
end = text[begin:].find(end_str) + incl_end + begin
text = text[begin:end]
else:
text = ''
return text
# ======================================================================
def check_redo(
in_filepaths,
out_filepaths,
force=False,
make_out_dirpaths=False,
no_empty_input=False):
"""
Check if input files are newer than output files, to force calculation.
Args:
in_filepaths (iterable[str]|None): Input filepaths for computation.
out_filepaths (iterable[str]): Output filepaths for computation.
force (bool): Force computation to be re-done.
make_out_dirpaths (bool): Create output dirpaths if not existing.
no_empty_input (bool): Check if the input filepath list is empty.
Returns:
force (bool): True if the computation is to be re-done.
Raises:
IndexError: If the input filepath list is empty.
Only if `no_empty_input` is True.
IOError: If any of the input files do not exist.
"""
# check if output exists
if not force:
for out_filepath in out_filepaths:
if out_filepath and not os.path.exists(out_filepath):
force = True
break
# create output directories
if force and make_out_dirpaths:
for out_filepath in out_filepaths:
out_dirpath = os.path.dirname(out_filepath)
if not os.path.isdir(out_dirpath):
os.makedirs(out_dirpath)
# check if input is older than output
if not force:
# check if input is not empty
if in_filepaths:
# check if input exists
for in_filepath in in_filepaths:
if not os.path.exists(in_filepath):
                    raise IOError(
                        'Input file does not exist: {}'.format(in_filepath))
for in_filepath, out_filepath in \
itertools.product(in_filepaths, out_filepaths):
if os.path.getmtime(in_filepath) > os.path.getmtime(
out_filepath):
force = True
break
elif no_empty_input:
raise IOError('Input file list is empty.')
return force
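# Usage sketch (not part of the original module; file paths are
# placeholders): recompute only when outputs are missing or stale.
def _example_check_redo():
    in_filepaths = ['data/input.dat']
    out_filepaths = ['results/output.dat']
    if check_redo(in_filepaths, out_filepaths, make_out_dirpaths=True):
        pass  # ...perform the (re)computation and write the outputs here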
# ======================================================================
def bijective_part(arr, invert=False):
"""
Determine the largest bijective part of an array.
Args:
arr (np.ndarray): The input 1D-array.
invert (bool): Invert the selection order for equally large parts.
The behavior of `numpy.argmax` is the default.
Returns:
slice (slice): The largest bijective portion of arr.
If two equivalent parts are found, uses the `numpy.argmax` default.
Examples:
>>> x = np.linspace(-1 / np.pi, 1 / np.pi, 5000)
>>> arr = np.sin(1 / x)
>>> bijective_part(x)
slice(None, None, None)
>>> bijective_part(arr)
slice(None, 833, None)
>>> bijective_part(arr, True)
slice(4166, None, None)
"""
local_mins = sp.signal.argrelmin(arr.ravel())[0]
local_maxs = sp.signal.argrelmax(arr.ravel())[0]
# boundaries are considered pseudo-local maxima and minima
# but are not included in local_mins / local_maxs
# therefore they are added manually
    extrema = np.zeros((len(local_mins) + len(local_maxs)) + 2, dtype=int)
extrema[-1] = len(arr) - 1
if len(local_mins) > 0 and len(local_maxs) > 0:
# start with smallest maxima or minima
if np.min(local_mins) < np.min(local_maxs):
extrema[1:-1:2] = local_mins
extrema[2:-1:2] = local_maxs
else:
extrema[1:-1:2] = local_maxs
extrema[2:-1:2] = local_mins
elif len(local_mins) == 1 and len(local_maxs) == 0:
extrema[1] = local_mins
elif len(local_mins) == 0 and len(local_maxs) == 1:
extrema[1] = local_maxs
elif len(local_maxs) == len(local_mins) == 0:
pass
else:
raise ValueError('Failed to determine maxima and/or minima.')
part_sizes = np.diff(extrema)
    if np.any(part_sizes < 0):
raise ValueError('Failed to determine orders of maxima and minima.')
if not invert:
largest = np.argmax(part_sizes)
else:
largest = len(part_sizes) - np.argmax(part_sizes[::-1]) - 1
min_cut, max_cut = extrema[largest:largest + 2]
return slice(
min_cut if min_cut > 0 else None,
max_cut if max_cut < len(arr) - 1 else None)
# ======================================================================
def sgnlog(
x,
base=np.e):
"""
    Signed logarithm of x: log(abs(x)) * sign(x)
    Args:
        x (float|ndarray): The input value(s)
        base (float): The base of the logarithm.
Returns:
The signed logarithm
Examples:
>>> sgnlog(-100, 10)
-2.0
>>> sgnlog(-64, 2)
-6.0
>>> sgnlog(100, 2)
6.6438561897747253
"""
return np.log(np.abs(x)) / np.log(base) * np.sign(x)
# ======================================================================
def sgnlogspace(
start,
stop,
num=50,
endpoint=True,
base=10.0):
"""
Logarithmically spaced samples between signed start and stop endpoints.
Args:
start (float): The starting value of the sequence.
stop (float): The end value of the sequence.
num (int): Number of samples to generate. Must be non-negative.
endpoint (bool): The value of 'stop' is the last sample.
base (float): The base of the log space. Must be non-negative.
Returns:
samples (ndarray): equally spaced samples on a log scale.
Examples:
>>> sgnlogspace(-10, 10, 3)
array([-10. , 0.1, 10. ])
>>> sgnlogspace(-100, -1, 3)
array([-100., -10., -1.])
>>> sgnlogspace(-10, 10, 6)
array([-10. , -1. , -0.1, 0.1, 1. , 10. ])
>>> sgnlogspace(-10, 10, 5)
array([-10. , -0.1, 0.1, 1. , 10. ])
>>> sgnlogspace(2, 10, 4)
array([ 2. , 3.41995189, 5.84803548, 10. ])
"""
if start * stop < 0.0:
bounds = (
(start, -(np.exp(-np.log(np.abs(start))))),
((np.exp(-np.log(np.abs(stop)))), stop))
args_bounds = tuple(
tuple(np.log(np.abs(val)) / np.log(base) for val in arg_bounds)
for arg_bounds in bounds)
args_num = (num // 2, num - num // 2)
args_sign = (np.sign(start), np.sign(stop))
args_endpoint = True, endpoint
logspaces = tuple(
np.logspace(*(arg_bounds + (arg_num, arg_endpoint, base))) \
* arg_sign
for arg_bounds, arg_sign, arg_num, arg_endpoint
in zip(args_bounds, args_sign, args_num, args_endpoint))
samples = np.concatenate(logspaces)
else:
sign = np.sign(start)
logspace_bound = \
tuple(np.log(np.abs(val)) / np.log(base) for val in (start, stop))
samples = np.logspace(*(logspace_bound + (num, endpoint, base))) * sign
return samples
# ======================================================================
def minmax(arr):
"""
Calculate the minimum and maximum of an array: (min, max).
Args:
arr (np.ndarray): The input array.
Returns:
min (float): the minimum value of the array
max (float): the maximum value of the array
Examples:
>>> minmax(np.arange(10))
(0, 9)
"""
return np.min(arr), np.max(arr)
# ======================================================================
def scale(
val,
out_interval=None,
in_interval=None):
"""
    Linearly convert the value from the input interval to the output interval.
Args:
val (float|np.ndarray): Value(s) to convert.
out_interval (float,float): Interval of the output value(s).
If None, set to: (0, 1).
in_interval (float,float): Interval of the input value(s).
If None, and val is iterable, it is calculated as:
(min(val), max(val)), otherwise set to: (0, 1).
Returns:
val (float|np.ndarray): The converted value(s).
Examples:
>>> scale(100, (0, 1000), (0, 100))
1000.0
>>> scale(50, (0, 1000), (-100, 100))
750.0
>>> scale(50, (0, 10), (0, 1))
500.0
>>> scale(0.5, (-10, 10))
0.0
>>> scale(np.pi / 3, (0, 180), (0, np.pi))
60.0
>>> scale(np.arange(5), (0, 1))
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> scale(np.arange(6), (0, 10))
array([ 0., 2., 4., 6., 8., 10.])
>>> scale(np.arange(6), (0, 10), (0, 2))
array([ 0., 5., 10., 15., 20., 25.])
"""
if in_interval:
in_min, in_max = sorted(in_interval)
elif isinstance(val, np.ndarray):
in_min, in_max = minmax(val)
else:
in_min, in_max = (0, 1)
if out_interval:
out_min, out_max = sorted(out_interval)
else:
out_min, out_max = (0, 1)
return (val - in_min) / (in_max - in_min) * (out_max - out_min) + out_min
# ======================================================================
def combine_interval(
interval1,
interval2,
operation):
"""
Combine two intervals with some operation to obtain a new interval.
Args:
interval1 (tuple[float]): Interval of first operand
interval2 (tuple[float]): Interval of second operand
operation (str): String with operation to perform.
Supports the following operations:
- '+' : addition
- '-' : subtraction
Returns:
new_interval (tuple[float]): Interval resulting from operation
Examples:
>>> combine_interval((-1.0, 1.0), (0, 1), '+')
(-1.0, 2.0)
>>> combine_interval((-1.0, 1.0), (0, 1), '-')
(-2.0, 1.0)
"""
if operation == '+':
new_interval = (
interval1[0] + interval2[0], interval1[1] + interval2[1])
elif operation == '-':
new_interval = (
interval1[0] - interval2[1], interval1[1] - interval2[0])
else:
new_interval = (-np.inf, np.inf)
return new_interval
# ======================================================================
def midval(arr):
"""
    Calculate the midpoints between consecutive elements of a vector.
Args:
arr (np.ndarray): The input N-dim array
Returns:
arr (np.ndarray): The output (N-1)-dim array
Examples:
>>> midval(np.array([0, 1, 2, 3, 4]))
array([ 0.5, 1.5, 2.5, 3.5])
"""
return (arr[1:] - arr[:-1]) / 2.0 + arr[:-1]
# ======================================================================
def subst(
arr,
pairs=((np.inf, 0.0), (-np.inf, 0.0), (np.nan, 0.0))):
"""
Substitute all occurrences of a value in an array.
Useful to remove specific values, e.g. singularities.
Args:
arr (np.ndarray): The input array.
        pairs (tuple[tuple]): The substitution rules.
Each rule consist of a value to replace and its replacement.
Each rule is applied sequentially in the order they appear and
modify the content of the array immediately.
Returns:
arr (np.ndarray): The output array.
Examples:
>>> a = np.arange(10)
>>> subst(a, ((1, 100), (7, 700)))
array([ 0, 100, 2, 3, 4, 5, 6, 700, 8, 9])
>>> a = np.tile(np.arange(4), 3)
>>> subst(a, ((1, 100), (7, 700)))
array([ 0, 100, 2, 3, 0, 100, 2, 3, 0, 100, 2, 3])
>>> a = np.tile(np.arange(4), 3)
>>> subst(a, ((1, 100), (3, 300)))
array([ 0, 100, 2, 300, 0, 100, 2, 300, 0, 100, 2, 300])
>>> a = np.array([0.0, 1.0, np.inf, -np.inf, np.nan, -np.nan])
>>> subst(a)
array([ 0., 1., 0., 0., 0., 0.])
>>> a = np.array([0.0, 1.0, np.inf, 2.0, np.nan])
>>> subst(a, ((np.inf, 0.0), (0.0, np.inf), (np.nan, 0.0)))
array([ inf, 1., inf, 2., 0.])
>>> subst(a, ((np.inf, 0.0), (np.nan, 0.0), (0.0, np.inf)))
array([ inf, 1., inf, 2., inf])
"""
for k, v in pairs:
if k is np.nan:
arr[np.isnan(arr)] = v
else:
arr[arr == k] = v
return arr
# ======================================================================
def dftn(arr):
"""
Discrete Fourier Transform.
Interface to fftn combined with fftshift.
Args:
arr (np.ndarray): Input n-dim array.
Returns:
arr (np.ndarray): Output n-dim array.
Examples:
>>> a = np.arange(2)
>>> dftn(a)
array([-1.+0.j, 1.+0.j])
>>> print(np.allclose(a, dftn(idftn(a))))
True
See Also:
numpy.fft, scipy.fftpack
"""
return fftshift(fftn(arr))
# ======================================================================
def idftn(arr):
"""
Inverse Discrete Fourier transform.
Interface to ifftn combined with ifftshift.
Args:
arr (np.ndarray): Input n-dim array.
Returns:
arr (np.ndarray): Output n-dim array.
Examples:
>>> a = np.arange(2)
>>> idftn(a)
array([ 0.5+0.j, 0.5+0.j])
>>> print(np.allclose(a, idftn(dftn(a))))
True
See Also:
numpy.fft, scipy.fftpack
"""
return ifftn(ifftshift(arr))
# ======================================================================
def coord(
shape,
origin=0.5,
is_relative=True,
dense=False,
use_int=True):
"""
Calculate the generic x_i coordinates for N-dim operations.
Args:
shape (tuple[int]): The shape of the mask in px.
origin (float|tuple[float]): Relative position of the origin.
Values are in the [0, 1] interval.
is_relative (bool): Interpret origin as relative.
dense (bool): Determine the shape of the mesh-grid arrays.
        use_int (bool): Use an integer origin (and integer coordinates).
Returns:
coord (list[np.ndarray]): mesh-grid ndarrays.
The shape is identical if dense is True, otherwise only one
dimension is larger than 1.
Examples:
>>> coord((4, 4))
[array([[-2],
[-1],
[ 0],
[ 1]]), array([[-2, -1, 0, 1]])]
>>> coord((5, 5))
[array([[-2],
[-1],
[ 0],
[ 1],
[ 2]]), array([[-2, -1, 0, 1, 2]])]
>>> coord((2, 2))
[array([[-1],
[ 0]]), array([[-1, 0]])]
>>> coord((2, 2), dense=True)
array([[[-1, -1],
[ 0, 0]],
<BLANKLINE>
[[-1, 0],
[-1, 0]]])
>>> coord((2, 3), origin=(0.0, 0.5))
[array([[0],
[1]]), array([[-1, 0, 1]])]
>>> coord((3, 9), origin=(1, 4), is_relative=False)
[array([[-1],
[ 0],
[ 1]]), array([[-4, -3, -2, -1, 0, 1, 2, 3, 4]])]
>>> coord((3, 9), origin=0.2, is_relative=True)
[array([[0],
[1],
[2]]), array([[-1, 0, 1, 2, 3, 4, 5, 6, 7]])]
>>> coord((4, 4), use_int=False)
[array([[-1.5],
[-0.5],
[ 0.5],
[ 1.5]]), array([[-1.5, -0.5, 0.5, 1.5]])]
>>> coord((5, 5), use_int=False)
[array([[-2.],
[-1.],
[ 0.],
[ 1.],
[ 2.]]), array([[-2., -1., 0., 1., 2.]])]
>>> coord((2, 3), origin=(0.0, 0.0), use_int=False)
[array([[ 0.],
[ 1.]]), array([[ 0., 1., 2.]])]
"""
origin = auto_repeat(origin, len(shape), check=True)
if is_relative:
if use_int:
origin = [int(scale(x, (0, dim)))
for x, dim in zip(origin, shape)]
else:
origin = [scale(x, (0, dim - 1))
for x, dim in zip(origin, shape)]
elif any([not isinstance(x, int) for x in origin]) and use_int:
raise TypeError('Absolute origin must be integer.')
grid = [slice(-x0, dim - x0) for x0, dim in zip(origin, shape)]
return np.ogrid[grid] if not dense else np.mgrid[grid]
# ======================================================================
def _kk_2(
shape,
factors=1):
"""
Calculate the k^2 kernel to be used for the Laplacian operators.
Args:
shape (iterable[int]): The size of the array.
factors (iterable[int|tuple]): The size conversion factors for each dim.
Returns:
arr (np.ndarray): The resulting array.
Examples:
>>> _kk_2((3, 3, 3))
array([[[ 3., 2., 3.],
[ 2., 1., 2.],
[ 3., 2., 3.]],
<BLANKLINE>
[[ 2., 1., 2.],
[ 1., 0., 1.],
[ 2., 1., 2.]],
<BLANKLINE>
[[ 3., 2., 3.],
[ 2., 1., 2.],
[ 3., 2., 3.]]])
>>> _kk_2((3, 3, 3), np.sqrt(3))
array([[[ 1. , 0.66666667, 1. ],
[ 0.66666667, 0.33333333, 0.66666667],
[ 1. , 0.66666667, 1. ]],
<BLANKLINE>
[[ 0.66666667, 0.33333333, 0.66666667],
[ 0.33333333, 0. , 0.33333333],
[ 0.66666667, 0.33333333, 0.66666667]],
<BLANKLINE>
[[ 1. , 0.66666667, 1. ],
[ 0.66666667, 0.33333333, 0.66666667],
[ 1. , 0.66666667, 1. ]]])
>>> _kk_2((2, 2, 2), 0.6)
array([[[ 8.33333333, 5.55555556],
[ 5.55555556, 2.77777778]],
<BLANKLINE>
[[ 5.55555556, 2.77777778],
[ 2.77777778, 0. ]]])
"""
kk = coord(shape)
if factors and factors != 1:
factors = auto_repeat(factors, len(shape), check=True)
kk = [k_i / factor for k_i, factor in zip(kk, factors)]
kk_2 = np.zeros(shape)
for k_i, dim in zip(kk, shape):
kk_2 += k_i ** 2
return kk_2
# ======================================================================
def auto_pad_width(
pad_width,
shape,
combine=None):
"""
    Ensure that the pad_width value(s) consist of integers.
Args:
pad_width (float|int|iterable[float|int]): Size of the padding to use.
This is useful for mitigating border effects.
If iterable, a value for each dim must be specified.
If not iterable, all dims will have the same value.
If int, it is interpreted as absolute size.
If float, it is interpreted as relative to corresponding dim size.
shape (iterable[int]): The shape to associate to `pad_width`.
combine (callable|None): The function for combining shape values.
If None, uses the corresponding dim from the shape.
Returns:
pad_width (int|tuple[tuple[int]]): The absolute `pad_width`.
If input `pad_width` is not iterable, result is not iterable.
See Also:
np.pad
Examples:
>>> shape = (10, 20, 30)
>>> auto_pad_width(0.1, shape)
((1, 1), (2, 2), (3, 3))
>>> auto_pad_width(0.1, shape, max)
((3, 3), (3, 3), (3, 3))
>>> shape = (10, 20, 30)
>>> auto_pad_width(((0.1, 0.5),), shape)
((1, 5), (2, 10), (3, 15))
>>> auto_pad_width(((2, 3),), shape)
((2, 3), (2, 3), (2, 3))
>>> auto_pad_width(((2, 3), (1, 2)), shape)
Traceback (most recent call last):
        ...
AssertionError
>>> auto_pad_width(((0.1, 0.2),), shape, min)
((1, 2), (1, 2), (1, 2))
>>> auto_pad_width(((0.1, 0.2),), shape, max)
((3, 6), (3, 6), (3, 6))
"""
def float_to_int(val, scale):
return int(val * scale) if isinstance(val, float) else val
try:
iter(pad_width)
except TypeError:
pad_width = ((pad_width,) * 2,)
finally:
combined = combine(shape) if combine else None
pad_width = list(
pad_width if len(pad_width) > 1 else pad_width * len(shape))
assert (len(pad_width) == len(shape))
for i, (item, dim) in enumerate(zip(pad_width, shape)):
lower, upper = item
pad_width[i] = (
float_to_int(lower, dim if not combine else combined),
float_to_int(upper, dim if not combine else combined))
pad_width = tuple(pad_width)
return pad_width
# ======================================================================
def laplacian(
arr,
ft_factor=(2 * np.pi),
pad_width=0):
"""
Calculate the Laplacian operator in the Fourier domain.
Args:
arr (np.ndarray): The input array.
ft_factor (float): The Fourier factor for the gradient operator.
Should be either 1 or 2*pi, depending on DFT implementation.
pad_width (float|int|iterable[float|int]): Size of the padding to use.
This is useful for mitigating border effects.
If iterable, a value for each dim must be specified.
If not iterable, all dims will have the same value.
If int, it is interpreted as absolute size.
If float, it is interpreted as relative to the maximum size.
Returns:
arr (np.ndarray): The output array.
"""
if pad_width:
shape = arr.shape
pad_width = auto_pad_width(pad_width, shape)
mask = [slice(lower, -upper) for (lower, upper) in pad_width]
arr = np.pad(arr, pad_width, 'constant', constant_values=0)
else:
mask = [slice(None)] * arr.ndim
kk_2 = fftshift(_kk_2(arr.shape))
arr = ((1j * ft_factor) ** 2) * ifftn(kk_2 * fftn(arr))
    return arr[tuple(mask)]
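# Minimal sketch (not part of the original module; synthetic input): the
# FFT-based Laplacian preserves the input shape; `pad_width` mitigates the
# wrap-around effects of the implicit periodicity at the borders.
def _example_laplacian():
    arr = np.random.rand(32, 32)
    lap = laplacian(arr, pad_width=0.25)
    print(lap.shape)  # same shape as `arr`; values are complex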
# ======================================================================
def inv_laplacian(
arr,
ft_factor=(2 * np.pi),
pad_width=0):
"""
Calculate the inverse Laplacian operator in the Fourier domain.
Args:
arr (np.ndarray): The input array.
ft_factor (float): The Fourier factor for the gradient operator.
Should be either 1 or 2*pi, depending on DFT implementation.
pad_width (float|int): Size of the border to use.
This is useful for mitigating border effects.
If int, it is interpreted as absolute size.
If float, it is interpreted as relative to the maximum size.
Returns:
arr (np.ndarray): The output array.
"""
if pad_width:
shape = arr.shape
pad_width = auto_pad_width(pad_width, shape)
# mask = [slice(borders, -borders)] * arr.ndim
mask = [slice(lower, -upper) for (lower, upper) in pad_width]
arr = np.pad(arr, pad_width, 'constant', constant_values=0)
else:
mask = [slice(None)] * arr.ndim
kk_2 = fftshift(_kk_2(arr.shape))
kk_2[kk_2 != 0] = 1.0 / kk_2[kk_2 != 0]
arr = fftn(arr) * kk_2
arr = ((-1j / ft_factor) ** 2) * ifftn(arr)
    return arr[tuple(mask)]
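# Minimal sketch (not part of the original module; synthetic input): for a
# zero-mean array, `inv_laplacian(laplacian(arr))` recovers the input up to
# floating point error (the k=0 component, i.e. the mean, is lost by design).
def _example_inv_laplacian():
    arr = np.random.rand(16, 16)
    arr -= arr.mean()
    recovered = inv_laplacian(laplacian(arr))
    print(np.allclose(arr, recovered.real))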
# ======================================================================
def auto_bin(
arr,
method='auto'):
"""
Determine the optimal number of bins for an array.
Args:
arr (np.ndarray): The input array.
        method (str|None): The estimation method. If None, use the array size.
Returns:
num (int): The number of bins.
Examples:
>>> arr = np.arange(100)
>>> auto_bin(arr)
22
>>> auto_bin(arr, 'sqrt')
10
>>> auto_bin(arr, 'auto')
22
>>> auto_bin(arr, 'sturges')
8
>>> auto_bin(arr, 'rice')
10
>>> auto_bin(arr, 'scott')
22
>>> auto_bin(arr, 'fd')
22
>>> auto_bin(arr, None)
100
"""
if method == 'auto':
num = max(auto_bin(arr, 'fd'), auto_bin(arr, 'sturges'))
elif method == 'sqrt':
num = int(np.ceil(np.sqrt(arr.size)))
elif method == 'sturges':
num = int(np.ceil(np.log2(arr.size)) + 1)
elif method == 'rice':
num = int(np.ceil(2 * arr.size ** (1 / 3)))
elif method == 'scott':
num = int(np.ceil(3.5 * np.std(arr) / arr.size ** (1 / 3)))
elif method == 'fd':
q75, q25 =
|
np.percentile(arr, [75, 25])
|
numpy.percentile
|
import numpy as np
import torch
from torchvision import transforms
from torch import nn
from torch.nn import functional as F
import matplotlib.pyplot as plt
import json
import segmentation_models_pytorch as smp
import os
from PIL import Image
import random
import copy
import cv2
class HFlipDefault:
def __init__(self):
self.p = 0.5
self.hflip = transforms.functional.hflip
def __call__(self, data):
img, label = data
if np.random.rand() <= self.p:
img = self.hflip(img)
label = self.hflip(label)
return [img, label]
class colorJitter:
def __init__(self):
self.ColorJitter = transforms.ColorJitter(brightness=0.2,
contrast=0.2,
saturation=0.2,
hue=0.05)
def __call__(self, data):
img, label = data
img = self.ColorJitter(img)
return [img, label]
class normalize:
def __init__(self, mean, std):
self.normalize = transforms.Normalize(mean, std)
def __call__(self, data):
img, label = data
img = self.normalize(img)
return [img, label]
class toTensor:
def __init__(self):
self.toTensor = transforms.ToTensor()
def __call__(self, data):
img, label = data
img = self.toTensor(img)
label = torch.from_numpy(np.array(label, dtype=np.float64))
label = label.long()
return [img, label]
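# Sketch of how these paired transforms might be chained (not part of the
# original module; `img` and `label` are assumed to be PIL images, and the
# mean/std below are the usual ImageNet statistics, used only as an example):
def _example_transform_pipeline(img, label):
    pipeline = transforms.Compose([
        HFlipDefault(),
        colorJitter(),
        toTensor(),
        normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    return pipeline([img, label])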
def jaccard_loss(true, logits, eps=1e-7):
"""Computes the Jaccard loss, a.k.a the IoU loss.
    Note that PyTorch optimizers minimize a loss. In this
    case, we would like to maximize the Jaccard index, so we
    return one minus the Jaccard index.
Args:
true: a tensor of shape [B, H, W] or [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
jacc_loss: the Jaccard loss.
"""
num_classes = logits.shape[1]
if num_classes == 1:
true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
true_1_hot_f = true_1_hot[:, 0:1, :, :]
true_1_hot_s = true_1_hot[:, 1:2, :, :]
true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
pos_prob = torch.sigmoid(logits)
neg_prob = 1 - pos_prob
probas = torch.cat([pos_prob, neg_prob], dim=1)
else:
true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
probas = F.softmax(logits, dim=1)
true_1_hot = true_1_hot.type(logits.type())
dims = (0,) + tuple(range(2, true.ndimension()))
intersection = torch.sum(probas * true_1_hot, dims)
cardinality = torch.sum(probas + true_1_hot, dims)
union = cardinality - intersection
jacc_loss = (intersection / (union + eps))
jacc_loss = jacc_loss[1:].mean()
return (1 - jacc_loss)
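# Minimal usage sketch (not part of the original module; random tensors,
# shapes are illustrative only):
def _example_jaccard_loss():
    logits = torch.randn(2, 3, 8, 8)           # B=2, C=3 classes
    true = torch.randint(0, 3, (2, 1, 8, 8))   # integer class labels
    loss = jaccard_loss(true, logits)
    print(float(loss))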
class Metric(object):
"""Base class for all metrics.
From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py
"""
def reset(self):
pass
def add(self):
pass
def value(self):
pass
class ConfusionMatrix(Metric):
"""Constructs a confusion matrix for a multi-class classification problems.
Does not support multi-label, multi-class problems.
Keyword arguments:
- num_classes (int): number of classes in the classification problem.
- normalized (boolean, optional): Determines whether or not the confusion
matrix is normalized or not. Default: False.
Modified from: https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
"""
def __init__(self, num_classes, normalized=False):
super().__init__()
self.conf = np.ndarray((num_classes, num_classes), dtype=np.int32)
self.normalized = normalized
self.num_classes = num_classes
self.reset()
def reset(self):
self.conf.fill(0)
def add(self, predicted, target):
"""Computes the confusion matrix
The shape of the confusion matrix is K x K, where K is the number
of classes.
Keyword arguments:
- predicted (Tensor or numpy.ndarray): Can be an N x K tensor/array of
predicted scores obtained from the model for N examples and K classes,
or an N-tensor/array of integer values between 0 and K-1.
- target (Tensor or numpy.ndarray): Can be an N x K tensor/array of
ground-truth classes for N examples and K classes, or an N-tensor/array
of integer values between 0 and K-1.
"""
# If target and/or predicted are tensors, convert them to numpy arrays
if torch.is_tensor(predicted):
predicted = predicted.cpu().numpy()
if torch.is_tensor(target):
target = target.cpu().numpy()
assert predicted.shape[0] == target.shape[0], \
'number of targets and predicted outputs do not match'
if np.ndim(predicted) != 1:
assert predicted.shape[1] == self.num_classes, \
'number of predictions does not match size of confusion matrix'
predicted = np.argmax(predicted, 1)
else:
assert (predicted.max() < self.num_classes) and (predicted.min() >= 0), \
'predicted values are not between 0 and k-1'
if np.ndim(target) != 1:
assert target.shape[1] == self.num_classes, \
'Onehot target does not match size of confusion matrix'
assert (target >= 0).all() and (target <= 1).all(), \
'in one-hot encoding, target values should be 0 or 1'
assert (target.sum(1) == 1).all(), \
'multi-label setting is not supported'
target = np.argmax(target, 1)
else:
assert (target.max() < self.num_classes) and (target.min() >= 0), \
'target values are not between 0 and k-1'
# hack for bincounting 2 arrays together
x = predicted + self.num_classes * target
bincount_2d = np.bincount(
x.astype(np.int32), minlength=self.num_classes**2)
assert bincount_2d.size == self.num_classes**2
conf = bincount_2d.reshape((self.num_classes, self.num_classes))
self.conf += conf
def value(self):
"""
Returns:
            Confusion matrix of K rows and K columns, where rows correspond
            to ground-truth targets and columns correspond to predicted
            targets.
"""
if self.normalized:
conf = self.conf.astype(np.float32)
return conf / conf.sum(1).clip(min=1e-12)[:, None]
else:
return self.conf
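# Minimal usage sketch (not part of the original module; toy values):
def _example_confusion_matrix():
    cm = ConfusionMatrix(num_classes=3)
    predicted = torch.tensor([0, 1, 2, 2, 1])
    target = torch.tensor([0, 1, 1, 2, 1])
    cm.add(predicted, target)
    print(cm.value())  # 3 x 3 matrix: rows = targets, columns = predictions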
def do_cca(predicted, cuda=True):
predicted = F.softmax(predicted, dim=1)
if cuda:
predicted = predicted.cpu()
predicted = predicted.numpy()
new_pred = []
for i, pred in enumerate(predicted):
pred = pred.transpose(1, 2, 0)
mask = np.array(np.argmax(pred, axis=2), dtype=np.uint8)
mask2 = np.array(np.max(pred, axis=2))
ret, labels = cv2.connectedComponents(mask, connectivity=8)
biggest = 1
biggest_score = 0
for u in np.unique(labels)[1:]:
score = np.sum(mask2[labels == u])
if score > biggest_score:
biggest_score = score
biggest = u
out = np.zeros(mask.shape)
out[labels == biggest] = 1
new_pred.append(np.expand_dims(out, axis=0))
new_pred = np.concatenate(new_pred, axis=0)
return new_pred
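# Minimal sketch (not part of the original module; random logits, and
# `cuda=False` because the tensor below lives on the CPU): keep only the
# connected component with the highest summed softmax score per image.
def _example_do_cca():
    logits = torch.randn(1, 2, 16, 16)
    largest_component_mask = do_cca(logits, cuda=False)
    print(largest_component_mask.shape)  # (1, 16, 16)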
class IoU_cca(Metric):
"""Computes the intersection over union (IoU) per class and corresponding
mean (mIoU).
Intersection over union (IoU) is a common evaluation metric for semantic
segmentation. The predictions are first accumulated in a confusion matrix
and the IoU is computed from it as follows:
IoU = true_positive / (true_positive + false_positive + false_negative).
Keyword arguments:
- num_classes (int): number of classes in the classification problem
- normalized (boolean, optional): Determines whether or not the confusion
matrix is normalized or not. Default: False.
- ignore_index (int or iterable, optional): Index of the classes to ignore
when computing the IoU. Can be an int, or any iterable of ints.
"""
def __init__(self, num_classes, normalized=False, ignore_index=None):
super().__init__()
self.conf_metric = ConfusionMatrix(num_classes, normalized)
if ignore_index is None:
self.ignore_index = None
elif isinstance(ignore_index, int):
self.ignore_index = (ignore_index,)
else:
try:
self.ignore_index = tuple(ignore_index)
except TypeError:
raise ValueError("'ignore_index' must be an int or iterable")
def reset(self):
self.conf_metric.reset()
def add(self, predicted, target):
"""Adds the predicted and target pair to the IoU metric.
Keyword arguments:
- predicted (Tensor): Can be a (N, K, H, W) tensor of
predicted scores obtained from the model for N examples and K classes,
or (N, H, W) tensor of integer values between 0 and K-1.
- target (Tensor): Can be a (N, K, H, W) tensor of
target scores for N examples and K classes, or (N, H, W) tensor of
integer values between 0 and K-1.
"""
# Dimensions check
assert predicted.size(0) == target.size(0), \
'number of targets and predicted outputs do not match'
assert predicted.dim() == 3 or predicted.dim() == 4, \
"predictions must be of dimension (N, H, W) or (N, K, H, W)"
assert target.dim() == 3 or target.dim() == 4, \
"targets must be of dimension (N, H, W) or (N, K, H, W)"
new_pred = do_cca(predicted)
new_pred = torch.from_numpy(new_pred)
self.conf_metric.add(new_pred.view(-1), target.view(-1))
def value(self):
"""Computes the IoU and mean IoU.
The mean computation ignores NaN elements of the IoU array.
Returns:
Tuple: (IoU, mIoU). The first output is the per class IoU,
for K classes it's numpy.ndarray with K elements. The second output,
is the mean IoU.
"""
conf_matrix = self.conf_metric.value()
if self.ignore_index is not None:
for index in self.ignore_index:
conf_matrix[:, index] = 0
conf_matrix[index, :] = 0
true_positive = np.diag(conf_matrix)
false_positive = np.sum(conf_matrix, 0) - true_positive
false_negative = np.sum(conf_matrix, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
return iou, np.nanmean(iou[1:])
class IoU(Metric):
"""Computes the intersection over union (IoU) per class and corresponding
mean (mIoU).
Intersection over union (IoU) is a common evaluation metric for semantic
segmentation. The predictions are first accumulated in a confusion matrix
and the IoU is computed from it as follows:
IoU = true_positive / (true_positive + false_positive + false_negative).
Keyword arguments:
- num_classes (int): number of classes in the classification problem
- normalized (boolean, optional): Determines whether or not the confusion
matrix is normalized or not. Default: False.
- ignore_index (int or iterable, optional): Index of the classes to ignore
when computing the IoU. Can be an int, or any iterable of ints.
"""
def __init__(self, num_classes, normalized=False, ignore_index=None):
super().__init__()
self.conf_metric = ConfusionMatrix(num_classes, normalized)
if ignore_index is None:
self.ignore_index = None
elif isinstance(ignore_index, int):
self.ignore_index = (ignore_index,)
else:
try:
self.ignore_index = tuple(ignore_index)
except TypeError:
raise ValueError("'ignore_index' must be an int or iterable")
def reset(self):
self.conf_metric.reset()
def add(self, predicted, target):
"""Adds the predicted and target pair to the IoU metric.
Keyword arguments:
- predicted (Tensor): Can be a (N, K, H, W) tensor of
predicted scores obtained from the model for N examples and K classes,
or (N, H, W) tensor of integer values between 0 and K-1.
- target (Tensor): Can be a (N, K, H, W) tensor of
target scores for N examples and K classes, or (N, H, W) tensor of
integer values between 0 and K-1.
"""
# Dimensions check
assert predicted.size(0) == target.size(0), \
'number of targets and predicted outputs do not match'
assert predicted.dim() == 3 or predicted.dim() == 4, \
"predictions must be of dimension (N, H, W) or (N, K, H, W)"
assert target.dim() == 3 or target.dim() == 4, \
"targets must be of dimension (N, H, W) or (N, K, H, W)"
# If the tensor is in categorical format convert it to integer format
if predicted.dim() == 4:
_, predicted = predicted.max(1)
if target.dim() == 4:
_, target = target.max(1)
self.conf_metric.add(predicted.view(-1), target.view(-1))
def value(self):
"""Computes the IoU and mean IoU.
The mean computation ignores NaN elements of the IoU array.
Returns:
Tuple: (IoU, mIoU). The first output is the per class IoU,
for K classes it's numpy.ndarray with K elements. The second output,
is the mean IoU.
"""
conf_matrix = self.conf_metric.value()
if self.ignore_index is not None:
for index in self.ignore_index:
conf_matrix[:, index] = 0
conf_matrix[index, :] = 0
true_positive = np.diag(conf_matrix)
false_positive = np.sum(conf_matrix, 0) - true_positive
false_negative = np.sum(conf_matrix, 1) - true_positive
# Just in case we get a division by 0, ignore/hide the error
with np.errstate(divide='ignore', invalid='ignore'):
iou = true_positive / (true_positive + false_positive + false_negative)
return iou, np.nanmean(iou[1:])
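# Minimal usage sketch (not part of the original module; random tensors):
def _example_iou_metric():
    metric = IoU(num_classes=3)
    logits = torch.randn(2, 3, 8, 8)
    target = torch.randint(0, 3, (2, 8, 8))
    metric.add(logits, target)
    per_class_iou, mean_iou = metric.value()
    print(per_class_iou, mean_iou)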
def animate(i, fig, axs, path):
with open(path) as json_file:
config = json.load(json_file)
plt.cla()
    fig.suptitle('Best mIoU: {} in epoch: {}'.format(np.round(config['best_iou_score'], 3), config['best_iou_score_epoch']), fontsize=16)
axs[0].plot(config['losses'], c='b')
axs[0].set_title('jaccard loss')
axs[0].set_ylabel('1-mIoU')
axs[0].set_xlabel('Epochs')
axs[1].plot(config['iou_scores'], c='r')
axs[1].set_title('mean IoU')
axs[1].set_ylabel('mIoU')
axs[1].set_xlabel('Epochs')
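# Sketch of wiring `animate` into matplotlib's FuncAnimation (not part of the
# original module; the JSON path is a placeholder):
def _example_animate():
    from matplotlib.animation import FuncAnimation
    fig, axs = plt.subplots(1, 2)
    anim = FuncAnimation(fig, animate, fargs=(fig, axs, 'training_log.json'),
                         interval=2000)
    plt.show()
    return anim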
nets = {'Unet': smp.Unet,
'PsPNet': smp.PSPNet,
'LinkNet': smp.Linknet}
def get_model(name, segmentation_config):
model = nets[name]
model = model(**segmentation_config)
return model
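# Sketch of a possible call (not part of the original module; the encoder
# name and class count are hypothetical, not taken from this project):
def _example_get_model():
    segmentation_config = {
        'encoder_name': 'resnet34',
        'encoder_weights': 'imagenet',
        'classes': 2,
    }
    return get_model('Unet', segmentation_config)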
def load_subtraction(root,
key,
idx,
resize=None,
rotate=None,
colorJitter=None,
hflip=None,
vflip=None,
plot=False,
abs=True):
if plot:
plt.clf()
plt.cla()
    # make sure the same augmentation parameters are applied to all inputs
angle = 0
if rotate:
angle = random.uniform(-180, 180)
if hflip:
if np.random.rand() <= 0.5:
hflip = None
if vflip:
if np.random.rand() <= 0.5:
vflip = None
# get background and foreground, also augment them
b = Image.open(os.path.join(root,
key,
'background',
'img{:06d}.png'.format(idx))
).convert('RGB')
b = augment(b,
angle=angle,
resize=resize,
rotate=rotate,
colorJitter=colorJitter,
hflip=hflip,
vflip=vflip)
f = Image.open(os.path.join(root,
key,
'foreground',
'img{:06d}.png'.format(idx))
).convert('RGB')
if plot:
plt.subplot(3, 4, 1)
plt.imshow(np.array(f))
plt.title('RGB foreground')
plt.axis('off')
plt.subplot(3, 4, 2)
plt.imshow(np.array(copy.deepcopy(f).convert('HSV')))
plt.title('HSV foreground')
plt.axis('off')
f = augment(f,
angle=angle,
resize=resize,
rotate=rotate,
colorJitter=colorJitter,
hflip=hflip,
vflip=vflip)
# copy rgb images and convert them to hsv
b_hsv = copy.deepcopy(b).convert('HSV')
f_hsv = copy.deepcopy(f).convert('HSV')
if plot:
plt.subplot(3, 4, 5)
plt.imshow(np.array(f))
plt.title('RGB augmented foreground')
plt.axis('off')
plt.subplot(3, 4, 6)
plt.imshow(np.array(f_hsv))
plt.title('HSV augmented foreground')
plt.axis('off')
# load depth and augment them
b_depth = Image.open(os.path.join(root,
key,
'background',
'depth{:06d}.png'.format(idx))
)
b_depth = augment(b_depth,
angle=angle,
resize=resize,
rotate=rotate,
colorJitter=None,
hflip=hflip,
vflip=vflip)
f_depth = Image.open(os.path.join(root,
key,
'foreground',
'depth{:06d}.png'.format(idx))
)
if plot:
plt.subplot(3, 4, 3)
plt.imshow(np.array(f_depth))
plt.title('Depth foreground')
plt.axis('off')
f_depth = augment(f_depth,
angle=angle,
resize=resize,
rotate=rotate,
colorJitter=None,
hflip=hflip,
vflip=vflip)
if plot:
plt.subplot(3, 4, 7)
plt.imshow(np.array(f_depth))
plt.title('Depth augmented foreground')
plt.axis('off')
    # convert to numpy arrays
    b = np.array(b, dtype=float)
    f = np.array(f, dtype=float)
    b_hsv = np.array(b_hsv, dtype=float)
    f_hsv = np.array(f_hsv, dtype=float)
    b_depth = np.array(b_depth, dtype=float)
    f_depth = np.array(f_depth, dtype=float)
# eliminate measuring errors
f_depth[b_depth == 0] = 0
b_depth[f_depth == 0] = 0
# subtract
x = f - b
x_hsv = f_hsv - b_hsv
x_depth = f_depth - b_depth
# take absolute difference if wanted
if abs:
x = np.abs(x)
x_hsv = np.abs(x_hsv)
x_depth = np.abs(x_depth)
if plot:
plt.subplot(3, 4, 9)
plt.imshow(np.array(x, dtype=np.uint8))
plt.title('RGB subtracted')
plt.axis('off')
plt.subplot(3, 4, 10)
plt.imshow(
|
np.array(x_hsv, dtype=np.uint8)
|
numpy.array
|
# ------------------------------------------------------------------------
# Tools for stain normalisation
# ------------------------------------------------------------------------
import numpy as np
import cv2 as cv
from PIL import Image
from staintools.preprocessing.input_validation import is_uint8_image
from staintools import ReinhardColorNormalizer, LuminosityStandardizer, StainNormalizer
from staintools.stain_extraction.macenko_stain_extractor import MacenkoStainExtractor
from staintools.stain_extraction.vahadane_stain_extractor import VahadaneStainExtractor
from staintools.miscellaneous.optical_density_conversion import convert_OD_to_RGB
from staintools.miscellaneous.get_concentrations import get_concentrations
class LuminosityStandardizerIterative(LuminosityStandardizer):
"""
Transforms image to a standard brightness
Modifies the luminosity channel such that a fixed percentile is saturated
Standardiser can fit to source slide image and apply the same luminosity standardisation settings to all tiles generated
from the source slide image
"""
def __init__(self):
super().__init__()
self.p = None
def fit(self, I, percentile = 95):
assert is_uint8_image(I), "Image should be RGB uint8."
I_LAB = cv.cvtColor(I, cv.COLOR_RGB2LAB)
L_float = I_LAB[:, :, 0].astype(float)
self.p = np.percentile(L_float, percentile)
def standardize_tile(self, I):
I_LAB = cv.cvtColor(I, cv.COLOR_RGB2LAB)
L_float = I_LAB[:, :, 0].astype(float)
I_LAB[:, :, 0] = np.clip(255 * L_float / self.p, 0, 255).astype(np.uint8)
I = cv.cvtColor(I_LAB, cv.COLOR_LAB2RGB)
return I
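# Usage sketch (not part of the original module; images are placeholders):
# fit the luminosity percentile on the whole slide once, then standardise
# every tile with the same setting.
def _example_luminosity_standardiser(slide_img, tile_img):
    standardiser = LuminosityStandardizerIterative()
    standardiser.fit(slide_img)  # slide-level RGB uint8 image
    return standardiser.standardize_tile(tile_img)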
class ReinhardColorNormalizerIterative(ReinhardColorNormalizer):
"""
Normalise each tile from a slide to a target slide using the method of:
<NAME>, <NAME>, <NAME>, and <NAME>,
'Color transfer between images'
Normaliser can fit to source slide image and apply the same normalisation settings to all tiles generated from the
source slide image
Attributes
----------
target_means : tuple float
means pixel value for each channel in target image
target_stds : tuple float
standard deviation of pixel values for each channel in target image
source_means : tuple float
mean pixel value for each channel in source image
source_stds : tuple float
standard deviation of pixel values for each channel in source image
Methods
-------
fit_target(target)
Fit normaliser to target image
fit_source(source)
Fit normaliser to source image
transform(I)
Transform an image to normalise it to the target image
transform_tile(I)
Transform a tile using precomputed parameters that normalise the source slide image to the target slide image
lab_split(I)
        Convert from RGB uint8 to LAB and split into channels
merge_back(I1, I2, I3)
Take separate LAB channels and merge back to give RGB uint8
get_mean_std(I)
Get mean and standard deviation of each channel
"""
def __init__(self):
super().__init__()
self.source_means = None
self.source_stds = None
def fit_target(self, target):
"""Fit to a target image
Parameters
----------
target : Image RGB uint8
Returns
-------
None
"""
means, stds = self.get_mean_std(target)
self.target_means = means
self.target_stds = stds
def fit_source(self, source):
"""Fit to a source image
Parameters
----------
source : Image RGB uint8
Returns
-------
None
"""
means, stds = self.get_mean_std(source)
self.source_means = means
self.source_stds = stds
def transform_tile(self, I):
"""Transform a tile using precomputed parameters that normalise the source slide image to the target slide image
Parameters
----------
I : Image RGB uint8
Returns
-------
transformed_tile : Image RGB uint8
"""
I1, I2, I3 = self.lab_split(I)
norm1 = ((I1 - self.source_means[0]) * (self.target_stds[0] / self.source_stds[0])) + self.target_means[0]
norm2 = ((I2 - self.source_means[1]) * (self.target_stds[1] / self.source_stds[1])) + self.target_means[1]
norm3 = ((I3 - self.source_means[2]) * (self.target_stds[2] / self.source_stds[2])) + self.target_means[2]
return self.merge_back(norm1, norm2, norm3)
class StainNormalizerIterative(StainNormalizer):
"""Normalise each tile from a slide to a target slide using the Macenko or Vahadane method
"""
def __init__(self, method):
super().__init__(method)
self.maxC_source = None
def fit_target(self, I):
self.fit(I)
def fit_source(self, I):
self.stain_matrix_source = self.extractor.get_stain_matrix(I)
source_concentrations = get_concentrations(I, self.stain_matrix_source)
self.maxC_source = np.percentile(source_concentrations, 99, axis=0).reshape((1, 2))
def transform_tile(self, I):
source_concentrations = get_concentrations(I, self.stain_matrix_source)
source_concentrations *= (self.maxC_target / self.maxC_source)
tmp = 255 * np.exp(-1 * np.dot(source_concentrations, self.stain_matrix_target))
return tmp.reshape(I.shape).astype(np.uint8)
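# Minimal numeric sketch (an assumption, for illustration only) of the reconstruction used in
# transform_tile above: optical density is modelled as concentrations (N, 2) times a (2, 3)
# stain matrix, concentrations are rescaled by the ratio of target to source 99th-percentile
# maxima, and RGB is rebuilt with the Beer-Lambert relation I = 255 * exp(-C @ S).
def _example_stain_reconstruction(concentrations, stain_matrix, maxC_source, maxC_target, shape):
    scaled = concentrations * (maxC_target / maxC_source)     # rescale concentrations to the target
    rgb = 255 * np.exp(-1 * np.dot(scaled, stain_matrix))     # Beer-Lambert reconstruction, (N, 3)
    return rgb.reshape(shape).astype(np.uint8)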
class IterativeNormaliser:
    Iteratively normalise each tile from a slide to a target using a selectable method
Normalisation methods include: 'none', 'reinhard', 'macenko' and 'vahadane'
Luminosity standardisation is also selectable
"""
def __init__(self, normalisation_method = 'vahadane', standardise_luminosity = True):
self.method = normalisation_method
self.standardise_luminosity = standardise_luminosity
# Instantiate normaliser and luminosity standardiser
if normalisation_method == 'none':
pass
elif normalisation_method == 'reinhard':
self.normaliser = ReinhardColorNormalizerIterative()
elif normalisation_method == 'macenko' or normalisation_method == 'vahadane':
self.normaliser = StainNormalizerIterative(normalisation_method)
if standardise_luminosity:
self.lum_std = LuminosityStandardizerIterative()
def fit_target(self, target_img):
if self.standardise_luminosity:
self.target_std = self.lum_std.standardize(np.array(target_img))
else:
self.target_std = np.array(target_img)
if self.method != 'none':
self.normaliser.fit_target(self.target_std)
def fit_source(self, source_img):
if self.standardise_luminosity:
self.lum_std.fit(np.array(source_img))
source_std = self.lum_std.standardize_tile(np.array(source_img))
else:
            source_std = np.array(source_img)
        # completion inferred from fit_target: fit the normaliser to the standardised source image
        if self.method != 'none':
            self.normaliser.fit_source(source_std)
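# Illustrative end-to-end sketch (an assumption, not from the original file): fit the
# iterative normaliser once per (target image, source slide) pair, then process tiles
# using only the precomputed statistics. The per-tile path mirrors fit_source above:
# luminosity standardisation first (if enabled), then the colour/stain transform.
def _example_normalise_slide(target_img, source_img, tiles,
                             method='vahadane', standardise_luminosity=True):
    norm = IterativeNormaliser(normalisation_method=method,
                               standardise_luminosity=standardise_luminosity)
    norm.fit_target(target_img)
    norm.fit_source(source_img)
    transformed = []
    for tile in tiles:
        tile = np.array(tile)
        if standardise_luminosity:
            tile = norm.lum_std.standardize_tile(tile)
        if method != 'none':
            tile = norm.normaliser.transform_tile(tile)
        transformed.append(tile)
    return transformed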
import os
import sys
if '..' not in sys.path:
print("pipeline.py: appending '..' to sys.path")
sys.path.append('..')
import numpy as np
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import pprint
import copy
import winsound
from tqdm import tqdm, trange
from collections import deque, defaultdict
from datetime import datetime
from classes.line import Line
from classes.plotdisplay import PlotDisplay
from common.utils import (sliding_window_detection , polynomial_proximity_detection,
offCenterMsg , curvatureMsg , colorLanePixels , displayPolynomial , displayRoILines,
displayDetectedRegion , displayText , displayGuidelines , displayPolySearchRegion,
display_one, display_two, display_multi )
from common.sobel import apply_thresholds, apply_perspective_transform, perspectiveTransform, erodeDilateImage
pp = pprint.PrettyPrinter(indent=2, width=100)
print(' Loading pipeline.py - cwd:', os.getcwd())
class VideoPipeline(object):
NAME = 'ALFConfig'
def __init__(self, cameraConfig, **kwargs):
self.camera = cameraConfig
self.height = self.camera.height
self.width = self.camera.width
self.camera_x = self.camera.width //2
self.camera_y = self.camera.height
self.debug = kwargs.get('debug' , False)
self.debug2 = kwargs.get('debug2' , False)
self.debug3 = kwargs.get('debug3' , False)
self.displayResults = kwargs.get('displayResults' , False)
self.displayFittingInfo = kwargs.get('displayFittingInfo' , False)
self.displayRealignment = kwargs.get('displayRealignment' , False)
self.overlayBeta = kwargs.get('overlayBeta' , 0.7)
self.ERODE_DILATE = kwargs.get('erode_dilate' , False)
self.mode = kwargs.get('mode' , 1)
self.POLY_DEGREE = kwargs.get('poly_degree' , 2)
self.MIN_POLY_DEGREE = kwargs.get('min_poly_degree' , 2)
self.MIN_X_SPREAD = kwargs.get('min_x_spread' , 90)
self.MIN_Y_SPREAD = kwargs.get('min_y_spread' , 350)
self.HISTORY = kwargs.get('history' , 8)
self.COMPUTE_HISTORY = kwargs.get('compute_history' , 2)
self.NWINDOWS = kwargs.get('nwindows' , 30)
self.HISTOGRAM_WIDTH_RANGE = kwargs.get('hist_width_range' , 600)
self.HISTOGRAM_DEPTH_RANGE = kwargs.get('hist_depth_range' , 2 * self.height // 3)
self.WINDOW_SRCH_MRGN = kwargs.get('window_search_margin' , 55)
self.INIT_WINDOW_SRCH_MRGN = kwargs.get('init_window_search_margin' , self.WINDOW_SRCH_MRGN)
self.MINPIX = kwargs.get('minpix' , 90)
self.MAXPIX = kwargs.get('maxpix' , 8000)
self.POLY_SRCH_MRGN = kwargs.get('poly_search_margin' , 45)
self.IMAGE_RATIO_HIGH_THRESHOLD = kwargs.get('image_ratio_high_threshold', 40)
self.IMAGE_RATIO_LOW_THRESHOLD = kwargs.get('image_ratio_low_threshold' , 2)
self.LANE_COUNT_THRESHOLD = kwargs.get('lane_count_threshold' , 4500)
self.LANE_RATIO_LOW_THRESHOLD = kwargs.get('lane_ratio_low_threshold' , 2)
self.LANE_RATIO_HIGH_THRESHOLD = kwargs.get('lane_ratio_high_threshold' , 60)
self.RSE_THRESHOLD = kwargs.get('rse_threshold' , 80)
self.PARALLEL_LINES_MARGIN = kwargs.get('parallel_lines_margin' , 70)
self.YELLOW_DETECTION_LIMIT = kwargs.get('yello_limit' , 25)
self.RED_DETECTION_LIMIT = kwargs.get('red_limit' , 50)
self.OFF_CENTER_ROI_THRESHOLD = kwargs.get('off_center_roi_threshold', 60)
self.CURRENT_OFFCTR_ROI_THR = np.copy(self.OFF_CENTER_ROI_THRESHOLD)
self.HISTOGRAM_SEARCH_RANGE = (self.camera_x - self.HISTOGRAM_WIDTH_RANGE, self.camera_x + self.HISTOGRAM_WIDTH_RANGE)
## Thresholding Parameters
self.HIGH_RGB_THRESHOLD = kwargs.get('high_rgb_threshold' , 255) # 180) # 220
self.MED_RGB_THRESHOLD = kwargs.get('med_rgb_threshold' , 255) # 180) # 175 ## chgd from 110 2-26-20
self.LOW_RGB_THRESHOLD = kwargs.get('low_rgb_threshold' , 255) # 100) # 175 ## chgd from 110 2-26-20
self.VLOW_RGB_THRESHOLD = kwargs.get('vlow_rgb_threshold' , 255) # 35) # 175 ## chgd from 110 2-26-20
self.XHIGH_SAT_THRESHOLD = kwargs.get('xhigh_sat_threshold' , 255) # 120) # 150
self.HIGH_SAT_THRESHOLD = kwargs.get('high_sat_threshold' , 255) # 65) # 150
self.LOW_SAT_THRESHOLD = kwargs.get('low_sat_threshold' , 255) # 20) # 20 ## chgd from 110 2-26-20
self.XHIGH_THRESHOLDING = kwargs.get('xhigh_thresholding' , 'cmb_mag_x')
self.HIGH_THRESHOLDING = kwargs.get('high_thresholding' , 'cmb_mag_x')
self.NORMAL_THRESHOLDING = kwargs.get('med_thresholding' , 'cmb_rgb_lvl_sat')
self.LOW_THRESHOLDING = kwargs.get('low_thresholding' , 'cmb_mag_xy')
self.VLOW_THRESHOLDING = kwargs.get('vlow_thresholding' , 'cmb_mag_xy')
self.HISAT_THRESHOLDING = kwargs.get('hisat_thresholding' , 'cmb_mag_x')
self.LOWSAT_THRESHOLDING = kwargs.get('lowsat_thresholding' , 'cmb_hue_x')
# self.DARK_THRESHOLDING = 'cmb_mag_x'
# self.lowsat_thresholding = 'cmb_rgb_lvl_sat_mag'
# self.NORMAL_THRESHOLDING = 'cmb_rgb_lvl_sat_mag_x'
## set threshold limits for various conditions
self.initialize_thresholding_parameters()
self.slidingWindowBootstrap = True
self.firstFrame = True
self.RoIAdjustment = False
self.validLaneDetections = False
self.imgThrshldHistory = []
self.imgCondHistory = []
self.imgAcceptHistory = []
self.imgAdjustHistory = []
self.diffsSrcDynPoints = []
self.offctr_history = []
self.imgPixelRatio = []
self.src_points_history = []
self.HLS_key = ['Hue', 'Lvl', 'Sat']
self.RGB_key = ['Red', 'Grn', 'Blu']
self.imgUndistStats = self.initImageInfoDict()
self.imgWarpedStats = self.initImageInfoDict()
self.ttlFullReject = 0
self.ttlSkipFrameDetect = 0
self.ttlRejectedFrames = 0
self.ttlAcceptedFrames = 0
self.ttlRejectedFramesSinceAccepted = 0
self.ttlAcceptedFramesSinceRejected = 0
## Parameters for perspective transformation source/destination points
self.y_src_top = kwargs.get('y_src_top' , 480) ## 460 -> 465 y_src_bot - 255
self.y_src_bot = kwargs.get('y_src_bot' , self.height) ## image.shape[0] - 20
self.RoI_x_adj = kwargs.get('RoI_x_adj' , 25)
self.lane_theta = kwargs.get('lane_theta' , 40) ## Lane Angle
self.x_bot_disp = kwargs.get('bot_x_disp' , 375)
self.x_dst_left = kwargs.get('x_dst_left' , 300)
self.x_dst_right = kwargs.get('x_dst_right' , 1000)
self.y_dst_top = kwargs.get('y_dst_top' , 0)
self.y_dst_bot = kwargs.get('y_dst_bot' , self.height - 1)
## Parameters indicating extent of detected region to be displayed on final image
self.displayRegionTop = kwargs.get('displayRegionTop' , self.y_src_top)
self.displayRegionBot = kwargs.get('displayRegionBot' , self.y_src_bot)
print(' y_src_bot: ', self.y_src_bot, ' displayRegionBot : ', self.displayRegionBot)
self.src_points_list, self.src_points = self.build_source_RoI_region()
self.dst_points_list, self.dst_points = self.build_dest_RoI_region()
self.prev_src_points_list = copy.copy(self.src_points_list)
## Destination points for Perspective Transform
self.curvature_y_eval = self.y_src_bot
self.offCenter_y_eval = self.y_src_bot
self.np_format = {
'float' : lambda x: "%7.2f" % x,
'int' : lambda x: "%5d" % x
}
np.set_printoptions(linewidth=195, precision=4, floatmode='fixed', threshold =500, formatter = self.np_format)
self.LeftLane = Line(name = 'Left', history = self.HISTORY, compute_history = self.COMPUTE_HISTORY,
poly_degree = self.POLY_DEGREE, min_poly_degree = self.MIN_POLY_DEGREE,
min_x_spread = self.MIN_X_SPREAD, min_y_spread = self.MIN_Y_SPREAD,
height = self.height, y_src_top = self.y_src_top, y_src_bot = self.y_src_bot,
rse_threshold = self.RSE_THRESHOLD)
self.RightLane= Line(name = 'Right', history = self.HISTORY, compute_history = self.COMPUTE_HISTORY,
poly_degree = self.POLY_DEGREE, min_poly_degree = self.MIN_POLY_DEGREE,
min_x_spread = self.MIN_X_SPREAD, min_y_spread = self.MIN_Y_SPREAD,
height = self.height, y_src_top = self.y_src_top, y_src_bot = self.y_src_bot,
rse_threshold = self.RSE_THRESHOLD)
print(' Pipeline initialization complete...')
def initImageInfoDict(self):
plots_dict = {}
plots_dict.setdefault('RGB', [])
plots_dict.setdefault('HLS', [])
for key1 in self.RGB_key + self.HLS_key : ## ['Hue', 'Lvl', 'Sat', 'Red', 'Grn', 'Blu', 'RGB']:
plots_dict.setdefault(key1, [])
return plots_dict
def saveImageStats(self, image, imageDict):
imgHLS = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
imageDict['RGB'].append(np.round(image.mean(),0))
imageDict['HLS'].append(np.round(imgHLS.mean(),0))
img_RGB_Avgs = np.round(image.mean(axis=(0,1)),0)
img_HLS_Avgs = np.round(imgHLS.mean(axis=(0,1)),0)
for i,key in enumerate(self.RGB_key):
imageDict[key].append(img_RGB_Avgs[i])
for i,key in enumerate(self.HLS_key):
imageDict[key].append(img_HLS_Avgs[i])
def process_one_frame(self, **kwargs):
self.debug = kwargs.get('debug' , True)
self.debug2 = kwargs.get('debug2' , True)
self.debug3 = kwargs.get('debug3' , False)
read_next = kwargs.get('read_next', True)
size = kwargs.get('size', (15,7))
show = kwargs.get('show', True)
# display = kwargs.get('display', True)
self.displayResults = kwargs.get('displayResults' , self.displayResults )
self.displayFittingInfo = kwargs.get('displayFittingInfo', self.displayFittingInfo)
self.displayRealignment = kwargs.get('displayRealignment', self.displayRealignment)
# print(kwargs)
# print(f' displayFittingInfo: {self.displayFittingInfo} displayRealignment:{self.displayRealignment} displayResults:{self.displayResults}')
if read_next:
rc1= self.inVideo.getNextFrame()
else:
rc1 = True
if rc1:
outputImage, disp = self(displayResults = self.displayResults,
displayFittingInfo = self.displayFittingInfo,
displayRealignment = self.displayRealignment,
debug = self.debug, debug2 = self.debug2, debug3 = self.debug3)
self.outVideo.saveFrameToVideo(outputImage, debug = False)
# _ = display_one(outputImage, size=size, title = self.frameTitle)
else:
outputImage, disp = None, None
winsound.MessageBeep(type=winsound.MB_ICONHAND)
return (outputImage, disp)
def process_frame_range(self, toFrame, **kwargs):
self.debug = kwargs.get('debug' , False)
self.debug2 = kwargs.get('debug2' , False)
self.debug3 = kwargs.get('debug3' , False)
display = kwargs.get('display', False)
disp_interval = kwargs.get('disp_interval', 50)
size = kwargs.get('size', (15,5))
show = kwargs.get('show', True)
self.displayResults = kwargs.get('displayResults' , self.displayResults )
self.displayFittingInfo = kwargs.get('displayFittingInfo', self.displayFittingInfo)
self.displayRealignment = kwargs.get('displayRealignment', self.displayRealignment)
if toFrame > self.inVideo.ttlFrames:
toFrame = self.inVideo.ttlFrames
print(' displayFittingInfo: ', self.displayFittingInfo, ' displayRealignment:', self.displayRealignment, ' displayResults: ', self.displayResults)
print(' Process frames : {} to: {} of {} frames'.format(self.inVideo.currFrameNum, toFrame, self.inVideo.ttlFrames), flush= True)
rc1 = True
# progress_bar = tqdm( range(self.inVideo.currFrameNum, toFrame), unit=' frames ',
# initial = self.inVideo.currFrameNum) ## , postfix={'loss':cost_np, 'acc': accuracy_np})
# for i in progress_bar:
# for i in trange(self.inVideo.currFrameNum, toFrame):
while self.inVideo.currFrameNum < toFrame and rc1:
rc1 = self.inVideo.getNextFrame()
if rc1:
output, disp = self(displayResults = self.displayResults,
displayFittingInfo = self.displayFittingInfo,
displayRealignment = self.displayRealignment,
debug = self.debug, debug2 = self.debug2, debug3 = self.debug3)
self.outVideo.saveFrameToVideo(output, debug = self.debug)
else:
break
if (self.inVideo.currFrameNum % disp_interval == 0):
print(f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} - Completed {self.inVideo.currFrameNum} frames ")
if show : ## or (110 <=Pipeline.inVideo.currFrameNum <=160) :
display_two(self.prevBestFit, self.imgLanePxls, size = (15,5),
title1 = 'Prev best fit (Cyan: Prev fit, Yellow: New proposal)' ,
                        title2 = 'ImgLanePxls (Cyan: Prev fit, Yellow: New proposal, Fuchsia: New Best Fit)' )
display_one(output, size= size, title = self.inVideo.frameTitle)
        print('Finished - Current frame number :', self.inVideo.currFrameNum)
return
def __call__(self, **kwargs ):
'''
'''
self.debug = kwargs.get('debug' , False)
self.debug2 = kwargs.get('debug2', False)
self.debug3 = kwargs.get('debug3', False)
self.debug4 = kwargs.get('debug4', False)
self.displayResults = kwargs.get('displayResults' , self.displayResults )
self.displayFittingInfo = kwargs.get('displayFittingInfo', self.displayFittingInfo)
self.displayRealignment = kwargs.get('displayRealignment', self.displayRealignment)
self.exit = kwargs.get('exit' , 0)
self.mode = kwargs.get('mode' , self.mode)
self.slidingWindowBootstrap = kwargs.get('slidingWindowBootstrap' , self.slidingWindowBootstrap)
self.image = self.inVideo.image
self.frameTitle = self.inVideo.frameTitle
self.resultExtraInfo = None
###----------------------------------------------------------------------------------------------
### PIPELINE
###----------------------------------------------------------------------------------------------
self.imgUndist = self.camera.undistortImage(self.image)
self.saveImageStats(self.imgUndist, self.imgUndistStats)
self.src_points_history.append(self.src_points)
self.imgWarped, self.M , self.Minv = perspectiveTransform(self.imgUndist, self.src_points, self.dst_points, debug = self.debug4)
self.saveImageStats(self.imgWarped, self.imgWarpedStats)
self.imgRoI = displayRoILines(self.imgUndist, self.src_points_list, thickness = 2)
self.imgRoIWarped, _, _ = perspectiveTransform(self.imgRoI , self.src_points , self.dst_points)
self.imgRoIWarped = displayRoILines(self.imgRoIWarped , self.dst_points_list, thickness = 2, color = 'yellow')
###----------------------------------------------------------------------------------------------
        ### Select image to process based on MODE parameter, and select thresholding parameters
###----------------------------------------------------------------------------------------------
self.set_thresholding_parms()
###----------------------------------------------------------------------------------------------
### Debug Info
###----------------------------------------------------------------------------------------------
if self.debug:
self.debugInfo_ImageInfo()
self.debugInfo_ImageSummaryInfo()
self.debugInfo_srcPointsRoI(title= 'Perspective Tx. source points')
###----------------------------------------------------------------------------------------------
### Apply thresholding and Warping of thresholded images
###----------------------------------------------------------------------------------------------
if self.mode == 1:
self.image_to_threshold = self.imgUndist
else:
self.image_to_threshold = self.imgWarped
outputs = apply_thresholds(self.image_to_threshold, self.thresholdParms)
if self.mode == 1:
warped_outputs = apply_perspective_transform(outputs, self.thresholdStrs, self.src_points, self.dst_points,
size = (15,5), debug = self.debug)
self.working_image = warped_outputs[self.thresholdMethod]
self.imgThrshld = outputs[self.thresholdMethod]
else:
self.working_image = outputs[self.thresholdMethod]
self.imgThrshld = outputs[self.thresholdMethod]
# display_one(self.imgThrshld, size=(15,7), title = 'imgThrshld')
# display_two(self.imgThrshld, self.working_image, title1 = 'imgThrshld', title2 = 'working_image')
# if self.exit == 1:
# return self.imgThrshld, None
###----------------------------------------------------------------------------------------------
## if ERODE_DILATE flag is True, erode/dilate thresholded image
###----------------------------------------------------------------------------------------------
        # if self.mode == 1:       ### Warped AFTER thresholding
# self.post_threshold, _, Minv = perspectiveTransform(self.imgThrshld, self.src_points, self.dst_points, debug = self.debug4)
# else: ### Warped BEFORE thresholding
# self.post_threshold = self.imgThrshld
# if self.ERODE_DILATE:
# self.working_image = erodeDilateImage(self.post_threshold , ksize = 3, iters = 3)
# else:
# self.working_image = self.post_threshold
# self.working_image = self.post_threshold
###----------------------------------------------------------------------------------------------
## INTERMEDIATE DEBUG DISPLAYS
###----------------------------------------------------------------------------------------------
# if debug and displayResults: ## and self.mode == 2:
if self.debug:
self.debugInfo_ThresholdedImage()
###----------------------------------------------------------------------------------------------
### Find lane pixels
###----------------------------------------------------------------------------------------------
if self.slidingWindowBootstrap:
window_search_margin = self.INIT_WINDOW_SRCH_MRGN if self.firstFrame else self.WINDOW_SRCH_MRGN
reset_search_base = (self.firstFrame or self.imgAcceptHistory[-1] < -10)
if self.RoIAdjustment :
reset_search_base = False
self.out_img, self.histogram, self.detStats = sliding_window_detection(self.working_image,
self.LeftLane, self.RightLane,
nwindows = self.NWINDOWS,
histWidthRange = self.HISTOGRAM_WIDTH_RANGE,
histDepthRange = self.HISTOGRAM_DEPTH_RANGE,
search_margin = window_search_margin,
reset_search_base = reset_search_base,
debug = self.debug,
debug2 = self.debug2)
else:
self.out_img, self.histogram, self.detStats = polynomial_proximity_detection(self.working_image,
self.LeftLane, self.RightLane,
search_margin = self.POLY_SRCH_MRGN,
debug = self.debug)
if self.debug:
self.debugInfo_LaneDetInfo()
self.assess_lane_detections()
# if self.exit == 2:
# return self.out_img, None
###----------------------------------------------------------------------------------------------
### Fit polynomial on found lane pixels
###----------------------------------------------------------------------------------------------
for Lane in [self.LeftLane, self.RightLane]:
Lane.fit_polynomial(debug = self.debug)
self.assess_fitted_polynomials()
if self.debug:
self.debugInfo_DetectedLanes(display=0, size = (15,5))
if self.displayFittingInfo:
self.debugInfo_displayFittingInfo()
###----------------------------------------------------------------------------------------------
### Build output image frame
###----------------------------------------------------------------------------------------------
self.build_result_image()
###----------------------------------------------------------------------------------------------
### Determine if an adjustment of the Perspective transformation window is necessary and if so,
### adjust the SRC_POINTS_LIST and/or DST_POINTS_LIST accordingly
###----------------------------------------------------------------------------------------------
self.adjust_RoI_window()
###----------------------------------------------------------------------------------------------
### All done - build display results if requested
###----------------------------------------------------------------------------------------------
if self.displayResults:
self.build_display_results()
if self.firstFrame :
self.firstFrame = False
return self.resultImage, self.resultExtraInfo
##--------------------------------------------------------------------------------------
##
##--------------------------------------------------------------------------------------
def assess_lane_detections(self):
imgPixelRatio, self.NztoSrchNzRatio, self.NztoImageNzRatio, ttlImageNZPixels, ttlLaneNZPixels = self.detStats
self.imgPixelRatio.append(imgPixelRatio)
lower_nz_pxl_cnt = round(np.sum(self.working_image[480:,:]) * 100/(self.height*self.width//3),2)
if self.debug:
print()
print('assess_lane_detections()')
print('-'*40)
print(' Lower image non_zero pixel ratio: %{:8.2f}'.format(lower_nz_pxl_cnt))
print(' (Image NZ pixels to Total Pixels in image) imgPixelRatio : %{:8.2f} \n'\
' (Detected NZ Pixels to All pixels in Search Region) NZtoSrNZRatio : %{:8.2f} \n' \
' (Detected NZ Pixels to All NZ Pixels in image) NztoTtlNzRatio: %{:8.2f}'.format(
imgPixelRatio , self.NztoSrchNzRatio , self.NztoImageNzRatio ))
print()
msgs = []
image_conditions = []
##------------------------------------------------------------------------------------------
## Frame / Lane detection Quality checks
##------------------------------------------------------------------------------------------
for Lane in [self.LeftLane, self.RightLane]:
if (Lane.pixelCount[-1] < self.LANE_COUNT_THRESHOLD):
image_conditions.append(10)
msgs.append(' *** (10) {:5s} Lane pixel count under threshold - Pxl Count: {:7.0f} < Count Threshold: ({:4d}) '.format(
Lane.name, Lane.pixelCount[-1], self.LANE_COUNT_THRESHOLD))
Lane.goodLaneDetection = False
elif (Lane.pixelRatio[-1] < self.LANE_RATIO_LOW_THRESHOLD):
image_conditions.append(11)
msgs.append(' *** (11) {:5s} Lane pixel ratio under threshold - Pxl Ratio: {:7.3f} < Ratio Threshold: ({:7.3f}) '\
' Pxl Count: {:7.0f} - Count Threshold: ({:4d})'.format(Lane.name,
Lane.pixelRatio[-1], self.LANE_RATIO_LOW_THRESHOLD, Lane.pixelCount[-1], self.LANE_COUNT_THRESHOLD))
Lane.goodLaneDetection = False
elif (Lane.pixelRatio[-1] > self.LANE_RATIO_HIGH_THRESHOLD) and \
(self.NztoImageNzRatio < 30):
image_conditions.append(12)
msgs.append(' *** (12) {:5s} Lane pxl ratio > threshold - Pxl Ratio: {:7.3f} > Ratio Threshold: ({:7.3f}) '\
' Det Nz to Ttl Nz Ratio: ({:7.3f})'.format(Lane.name,
Lane.pixelRatio[-1], self.LANE_RATIO_HIGH_THRESHOLD, self.NztoImageNzRatio))
Lane.goodLaneDetection = False
else:
Lane.goodLaneDetection = True
##------------------------------------------------------------------------------------------
## Frame Level Quality checks
##------------------------------------------------------------------------------------------
self.frameGoodQuality = True
self.bothLanesPixelRatio = self.LeftLane.pixelRatio[-1] + self.RightLane.pixelRatio[-1]
if self.imgPixelRatio[-1] > self.IMAGE_RATIO_HIGH_THRESHOLD: ## self.IMAGE_RATIO_HIGH_THRESHOLD:
image_conditions.append(20)
msgs.append(' *** (20) imgPixelRatio: ratio of non-zero pixels in image {} > image ratio HIGH threshold {}'.
format(self.imgPixelRatio[-1], self.IMAGE_RATIO_HIGH_THRESHOLD))
self.frameGoodQuality = False
if self.imgPixelRatio[-1] < self.IMAGE_RATIO_LOW_THRESHOLD:
image_conditions.append(21)
msgs.append(' *** (21) imgPixelRatio: ratio of non-zero pixels in image {} < image ratio LOW threshold {}'.
format(self.imgPixelRatio[-1], self.IMAGE_RATIO_LOW_THRESHOLD))
if self.bothLanesPixelRatio < self.IMAGE_RATIO_LOW_THRESHOLD:
image_conditions.append(30)
msgs.append(' *** (30) bothLanesPixelRatio: Left+Right non-zero pixel ratio {} < image ratio LOW threshold {}.'.
format(self.bothLanesPixelRatio, self.IMAGE_RATIO_LOW_THRESHOLD))
# if self.bothLanesPixelRatio > self.IMAGE_RATIO_HIGH_THRESHOLD:
# image_conditions.append(31)
# msgs.append(' *** (31) bothLanesPixelRatio: Left+Right non-zero pixel ratio {} > image ratio HIGH threshold {}.'.
# format(self.bothLanesPixelRatio, self.IMAGE_RATIO_LOW_THRESHOLD))
if (lower_nz_pxl_cnt > 45 ):
image_conditions.append(40)
            msgs.append(' *** (40) Warped image lower 1/3 non-zero pixel count {} > 45 '.format(lower_nz_pxl_cnt))
self.frameGoodQuality = False
if (self.imgWarpedStats['RGB'][-1]> self.HIGH_RGB_THRESHOLD) and (self.imgWarpedStats['Sat'][-1] > self.XHIGH_SAT_THRESHOLD):
image_conditions.append(40)
msgs.append(' *** (40) Warped Image High Mean RGB {} / Mean SAT {} '.
format(self.imgWarpedStats['RGB'][-1], self.imgWarpedStats['Sat'][-1]))
self.frameGoodQuality = False
self.goodLaneDetections = (self.LeftLane.goodLaneDetection and self.RightLane.goodLaneDetection)
self.imgCondHistory.append(image_conditions)
if self.debug:
print(' Image conditions: ', image_conditions)
for msg in msgs:
print(msg)
print()
print(' left Pxl Count: {:7.0f} or right Pxl Count: {:7.0f} - LANE_COUNT_THRESHOLD : {:7.0f} '.
format(self.LeftLane.pixelCount[-1], self.RightLane.pixelCount[-1], self.LANE_COUNT_THRESHOLD))
print(' left Pxl Ratio: {:7.2f} or right Pxl Ratio: {:7.2f} - LANE RATIO LOW THRSHLD: {:7.2f} HIGH THRSHLD {:7.2f}'.
format(self.LeftLane.pixelRatio[-1], self.RightLane.pixelRatio[-1],
self.LANE_RATIO_LOW_THRESHOLD, self.LANE_RATIO_HIGH_THRESHOLD))
print(' Image NZ pixel ratio (imgPixelRatio) : {:7.2f} - IMG RATIO LOW THRSHLD: {:7.2f} HIGH THRSHLD {:7.2f}'.
format(self.imgPixelRatio[-1], self.IMAGE_RATIO_LOW_THRESHOLD, self.IMAGE_RATIO_HIGH_THRESHOLD))
# print(' Left+Right : %{:7.2f} imgPixelRatio: %{:7.2f} '.
# format(self.bothLanesPixelRatio, self.imgPixelRatio[-1] ))
print(' L+R NZ pixel ratio (bothLanesPixelRatio) : {:7.2f} - IMG RATIO LOW THRSHLD: {:7.2f} HIGH THRSHLD {:7.2f}'.
format(self.bothLanesPixelRatio, self.IMAGE_RATIO_LOW_THRESHOLD, self.IMAGE_RATIO_HIGH_THRESHOLD))
print(' imgWarped stats RGB: {:7.2f} SAT: {:7.2f} HIGH_RGB_THRSHLD: {:7.2f} '\
' HIGH_SAT_THRSHLD {:7.2f} EXTRA HIGH_SAT_THRSHLD {:7.2f}'.
format(self.imgWarpedStats['RGB'][-1], self.imgWarpedStats['Sat'][-1],
self.HIGH_RGB_THRESHOLD, self.HIGH_SAT_THRESHOLD, self.XHIGH_SAT_THRESHOLD))
print()
print(' Lane Detections Results - Left: {} Right: {} goodLaneDetections: {} frameGoodQuality: {}'.format(
str(self.LeftLane.goodLaneDetection).upper(), str(self.RightLane.goodLaneDetection).upper(),
str(self.goodLaneDetections).upper() , str(self.frameGoodQuality).upper() ))
##--------------------------------------------------------------------------------------
##
##--------------------------------------------------------------------------------------
def assess_fitted_polynomials(self):
if self.debug:
print()
print('assess_fitted_polynomials()')
print('-'*40)
### Individual lane assessments
for Lane in [self.LeftLane, self.RightLane]:
if (self.slidingWindowBootstrap and self.RoIAdjustment):
                ## Realignment of the perspective transformation window will result in a
                ## high RSE error. We allow this error rate when it is the result of a
                ## RoI realignment; otherwise proceed normally.
Lane.acceptPolynomial = True
Lane.reset_best_fit(debug = self.debug)
msg2 = ' {:5s} lane fitted polynomial - RoIAdjustment performed - Polynomial fit will be accepted \n'.format(Lane.name)
elif (Lane.goodLaneDetection and self.frameGoodQuality):
Lane.acceptPolynomial = True
# Lane.reset_best_fit(debug = self.debug)
msg2 = ' {:5s} lane fitted polynomial - acceptPolynomial: {} (GoodLaneDetection: {} & frameGoodQuality: {})'.format(
Lane.name, Lane.acceptPolynomial, Lane.goodLaneDetection, self.frameGoodQuality)
# elif not (Lane.goodLaneDetection and self.frameGoodQuality):
# Lane.acceptPolynomial = False
# msg2 = ' {:5s} lane fitted polynomial - acceptPolynomial: {} (GoodLaneDetection: {} & frameGoodQuality: {})'.format(
# Lane.name, Lane.acceptPolynomial, Lane.goodLaneDetection, self.frameGoodQuality)
elif Lane.curve_spread_x > (2 * Lane.pixel_spread_x):
Lane.acceptPolynomial = False
msg2 = ' {:5s} lane fitted polynomial x spread {} > 2*PixelSpread {} '.format(
Lane.name, Lane.curve_spread_x, (2 * Lane.pixel_spread_x))
elif not (Lane.goodLaneDetection):
Lane.acceptPolynomial = False
msg2 = ' {:5s} lane fitted polynomial - acceptPolynomial: {} (GoodLaneDetection: {} & frameGoodQuality: {})'.format(
Lane.name, Lane.acceptPolynomial, Lane.goodLaneDetection, self.frameGoodQuality)
else:
Lane.acceptPolynomial = True if (Lane.RSE < Lane.RSE_THRESHOLD) else False
msg2 = ' {:5s} lane fitted polynomial - acceptPolynomial: {}'.format(Lane.name, Lane.acceptPolynomial)
if self.debug :
print(msg2)
### Joint Lane assessments
if (self.LeftLane.acceptPolynomial ^ self.RightLane.acceptPolynomial) and (self.goodLaneDetections):
self.compareLanes()
for Lane in [self.LeftLane, self.RightLane]:
if Lane.acceptPolynomial:
Lane.acceptFittedPolynomial(debug = self.debug, debug2 = self.debug2)
else:
Lane.rejectFittedPolynomial(debug = self.debug, debug2 = self.debug2)
### Frame level actions that need to be taken based on acceptance or rejection of polynomials
self.acceptPolynomials = self.LeftLane.acceptPolynomial and self.RightLane.acceptPolynomial and self.frameGoodQuality
fullReject = not (self.LeftLane.acceptPolynomial or self.RightLane.acceptPolynomial or self.frameGoodQuality)
# red_status = not ((self.LeftLane.acceptPolynomial ^ self.RightLane.acceptPolynomial) ^ self.frameGoodQuality)
# yellow_status = not red_status
if self.acceptPolynomials: ## everything good
self.ttlAcceptedFrames += 1
self.ttlRejectedFramesSinceAccepted = 0
self.ttlAcceptedFramesSinceRejected += 1
self.validLaneDetections = True
self.polyRegionColor1 = 'green'
self.slidingWindowBootstrap = False
acceptCode = 0
elif fullReject: ## everything bad
self.ttlFullReject += 1
self.ttlRejectedFramesSinceAccepted = 0
self.ttlAcceptedFramesSinceRejected = 0
self.slidingWindowBootstrap = False
self.validLaneDetections = False
self.polyRegionColor1 = 'lightgray'
acceptCode = -40
else:
self.ttlRejectedFrames += 1
self.ttlAcceptedFramesSinceRejected = 0
self.ttlRejectedFramesSinceAccepted += 1
self.validLaneDetections = True
# self.slidingWindowBootstrap = True if self.frameGoodQuality else False
            # doesn't work well in YELLOW conditions.
if self.ttlRejectedFramesSinceAccepted < self.YELLOW_DETECTION_LIMIT:
self.slidingWindowBootstrap = False
self.polyRegionColor1 = 'yellow'
acceptCode = -10
else:
#
self.slidingWindowBootstrap = True if self.frameGoodQuality else False
self.polyRegionColor1 = 'red'
if self.ttlRejectedFramesSinceAccepted < self.RED_DETECTION_LIMIT:
acceptCode = -20
else:
# self.polyRegionColor1 = 'lightgray'
acceptCode = -30
self.imgAcceptHistory.append(acceptCode)
### Display debug info
if self.debug:
print()
for lane in [self.LeftLane, self.RightLane]:
if lane.acceptPolynomial:
print('=> {:5s} Lane ACCEPT polynomial - Accepted frames Since Last Rejected: {:4d}'.format(
lane.name, lane.ttlAcceptedFramesSinceRejected))
else:
print('=> {:5s} Lane REJECT polynomial - Rejected frames Since Last Detected: {:4d}'.format(
lane.name, lane.ttlRejectedFramesSinceDetected))
print()
print('=> acceptPolynomials: {} frameGoodQuality: ({})'.format(
str(self.acceptPolynomials).upper(), str(self.frameGoodQuality).upper() ))
print(' slidingWindowBootstrap: {} validLaneDetections: {} acceptCode: {} displayColor: {} '.format(
self.slidingWindowBootstrap, self.validLaneDetections, acceptCode, self.polyRegionColor1 ))
print(' Total Accepted sinceLast Rejected: {:3d} Rejected since Last Accepted: {:3d} \n'.format(
self.ttlAcceptedFramesSinceRejected, self.ttlRejectedFramesSinceAccepted ))
self.debugInfo_DetectedLanes()
##--------------------------------------------------------------------------------------
##
##--------------------------------------------------------------------------------------
def compareLanes(self, **kwargs):
left_ckpts = self.LeftLane.best_linepos if self.LeftLane.acceptPolynomial else self.LeftLane.current_linepos
right_ckpts = self.RightLane.best_linepos if self.RightLane.acceptPolynomial else self.RightLane.current_linepos
diff = right_ckpts - left_ckpts
min_diff = np.round(diff.min(),0)
max_diff = np.round(diff.max(),0)
diff_spread = round(max_diff - min_diff,0)
diff_meters = np.round((np.array(right_ckpts)- np.array(left_ckpts))*self.LeftLane.MX,3)
min_diff_meters = np.round(diff_meters.min(),3)
max_diff_meters = np.round(diff_meters.max(),3)
diff_spread_meters = round(max_diff_meters - min_diff_meters,3)
rejectedLane = self.LeftLane if self.RightLane.acceptPolynomial else self.RightLane
acceptedLane = self.RightLane if self.RightLane.acceptPolynomial else self.LeftLane
print()
print('compareLanes()')
print(' ', self.LeftLane.y_checkpoints)
print(' left_ckpts :', left_ckpts )
print(' right_ckpts :', right_ckpts)
print(' diff (pixels) :', diff , 'Min: ', min_diff, ' Max: ', max_diff, ' spread:', diff_spread)
print(' diff (meters) :', diff_meters , 'Min: ', min_diff_meters, ' Max: ', max_diff_meters, ' spread:', diff_spread_meters)
if diff_spread < self.PARALLEL_LINES_MARGIN:
print()
print(' Spread between accepted lane ({}) and rejected lane ({}) is less than {} pixels - rejected lane will be accepted'.format(
acceptedLane.name, rejectedLane.name, self.PARALLEL_LINES_MARGIN))
print()
rejectedLane.acceptPolynomial = True
rejectedLane.reset_best_fit(debug = self.debug)
return
##--------------------------------------------------------------------------------------
##
##--------------------------------------------------------------------------------------
def build_result_image(self, **kwargs):
disp_start = kwargs.get('start' , self.displayRegionTop)
disp_end = kwargs.get('end' , self.displayRegionBot)
polyRegionColor1 = kwargs.get('polyRegionColor1', 'green')
min_radius = min(self.LeftLane.radius_history[-1], self.RightLane.radius_history[-1])
min_radius_avg = min(self.LeftLane.radius_avg, self.RightLane.radius_avg)
if 100 <= min_radius_avg < 125:
disp_start += 25
elif 125 <= min_radius_avg < 200:
disp_start += 15
elif 200 <= min_radius_avg < 250:
disp_start += 15
elif 250 <= min_radius_avg < 300:
disp_start += 15
elif 300 <= min_radius_avg < 350:
disp_start += 10
elif 350 <= min_radius_avg < 400:
disp_start += 10
elif 400 <= min_radius_avg < 450:
disp_start += 5
elif 450 <= min_radius_avg < 500:
disp_start += 0
        else:                              ## min_radius_avg < 100 or >= 500
disp_start += 0
if self.debug:
print('buildResultImage()')
print('-'*15)
print(' Hist LLane : ', [round(i,3) for i in self.LeftLane.radius_history[-10:]] )
print(' Hist RLane : ', [round(i,3) for i in self.RightLane.radius_history[-10:]])
# 0 print('Radius Diff History (m) : ', ['{:8.3f}'.format(i-j) for i,j in zip(RLane.radius, LLane.radius)])
print(' Avg LLane : [-5:] : {:8.0f} [-10:] : {:8.0f} '.format(self.LeftLane.radius_avg,
np.round(np.mean( self.LeftLane.radius_history[-10:]),3)))
print(' Avg RLane : [-5:] : {:8.0f} [-10:] : {:8.0f} '.format(self.RightLane.radius_avg,
np.round(np.mean(self.RightLane.radius_history[-10:]),3)))
print(' Original disp_start : {:8d} end: {:8d} '.format(self.displayRegionTop, self.displayRegionBot))
print(' Min avg radius: {:8.0f}'.format( min_radius_avg))
print(' Modified disp start : {:8d} end: {:8d}'.format(disp_start, disp_end))
self.curv_msg = curvatureMsg(self.LeftLane , self.RightLane, debug = self.debug2)
self.oc_msg = offCenterMsg(self.LeftLane , self.RightLane, self.camera_x, debug = self.debug2)
thr_msg = '{:5s} - {:22s}'.format(self.Conditions.upper(), self.thresholdMethod)
stat_msg = 'RGB: {:3.0f} Hue:{:3.0f} SAT: {:3.0f}'.format(self.imgWarpedStats['RGB'][-1],
self.imgWarpedStats['Hue'][-1], self.imgWarpedStats['Sat'][-1])
# if self.validLaneDetections:
# pass
# else:
# beta = 0.3
if True:
self.resultImage, self.dyn_src_points_list = displayDetectedRegion(self.imgUndist,
self.LeftLane.fitted_best ,
self.RightLane.fitted_best,
self.Minv,
disp_start = disp_start,
disp_end = disp_end ,
alpha = 0.7,
beta = self.overlayBeta ,
color = self.polyRegionColor1,
frameTitle = self.frameTitle,
debug = self.debug2)
# else:
# self.resultImage = np.copy(self.imgUndist)
displayText(self.resultImage, 40, 40, self.frameTitle, fontHeight = 20)
if self.validLaneDetections:
displayText(self.resultImage, 40, 80, self.curv_msg , fontHeight = 20)
displayText(self.resultImage, 40,120, self.oc_msg , fontHeight = 20)
else:
displayText(self.resultImage, 40, 80, 'Unable to detect lanes' , fontHeight = 20)
displayText(self.resultImage, 850, 40, thr_msg , fontHeight = 20)
displayText(self.resultImage, 850, 80, stat_msg , fontHeight = 20)
# displayGuidelines(self.resultImage, draw = 'y');
return
##--------------------------------------------------------------------------------------
##
##--------------------------------------------------------------------------------------
def adjust_RoI_window(self, **kwargs):
'''
Adjust the perspective transformation source points based on predefined criteria
'''
# min_radius = min(self.LeftLane.radius[-1], self.RightLane.radius[-1])
### Build output image frame
mid_point_pixels = self.LeftLane.line_base_pixels[-1] + (self.RightLane.line_base_pixels[-1] -self.LeftLane.line_base_pixels[-1]) / 2
off_center_pixels = round(self.camera_x - mid_point_pixels,0)
self.offctr_history.append(off_center_pixels)
self.dyn_src_points = np.array(self.dyn_src_points_list, dtype = np.float32)
diffs = [abs(i[0] - j[0]) for i,j in zip(self.src_points_list[:2], self.dyn_src_points_list[:2])]
max_diffs = max(diffs)
self.diffsSrcDynPoints.append(max_diffs)
if self.debug:
# np.set_printoptions(linewidth=195, precision=4, floatmode='fixed', threshold =500, formatter = self.np_format)
print()
print('adjust_RoI_window() - FirstFrame:', self.firstFrame, ' AcceptPolynomial:', self.acceptPolynomials )
print('-'*65)
print(' x_base : Left: {:8.2f} Right: {:8.2f} '.format( self.LeftLane.x_base[-1], self.RightLane.x_base[-1]))
print(' Image Pixel Ratios : Left: {:8.2f} Right: {:8.2f} Total: {:8.2f}'.format(
self.LeftLane.pixelRatio[-1], self.RightLane.pixelRatio[-1], self.imgPixelRatio[-1]))
# print(' Min last radius : {:7.0f}'.format( min_radius))
# print(' Left radius : {:7.2f} History: {} '.format(self.LeftLane.radius[-1], self.LeftLane.radius[-10:]))
# print(' Right radius : {:7.2f} History: {} '.format(self.RightLane.radius[-1], self.RightLane.radius[-10:]))
# print()
print(' off center pixels : {:7.2f} History: {} '.format(off_center_pixels, self.offctr_history[-10:]))
print(' diff(dyn_src, src) : {:7.2f} History: {} '.format(max_diffs, self.diffsSrcDynPoints[-10:]))
print(' Pixel ratio - Left : {:7.2f} History: {} '.format( self.LeftLane.pixelRatio[-1], self.LeftLane.pixelRatio[-10:]))
print(' Pixel ratio - Right : {:7.2f} History: {} '.format(self.RightLane.pixelRatio[-1], self.RightLane.pixelRatio[-10:]))
print(' Pixel ratio - Image : {:7.2f} History: {} '.format(self.imgPixelRatio[-1], self.imgPixelRatio[-10:]))
print()
print(' src_points_list : {} '.format(self.src_points_list))
print(' dyn_src_points_list : {} '.format(self.dyn_src_points_list))
print(' diffs : {} '.format(diffs))
print()
if self.displayRealignment or self.debug:
print(' Perspective transform source points - OffCtr Pxls: {} max source point diff: {} OffCtr Threshold: {} imgPxlRatio: {} acceptCode: {}'.format(
off_center_pixels, max_diffs, self.OFF_CENTER_ROI_THRESHOLD, self.imgPixelRatio[-1], self.imgAcceptHistory[-1]))
###----------------------------------------------------------------------------------------------
        # If the quality of the last image threshold is above 80% and we need to run a bootstrap,
        # set up to do so in the next video frame
###----------------------------------------------------------------------------------------------
if (self.acceptPolynomials) and \
(( max_diffs >= self.OFF_CENTER_ROI_THRESHOLD )) :
# or (self.firstFrame)):
# ( ( max_diffs > self.CURRENT_OFFCTR_ROI_THR ) or (self.firstFrame)):
if self.displayRealignment or self.debug:
print()
print(' Adjust perspective transform source points - OffCtr Pxls: {} max_diffs: {} imgPxlRatio: {} '.format(
off_center_pixels, max_diffs, self.imgPixelRatio[-1]))
print(' ','-'*100)
print(' Cur src_points_list : {} '.format(self.src_points_list))
print()
print(' New src_points_list : {} '.format(self.dyn_src_points_list))
print(' Prev Left x_base : ', self.LeftLane.x_base[-2], ' Right x_base :', self.RightLane.x_base[-2])
print(' New Left x_base : ', self.LeftLane.x_base[-1], ' Right x_base :', self.RightLane.x_base[-1])
print()
self.debugInfo_srcPointsRoI(title= 'source points prior to realignment')
self.debugInfo_newSrcPointsRoI(title= 'new source points after realignment')
self.prev_src_points_list = self.src_points_list
self.src_points_list = self.dyn_src_points_list
self.src_points = np.array(self.dyn_src_points_list, dtype = np.float32)
self.slidingWindowBootstrap = True
self.RoIAdjustment = True
self.imgAdjustHistory.append((len(self.offctr_history), self.offctr_history[-1], self.diffsSrcDynPoints[-1]))
# self.LeftLane.x_base.append (self.dyn_src_points_list[3][0])
# self.RightLane.x_base.append(self.dyn_src_points_list[2][0])
self.LeftLane.x_base.append (self.x_dst_left)
self.RightLane.x_base.append(self.x_dst_right)
# self.LeftLane.next_x_base = self.x_dst_left
# self.RightLane.next_x_base = self.x_dst_right
else:
self.RoIAdjustment = False
return
##--------------------------------------------------------------------------------------
##
##--------------------------------------------------------------------------------------
def build_display_results(self, **kwargs):
debug = kwargs.get('debug', False)
debug2 = kwargs.get('debug2', False)
debug3 = kwargs.get('debug3', False)
debug4 = kwargs.get('debug4', False)
polyRegionColor1 = kwargs.get('color1', 'green')
if debug:
print(' Left lane MR fit : ', self.LeftLane.proposed_fit , ' Right lane MR fit : ', self.RightLane.proposed_fit)
print(' Left lane MR best fit : ', self.LeftLane.best_fit , ' Right lane MR best fit: ', self.RightLane.best_fit)
print(' Left radius @ y = 10 : '+str(self.LeftLane.get_radius(10)) +" m Right radius: "+str(self.RightLane.get_radius(10))+" m")
print(' Left radius @ y = 700 : '+str(self.LeftLane.get_radius(700))+" m Right radius: "+str(self.RightLane.get_radius(700))+" m")
print(' Curvature message : ', self.curv_msg)
print(' Off Center Message : ', self.oc_msg)
result_1, _ = displayDetectedRegion(self.imgUndist, self.LeftLane.proposed_curve, self.RightLane.proposed_curve,
self.Minv, disp_start= self.displayRegionTop , beta = 0.2,
color = self.polyRegionColor1, debug = False)
displayText(result_1, 40, 40, self.frameTitle, fontHeight = 20)
displayText(result_1, 40, 80, self.curv_msg, fontHeight = 20)
displayText(result_1, 40,120, self.oc_msg, fontHeight = 20)
# displayGuidelines(result_1, draw = 'y');
###----------------------------------------------------------------------------------------------
### undistorted color image & perpective transformed image -- With RoI line display
###----------------------------------------------------------------------------------------------
# imgRoI, imgRoIWarped = self.debugInfo_srcPointsRoI(display = False, title= 'Perspec. Tx. source points')
# imgLanePxls = self.visualizeLaneDetection(display = False)
###----------------------------------------------------------------------------------------------
## Certain operations are not performed based on the processing mode selected
        ## Generate images for skipped operations for display purposes
###----------------------------------------------------------------------------------------------
if self.mode == 1:
# print(' Display mode 1')
### results of applying thresholding AFTER warping undistorted image
imgWarped, _, _ = perspectiveTransform(self.imgUndist, self.src_points, self.dst_points, debug = debug4)
thresholdParms = self.ImageThresholds[2][self.Conditions]
output2 = apply_thresholds(self.imgWarped, thresholdParms, debug = debug2)
self.imgWarpedThrshld = output2[self.thresholdMethod]
self.imgThrshldWarped = self.working_image
else:
# print(' Display mode 2')
### results of applying thresholding BEFORE warping undistorted image
thresholdParms = self.ImageThresholds[1][self.Conditions]
output2 = apply_thresholds(self.imgUndist, thresholdParms, debug = debug2)
self.imgThrshld = output2[self.thresholdMethod]
self.imgThrshldWarped, _, _ = perspectiveTransform(self.imgThrshld, self.src_points, self.dst_points, debug = debug4)
self.imgWarpedThrshld = self.working_image
self.resultExtraInfo = PlotDisplay(6,2)
self.resultExtraInfo.addPlot(self.image , title = 'original frame - '+self.frameTitle)
self.resultExtraInfo.addPlot(self.imgUndist , title = 'imgUndist - Undistorted Image')
self.resultExtraInfo.addPlot(self.imgRoI , title = 'imgRoI' )
self.resultExtraInfo.addPlot(self.imgRoIWarped, title = 'imgRoIWarped' )
self.resultExtraInfo.addPlot(self.imgThrshld , title = 'imgThrshld - Thresholded image')
self.resultExtraInfo.addPlot(self.imgWarped , title = 'imgWarped - Warped Image')
self.resultExtraInfo.addPlot(self.imgThrshldWarped, title = 'imgThrshldWarped - Img Thresholded ---> Warped (Mode 1)')
self.resultExtraInfo.addPlot(self.imgWarpedThrshld, title = 'imgWarpedThrshld - Img Warped ---> Thresholded (Mode 2)')
self.resultExtraInfo.addPlot(self.imgLanePxls , title = 'ImgLanePxls (Black: Prev fit, Yellow: New fit, Red: Best Fit)' )
self.resultExtraInfo.addPlot(self.histogram , title = 'Histogram of activated pixels', type = 'plot' )
self.resultExtraInfo.addPlot(result_1 , title = 'result_1 : Using LAST fit')
self.resultExtraInfo.addPlot(self.resultImage , title = 'finalImage : Using BEST fit'+self.frameTitle)
self.resultExtraInfo.closePlot()
return
def displayConfig(self):
"""Display Configuration values."""
ttl = (self.NAME.upper() if self.NAME is not None else '') + " Configuration Parameters:"
print()
print(ttl)
print("-"*len(ttl))
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
def reset(self):
self.slidingWindowBootstrap = True
self.firstFrame = True
self.LeftLane = Line(history = self.HISTORY, height = self.height, y_src_top = self.y_src_top, y_src_bot = self.y_src_bot)
self.RightLane = Line(history = self.HISTORY, height = self.height, y_src_top = self.y_src_top, y_src_bot = self.y_src_bot)
return True
def compute_top_x_disp(self):
top_left_x = ((self.y_src_bot - self.y_src_top) / self.tan_theta) + (self.x_src_center - self.x_bot_disp)
top_x_disp = int(round(self.x_src_center - top_left_x,0))
print('self.x_src_top_left: ',top_left_x, ' top_x_disp: ', top_x_disp)
return top_x_disp
def build_source_RoI_region(self):
self.x_src_center = 640 + self.RoI_x_adj
self.tan_theta = (self.lane_theta * np.pi)/180
self.x_top_disp = self.compute_top_x_disp()
self.x_src_bot_left = self.x_src_center - self.x_bot_disp # = 295)
self.x_src_bot_right = self.x_src_center + self.x_bot_disp # = 1105)
self.x_src_top_left = self.x_src_center - self.x_top_disp # = 600) ## 580 -> 573
self.x_src_top_right = self.x_src_center + self.x_top_disp # = 740)
src_points_list = [ (self.x_src_top_left , self.y_src_top),
(self.x_src_top_right, self.y_src_top),
(self.x_src_bot_right, self.y_src_bot),
(self.x_src_bot_left , self.y_src_bot)]
src_points_array = np.array(src_points_list, dtype = np.float32)
return src_points_list, src_points_array
def build_dest_RoI_region(self):
dst_points_list = [ (self.x_dst_left , self.y_dst_top),
(self.x_dst_right , self.y_dst_top),
(self.x_dst_right , self.y_dst_bot),
(self.x_dst_left , self.y_dst_bot)]
dst_points_array = np.array(dst_points_list, dtype = np.float32)
return dst_points_list, dst_points_array
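    ##--------------------------------------------------------------------------------------
    ## Minimal sketch (an assumption): the perspectiveTransform helper imported from
    ## common.sobel is not shown in this file; with the source/destination point arrays
    ## built above, a standard OpenCV implementation would look roughly like this.
    ##--------------------------------------------------------------------------------------
    def _example_perspective_transform(self, img):
        src = np.array(self.src_points_list, dtype = np.float32)
        dst = np.array(self.dst_points_list, dtype = np.float32)
        M    = cv2.getPerspectiveTransform(src, dst)     # forward: undistorted -> bird's-eye view
        Minv = cv2.getPerspectiveTransform(dst, src)     # inverse: bird's-eye view -> undistorted
        warped = cv2.warpPerspective(img, M, (self.width, self.height), flags = cv2.INTER_LINEAR)
        return warped, M, Minv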
def set_thresholding_parms(self):
'''
        select thresholding parameters based on current image conditions
        currently the warped image's mean RGB and mean saturation are compared against fixed thresholds
        other criteria can be considered
'''
if (self.imgWarpedStats['Sat'][-1] > self.XHIGH_SAT_THRESHOLD) or \
(self.imgWarpedStats['RGB'][-1] > self.HIGH_RGB_THRESHOLD):
self.Conditions = 'xhigh'
historyFlag = 30
elif (self.imgWarpedStats['RGB'][-1] < self.VLOW_RGB_THRESHOLD) :
self.Conditions = 'vlow'
historyFlag = -20
elif (self.imgWarpedStats['RGB'][-1] < self.LOW_RGB_THRESHOLD) :
if (self.imgWarpedStats['Sat'][-1] < self.LOW_SAT_THRESHOLD):
self.Conditions = 'lowsat'
historyFlag = -30
elif (self.imgWarpedStats['Sat'][-1] > self.HIGH_SAT_THRESHOLD):
self.Conditions = 'hisat'
historyFlag = +20
else:
self.Conditions = 'low'
historyFlag = -10
elif (self.imgWarpedStats['RGB'][-1] < self.MED_RGB_THRESHOLD) :
if (self.imgWarpedStats['Sat'][-1] > self.HIGH_SAT_THRESHOLD):
self.Conditions = 'hisat'
historyFlag = 20
# if (self.imgWarpedStats['Sat'][-1] < self.LOW_SAT_THRESHOLD):
# self.Conditions = 'lowsat'
# historyFlag = -30
else:
self.Conditions = 'med'
historyFlag = 0
# elif (self.imgWarpedStats['RGB'][-1] < self.HIGH_RGB_THRESHOLD) :
else:
if (self.imgWarpedStats['Sat'][-1] > self.HIGH_SAT_THRESHOLD):
self.Conditions = 'hisat'
historyFlag = 20
else:
self.Conditions = 'high'
historyFlag = 10
self.imgThrshldHistory.append(historyFlag)
self.thresholdMethod = self.thresholdMethods[self.mode][self.Conditions]
self.thresholdStrs = self.itStr[self.mode][self.Conditions]
self.thresholdParms = self.ImageThresholds[self.mode][self.Conditions]
return
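    ## Worked illustration (an assumption, using the commented-out tuning values shown in
    ## __init__, e.g. HIGH_RGB 180, MED_RGB 180, LOW_RGB 100, VLOW_RGB 35, XHIGH_SAT 120,
    ## HIGH_SAT 65): a warped frame with mean RGB 150 and mean Sat 70 does not trigger the
    ## 'xhigh', 'vlow' or 'low' branches and falls into the "< MED_RGB" branch; since
    ## Sat 70 > HIGH_SAT 65 the frame is classified 'hisat' (historyFlag 20), which selects
    ## self.HISAT_THRESHOLDING and its parameter set for the current mode.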
def initialize_thresholding_parameters(self):
##---------------------------------------------
## Image Thresholding params
##---------------------------------------------
self.ImageThresholds = defaultdict(dict) ## { 1: {} , 2: {} }
self.itStr = defaultdict(dict) ## { 1: {} , 2: {} }
self.thresholdMethods = defaultdict(dict) ## { 1: {} , 2: {} }
self.thresholdMethods[1]['xhigh'] = self.XHIGH_THRESHOLDING
self.thresholdMethods[1]['high'] = self.HIGH_THRESHOLDING
self.thresholdMethods[1]['med'] = self.NORMAL_THRESHOLDING
self.thresholdMethods[1]['low'] = self.LOW_THRESHOLDING
self.thresholdMethods[1]['vlow'] = self.VLOW_THRESHOLDING
self.thresholdMethods[1]['hisat'] = self.HISAT_THRESHOLDING
self.thresholdMethods[1]['lowsat'] = self.LOWSAT_THRESHOLDING
## Normal Light Conditions ------------
self.ImageThresholds[1]['xhigh'] = {
'ksize' : 7 ,
'x_thr' : (30,255) ,
'y_thr' : (70,255) ,
'mag_thr' : (35,255) ,
'dir_thr' : (40,65) ,
'sat_thr' : (110,255) ,
'lvl_thr' : (205, 255),
'rgb_thr' : (205,255) ,
'hue_thr' : None
}
self.ImageThresholds[1]['high'] = {
'ksize' : 7 ,
'x_thr' : (30,255) ,
'y_thr' : (70,255) ,
'mag_thr' : (35,255) ,
'dir_thr' : (40,65) ,
'sat_thr' : (110,255) ,
'lvl_thr' : (205,255),
'rgb_thr' : (205,255) ,
'hue_thr' : None
}
self.ImageThresholds[1]['med'] = {
'ksize' : 7 ,
'x_thr' : (30,255) ,
'y_thr' : (70,255) ,
'mag_thr' : (35,255) ,
'dir_thr' : (40,65) ,
'sat_thr' : (110,255) ,
'lvl_thr' : (205,255),
'rgb_thr' : (205,255) ,
'hue_thr' : None
}
## Dark Light Conditions ------------
self.ImageThresholds[1]['low']= {
'ksize' : 7 ,
'x_thr' : ( 30,255) ,
'y_thr' : ( 30,255) , ## changed from ( 30,255) 2-26-20
'mag_thr' : ( 35,255) ,
'dir_thr' : ( 40, 65) ,
'sat_thr' : (160,255) , ## changed from (110,255) 2-26-20
'lvl_thr' : (205,255) ,
'rgb_thr' : (205,255) ,
'hue_thr' : None
}
## Dark Light Conditions ------------
self.ImageThresholds[1]['vlow']= {
'ksize' : 7 ,
'x_thr' : ( 30,255) ,
'y_thr' : ( 30,255) , ## changed from ( 30,255) 2-26-20
'mag_thr' : ( 35,255) ,
'dir_thr' : ( 40, 65) ,
'sat_thr' : (160,255) , ## changed from (110,255) 2-26-20
'lvl_thr' : (205,255) ,
'rgb_thr' : (205,255) ,
'hue_thr' : None
}
self.ImageThresholds[1]['hisat'] = {
'ksize' : 7 ,
'x_thr' : (30,255) ,
'y_thr' : (70,255) ,
'mag_thr' : (35,255) ,
'dir_thr' : (40,65) ,
'sat_thr' : (110,255) ,
'lvl_thr' : (205, 255),
'rgb_thr' : (205,255) ,
'hue_thr' : None
}
self.ImageThresholds[1]['lowsat']= {
'ksize' : 7 ,
'x_thr' : (45,255) ,
'y_thr' : None ,
'mag_thr' : None , ### (25,250) ,
'dir_thr' : None ,
'sat_thr' : None ,
'lvl_thr' : None ,
'rgb_thr' : None ,
'hue_thr' : ( 15, 50)
}
##------------------------------------
## Warped Image Threshold params
##------------------------------------
self.thresholdMethods[2]['xhigh'] = self.XHIGH_THRESHOLDING
self.thresholdMethods[2]['high'] = self.HIGH_THRESHOLDING
self.thresholdMethods[2]['med'] = self.NORMAL_THRESHOLDING
self.thresholdMethods[2]['low'] = self.LOW_THRESHOLDING
self.thresholdMethods[2]['vlow'] = self.VLOW_THRESHOLDING
self.thresholdMethods[2]['hisat'] = self.HISAT_THRESHOLDING
self.thresholdMethods[2]['lowsat'] = self.LOWSAT_THRESHOLDING
self.ImageThresholds[2]['xhigh'] = {
'ksize' : 7 ,
'x_thr' : (30,255) ,
'y_thr' : (70,255) ,
'mag_thr' : (10,50) ,
'dir_thr' : (0,30) ,
'sat_thr' : (60, 255) , ### (80, 255) ,
'lvl_thr' : (180,255) ,
'rgb_thr' : (180,255) ,
'hue_thr' : None
}
## Normal Light Conditions ------------
self.ImageThresholds[2]['high'] = {
'ksize' : 7 ,
'x_thr' : (30,255) ,
'y_thr' : (70,255) ,
'mag_thr' : (10,50) ,
'dir_thr' : (0,30) ,
'sat_thr' : (60, 255) , ### (80, 255) ,
'lvl_thr' : (180,255) ,
'rgb_thr' : (180,255) ,
'hue_thr' : None
}
self.ImageThresholds[2]['med'] = {
'ksize' : 7 ,
'x_thr' : (30,255) ,
'y_thr' : (70,255) ,
'mag_thr' : (10,50) ,
'dir_thr' : (0,30) ,
'sat_thr' : (60, 255) , ### (80, 255) ,
'lvl_thr' : (180,255) ,
'rgb_thr' : (180,255) ,
'hue_thr' : None
}
## dark conditions--------------
self.ImageThresholds[2]['low'] = {
'ksize' : 7 ,
'x_thr' : (70,255) ,
'y_thr' : (70,255) ,
'mag_thr' : (5, 100) , ### (25,250) ,
'dir_thr' : (0,30) ,
'sat_thr' : (130,255) ,
'lvl_thr' : (200,255) ,
'rgb_thr' : (200,255) ,
'hue_thr' : ( 15, 50)
}
## dark conditions--------------
self.ImageThresholds[2]['vlow'] = {
'ksize' : 7 ,
'x_thr' : (70,255) ,
'y_thr' : (70,255) ,
'mag_thr' : (5, 100) , ### (25,250) ,
'dir_thr' : (0,30) ,
'sat_thr' : (130,255) ,
'lvl_thr' : (200,255) ,
'rgb_thr' : (200,255) ,
'hue_thr' : ( 15, 50)
}
self.ImageThresholds[2]['hisat'] = {
'ksize' : 7 ,
'x_thr' : (30,255) ,
'y_thr' : (70,255) ,
'mag_thr' : (10,50) ,
'dir_thr' : (0,30) ,
'sat_thr' : (60, 255) , ### (80, 255) ,
'lvl_thr' : (180,255) ,
'rgb_thr' : (180,255) ,
'hue_thr' : None
}
self.ImageThresholds[2]['lowsat']= {
'ksize' : 7 ,
'x_thr' : (45,255) ,
'y_thr' : None ,
'mag_thr' : None , ### (25,250) ,
'dir_thr' : None ,
'sat_thr' : None ,
'lvl_thr' : None ,
'rgb_thr' : None ,
'hue_thr' : ( 15, 50)
}
self.thresholds_to_str()
def thresholds_to_str(self, debug = False):
for mode in [1,2]:
for cond in self.ImageThresholds[mode].keys():
if debug:
print(mode , ' Threshold key: ',cond)
self.itStr[mode][cond] = {}
for thr in self.ImageThresholds[mode][cond].keys():
self.itStr[mode][cond][thr] = str(self.ImageThresholds[mode][cond][thr])
if debug:
print(' thr : ', thr, ' ', self.ImageThresholds[mode][cond][thr])
def display_thresholds(self, mode = None):
line_length = 148
line_prefix = ' ' * 2
if mode is None:
mode = [self.mode]
if isinstance(mode, int):
mode = [mode]
print()
thrshlds = ' Thresholds: HIGH RGB: {} MED RGB: {} LOW RGB: {} VLOW RGB: {} XHIGH SAT: {} HIGH SAT: {} LOW SAT: {} '.format(
self.HIGH_RGB_THRESHOLD , self.MED_RGB_THRESHOLD , self.LOW_RGB_THRESHOLD, self.VLOW_RGB_THRESHOLD,
self.XHIGH_SAT_THRESHOLD, self.HIGH_SAT_THRESHOLD, self.LOW_SAT_THRESHOLD)
print( line_prefix, thrshlds.center(148))
print()
for mod in mode:
print(line_prefix, '-' * line_length)
print(line_prefix, '| {:8s} | {:^18s} | {:^16s} | {:^16s} | {:^16s} | {:^16s} || {:^16s} | {:^16s} |'.format('',
'PL[X-High]','PL[High]','PL[Med]','PL[Low]','PL[VLow]','PL[HiSat]','PL[LoSat]'))
print(line_prefix, '| {:8s} | RGB>{:<3d} or SAT>{:<3d} |{:>4d} > RGB > {:<4d} |{:>4d} > RGB > {:<4d} |{:>4d} > RGB > {:<4d} |'\
' RGB < {:<4d} || SAT > {:<4d} | SAT < {:<4d} |'.format('', self.HIGH_RGB_THRESHOLD, self.XHIGH_SAT_THRESHOLD,
self.HIGH_RGB_THRESHOLD, self.MED_RGB_THRESHOLD, self.MED_RGB_THRESHOLD , self.LOW_RGB_THRESHOLD ,
self.LOW_RGB_THRESHOLD , self.VLOW_RGB_THRESHOLD, self.VLOW_RGB_THRESHOLD, self.HIGH_SAT_THRESHOLD, self.LOW_SAT_THRESHOLD))
print(line_prefix, '-' * line_length)
print(line_prefix, '| Mode{:2d} : {:^18s} | {:^16s} | {:^16s} | {:^16s} | {:^16s} || {:^16s} | {:^16s} |'.format(mod,
self.thresholdMethods[mod]['xhigh'], self.thresholdMethods[mod]['high'],
self.thresholdMethods[mod]['med'] , self.thresholdMethods[mod]['low'],
self.thresholdMethods[mod]['vlow'] , self.thresholdMethods[mod]['hisat'], self.thresholdMethods[mod]['lowsat']))
print(line_prefix, '-' * line_length)
for ke in self.ImageThresholds[mod]['xhigh'].keys():
print(line_prefix, '| {:8s} : {:^18s} | {:^16s} | {:^16s} | {:^16s} | {:^16s} || {:^16s} | {:^16s} |'.format(ke,
str(self.ImageThresholds[mod]['xhigh'][ke]) ,
str(self.ImageThresholds[mod]['high'][ke]) ,
str(self.ImageThresholds[mod]['med'][ke]) ,
str(self.ImageThresholds[mod]['low'][ke]) ,
str(self.ImageThresholds[mod]['vlow'][ke]) ,
str(self.ImageThresholds[mod]['hisat'][ke]),
str(self.ImageThresholds[mod]['lowsat'][ke])
))
print(line_prefix, '-' * line_length)
print()
return
##--------------------------------------------------------------------------------------
##
##--------------------------------------------------------------------------------------
def debugInfo_DetectedLanes(self, display = 3, size = (24,9)):
self.prevBestFit = colorLanePixels(self.out_img, self.LeftLane, self.RightLane)
if self.HISTORY > 1:
self.prevBestFit = displayPolynomial(self.prevBestFit, self.LeftLane.fitted_best_history, self.RightLane.fitted_best_history,
iteration = -2, color = 'aqua')
self.prevBestFit = displayPolynomial(self.prevBestFit, self.LeftLane.proposed_curve, self.RightLane.proposed_curve,
iteration = -1, color = 'yellow')
# currentFit = displayPolynomial(prevBestFit, self.LeftLane.proposed_curve, self.RightLane.proposed_curve, iteration = -1, color = 'yellow')
self.imgLanePxls = displayPolynomial(self.prevBestFit, self.LeftLane.fitted_best, self.RightLane.fitted_best, color = 'fuchsia', thickness = 2)
if display:
# print(' y_src_top_left : {} y_src_top_right: {} y_src_bot_left: {} y_src_bot_right: {}'.format(self.dst_points_list))
# self.y_src_top, self.y_src_top, self.y_src_bot, self.y_src_bot))
if display in [1,3]:
print(' x_src_top_left : {} x_src_top_right: {} x_src_bot_left: {} x_src_bot_right: {}'.format(
self.src_points_list[0], self.src_points_list[1],self.src_points_list[3],self.src_points_list[2]))
display_two(self.working_image, self.out_img, size = size, title1 = 'working_image - '+self.frameTitle,
title2 = 'out_img ')
if display in [2,3]:
display_two(self.prevBestFit, self.imgLanePxls, size = size, title1 = 'Prev best fit (Cyan: Prev fit, Yellow: New proposal)' ,
                        title2 = 'ImgLanePxls (Cyan: Prev fit, Yellow: New proposal, Fuchsia: New Best Fit)' )
print()
return
def debugInfo_ImageSummaryInfo(self):
print('Frame: {:4d} - {:.0f} ms - Image RGB: {:3.0f} ({:3.0f},{:3.0f},{:3.0f}) '\
' WARPED RGB: {:3.0f} HLS: {:3.0f} H: {:3.0f} L: {:3.0f} S: {:3.0f}'\
' {:5s} - {:10s}'.format(self.inVideo.currFrameNum, self.inVideo.currPos,
self.imgUndistStats['RGB'][-1],
self.imgUndistStats['Red'][-1], self.imgUndistStats['Grn'][-1], self.imgUndistStats['Blu'][-1],
self.imgWarpedStats['RGB'][-1], self.imgWarpedStats['HLS'][-1],
self.imgWarpedStats['Hue'][-1], self.imgWarpedStats['Lvl'][-1], self.imgWarpedStats['Sat'][-1],
self.Conditions.upper(), self.thresholdMethod))
if self.debug:
print( ' Thresholds: HIGH RGB: {} MED RGB: {} LOW RGB: {} VLOW RGB: {} X-HIGH SAT: {} HIGH SAT: {} LOW SAT: {} '.
format(self.HIGH_RGB_THRESHOLD, self.MED_RGB_THRESHOLD , self.LOW_RGB_THRESHOLD,
self.VLOW_RGB_THRESHOLD, self.XHIGH_SAT_THRESHOLD, self.HIGH_SAT_THRESHOLD, self.LOW_SAT_THRESHOLD))
return
def debugInfo_ImageInfo(self, frame = -1):
print('Frame: {:4.0f} - Mode: {:2d} imgUndist - Avgs RGB: {:6.2f} HLS:{:6.2f} Sat: {:6.2f} Hue: {:6.2f} Lvl: {:6.2f}'\
' -- {:5s} - {:10s}'.format( self.inVideo.currFrameNum, self.mode,
self.imgUndistStats['RGB'][frame], self.imgUndistStats['HLS'][frame],
self.imgUndistStats['Sat'][frame], self.imgUndistStats['Hue'][frame],
self.imgUndistStats['Lvl'][frame], self.Conditions , self.thresholdMethod))
print(' {:22s} imgWarped - Avgs RGB: {:6.2f} HLS:{:6.2f} Sat: {:6.2f} Hue: {:6.2f} Lvl: {:6.2f}'\
' -- {:5s} - {:10s}'.format( '',
self.imgWarpedStats['RGB'][frame], self.imgWarpedStats['HLS'][frame],
self.imgWarpedStats['Sat'][frame], self.imgWarpedStats['Hue'][frame],
self.imgWarpedStats['Lvl'][frame], self.Conditions , self.thresholdMethod))
display_multi(self.inVideo.image, self.imgUndist, self.imgWarped, title3 = 'Warped', grid2 = 'minor')
return
def debugInfo_LaneDetInfo(self):
imgPixelRatio, NztoSrchNzRatio, NztoImageNzRatio, ttlImageNZPixels, ttlLaneNZPixels = self.detStats
print(' NZ pixels - in image : {:8d} search reg: {:8d} '\
' Nz to imgPixel Ratio: %{:5.2f} Nz to SrchRegion Ratio : %{:5.2f} Nz to ImageNz Ratio: %{:5.2f}' .
format(ttlImageNZPixels, ttlLaneNZPixels, imgPixelRatio , NztoSrchNzRatio, NztoImageNzRatio))
print(' Detected Pixel Count L : {:8d} R : {:8d} Detected Pixel Ratio L: %{:5.2f} R: %{:5.2f} '.
format(self.LeftLane.pixelCount[-1], self.RightLane.pixelCount[-1],
self.LeftLane.pixelRatio[-1], self.RightLane.pixelRatio[-1]))
return
def debugInfo_ThresholdedImage(self):
display_two(self.imgThrshld, self.working_image, title1 = self.thresholdMethod +' '+str(np.sum(self.imgThrshld)),
title2 = 'After thresholding - '+str(np.sum(self.working_image)))
return
def debugInfo_srcPointsRoI(self, size = (24,9), title = None ):
print()
print(' x_top_disp : {:<13d} x_src_center : {:<13d} x_bot_disp : {:<4d} '.format(
self.x_top_disp, self.x_src_center, self.x_bot_disp))
print(' x_src_top_left : {:12s} x_src_top_right : {:12s} x_src_bot_left : {:12s} x_src_bot_right : {:12s}'.format(
str(self.src_points_list[0]), str(self.src_points_list[1]), str(self.src_points_list[3]), str(self.src_points_list[2])))
print(' y_src_top_left : {:12s} y_src_top_right : {:12s} y_src_bot_left : {:12s} y_src_bot_right : {:12s}'.format(
str(self.dst_points_list[0]), str(self.dst_points_list[1]), str(self.dst_points_list[3]), str(self.dst_points_list[2])))
display_two(self.imgRoI , self.imgRoIWarped, title1 = title , grid1 = 'major',
title2 = title + ' - after perspective transformation', grid2 = 'major', size = size)
print()
return
def debugInfo_newSrcPointsRoI(self, display = True, size = (24,9), title = None):
imgRoI = displayRoILines(self.imgUndist, self.dyn_src_points_list , color = 'blue', thickness = 2)
imgRoIWarped, _, _ = perspectiveTransform(imgRoI , self.dyn_src_points , self.dst_points)
imgRoIWarped = displayRoILines(imgRoIWarped , self.dst_points_list , thickness = 2, color = 'yellow')
display_two(imgRoI , imgRoIWarped , title1 = title , grid1 = 'major',
title2 = title+' - after perspective transformation ' , grid2 = 'major', size = size)
return imgRoI, imgRoIWarped
def debugInfo_DetectionTransform(self):
self.debugInfo_DetectedLanes(display=0)
# imgWarped, _, _ = perspectiveTransform(imgUnwarped , self.dyn_src_points , self.dst_points)
imgWarped = cv2.warpPerspective(self.imgLanePxls, self.Minv, self.imgLanePxls.shape[1::-1], flags=cv2.INTER_LINEAR)
display_two(self.imgLanePxls, imgWarped, title1 = 'Detection prewarped' , grid1 = 'minor',
title2 = ' Detection - Warped' , grid2 = 'major', size = (24,9))
return
def debugInfo_RoITransforms(self):
self.debugInfo_srcPointsRoI(title= 'source points prior to realignment')
self.debugInfo_newSrcPointsRoI()
return
##--------------------------------------------------------------------------------------
##
##--------------------------------------------------------------------------------------
def debugInfo_displayFittingInfo(self):
np_format = {}
np_format['float'] = lambda x: "%8.2f" % x
np_format['int'] = lambda x: "%8d" % x
np.set_printoptions(linewidth=195, precision=4, floatmode='fixed', threshold =100, formatter = np_format)
print()
print('='*70)
print('Display fitting info for ', self.frameTitle)
print('='*70)
print()
print('Proposed Polynomial left : {} right : {} '.format(self.LeftLane.proposed_fit, self.RightLane.proposed_fit))
print('Best Fit Polynomial left : {} right : {} '.format(self.LeftLane.best_fit, self.RightLane.best_fit))
print('Diff(proposed,best_fit) left : {} right : {} '.format( self.LeftLane.best_fit-self.LeftLane.proposed_fit,
self.RightLane.best_fit-self.RightLane.proposed_fit))
print('RSE(Proposed,best fit): left : {:<30.3f} right : {:<30.3f} '.format(self.LeftLane.RSE ,self.RightLane.RSE ))
print()
# print()
# print('Proposed Polynomial:')
# print('-'*40)
# print('left : {} right : {} '.format(self.LeftLane.proposed_fit, self.RightLane.proposed_fit))
# if len(self.LeftLane.proposed_fit_history) > 1:
print()
print('Best Fit Polynomials:')
print('-'*40)
for idx in range(-1, -min(len(self.LeftLane.best_fit_history), self.HISTORY+1) , -1):
ls, rs = self.LeftLane.best_fit_history[idx], self.RightLane.best_fit_history[idx]
print('left[{:2d}] : {} right[{:2d}] : {} '.format(idx,ls, idx,rs))
# print()
# print('Diff b/w proposed and best_fit polynomial ')
# print('-'*40)
# print('left : {} right : {} '.format( self.LeftLane.best_fit-self.LeftLane.proposed_fit,
# self.RightLane.best_fit-self.RightLane.proposed_fit) )
# print()
# print('Proposed RSE with best fit - self.LeftLane: {} RLane : {} '.format(self.LeftLane.RSE ,self.RightLane.RSE ))
# print()
#
# print('Best RSE Hist LLane : ', self.LeftLane.RSE_history[-15:])
# print('Best RSE Hist RLane : ', self.RightLane.RSE_history[-15:])
# print('Best fit RSE Hist LeftLane : ', ['{:8.3f}'.format(i) for i in self.LeftLane.RSE_history])
# print('Best fit RSE Hist RightLane : ', ['{:8.3f}'.format(i) for i in self.RightLane.RSE_history])
print()
print('-'*40)
print('Previously proposed Polynomials:')
print('-'*40)
for idx in range(-1, -min(len(self.LeftLane.proposed_fit_history), self.HISTORY+1) , -1):
ls, rs = self.LeftLane.proposed_fit_history[idx], self.RightLane.proposed_fit_history[idx]
print('left[{:2d}] : {} right[{:2d}] : {} '.format(idx,ls, idx,rs))
print()
print('RSE History - Left : ', self.LeftLane.RSE_history[-15:])
print('RSE History - Right : ', self.RightLane.RSE_history[-15:])
# print('fit RSE Hist LeftLane : ', self.LeftLane.RSE_history[-15:])
# print('fit RSE Hist RightLane : ', self.RightLane.RSE_history[-15:])
# print('fit RSE Hist RightLane : ', ['{:8.3f}'.format(i) for i in self.RightLane.RSE_history])
# print('fit RSE Hist LeftLane : ', ['{:8.3f}'.format(i) for i in self.LeftLane.RSE_history])
###--------------------------------------------------------------------------------------
### Radius of Curvature
###--------------------------------------------------------------------------------------
print()
print('-'*40)
print('Lane Radius from proposed fit:')
print('-'*40)
ls = self.LeftLane.current_radius
rs = self.RightLane.current_radius
diff = np.round(np.array(rs)- np.array(ls),3)
avg = np.round((np.array(rs) + np.array(ls))/2,3)
print(' Y : ', self.LeftLane.y_checkpoints)
print('left : ',ls , ' Avg:', np.round(
|
np.mean(ls)
|
numpy.mean
|
import os.path as op
import itertools
from operator import itemgetter
import multiprocessing
from functools import partial
import time
from matplotlib import pyplot as plt
import numpy as np
from numpy import pi
import scipy.signal as signal
import scipy.stats as stats
import mne
import mne.minimum_norm as minnorm
from tools_general import *
from tools_signal import *
from tools_meeg import *
from tools_source_space import *
from tools_connectivity import *
from tools_multivariate import *
from tools_connectivity_plot import *
from tools_lemon_dataset import *
from tools_harmonic_removal import *
from tools_psd_peak import *
# directories and settings -----------------------------------------------------
subject = 'fsaverage'
condition = 'EC'
_oct = '6'
inv_method = 'eLORETA'
subjects_dir = '/data/pt_02076/mne_data/MNE-fsaverage-data/'
raw_set_dir = '/data/pt_nro109/EEG_LEMON/BIDS_IDS/EEG_Preprocessed_BIDS/'
meta_file_path = '/data/pt_02076/LEMON/INFO/META_File_IDs_Age_Gender_Education_Drug_Smoke_SKID_LEMON.csv'
error_file = '/data/pt_02076/LEMON/log_files/connectivities_2022_3.txt'
path_peaks = '/data/pt_02076/LEMON/Products/peaks/EC/find_peaks/'
save_dir_graphs = '/data/pt_02076/LEMON/lemon_processed_data/networks_coh_peak_detection_no_perm/'
path_save_error_peaks = '/data/pt_02076/LEMON/Code_Outputs/error_peaks_alpha'
# subjects_dir = '/NOBACKUP/mne_data/'
# raw_set_dir = '/NOBACKUP/Data/lemon/LEMON_prep/'
# meta_file_path = '/NOBACKUP/Data/lemon/behaviour/META_File_IDs_Age_Gender_Education_Drug_Smoke_SKID_LEMON.csv'
# path_save = '/NOBACKUP/HarmoRemo_Paper/code_outputs/svd-broad-narrow/'
src_dir = op.join(subjects_dir, subject, 'bem', subject + '-oct' + _oct + '-src.fif')
fwd_dir = op.join(subjects_dir, subject, 'bem', subject + '-oct' + _oct + '-fwd.fif')
# -----------------------------------------------------
# read the parcellation
# -----------------------------------------------------
# parcellation = dict(name='aparc', abb='DK') # Desikan-Killiany
parcellation = dict(name='Schaefer2018_100Parcels_7Networks_order', abb='Schaefer100')
labels = mne.read_labels_from_annot(subject, subjects_dir=subjects_dir, parc=parcellation['name'])
labels = labels[:-2]
labels_sorted, idx_sorted = rearrange_labels(labels, order='anterior_posterior') # rearrange labels
n_parc = len(labels)
n_parc_range_prod = list(itertools.product(np.arange(n_parc), np.arange(n_parc)))
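# (Added note) n_parc_range_prod enumerates every ordered parcel pair (i, j); it can
# be used to fill an n_parc x n_parc connectivity matrix one entry at a time.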
# -----------------------------------------------------
# settings
# -----------------------------------------------------
sfreq = 250
iir_params = dict(order=2, ftype='butter')
# -----------------------------------------
# the head
# -----------------------------------------
# read forward solution ---------------------------------------------------
fwd = mne.read_forward_solution(fwd_dir)
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True)
leadfield = fwd_fixed['sol']['data']
src = fwd_fixed['src']
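# (Added note) with force_fixed=True the forward model keeps one fixed-orientation
# source per vertex, so `leadfield` is an (n_channels, n_sources) gain matrix.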
# -----------------------------------------------------
# read raw from set
# ---------------------------------------------------
ids1 = select_subjects('young', 'male', 'right', meta_file_path)
IDs = listdir_restricted(raw_set_dir, '_EC.set')
IDs = [id[:-7] for id in IDs]
IDs = np.sort(np.intersect1d(IDs, ids1))
# IDs_error = ['sub-010056', 'sub-010070', 'sub-010207', 'sub-010218', 'sub-010238', 'sub-010304', 'sub-010308', 'sub-010314', 'sub-010241']
dict_alphapeaks = load_pickle(path_save_error_peaks)
IDs_error = list(dict_alphapeaks.keys())
tstart = time.time()
for i_subj, subj in enumerate(IDs[:30]):
try:
print(' ******** subject %d/%d ************' % (i_subj + 1, len(IDs)))
# raw_name = op.join(raw_set_dir, subj + '-EC-pruned with ICA.set')
raw_name = op.join(raw_set_dir, subj + '_EC.set')
raw = read_eeglab_standard_chanloc(raw_name) # , bads=['VEOG']
assert (sfreq == raw.info['sfreq'])
raw_data = raw.get_data()
raw_info = raw.info
clab = raw_info['ch_names']
n_chan = len(clab)
inv_op = inverse_operator(raw_data.shape, fwd, raw_info)
if subj in IDs_error:
peak_alpha = dict_alphapeaks[subj]
else:
peaks_file = op.join(path_peaks, subj + '-peaks.npz')
peaks =
|
np.load(peaks_file)
|
numpy.load
|
import numpy as np
import pytest
from pandas import Index, date_range
import pandas._testing as tm
from pandas.core.reshape.util import cartesian_product
class TestCartesianProduct:
def test_simple(self):
x, y = list("ABC"), [1, 22]
result1, result2 = cartesian_product([x, y])
expected1 = np.array(["A", "A", "B", "B", "C", "C"])
expected2 = np.array([1, 22, 1, 22, 1, 22])
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range("2000-01-01", periods=2)
result1, result2 = [Index(y).day for y in cartesian_product([x, x])]
expected1 = Index([1, 1, 2, 2])
expected2 = Index([1, 2, 1, 2])
tm.assert_index_equal(result1, expected1)
tm.assert_index_equal(result2, expected2)
def test_tzaware_retained(self):
x = date_range("2000-01-01", periods=2, tz="US/Pacific")
y = np.array([3, 4])
result1, result2 = cartesian_product([x, y])
expected = x.repeat(2)
tm.assert_index_equal(result1, expected)
def test_tzaware_retained_categorical(self):
x = date_range("2000-01-01", periods=2, tz="US/Pacific").astype("category")
y = np.array([3, 4])
result1, result2 = cartesian_product([x, y])
expected = x.repeat(2)
tm.assert_index_equal(result1, expected)
def test_empty(self):
# product of empty factors
X = [[], [0, 1], []]
Y = [[], [], ["a", "b", "c"]]
for x, y in zip(X, Y):
expected1 = np.array([], dtype=np.asarray(x).dtype)
expected2 = np.array([], dtype=
|
np.asarray(y)
|
numpy.asarray
|
#
# Author: <NAME>, 2002
#
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.lib.six import xrange
from numpy import pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt, \
where, mgrid, cos, sin, exp, place, seterr, issubdtype, extract, \
less, vectorize, inexact, nan, zeros, sometrue, atleast_1d
from ._ufuncs import ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta, \
hankel1, hankel2, yv, kv, gammaln, ndtri, errprint, poch, binom
from . import _ufuncs
import types
from . import specfun
from . import orthogonal
import warnings
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk', 'erf_zeros',
'erfcinv', 'erfinv', 'errprint', 'euler', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_harm', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta',
'SpecialFunctionWarning']
class SpecialFunctionWarning(Warning):
pass
warnings.simplefilter("always", category=SpecialFunctionWarning)
def sinc(x):
"""Returns sin(pi*x)/(pi*x) at all points of array x.
"""
x = asarray(x)
w = pi * x
# w might contain 0, and so temporarily turn off warnings
# while calculating sin(w)/w.
old_settings = seterr(all='ignore')
s = sin(w) / w
seterr(**old_settings)
return where(x == 0, 1.0, s)
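# (Added note) this matches numpy.sinc, which uses the same normalized definition
# sin(pi*x)/(pi*x) and likewise returns 1 at x == 0.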
def diric(x,n):
"""Returns the periodic sinc function, also called the Dirichlet function:
    diric(x, n) = sin(n*x/2) / (n*sin(x/2)),
where n is a positive integer.
"""
x,n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape,ytype)
mask1 = (n <= 0) | (n != floor(n))
place(y,mask1,nan)
z = asarray(x / 2.0 / pi)
mask2 = (1-mask1) & (z == floor(z))
zsub = extract(mask2,z)
nsub = extract(mask2,n)
place(y,mask2,pow(-1,zsub*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask,x)
nsub = extract(mask,n)
place(y,mask,sin(nsub*xsub/2.0)/(nsub*sin(xsub/2.0)))
return y
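# (Added note) diric(x, n) is the Dirichlet kernel: it equals 1 at x = 0 and is
# periodic with period 2*pi for odd n and 4*pi for even n.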
def jnjnp_zeros(nt):
"""Compute nt (<=1200) zeros of the Bessel functions Jn and Jn'
    and arrange them in order of their magnitudes.
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n,m,t,zo = specfun.jdzo(nt)
return zo[1:nt+1],n[:nt],m[:nt],t[:nt]
def jnyn_zeros(n,nt):
"""Compute nt zeros of the Bessel functions Jn(x), Jn'(x), Yn(x), and
Yn'(x), respectively. Returns 4 arrays of length nt.
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n),nt)
def jn_zeros(n,nt):
"""Compute nt zeros of the Bessel function Jn(x).
"""
return jnyn_zeros(n,nt)[0]
def jnp_zeros(n,nt):
"""Compute nt zeros of the Bessel function Jn'(x).
"""
return jnyn_zeros(n,nt)[1]
def yn_zeros(n,nt):
"""Compute nt zeros of the Bessel function Yn(x).
"""
return jnyn_zeros(n,nt)[2]
def ynp_zeros(n,nt):
"""Compute nt zeros of the Bessel function Yn'(x).
"""
return jnyn_zeros(n,nt)[3]
def y0_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y0(z), z0, and the value
of Y0'(z0) = -Y1(z0) at each zero.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def y1_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y1(z), z1, and the value
of Y1'(z1) = Y0(z1) at each zero.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def y1p_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y1'(z), z1', and the value
of Y1(z1') at each zero.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v,z) = J(v,z), Y(v,z), H1(v,z), H2(v,z), phase = -1
# L(v,z) = I(v,z) or exp(v*pi*i)K(v,z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
p = phase * (p * (n-i+1)) / i # = choose(k, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
def jvp(v,z,n=1):
"""Return the nth derivative of Jv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v,z)
else:
return bessel_diff_formula(v, z, n, jv, -1)
# return (jvp(v-1,z,n-1) - jvp(v+1,z,n-1))/2.0
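# --- Illustrative check (added sketch, not part of the original module) ------------
# For n=1 the difference formula reproduces the classical identity
# d/dz J0(z) = -J1(z), which gives a quick sanity check of jvp:
def _demo_jvp_identity(z=1.5):
    return np.allclose(jvp(0, z), -jv(1, z))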
def yvp(v,z,n=1):
"""Return the nth derivative of Yv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v,z)
else:
return bessel_diff_formula(v, z, n, yv, -1)
# return (yvp(v-1,z,n-1) - yvp(v+1,z,n-1))/2.0
def kvp(v,z,n=1):
"""Return the nth derivative of Kv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v,z)
else:
return (-1)**n * bessel_diff_formula(v, z, n, kv, 1)
def ivp(v,z,n=1):
"""Return the nth derivative of Iv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v,z)
else:
return bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v,z,n=1):
"""Return the nth derivative of H1v(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v,z)
else:
return bessel_diff_formula(v, z, n, hankel1, -1)
# return (h1vp(v-1,z,n-1) - h1vp(v+1,z,n-1))/2.0
def h2vp(v,z,n=1):
"""Return the nth derivative of H2v(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v,z)
else:
return bessel_diff_formula(v, z, n, hankel2, -1)
# return (h2vp(v-1,z,n-1) - h2vp(v+1,z,n-1))/2.0
def sph_jn(n,z):
"""Compute the spherical Bessel function jn(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,jn,jnp = specfun.sphj(n1,z)
return jn[:(n+1)], jnp[:(n+1)]
def sph_yn(n,z):
"""Compute the spherical Bessel function yn(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z,0):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,yn,ynp = specfun.sphy(n1,z)
return yn[:(n+1)], ynp[:(n+1)]
def sph_jnyn(n,z):
"""Compute the spherical Bessel functions, jn(z) and yn(z) and their
derivatives for all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z,0):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,yn,ynp = specfun.sphy(n1,z)
nm,jn,jnp = specfun.sphj(n1,z)
return jn[:(n+1)],jnp[:(n+1)],yn[:(n+1)],ynp[:(n+1)]
def sph_in(n,z):
"""Compute the spherical Bessel function in(z) and its derivative for
all orders up to and including n.
"""
if not (
|
isscalar(n)
|
numpy.isscalar
|
r"""
Estimate entropy after a fit.
The :func:`gmm_entropy` function computes the entropy from a Gaussian mixture
model. This provides a reasonable estimate even for non-Gaussian distributions.
This is the recommended method for estimating the entropy of a sample.
The :func:`cov_entropy` method computes the entropy associated with the
covariance matrix. This covariance matrix can be estimated during the
fitting procedure (BFGS updates an estimate of the Hessian matrix for example),
or computed by estimating derivatives when the fit is complete.
The :class:`MVNEntropy` class estimates the covariance from an MCMC sample and
uses this covariance to estimate the entropy. This gives a better
estimate of the entropy than the equivalent direct calculation, which requires
many more samples for a good kernel density estimate. The *reject_normal*
attribute is *True* if the MCMC sample is significantly different from normal.
Unfortunately, this is almost always the case for any reasonable sample size that
isn't strictly Gaussian.
The :func:`entropy` function computes the entropy directly from a set
of MCMC samples, normalized by a scale factor computed from the kernel density
estimate at a subset of the points.\ [#Kramer]_
There are many other entropy calculations implemented within this file, as
well as a number of sampling distributions for which the true entropy is known.
Furthermore, entropy was computed against dream output and checked for
consistency. None of the methods is truly excellent in terms of minimum
sample size, maximum dimensions and speed, but many of them are pretty
good.
The following is an informal summary of the results from different algorithms
applied to DREAM output::
from .entropy import Timer as T
# Try MVN ... only good for normal distributions, but very fast
with T(): M = entropy.MVNEntropy(drawn.points)
print("Entropy from MVN: %s"%str(M))
# Try wnn ... no good.
with T(): S_wnn, Serr_wnn = entropy.wnn_entropy(drawn.points, n_est=20000)
print("Entropy from wnn: %s"%str(S_wnn))
# Try wnn with bootstrap ... still no good.
with T(): S_wnn, Serr_wnn = entropy.wnn_bootstrap(drawn.points)
print("Entropy from wnn bootstrap: %s"%str(S_wnn))
# Try wnn entropy with thinning ... still no good.
#drawn = self.draw(portion=portion, vars=vars,
# selection=selection, thin=10)
with T(): S_wnn, Serr_wnn = entropy.wnn_entropy(points)
print("Entropy from wnn: %s"%str(S_wnn))
# Try wnn with gmm ... still no good
with T(): S_wnn, Serr_wnn = entropy.wnn_entropy(drawn.points, n_est=20000, gmm=20)
print("Entropy from wnn with gmm: %s"%str(S_wnn))
# Try pure gmm ... pretty good
with T(): S_gmm, Serr_gmm = entropy.gmm_entropy(drawn.points, n_est=10000)
print("Entropy from gmm: %s"%str(S_gmm))
# Try kde from statsmodels ... pretty good
with T(): S_kde_stats = entropy.kde_entropy_statsmodels(drawn.points, n_est=10000)
print("Entropy from kde statsmodels: %s"%str(S_kde_stats))
# Try kde from sklearn ... pretty good
with T(): S_kde = entropy.kde_entropy_sklearn(drawn.points, n_est=10000)
print("Entropy from kde sklearn: %s"%str(S_kde))
# Try kde from sklearn at points from gmm ... pretty good
with T(): S_kde_gmm = entropy.kde_entropy_sklearn_gmm(drawn.points, n_est=10000)
print("Entropy from kde+gmm: %s"%str(S_kde_gmm))
# Try Kramer ... pretty good, but doesn't support marginal entropy
with T(): S, Serr = entropy.entropy(drawn.points, drawn.logp, N_entropy=n_est)
print("Entropy from Kramer: %s"%str(S))
.. [#Kramer]
<NAME>., <NAME>., <NAME>., <NAME>., 2010.
Computation of the posterior entropy in a Bayesian framework
for parameter estimation in biological networks,
in: 2010 IEEE International Conference on Control Applications (CCA).
Presented at the 2010 IEEE International Conference on
Control Applications (CCA), pp. 493-498.
doi:10.1109/CCA.2010.5611198
.. [#Turjillo-Ortiz]
<NAME>. and <NAME>. (2003). Mskekur: Mardia's
multivariate skewness and kurtosis coefficients and its hypotheses
testing. A MATLAB file. [WWW document].
`<http://www.mathworks.com/matlabcentral/fileexchange/loadFile.do?objectId=3519>`_
.. [#Mardia1970]
    <NAME>. (1970), Measures of multivariate skewness and kurtosis with
applications. Biometrika, 57(3):519-530.
.. [#Mardia1974]
<NAME>. (1974), Applications of some measures of multivariate skewness
    and kurtosis for testing normality and robustness studies. Sankhya A,
    36:115-128.
.. [#Stevens]
<NAME>. (1992), Applied Multivariate Statistics for Social Sciences.
2nd. ed. New-Jersey:Lawrance Erlbaum Associates Publishers. pp. 247-248.
"""
from __future__ import division, print_function
__all__ = ["entropy", "gmm_entropy", "cov_entropy", "wnn_entropy", "MVNEntropy"]
import numpy as np
from numpy import mean, std, exp, log, sqrt, log2, pi, e, nan
from numpy.random import permutation, choice
from scipy import stats
from scipy.stats import norm, chi2
from scipy.special import gammaln, digamma
LN2 = log(2)
def standardize(x):
"""
Standardize the points by removing the mean and scaling by the standard
deviation.
"""
# TODO: check if it is better to multiply by inverse covariance
# That would serve to unrotate and unscale the dimensions together,
# but squishing them down individually might be just as good.
# compute zscores for the each variable independently
mu, sigma = mean(x, axis=0), std(x, axis=0, ddof=1)
# Protect against NaN when sigma is zero. If sigma is zero
# then all points are equal, so x == mu and z-score is zero.
return (x - mu)/(sigma + (sigma==0.)), mu, sigma
def kde_entropy_statsmodels(points, n_est=None):
"""
Use statsmodels KDEMultivariate pdf to estimate entropy.
Density evaluated at sample points.
Slow and fails for bimodal, dirichlet; poor for high dimensional MVN.
"""
from statsmodels.nonparametric.kernel_density import KDEMultivariate
n, d = points.shape
# Default to the full set
if n_est is None:
n_est = n
# reduce size of draw to n_est
if n_est >= n:
x = points
else:
x = points[permutation(n)[:n_est]]
n = n_est
predictor = KDEMultivariate(data=x, var_type='c'*d)
p = predictor.pdf()
H = -np.mean(log(p))
return H / LN2
def kde_entropy_sklearn(points, n_est=None):
"""
    Use sklearn.neighbors.KernelDensity pdf to estimate entropy.
Data is standardized before analysis.
Sample points drawn from the kernel density estimate.
Fails for bimodal and dirichlet, similar to statsmodels kde.
"""
n, d = points.shape
# Default to the full set
if n_est is None:
n_est = n
# reduce size of draw to n_est
if n_est >= n:
x = points
else:
x = points[permutation(n)[:n_est]]
n = n_est
#logp = sklearn_log_density(points, evaluation_points=n_est)
logp = sklearn_log_density(x, evaluation_points=x)
H = -np.mean(logp)
return H / LN2
def kde_entropy_sklearn_gmm(points, n_est=None, n_components=None):
"""
    Use sklearn.neighbors.KernelDensity pdf to estimate entropy.
Data is standardized before kde.
Sample points drawn from gaussian mixture model from original points.
Fails for bimodal and dirichlet, similar to statsmodels kde.
"""
from sklearn.mixture import BayesianGaussianMixture as GMM
n, d = points.shape
# Default to the full set
if n_est is None:
n_est = n
# reduce size of draw to n_est
if n_est >= n:
x = points
else:
x = points[permutation(n)[:n_est]]
n = n_est
if n_components is None:
n_components = int(5*sqrt(d))
predictor = GMM(n_components=n_components, covariance_type='full',
#verbose=True,
max_iter=1000)
predictor.fit(x)
evaluation_points, _ = predictor.sample(n_est)
logp = sklearn_log_density(x, evaluation_points=evaluation_points)
H = -np.mean(logp)
return H / LN2
def gmm_entropy(points, n_est=None, n_components=None):
r"""
Use sklearn.mixture.BayesianGaussianMixture to estimate entropy.
*points* are the data points in the sample.
    *n_est* is the number of points to use in the estimation; default is
10,000 points, or 0 for all the points.
    *n_components* is the number of Gaussians in the mixture. Default is
$5 \sqrt{d}$ where $d$ is the number of dimensions.
Returns estimated entropy and uncertainty in the estimate.
This method uses BayesianGaussianMixture from scikit-learn to build a
model of the point distribution, then uses Monte Carlo sampling to
determine the entropy of that distribution. The entropy uncertainty is
computed from the variance in the MC sample scaled by the number of
samples. This does not incorporate any uncertainty in the sampling that
generated the point distribution or the uncertainty in the GMM used to
model that distribution.
"""
#from sklearn.mixture import GaussianMixture as GMM
from sklearn.mixture import BayesianGaussianMixture as GMM
n, d = points.shape
# Default to the full set
if n_est is None:
n_est = 10000
elif n_est == 0:
n_est = n
# reduce size of draw to n_est
if n_est >= n:
x = points
n_est = n
else:
x = points[permutation(n)[:n_est]]
n = n_est
if n_components is None:
n_components = int(5*sqrt(d))
## Standardization doesn't seem to help
## Note: sigma may be zero
#x, mu, sigma = standardize(x) # if standardized
predictor = GMM(n_components=n_components, covariance_type='full',
#verbose=True,
max_iter=1000)
predictor.fit(x)
eval_x, _ = predictor.sample(n_est)
weight_x = predictor.score_samples(eval_x)
H = -np.mean(weight_x)
#with np.errstate(divide='ignore'): H = H + np.sum(np.log(sigma)) # if standardized
dH = np.std(weight_x, ddof=1) / sqrt(n)
## cross-check against own calcs
#alt = GaussianMixture(predictor.weights_, mu=predictor.means_, sigma=predictor.covariances_)
#print("alt", H, alt.entropy())
#print(np.vstack((weight_x[:10], alt.logpdf(eval_x[:10]))).T)
return H / LN2, dH / LN2
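# --- Illustrative check (added sketch, not part of the original module) ------------
# For a d-dimensional standard normal the true entropy is d/2 * log2(2*pi*e) bits,
# which gives a quick sanity check of the GMM estimate.
def _demo_gmm_entropy(n=10000, d=3, seed=0):
    rng = np.random.RandomState(seed)
    points = rng.randn(n, d)
    H_est, dH = gmm_entropy(points, n_est=5000)
    H_true = 0.5 * d * log2(2 * pi * e)
    print("gmm_entropy: %.3f +/- %.3f bits (analytic %.3f bits)" % (H_est, dH, H_true))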
def wnn_bootstrap(points, k=None, weights=True, n_est=None, reps=10, parts=10):
#raise NotImplementedError("deprecated; bootstrap doesn't help.")
n, d = points.shape
if n_est is None:
n_est = n//parts
results = [wnn_entropy(points, k=k, weights=weights, n_est=n_est)
for _ in range(reps)]
#print(results)
S, Serr = list(zip(*results))
return np.mean(S),
|
np.std(S)
|
numpy.std
|
import numpy as np
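# (Added note) preproccese() below maps one raw record's string fields to the numeric
# encoding used downstream (the fields resemble bank-marketing data): job (index 1),
# education (3), day of month (9) and month (10) get fixed codes/rates, yes/no flags
# at indices 4, 6 and 7 become 0/1, and marital status (index 2) is appended as
# extra features.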
def preproccese(x):
int_feature = []
for i in x:
i = i.replace("\t", "")
int_feature.append(i)
int_feature = np.transpose(int_feature)
for i in int_feature:
if int_feature[1] == "admin":
int_feature[1] = 8
if int_feature[1] == "blue-collar":
int_feature[1] = 1
if int_feature[1] == "entrepreneur":
int_feature[1] = 3
if int_feature[1] == "housemaid":
int_feature[1] = 2
if int_feature[1] == "management":
int_feature[1] = 9
if int_feature[1] == "mgmt":
int_feature[1] = 0
if int_feature[1] == "retired":
int_feature[1] = 11
if int_feature[1] == "self-employed":
int_feature[1] = 7
if int_feature[1] == "services":
int_feature[1] = 4
if int_feature[1] == "student":
int_feature[1] = 12
if int_feature[1] == "technician":
int_feature[1] = 6
if int_feature[1] == "unemployed":
int_feature[1] = 10
if int_feature[1] == "unknown":
int_feature[1] = 5
else :
pass
for i in int_feature:
if int_feature[3] == "primary":
int_feature[3] = 0.1346548188653452
if int_feature[3] == "secondary":
int_feature[3] = 0.1769591910436981
if int_feature[3] == "tertiary":
int_feature[3] = 0.26437086092715234
if int_feature[3] == "unknown":
int_feature[3] = 0.21070234113712374
for i in int_feature:
if int_feature[10] == "apr":
int_feature[10] = 1.0
if int_feature[10] == "aug":
int_feature[10] = 0.15
if int_feature[10] == "dec":
int_feature[10] = 1.0
if int_feature[10] == "feb":
int_feature[10] = 1.0
if int_feature[10] == "jan":
int_feature[10] = 1.0
if int_feature[10] == "jul":
int_feature[10] = 0.0946
if int_feature[10] == "jun":
int_feature[10] = 0.11
if int_feature[10] == "mar":
int_feature[10] = 1.0
if int_feature[10] == "may":
int_feature[10] = 0.10
if int_feature[10] == "nov":
int_feature[10] = 0.1
if int_feature[10] == "oct":
int_feature[10] = 1
if int_feature[10] == "sep":
int_feature[10] = 1.0
for i in int_feature:
if int_feature[9] == "1":
int_feature[9] = 0.64
if int_feature[9] == "2":
int_feature[9] = 0.31
if int_feature[9] == "3":
int_feature[9] = 0.29
if int_feature[9] == "4":
int_feature[9] = 0.31
if int_feature[9] == "5":
int_feature[9] = 0.19
if int_feature[9] == "6":
int_feature[9] = 0.151
if int_feature[9] == "7":
int_feature[9] = 0.13
if int_feature[9] == "8":
int_feature[9] = 1.58
if int_feature[9] == "9":
int_feature[9] = .14
if int_feature[9] == "10":
int_feature[9] = 0.29
if int_feature[9] == "11":
int_feature[9] = 0.20
if int_feature[9] == "12":
int_feature[9] = 0.25
if int_feature[9] == "13":
int_feature[9] = 0.27
if int_feature[9] == "14":
int_feature[9] = 0.18
if int_feature[9] == "15":
int_feature[9] = 0.25
if int_feature[9] == "16":
int_feature[9] = 0.18
if int_feature[9] == "17":
int_feature[9] = 0.22
if int_feature[9] == "18":
int_feature[9] = 0.0946
if int_feature[9] == "19":
int_feature[9] = 0.11
if int_feature[9] == "20":
int_feature[9] = 0.13
if int_feature[9] == "21":
int_feature[9] = 0.16
if int_feature[9] == "22":
int_feature[9] = 0.19
if int_feature[9] == "23":
int_feature[9] = 0.14
if int_feature[9] == "24":
int_feature[9] = 0.16
if int_feature[9] == "25":
int_feature[9] = 0.23
if int_feature[9] == "26":
int_feature[9] = 0.20
if int_feature[9] == "27":
int_feature[9] = 0.22
if int_feature[9] == "28":
int_feature[9] = 0.146
if int_feature[9] == "29":
int_feature[9] = 0.144
if int_feature[9] == "30":
int_feature[9] = 0.25
if int_feature[9] == "31":
int_feature[9] =0.761
int_feature[-4] = 1.0
int_feature[-3] = 1.0
int_feature[-5] = 0.0
for i in int_feature:
if int_feature[6] == "yes":
int_feature[6] = 1
if int_feature[6] == "no":
int_feature[6] = 0
for i in int_feature:
if int_feature[4] == "yes":
int_feature[4] = 1
if int_feature[4] == "no":
int_feature[4] = 0
for i in int_feature:
if int_feature[7] == "yes":
int_feature[7] = 1
if int_feature[7] == "no":
int_feature[7] = 0
if int_feature[2] == "married":
int_feature =
|
np.append(int_feature,0)
|
numpy.append
|
import io, os, glob, shutil, re
import six
import requests
import random
import numpy as np
from tqdm import tqdm
import tarfile, zipfile, gzip
from functools import partial
import xml.etree.ElementTree as ET
import json, csv
from collections import defaultdict
import torch
from torchtext import data
from torch.utils.data import Dataset, DataLoader
from torchtext.data import Field, NestedField, LabelField, BucketIterator
from transformers import BertTokenizer, DistilBertTokenizer
from sklearn.model_selection import KFold, StratifiedKFold
import nltk, spacy
from ..utils.download_utils import download_from_url
with open("/root/keys.json",'r') as f:
apikeys = json.load(f)
"""
Novelty Dataset Base class (torchtext TabularDataset)
"""
class NoveltyDataset(data.TabularDataset):
urls = []
dirname = ""
name = "novelty"
@classmethod
def create_jsonl(cls, path):
cls.process_data(path)
@staticmethod
def sort_key(ex):
return data.interleave_keys(len(ex.source), len(ex.target))
@classmethod
def download(cls, root, check=None):
"""Download and unzip an online archive (.zip, .gz, or .tgz).
Arguments:
root (str): Folder to download data to.
check (str or None): Folder whose existence indicates
that the dataset has already been downloaded, or
None to check the existence of root/{cls.name}.
Returns:
str: Path to extracted dataset.
"""
path = os.path.join(root, cls.name)
check = path if check is None else check
if not os.path.isdir(check):
for url in cls.urls:
if isinstance(url, tuple):
url, filename = url
else:
filename = os.path.basename(url)
zpath = os.path.join(path, filename)
if not os.path.isfile(zpath):
if not os.path.exists(os.path.dirname(zpath)):
os.makedirs(os.path.dirname(zpath))
print("downloading {}".format(filename))
download_from_url(url, zpath)
zroot, ext = os.path.splitext(zpath)
_, ext_inner = os.path.splitext(zroot)
if ext == ".zip":
with zipfile.ZipFile(zpath, "r") as zfile:
print("extracting")
zfile.extractall(path)
# tarfile cannot handle bare .gz files
elif ext == ".tgz" or ext == ".gz" and ext_inner == ".tar":
with tarfile.open(zpath, "r:gz") as tar:
dirs = [member for member in tar.getmembers()]
tar.extractall(path=path, members=dirs)
elif ext == ".gz":
with gzip.open(zpath, "rb") as gz:
with open(zroot, "wb") as uncompressed:
shutil.copyfileobj(gz, uncompressed)
return os.path.join(path, cls.dirname)
@classmethod
def splits(
cls,
text_field,
label_field,
parse_field=None,
extra_fields={},
root=".data",
train="train.jsonl",
validation="val.jsonl",
test="test.jsonl",
):
"""Create dataset objects for splits of the SNLI dataset.
This is the most flexible way to use the dataset.
Arguments:
text_field: The field that will be used for premise and hypothesis
data.
label_field: The field that will be used for label data.
parse_field: The field that will be used for shift-reduce parser
transitions, or None to not include them.
extra_fields: A dict[json_key: Tuple(field_name, Field)]
root: The root directory that the dataset's zip archive will be
expanded into.
train: The filename of the train data. Default: 'train.jsonl'.
validation: The filename of the validation data, or None to not
                load the validation set. Default: 'val.jsonl'.
test: The filename of the test data, or None to not load the test
set. Default: 'test.jsonl'.
"""
path = cls.download(root)
if not os.path.exists(os.path.join(path, train)):
cls.create_jsonl(path)
if parse_field is None:
fields = {
"source": ("source", text_field),
"target_text": ("target", text_field),
"DLA": ("label", label_field),
}
for key in extra_fields:
if key not in fields.keys():
fields[key] = extra_fields[key]
return super(NoveltyDataset, cls).splits(
path,
root,
train,
validation,
test,
format="json",
fields=fields,
filter_pred=lambda ex: ex.label != "-",
)
class APWSJ(NoveltyDataset):
urls = [
(
apikeys['APWSJ_URL'],
"dataset_aw.zip",
)
]
dirname = "trec"
name = "apwsj"
@classmethod
def process_apwsj(cls, path):
AP_path = os.path.join(path, "AP")
AP_files = glob.glob(os.path.join(AP_path, "*.gz"))
for i in AP_files:
with gzip.open(i, "r") as f:
text = f.read()
with open(i[:-3], "wb") as f_new:
f_new.write(text)
os.remove(i)
wsj = os.path.join(path, "TREC", "wsj")
ap = os.path.join(path, "TREC", "AP")
ap_others = os.path.join(path, "AP")
wsj_others = os.path.join(path, "WSJ", "wsj_split")
cmunrf = os.path.join(path, "CMUNRF")
wsj_files = glob.glob(wsj + "/*")
ap_files = glob.glob(ap + "/*")
wsj_other_files = []
ap_other_files = glob.glob(os.path.join(ap_others, "*"))
wsj_big = glob.glob(os.path.join(wsj_others, "*"))
for i in wsj_big:
for file_path in glob.glob(os.path.join(i, "*")):
wsj_other_files.append(file_path)
docs_json = {}
errors = 0
for wsj_file in wsj_files:
with open(wsj_file, "r") as f:
txt = f.read()
docs = [
i.split("<DOC>")[1]
for i in filter(lambda x: len(x) > 10, txt.split("</DOC>"))
]
for doc in docs:
try:
id = doc.split("<DOCNO>")[1].split("</DOCNO>")[0]
text = doc.split("<TEXT>")[1].split("</TEXT>")[0]
docs_json[id] = text
except:
errors += 1
for ap_file in ap_files:
with open(ap_file, "r", encoding="latin-1") as f:
txt = f.read()
docs = [
i.split("<DOC>")[1]
for i in filter(lambda x: len(x) > 10, txt.split("</DOC>"))
]
for doc in docs:
try:
id = doc.split("<DOCNO>")[1].split("</DOCNO>")[0]
text = doc.split("<TEXT>")[1].split("</TEXT>")[0]
docs_json[id] = text
except:
errors += 1
for wsj_file in wsj_other_files:
with open(wsj_file, "r") as f:
txt = f.read()
docs = [
i.split("<DOC>")[1]
for i in filter(lambda x: len(x) > 10, txt.split("</DOC>"))
]
for doc in docs:
try:
id = doc.split("<DOCNO>")[1].split("</DOCNO>")[0]
text = doc.split("<TEXT>")[1].split("</TEXT>")[0]
docs_json[id] = text
except:
errors += 1
for ap_file in ap_other_files:
with open(ap_file, "r", encoding="latin-1") as f:
txt = f.read()
docs = [
i.split("<DOC>")[1]
for i in filter(lambda x: len(x) > 10, txt.split("</DOC>"))
]
for doc in docs:
try:
id = doc.split("<DOCNO>")[1].split("</DOCNO>")[0]
text = doc.split("<TEXT>")[1].split("</TEXT>")[0]
docs_json[id] = text
except:
errors += 1
print("Reading APWSJ dataset, Errors : ", errors)
docs_json = {k.strip(): v.strip() for k, v in docs_json.items()}
topic_to_doc_file = os.path.join(cmunrf, "NoveltyData/apwsj.qrels")
with open(topic_to_doc_file, "r") as f:
topic_to_doc = f.read()
topic_doc = [
(i.split(" 0 ")[1][:-2], i.split(" 0 ")[0])
for i in topic_to_doc.split("\n")
]
topics = "q101, q102, q103, q104, q105, q106, q107, q108, q109, q111, q112, q113, q114, q115, q116, q117, q118, q119, q120, q121, q123, q124, q125, q127, q128, q129, q132, q135, q136, q137, q138, q139, q141"
topic_list = topics.split(", ")
filterd_docid = [(k, v) for k, v in topic_doc if v in topic_list]
def crawl(red_dict, doc, crawled):
ans = []
for cdoc in red_dict[doc]:
ans.append(cdoc)
if crawled[cdoc] == 0:
try:
red_dict[cdoc] = crawl(red_dict, cdoc, crawled)
crawled[cdoc] = 1
ans += red_dict[cdoc]
except:
crawled[cdoc] = 1
return ans
wf = os.path.join(cmunrf, "redundancy_list_without_partially_redundant.txt")
redundancy_path = os.path.join(cmunrf, "NoveltyData/redundancy.apwsj.result")
topics_allowed = "q101, q102, q103, q104, q105, q106, q107, q108, q109, q111, q112, q113, q114, q115, q116, q117, q118, q119, q120, q121, q123, q124, q125, q127, q128, q129, q132, q135, q136, q137, q138, q139, q141"
topics_allowed = topics_allowed.split(", ")
red_dict = dict()
allow_partially_redundant = 1
for line in open(redundancy_path, "r"):
tokens = line.split()
if tokens[2] == "?":
if allow_partially_redundant == 1:
red_dict[tokens[0] + "/" + tokens[1]] = [
tokens[0] + "/" + i for i in tokens[3:]
]
else:
red_dict[tokens[0] + "/" + tokens[1]] = [
tokens[0] + "/" + i for i in tokens[2:]
]
crawled = defaultdict(int)
for doc in red_dict:
if crawled[doc] == 0:
red_dict[doc] = crawl(red_dict, doc, crawled)
crawled[doc] = 1
with open(wf, "w") as f:
for doc in red_dict:
if doc.split("/")[0] in topics_allowed:
f.write(
" ".join(
doc.split("/") + [i.split("/")[1] for i in red_dict[doc]]
)
+ "\n"
)
write_file = os.path.join(cmunrf, "novel_list_without_partially_redundant.txt")
topics = topic_list
doc_topic_dict = defaultdict(list)
for i in topic_doc:
doc_topic_dict[i[0]].append(i[1])
docs_sorted = (
open(os.path.join(cmunrf, "NoveltyData/apwsj88-90.rel.docno.sorted"), "r")
.read()
.splitlines()
)
sorted_doc_topic_dict = defaultdict(list)
for doc in docs_sorted:
if len(doc_topic_dict[doc]) > 0:
for t in doc_topic_dict[doc]:
sorted_doc_topic_dict[t].append(doc)
redundant_dict = defaultdict(lambda: defaultdict(int))
for line in open(
os.path.join(cmunrf, "redundancy_list_without_partially_redundant.txt"), "r"
):
tokens = line.split()
redundant_dict[tokens[0]][tokens[1]] = 1
novel_list = []
for topic in topics:
if topic in topics_allowed:
for i in range(len(sorted_doc_topic_dict[topic])):
if redundant_dict[topic][sorted_doc_topic_dict[topic][i]] != 1:
if i > 0:
# take at most 5 latest docs in case of novel
novel_list.append(
" ".join(
[topic, sorted_doc_topic_dict[topic][i]]
+ sorted_doc_topic_dict[topic][max(0, i - 5) : i]
)
)
with open(write_file, "w") as f:
f.write("\n".join(novel_list))
# Novel cases
novel_docs = os.path.join(cmunrf, "novel_list_without_partially_redundant.txt")
with open(novel_docs, "r") as f:
novel_doc_list = [i.split() for i in f.read().split("\n")]
# Redundant cases
red_docs = os.path.join(
cmunrf, "redundancy_list_without_partially_redundant.txt"
)
with open(red_docs, "r") as f:
red_doc_list = [i.split() for i in f.read().split("\n")]
red_doc_list = filter(lambda x: len(x) > 0, red_doc_list)
novel_doc_list = filter(lambda x: len(x) > 0, novel_doc_list)
missing_file_log = os.path.join(cmunrf, "missing_log.txt")
missing_doc_ids = []
dataset = []
s_not_found = 0
t_not_found = 0
for i in novel_doc_list:
target_id = i[1]
source_ids = i[2:]
if target_id in docs_json.keys():
data_inst = {}
data_inst["target_text"] = docs_json[target_id]
data_inst["source"] = ""
for source_id in source_ids:
if source_id in docs_json.keys():
data_inst["source"] += docs_json[source_id] + ". \n"
else:
missing_doc_ids.append(str(source_id))
data_inst["DLA"] = "Novel"
else:
missing_doc_ids.append(str(target_id))
#
if data_inst["source"] != "":
dataset.append(data_inst)
for i in red_doc_list:
target_id = i[1]
source_ids = i[2:]
if target_id in docs_json.keys():
data_inst = {}
data_inst["target_text"] = docs_json[target_id]
data_inst["source"] = ""
for source_id in source_ids:
if source_id in docs_json.keys():
data_inst["source"] += docs_json[source_id] + ". \n"
else:
missing_doc_ids.append(str(source_id))
data_inst["DLA"] = "Non-Novel"
else:
missing_doc_ids.append(str(target_id))
if data_inst["source"] != "":
dataset.append(data_inst)
with open(missing_file_log, "w") as f:
f.write("\n".join(missing_doc_ids))
dataset_json = {}
for i in range(len(dataset)):
dataset_json[i] = dataset[i]
return dataset_json
@classmethod
def process_data(cls, path):
cmunrf_url = "http://www.cs.cmu.edu/~yiz/research/NoveltyData/CMUNRF1.tar"
cmunrf_path = os.path.join(path, "CMUNRF1.tar")
download_from_url(cmunrf_url, cmunrf_path)
data_zips = [
(os.path.join(path, "AP.tar"), os.path.join(path, "AP")),
(os.path.join(path, "trec.zip"), os.path.join(path, "TREC")),
(os.path.join(path, "wsj_split.zip"), os.path.join(path, "WSJ")),
(os.path.join(path, "CMUNRF1.tar"), os.path.join(path, "CMUNRF")),
]
for data_zip in data_zips:
shutil.unpack_archive(data_zip[0], data_zip[1])
"""
Process APWSJ
"""
dataset_json = cls.process_apwsj(path)
if not os.path.exists(path):
os.makedirs(path)
with open(os.path.join(path, "apwsj.jsonl"), "w") as f:
f.writelines([json.dumps(i) + "\n" for i in dataset_json.values()])
# with open(os.path.join(path, "dlnd.jsonl"), "w") as f:
# json.dump(list(dataset.values()), f)
@classmethod
def splits(
cls,
text_field,
label_field,
parse_field=None,
root=".data",
train="apwsj.jsonl",
validation=None,
test=None,
):
return super(APWSJ, cls).splits(
text_field,
label_field,
parse_field=parse_field,
root=root,
train=train,
validation=validation,
test=test,
)
class DLND(NoveltyDataset):
urls = [
(
apikeys['DLND_URL'],
"TAP-DLND-1.0_LREC2018_modified.zip",
)
]
dirname = "TAP-DLND-1.0_LREC2018_modified"
name = "dlnd"
@classmethod
def process_data(cls, path):
all_direc = [
os.path.join(path, direc, direc1)
for direc in os.listdir(path)
if os.path.isdir(os.path.join(path, direc))
for direc1 in os.listdir(os.path.join(path, direc))
]
source_files = [
[
os.path.join(direc, "source", file)
for file in os.listdir(os.path.join(direc, "source"))
if file.endswith(".txt")
]
for direc in all_direc
]
target_files = [
[
os.path.join(direc, "target", file)
for file in os.listdir(os.path.join(direc, "target"))
if file.endswith(".txt")
]
for direc in all_direc
]
source_docs = [
[
open(file_name, "r", encoding="latin-1")
.read()
.encode("ascii", "ignore")
.decode()
for file_name in direc
]
for direc in source_files
]
target_docs = [
[
open(file_name, "r", encoding="latin-1")
.read()
.encode("ascii", "ignore")
.decode()
for file_name in direc
]
for direc in target_files
]
data = []
for i in range(len(target_docs)):
for j in range(len(target_docs[i])):
label = [
tag.attrib["DLA"]
for tag in ET.parse(target_files[i][j][:-4] + ".xml").findall(
"feature"
)
if "DLA" in tag.attrib.keys()
][0]
data.append(
[target_docs[i][j]]
+ [source_docs[i][k] for k in range(len(source_docs[i]))]
+ ["Novel" if label.lower() == "novel" else "Non-Novel"]
)
dataset = []
for i in data:
dataset.append(
{"source":"\n".join(i[1:-1]), "target_text": i[0], "DLA": i[-1]}
)
if not os.path.exists(path):
os.makedirs(path)
with open(os.path.join(path, "dlnd.jsonl"), "w") as f:
f.writelines([json.dumps(i) + "\n" for i in dataset])
@classmethod
def splits(
cls,
text_field,
label_field,
parse_field=None,
root=".data",
train="dlnd.jsonl",
validation=None,
test=None,
):
return super(DLND, cls).splits(
text_field,
label_field,
parse_field=parse_field,
root=root,
train=train,
validation=validation,
test=test,
)
class Webis(NoveltyDataset):
urls = [
(
"https://zenodo.org/record/3251771/files/Webis-CPC-11.zip",
"Webis-CPC-11.zip",
)
]
dirname = "Webis-CPC-11"
name = "webis"
@classmethod
def process_data(cls, path):
original = glob.glob(os.path.join(path, "*original*"))
metadata = glob.glob(os.path.join(path, "*metadata*"))
paraphrase = glob.glob(os.path.join(path, "*paraphrase*"))
assert len(original) == len(metadata) == len(paraphrase)
ids = [i.split("/")[-1].split("-")[0] for i in original]
data = {int(i): {} for i in ids}
to_pop = []
for id in data.keys():
org_file = os.path.join(path, f"{id}-original.txt")
para_file = os.path.join(path, f"{id}-paraphrase.txt")
meta_file = os.path.join(path, f"{id}-metadata.txt")
with open(org_file, "r") as f:
org = f.read()
with open(para_file, "r") as f:
par = f.read()
with open(meta_file, "r") as f:
text = f.read()
novel = re.findall("Paraphrase: (.*)", text)[0] == "Yes"
if len(org) > 10 and len(par) > 10:
data[id]["source"] = org.replace("\n", "")
data[id]["target_text"] = par.replace("\n", "")
data[id]["DLA"] = novel
else:
to_pop.append(id)
for id in to_pop:
data.pop(id, None)
dataset = data.values()
if not os.path.exists(path):
os.makedirs(path)
with open(os.path.join(path, "webis.jsonl"), "w") as f:
f.writelines([json.dumps(i) + "\n" for i in dataset])
@classmethod
def splits(
cls,
text_field,
label_field,
parse_field=None,
root=".data",
train="webis.jsonl",
validation=None,
test=None,
):
return super(Webis, cls).splits(
text_field,
label_field,
parse_field=parse_field,
root=root,
train=train,
validation=validation,
test=test,
)
"""
PyTorch Dataset/DataLoader
"""
class DLND_Dataset(Dataset):
def __init__(self, data):
self.data = data
self.fields = self.data.fields
def __len__(self):
return len(self.data.examples)
def __getitem__(self, idx):
source = (
self.fields["source"].process([self.data.examples[idx].source]).squeeze()
)
target = (
self.fields["target"].process([self.data.examples[idx].target]).squeeze()
)
label = self.fields["label"].process([self.data.examples[idx].label]).squeeze()
return [source, target], label
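# --- Illustrative usage sketch (added; the concrete field settings are assumptions,
# not the authors' configuration) ---------------------------------------------------
def _demo_load_dlnd():
    # Nest documents as sentences -> tokens, mirroring the Novelty class below.
    sentence_field = Field(batch_first=True, lower=True, fix_length=50)
    doc_field = NestedField(sentence_field, tokenize=nltk.sent_tokenize, fix_length=20)
    label_field = LabelField(dtype=torch.long)
    (dlnd,) = DLND.splits(doc_field, label_field)   # downloads and parses dlnd.jsonl
    doc_field.build_vocab(dlnd)
    label_field.build_vocab(dlnd)
    return dlnd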
"""
Novelty Dataset Class
"""
class Novelty:
def __init__(
self,
options,
sentence_field=None,
):
self.options = options
if sentence_field == None:
self.sentence_field = Field(
batch_first=True,
use_vocab=options["use_vocab"],
lower=options["lower"],
preprocessing=options["preprocessing"],
tokenize=options["tokenize"],
fix_length=options["max_len"],
init_token=options["init_token"],
eos_token=options["eos_token"],
pad_token=options["pad_token"],
unk_token=options["unk_token"],
)
build_vocab = True
else:
self.sentence_field = sentence_field
build_vocab = False
if options["sent_tokenizer"] == "spacy":
import spacy
from spacy.lang.en import English
nlp = English()
nlp.add_pipe(nlp.create_pipe("sentencizer"))
def sent_tokenize(raw_text):
doc = nlp(raw_text)
sentences = [sent.string.strip() for sent in doc.sents]
return sentences
self.sent_tok = lambda x: sent_tokenize(x)
else:
self.sent_tok = lambda x: nltk.sent_tokenize(x)
if options["doc_field"]:
self.TEXT_FIELD = self.sentence_field
else:
self.TEXT_FIELD = NestedField(
self.sentence_field,
tokenize=self.sent_tok,
fix_length=options["max_num_sent"],
)
self.LABEL = LabelField(dtype=torch.long)
if options["dataset"] == "dlnd":
dataset = DLND
if options["dataset"] == "apwsj":
dataset = APWSJ
if options["dataset"] == "webis":
dataset = Webis
(self.data,) = dataset.splits(self.TEXT_FIELD, self.LABEL)
if self.options.get("labeled", -1) != -1:
num_labeled = self.options.get("labeled", False)
self.dataset_labeled, self.test = self.data.split(
split_ratio=0.8, stratified=True, random_state=random.getstate()
)
data_size = len(self.dataset_labeled)
percentage = num_labeled / data_size
self.train, self.dev = self.dataset_labeled.split(
split_ratio=percentage, stratified=True, random_state=random.getstate()
)
else:
self.train, self.dev, self.test = self.data.split(
split_ratio=[0.8, 0.1, 0.1],
stratified=True,
random_state=random.getstate(),
)
self.LABEL.build_vocab(self.train)
if build_vocab:
self.TEXT_FIELD.build_vocab(self.train, self.dev)
self.train_iter, self.val_iter, self.test_iter = BucketIterator.splits(
(self.train, self.dev, self.test),
batch_size=options["batch_size"],
device=options["device"],
)
def iter_folds(self):
kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1029)
train_exs_arr = np.array(self.data.examples)
labels = np.array([i.label for i in self.data.examples])
fields = self.data.fields
for train_idx, test_idx in kf.split(train_exs_arr, y=labels):
yield (
BucketIterator(
data.Dataset(train_exs_arr[train_idx], fields),
batch_size=self.options["batch_size"],
device=self.options["device"],
),
BucketIterator(
data.Dataset(train_exs_arr[test_idx], fields),
batch_size=self.options["batch_size"],
device=self.options["device"],
),
)
def vocab_size(self):
if self.options["use_vocab"]:
return len(self.TEXT_FIELD.nesting_field.vocab)
else:
return self.tokenizer.vocab_size
def padding_idx(self):
if self.options["use_vocab"]:
return self.TEXT_FIELD.nesting_field.vocab.stoi[self.options["pad_token"]]
else:
return self.options["pad_token"]
def out_dim(self):
return len(self.LABEL.vocab)
def labels(self):
return self.LABEL.vocab.stoi
def get_dataloaders(self):
train_dl = DataLoader(
DLND_Dataset(self.train),
batch_size=self.options["batch_size"],
shuffle=True,
)
dev_dl = DataLoader(
DLND_Dataset(self.dev), batch_size=self.options["batch_size"], shuffle=True
)
test_dl = DataLoader(
DLND_Dataset(self.test), batch_size=self.options["batch_size"], shuffle=True
)
return train_dl, dev_dl, test_dl
def get_numpy_data(self):
def get_numpy(data_iter):
np_data = {}
attr_list = ["source", "target", "label"]
for attr in attr_list:
data = np.concatenate(
[getattr(i, attr).detach().cpu().numpy() for i in data_iter]
)
np_data[attr] = data
src =
|
np.expand_dims(np_data["source"], 1)
|
numpy.expand_dims
|
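For reference, the completion above inserts a length-one axis with np.expand_dims. A minimal, self-contained sketch of that call; the array shape is an illustrative assumption, not taken from the snippet:
import numpy as np

batch = np.zeros((4, 10))              # e.g. 4 examples, 10 tokens each (illustrative)
expanded = np.expand_dims(batch, 1)    # insert a new axis at position 1
print(expanded.shape)                  # (4, 1, 10)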
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 18:57:33 2020
@author: kahdi
"""
#%% Import packages
import numpy as np
import tensorflow as tf
from scipy.linalg import qr
import time
#%% Define functions here
# =============================================================================
# Define the ODE of Van der Pol
# =============================================================================
def VanderPol(u,t,p):
du1=u[1]
du2=p[0]*(1-u[0]**2)*u[1]-u[0]
du=np.array([du1,du2])
#p0=0.5
return du
# =============================================================================
# Define the ODE of Duffing
# =============================================================================
def Duffing(u,t,p):
du1=u[1]
du2=-p[0]*u[1]-p[1]*u[0]-p[2]*u[0]**3
du=np.array([du1,du2])
#p0=[0.2,0.05,1]
return du
# =============================================================================
# Define the ODE of Lotka
# =============================================================================
def Lotka(u,t,p):
du1=p[0]*u[0]-p[1]*u[0]*u[1]
du2=p[1]*u[0]*u[1]-2*p[0]*u[1]
du=np.array([du1,du2])
#p0=[1,10]
return du
# =============================================================================
# Define the ODE for the cubic oscillator
# =============================================================================
def CubicOsc(u,t,p):
du1=p[0]*u[0]**3+p[1]*u[1]**3
du2=p[2]*u[0]**3+p[3]*u[1]**3
du=np.array([du1,du2])
#p0=[-0.1,2,-2,-0.1]
return du
# =============================================================================
# Define the ODE for the Lorenz system
# =============================================================================
def Lorenz(u, t, p):
du1 = p[0]*u[0]+p[1]*u[1]
du2 = p[2]*u[0]+p[3]*u[0]*u[2]+p[4]*u[1]
du3 = p[5]*u[0]*u[1]+p[6]*u[2]
du=np.array([du1,du2,du3])
#p0=[-10.0,10.0,28.0,-1.0,-1.0,1.0,-8/3]
return du
# =============================================================================
# Define the ODE for the Rössler attractor
# =============================================================================
def Rossler(u, t, p):
du1 = -u[1]-u[2]
du2 = u[0]+p[0]*u[1]
du3 = p[1]+u[0]*u[2]-p[2]*u[2]
du=np.array([du1,du2,du3])
#p0=[0.1,0.0,0.0]
return du
# =============================================================================
# Define the ODE of Lorenz96 (Modified from https://en.wikipedia.org/wiki/Lorenz_96_model)
# =============================================================================
def Lorenz96(x, t, p):
# Lorenz 96 model
# Compute state derivatives
N=p[0]
d = np.zeros(np.shape(x))
# First the 3 edge cases: i=1,2,N
d[0] = (x[1] - x[N-2]) * x[N-1] - x[0]
d[1] = (x[2] - x[N-1]) * x[0] - x[1]
d[N-1] = (x[0] - x[N-3]) * x[N-2] - x[N-1]
# Then the general case
for i in range(2, N-1):
d[i] = (x[i+1] - x[i-2]) * x[i-1] - x[i]
# Add the forcing term
d = d + p[1]
#p0=[10,8]
# Return the state derivatives
return d
# =============================================================================
# Define the library function for the SINDy
# =============================================================================
def Lib(x,libOrder):
# First get the dimension of the x
n,m=x.shape
print("The first dimension of the input data:",n)
print("The second dimension of the input data:",m)
# Lib order 0
Theta=np.ones((n,1),dtype=float)
# Lib order 1
if libOrder>=1:
for i in range(m):
Theta=
|
np.concatenate((Theta,x[:,[i]]),axis=1)
|
numpy.concatenate
|
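For reference, the completion above grows the SINDy library matrix one column at a time with np.concatenate. A minimal sketch with illustrative data (5 samples, 2 state variables), not the snippet's actual inputs:
import numpy as np

x = np.random.rand(5, 2)                   # illustrative state trajectory
Theta = np.ones((5, 1))                    # order-0 term
for i in range(x.shape[1]):
    Theta = np.concatenate((Theta, x[:, [i]]), axis=1)
print(Theta.shape)                         # (5, 3): columns [1, x1, x2]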
import math
import os
import numpy as np
def __filenames_sorted_mapper(filename: str) -> int:
return int(filename.split(".")[0].split("_")[-1])
def __load_camera_params_from_file(pic_num, depth_images) -> dict:
result = {}
params_path = depth_images[pic_num][:-5] + "txt"
with open(params_path, 'r') as input_file:
for line in input_file:
field_name_start = 0
field_name_end = line.find(" ")
field_name = line[field_name_start:field_name_end]
value_start = line.find("=") + 2 # skip space after '='
if field_name == "cam_angle":
value_end = line.find(";")
else:
value_end = line.find(";") - 1
value = line[value_start:value_end]
result[field_name] = value
return result
def provide_filenames(general_path) -> (list, list):
path = general_path # as paths are equal
filenames = os.listdir(path)
rgb_filenames = (filter(lambda x: x.endswith(".png"), filenames))
depth_filenames = (filter(lambda x: x.endswith(".depth"), filenames))
rgb_filenames = sorted(rgb_filenames, key=__filenames_sorted_mapper)
depth_filenames = sorted(depth_filenames, key=__filenames_sorted_mapper)
full_rgb_filenames = []
full_depth_filenames = []
for rgb_filename in rgb_filenames:
full_rgb_filenames.append(os.path.join(general_path, rgb_filename))
for depth_filename in depth_filenames:
full_depth_filenames.append(os.path.join(general_path, depth_filename))
return full_rgb_filenames, full_depth_filenames
def __get_camera_params_for_frame(pic_num, depth_images):
# Adopted from https://www.doc.ic.ac.uk/~ahanda/VaFRIC/getcamK.m
camera_params_raw = __load_camera_params_from_file(pic_num, depth_images)
cam_dir = np.fromstring(camera_params_raw["cam_dir"][1:-1], dtype=float, sep=',').T
cam_right = np.fromstring(camera_params_raw["cam_right"][1:-1], dtype=float, sep=',').T
cam_up = np.fromstring(camera_params_raw["cam_up"][1:-1], dtype=float, sep=',').T
focal = np.linalg.norm(cam_dir)
aspect = np.linalg.norm(cam_right) / np.linalg.norm(cam_up)
angle = 2 * math.atan(np.linalg.norm(cam_right) / 2 / focal)
width = 640
height = 480
psx = 2 * focal * math.tan(0.5 * angle) / width
psy = 2 * focal * math.tan(0.5 * angle) / aspect / height
psx = psx / focal
psy = psy / focal
o_x = (width + 1) * 0.5
o_y = (height + 1) * 0.5
fx = 1 / psx
fy = -1 / psy
cx = o_x
cy = o_y
return fx, fy, cx, cy
def getting_points(frame_num, depth_images, cam_intrinsic):
# Adopted from https://www.doc.ic.ac.uk/~ahanda/VaFRIC/compute3Dpositions.m
depth_frame_path = depth_images[frame_num]
fx, fy, cx, cy = __get_camera_params_for_frame(frame_num, depth_images)
x_matrix = np.tile(np.arange(cam_intrinsic.width), (cam_intrinsic.height, 1)).flatten()
y_matrix = np.transpose(np.tile(
|
np.arange(cam_intrinsic.height)
|
numpy.arange
|
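For reference, the completion above builds per-pixel index grids from np.arange. A minimal sketch with an assumed 4x3 image instead of the 640x480 frame used in the snippet:
import numpy as np

width, height = 4, 3                                                        # illustrative size
x_matrix = np.tile(np.arange(width), (height, 1)).flatten()                 # column index per pixel
y_matrix = np.transpose(np.tile(np.arange(height), (width, 1))).flatten()   # row index per pixel
print(x_matrix)   # [0 1 2 3 0 1 2 3 0 1 2 3]
print(y_matrix)   # [0 0 0 0 1 1 1 1 2 2 2 2]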
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import itertools
def compute_cofactor(x):
u, s, vt = np.linalg.svd(x)
detsi = np.empty_like(s)
for i in range(len(s)):
detsi[i] = np.prod(s[:i]) * np.prod(s[i+1:])
return np.linalg.det(u.dot(vt)) * u.dot(np.diag(detsi)).dot(vt)
def minor(arr, i, j):
l0 = list(itertools.chain(range(i), range(i+1, arr.shape[0])))
l1 = list(itertools.chain(range(j), range(j+1, arr.shape[1])))
return arr[np.array(l0)[:,np.newaxis], np.array(l1)]
def compute_cofactor_brute(x):
ret = np.empty_like(x)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
v = np.linalg.det(minor(x, i, j))
if (i + j) % 2:
v = -v
ret[i, j] = v
return ret
def check(x):
print('input')
print(x)
print('det:', np.linalg.det(x))
c0 = compute_cofactor(x)
c1 = compute_cofactor_brute(x)
print('c0')
print(c0)
print('c1')
print(c1)
print('diff', np.abs(c0 - c1).max())
def main():
x =
|
np.random.uniform(1, 4, (5, 5))
|
numpy.random.uniform
|
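For reference, the completion above draws the random test matrix for check() with np.random.uniform. A minimal sketch, also showing the classical identity cofactor(A) = det(A) * inv(A).T that the SVD-based routine reproduces for invertible matrices:
import numpy as np

x = np.random.uniform(1, 4, (5, 5))                   # i.i.d. samples in [1, 4)
cof_via_inverse = np.linalg.det(x) * np.linalg.inv(x).T
print(x.shape, cof_via_inverse.shape)                 # (5, 5) (5, 5)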
import warnings
from typing import Any, List, Optional, Tuple, Union
import numpy as np
from scipy import sparse as sps
from .categorical_matrix import CategoricalMatrix
from .dense_matrix import DenseMatrix
from .ext.split import is_sorted, split_col_subsets
from .matrix_base import MatrixBase
from .sparse_matrix import SparseMatrix
from .standardized_mat import StandardizedMatrix
from .util import (
check_matvec_out_shape,
check_transpose_matvec_out_shape,
set_up_rows_or_cols,
)
def as_mx(a: Any):
"""Convert an array to a corresponding MatrixBase type.
If the input is already a MatrixBase, return untouched.
If the input is sparse, return a SparseMatrix.
If the input is a numpy array, return a DenseMatrix.
Raise an error if the input is any other type.
"""
if isinstance(a, (MatrixBase, StandardizedMatrix)):
return a
elif sps.issparse(a):
return SparseMatrix(a)
elif isinstance(a, np.ndarray):
return DenseMatrix(a)
else:
raise ValueError(f"Cannot convert type {type(a)} to Matrix.")
def _prepare_out_array(out: Optional[np.ndarray], out_shape, out_dtype):
if out is None:
out = np.zeros(out_shape, out_dtype)
else:
# TODO: make this a re-usable method that all the matrix classes
# can use to check their out parameter
if out.dtype != out_dtype:
raise ValueError(
f"out array is required to have dtype {out_dtype} but has "
f"dtype {out.dtype}"
)
return out
def _filter_out_empty(matrices, indices):
keep_idxs = [i for i, m in enumerate(matrices) if m.shape[1] > 0]
out_mats = [matrices[i] for i in keep_idxs]
out_idxs = [indices[i] for i in keep_idxs]
return out_mats, out_idxs
def _combine_matrices(matrices, indices):
"""
Combine multiple SparseMatrix and DenseMatrix objects into a single object of each type.
``matrices`` is the list of matrices to combine and ``indices`` marks which columns each one corresponds to.
Categorical matrices remain unmodified by this function since categorical
matrices cannot be combined (each categorical matrix represents a single category).
Parameters
----------
matrices:
The MatrixBase matrices to be combined.
indices:
The columns that each matrix corresponds to.
"""
n_row = matrices[0].shape[0]
for mat_type_, stack_fn in [
(DenseMatrix, np.hstack),
(SparseMatrix, sps.hstack),
]:
this_type_matrices = [
i for i, mat in enumerate(matrices) if isinstance(mat, mat_type_)
]
if len(this_type_matrices) > 1:
new_matrix = mat_type_(stack_fn([matrices[i] for i in this_type_matrices]))
new_indices = np.concatenate([indices[i] for i in this_type_matrices])
sorter =
|
np.argsort(new_indices)
|
numpy.argsort
|
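For reference, the completion above uses np.argsort to recover column order after stacking matrices of the same type. A minimal sketch with an illustrative index array:
import numpy as np

new_indices = np.array([3, 0, 4, 1, 2])    # column order after stacking (illustrative)
sorter = np.argsort(new_indices)           # permutation that restores ascending order
print(sorter)                              # [1 3 4 0 2]
print(new_indices[sorter])                 # [0 1 2 3 4]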
import numpy as np
import matplotlib.pyplot as pplot
import csv
norm = 1000.
def nonlin(x,deriv=False):
if(deriv==True):
return x*(1.-x)
return 1./(1.+
|
np.exp(-x)
|
numpy.exp
|
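For reference, the completion above finishes the logistic sigmoid 1 / (1 + exp(-x)); the deriv branch returns x * (1 - x), which is the sigmoid derivative when x already holds sigmoid outputs. A minimal sketch with illustrative inputs:
import numpy as np

x = np.array([-2.0, 0.0, 2.0])
sig = 1. / (1. + np.exp(-x))
print(sig)               # approx [0.119 0.5   0.881]
print(sig * (1. - sig))  # approx [0.105 0.25  0.105]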
import csv
import numpy as np
import aljpy
from . import simulator, common, arrdict
import scipy as sp
import scipy.special
from pkg_resources import resource_filename
from datetime import date
# TODO: verify if this can be changed.
N_AGES = 101
D_END = date(2020, 5, 20) # stop
# If lockdown, by how much do divide contact matrix?
LOCKDOWN_FACTOR = 2.
# How infectious are asymptomatic cases relative to symptomatic ones
# https://science.sciencemag.org/content/early/2020/03/13/science.abb3221
ASYMPTOMATIC_TRANSMISSIBILITY = 0.55
# DON'T CHANGE: we don't want the household infection probability to be recalibrated for different policy what-ifs on mean time to isolate
MEAN_TIME_TO_ISOLATE = 4.6 # DON'T CHANGE
TUNED = aljpy.dotdict(
Italy=aljpy.dotdict(
# increase probability of death for all ages and comorbidities by this amount
mortality_multiplier=4,
# Probability of infection given contact between two individuals
# This is currently set arbitrarily and will be calibrated to match the empirical r0
pigc=.029,
population=int(1e4),
n_infected_start=5.,
start_date=date(2020, 1, 22),
stay_home_date=date(2020, 3, 8),
lockdown_date=date(2022, 12, 31))) # no lockdown
AGE_GROUPS = {
'infected_1': '0-4', 'contact_1': '0-4', 'infected_2': '5-9',
'contact_2': '5-9', 'infected_3': '10-14', 'contact_3': '10-14',
'infected_4': '15-19', 'contact_4': '15-19', 'infected_5': '20-24',
'contact_5': '20-24', 'infected_6': '25-29', 'contact_6': '25-29',
'infected_7': '30-34', 'contact_7': '30-34', 'infected_8': '35-39',
'contact_8': '35-39', 'infected_9': '40-44', 'contact_9': '40-44',
'infected_10': '45-49', 'contact_10': '45-49', 'infected_11': '50-54',
'contact_11': '50-54', 'infected_12': '55-59', 'contact_12': '55-59',
'infected_13': '60-64', 'contact_13': '60-64', 'infected_14': '65-69',
'contact_14': '65-69', 'infected_15': '70-74', 'contact_15': '70-74',
'infected_16': '75-79', 'contact_16': '75-79'}
def read_contact_matrix(country):
"""Create a country-specific contact matrix from stored data.
Read a stored contact matrix based on age intervals. Return a matrix of
expected number of contacts for each pair of raw ages. Extrapolate to age
ranges that are not covered.
Args:
country (str): country name.
Returns:
float N_AGES x N_AGES matrix: expected number of contacts between of a person
of age i and age j is Poisson(matrix[i][j]).
"""
matrix = np.zeros((N_AGES, N_AGES))
with open(resource_filename(__package__, f'contactmatrices/{country}/All_{country}.csv'), 'r') as f:
csvraw = list(csv.reader(f))
col_headers = csvraw[0][1:-1]
row_headers = [row[0] for row in csvraw[1:]]
data = np.array([row[1:-1] for row in csvraw[1:]])
for i in range(len(row_headers)):
for j in range(len(col_headers)):
interval_infected = AGE_GROUPS[row_headers[i]]
interval_infected = [int(x) for x in interval_infected.split('-')]
interval_contact = AGE_GROUPS[col_headers[j]]
interval_contact = [int(x) for x in interval_contact.split('-')]
for age_infected in range(interval_infected[0], interval_infected[1]+1):
for age_contact in range(interval_contact[0], interval_contact[1]+1):
matrix[age_infected, age_contact] = float(data[i][j])/(interval_contact[1] - interval_contact[0] + 1)
# extrapolate from 79yo out to 100yo
# start by fixing the age of the infected person and then assuming linear decrease
# in their number of contacts of a given age, following the slope of the largest
# pair of age brackets that doesn't contain a diagonal term (since those are anomalously high)
for i in range(interval_infected[1]+1):
if i < 65: # 0-65
slope = (matrix[i, 70] - matrix[i, 75])/5
elif i < 70: # 65-70
slope = (matrix[i, 55] - matrix[i, 60])/5
elif i < 75: # 70-75
slope = (matrix[i, 60] - matrix[i, 65])/5
else: # 75-80
slope = (matrix[i, 65] - matrix[i, 70])/5
start_age = 79
if i >= 75:
start_age = 70
for j in range(interval_contact[1]+1, N_AGES):
matrix[i, j] = matrix[i, start_age] - slope*(j - start_age)
if matrix[i, j] < 0:
matrix[i, j] = 0
# fix diagonal terms
for i in range(interval_infected[1]+1, N_AGES):
matrix[i] = matrix[interval_infected[1]]
for i in range(int((100-80)/5)):
age = 80 + i*5
matrix[age:age+5, age:age+5] = matrix[79, 79]
matrix[age:age+5, 75:80] = matrix[75, 70]
matrix[100, 95:] = matrix[79, 79]
matrix[95:, 100] = matrix[79, 79]
return matrix
def transition_probabilities(mortality_multiplier):
"""2b. Construct transition probabilities between disease severities
There are three disease states: mild, severe and critical.
- Mild represents sub-hospitalization.
- Severe is hospitalization.
- Critical is ICU.
The key results of this section are:
- p_mild_severe: N_AGES x 2 x 2 matrix. For each age and comorbidity state
(length two bool vector indicating whether the individual has diabetes and/or
hypertension), what is the probability of the individual transitioning from
the mild to severe state.
- p_severe_critical, p_critical_death are the same for the other state transitions.
All of these probabilities are proportional to the base progression rate
for an (age, diabetes, hypertension) state which is stored in p_death_target
and estimated via logistic regression.
"""
# N_AGES vector: The probability of transitioning from the mild to
# severe state for a patient of age i is p_mild_severe_cdc[i]. We will match
# these overall probabilities.
# Source: https://www.cdc.gov/mmwr/volumes/69/wr/mm6912e2.htm?s_cid=mm6912e2_w#T1_down
# Using the lower bounds for probability of hospitalization, since that's more
# consistent with frequency of severe infection reported in
# https://www.nejm.org/doi/full/10.1056/NEJMoa2002032 (at a lower level of age granularity).
p_mild_severe_cdc = np.zeros(N_AGES)
p_mild_severe_cdc[0:20] = 0.016
p_mild_severe_cdc[20:45] = 0.143
p_mild_severe_cdc[45:55] = 0.212
p_mild_severe_cdc[55:65] = 0.205
p_mild_severe_cdc[65:75] = 0.286
p_mild_severe_cdc[75:85] = 0.305
p_mild_severe_cdc[85:] = 0.313
# overall probability of progression from severe to critical
# https://www.ecdc.europa.eu/sites/default/files/documents/RRA-sixth-update-Outbreak-of-novel-coronavirus-disease-2019-COVID-19.pdf
# taking midpoint of the intervals
overall_p_severe_critical = (0.15 + 0.2) / 2
# probability that a critical case dies: this is not overall mortality, which is set
# separately, but rather how many individuals who reach the critical state die. 0.49 is from
# http://weekly.chinacdc.cn/en/article/id/e53946e2-c6c4-41e9-9a9b-fea8db1a8f51
overall_p_critical_death = 0.49
# go back to using CDC hospitalization rates as mild->severe
severe_critical_multiplier = overall_p_severe_critical / p_mild_severe_cdc
critical_death_multiplier = overall_p_critical_death / p_mild_severe_cdc
# get the overall CFR for each age/comorbidity combination by running the logistic model
"""
Mortality model. We fit a logistic regression to estimate p_mild_death from
(age, diabetes, hypertension) to match the marginal mortality rates from TODO.
The results of the logistic regression are used to set the disease severity
transition probabilities.
"""
c_age = np.loadtxt(resource_filename(__package__, 'comorbidities/c_age.txt'), delimiter=',').mean(axis=0)
"""float vector: Logistic regression weights for each age bracket."""
c_diabetes = np.loadtxt(resource_filename(__package__, 'comorbidities/c_diabetes.txt'), delimiter=',').mean(axis=0)
"""float: Logistic regression weight for diabetes."""
c_hyper = np.loadtxt(resource_filename(__package__, 'comorbidities/c_hypertension.txt'), delimiter=',').mean(axis=0)
"""float: Logistic regression weight for hypertension."""
intervals = np.loadtxt(resource_filename(__package__, 'comorbidities/comorbidity_age_intervals.txt'), delimiter=',')
def age_to_interval(i):
"""Return the corresponding comorbidity age interval for a specific age.
Args:
i (int): age.
Returns:
int: index of interval containing i in intervals.
"""
for idx, a in enumerate(intervals):
if i >= a[0] and i < a[1]:
return idx
return idx
p_death_target = np.zeros((N_AGES, 2, 2))
for i in range(N_AGES):
for diabetes_state in [0,1]:
for hyper_state in [0,1]:
if i < intervals[0][0]:
p_death_target[i, diabetes_state, hyper_state] = 0
else:
p_death_target[i, diabetes_state, hyper_state] = sp.special.expit(
c_age[age_to_interval(i)] + diabetes_state * c_diabetes +
hyper_state * c_hyper)
# p_death_target *= params['mortality_multiplier']
# p_death_target[p_death_target > 1] = 1
#calibrate the probability of the severe -> critical transition to match the
#overall CFR for each age/comorbidity combination
#age group, diabetes (0/1), hypertension (0/1)
progression_rate = np.zeros((N_AGES, 2, 2))
p_mild_severe = np.zeros((N_AGES, 2, 2))
"""float N_AGES x 2 x 2 array: Probability a patient with a particular age/comorbidity
profile transitions from mild to severe state."""
p_severe_critical = np.zeros((N_AGES, 2, 2))
"""float N_AGES x 2 x 2 array: Probability a patient with a particular age/comorbidity
profile transitions from severe to critical state."""
p_critical_death = np.zeros((N_AGES, 2, 2))
"""float N_AGES x 2 x 2 array: Probability a patient with a particular age/comorbidity
profile transitions from critical to dead state."""
for i in range(N_AGES):
for diabetes_state in [0,1]:
for hyper_state in [0,1]:
progression_rate[i, diabetes_state, hyper_state] = (p_death_target[i, diabetes_state, hyper_state]
/ (severe_critical_multiplier[i]
* critical_death_multiplier[i])) ** (1./3)
p_mild_severe[i, diabetes_state, hyper_state] = progression_rate[i, diabetes_state, hyper_state]
p_severe_critical[i, diabetes_state, hyper_state] = severe_critical_multiplier[i]*progression_rate[i, diabetes_state, hyper_state]
p_critical_death[i, diabetes_state, hyper_state] = critical_death_multiplier[i]*progression_rate[i, diabetes_state, hyper_state]
#no critical cases under 20 (CDC)
p_critical_death[:20] = 0
p_severe_critical[:20] = 0
#for now, just cap 80+yos with diabetes and hypertension
p_critical_death[p_critical_death > 1] = 1
p_mild_severe *= mortality_multiplier**(1/3)
p_severe_critical *= mortality_multiplier**(1/3)
p_critical_death *= mortality_multiplier**(1/3)
p_mild_severe[p_mild_severe > 1] = 1
p_severe_critical[p_severe_critical > 1] = 1
p_critical_death[p_critical_death > 1] = 1
return aljpy.dotdict(
p_mild_severe=p_mild_severe,
p_severe_critical=p_severe_critical,
p_critical_death=p_critical_death,
)
def lockdown_factor(factor):
return
|
np.array(((0, 14, factor), (15, 24, factor), (25, 39, factor), (40, 69, factor), (70, 100, factor)))
|
numpy.array
|
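For reference, the completion above packs (age_low, age_high, factor) rows into an ndarray with np.array. A minimal sketch of the same call with an assumed factor of 2.0:
import numpy as np

factor = 2.0
table = np.array(((0, 14, factor), (15, 24, factor), (25, 39, factor),
                  (40, 69, factor), (70, 100, factor)))
print(table.shape)   # (5, 3)
print(table[0])      # [ 0. 14.  2.]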
import numpy as np
from scipy import special
from scipy.stats import norm
import pdb
from config import *
class Lik_Layer(object):
"""Summary
Attributes:
D (TYPE): Description
N (TYPE): Description
"""
def __init__(self, N, D):
"""Summary
Args:
N (TYPE): Description
D (TYPE): Description
"""
self.N = N
self.D = D
def compute_log_Z(self, mout, vout, y, alpha=1.0):
"""Summary
Args:
mout (TYPE): Description
vout (TYPE): Description
y (TYPE): Description
alpha (float, optional): Description
"""
pass
def backprop_grads(self, mout, vout, dmout, dvout, alpha=1.0, scale=1.0):
"""Summary
Args:
mout (TYPE): Description
vout (TYPE): Description
dmout (TYPE): Description
dvout (TYPE): Description
alpha (float, optional): Description
scale (float, optional): Description
Returns:
TYPE: Description
"""
return {}
def compute_log_lik_exp(self, m, v, y):
pass
def backprop_grads_log_lik_exp(self, m, v, dm, dv, y, scale=1.0):
return {}
def init_hypers(self):
"""Summary
Returns:
TYPE: Description
"""
return {}
def get_hypers(self):
"""Summary
Returns:
TYPE: Description
"""
return {}
def update_hypers(self, params):
"""Summary
Args:
params (TYPE): Description
"""
pass
class Gauss_Layer(Lik_Layer):
"""Summary
Attributes:
sn (int): Description
"""
def __init__(self, N, D):
"""Summary
Args:
N (TYPE): Description
D (TYPE): Description
"""
super(Gauss_Layer, self).__init__(N, D)
self.sn = 0
def compute_log_Z(self, mout, vout, y, alpha=1.0, compute_dm2=False):
"""Summary
Args:
mout (TYPE): Description
vout (TYPE): Description
y (TYPE): Description
alpha (float, optional): Description
Returns:
TYPE: Description
Raises:
RuntimeError: Description
"""
# real valued data, gaussian lik
if mout.ndim == 2:
sn2 = np.exp(2.0 * self.sn)
vout += sn2 / alpha
logZ = np.sum(-0.5 * (np.log(2 * np.pi * vout) +
(y - mout)**2 / vout))
logZ += y.shape[0] * self.D * (0.5 * np.log(2 * np.pi * sn2 / alpha)
- 0.5 * alpha * np.log(2 * np.pi * sn2))
dlogZ_dm = (y - mout) / vout
dlogZ_dv = -0.5 / vout + 0.5 * (y - mout)**2 / vout**2
if compute_dm2:
dlogZ_dm2 = - 1 / vout
return logZ, dlogZ_dm, dlogZ_dv, dlogZ_dm2
else:
return logZ, dlogZ_dm, dlogZ_dv
elif mout.ndim == 3:
sn2 = np.exp(2.0 * self.sn)
vout += sn2 / alpha
logZ = -0.5 * (np.log(2 * np.pi * vout) + (y - mout)**2 / vout)
logZ += (0.5 * np.log(2 * np.pi * sn2 / alpha) -
0.5 * alpha * np.log(2 * np.pi * sn2))
logZ_max = np.max(logZ, axis=0)
exp_term = np.exp(logZ - logZ_max)
sumexp = np.sum(exp_term, axis=0)
logZ_lse = logZ_max + np.log(sumexp)
logZ_lse -= np.log(mout.shape[0])
logZ = np.sum(logZ_lse)
dlogZ = exp_term / sumexp
dlogZ_dm = dlogZ * (y - mout) / vout
dlogZ_dv = dlogZ * (-0.5 / vout + 0.5 * (y - mout)**2 / vout**2)
return logZ, dlogZ_dm, dlogZ_dv
else:
raise RuntimeError('invalid ndim, ndim=%d' % mout.ndim)
def backprop_grads(self, mout, vout, dmout, dvout, alpha=1.0, scale=1.0):
"""Summary
Args:
mout (TYPE): Description
vout (TYPE): Description
dmout (TYPE): Description
dvout (TYPE): Description
alpha (float, optional): Description
scale (float, optional): Description
Returns:
TYPE: Description
Raises:
RuntimeError: Description
"""
sn2 = np.exp(2.0 * self.sn)
dv_sum = np.sum(dvout)
if mout.ndim == 2:
dim_prod = mout.shape[0] * self.D
elif mout.ndim == 3:
dim_prod = mout.shape[1] * self.D
else:
raise RuntimeError('invalid ndim, ndim=%d' % mout.ndim)
dsn = dv_sum * 2 * sn2 / alpha + dim_prod * (1 - alpha)
dsn *= scale
return {'sn': dsn}
def compute_log_lik_exp(self, mout, vout, y):
"""Summary
Args:
mout (TYPE): Description
vout (TYPE): Description
y (TYPE): Description
alpha (float, optional): Description
Returns:
TYPE: Description
Raises:
RuntimeError: Description
"""
sn2 = np.exp(2.0 * self.sn)
# real valued data, gaussian lik
if mout.ndim == 2:
term1 = -0.5 * np.log(2 * np.pi * sn2)
term2 = -0.5 / sn2 * (y**2 - 2 * y * mout + mout**2 + vout)
de_dm = 1.0 / sn2 * (y - mout)
de_dv = -0.5 / sn2 * np.ones_like(vout)
exptn = term1 + term2
exptn_sum = np.sum(exptn)
return exptn_sum, de_dm, de_dv
elif mout.ndim == 3:
term1 = - 0.5 * np.log(2*np.pi*sn2)
term2 = - 0.5 * ((y - mout) ** 2 + vout) / sn2
sumterm = term1 + term2
logZ = np.sum(np.mean(sumterm, axis=0))
dlogZ_dm = (y - mout) / sn2 / mout.shape[0]
dlogZ_dv = - 0.5 / sn2 * np.ones_like(vout) / mout.shape[0]
return logZ, dlogZ_dm, dlogZ_dv
else:
raise RuntimeError('invalid ndim, ndim=%d' % mout.ndim)
def backprop_grads_log_lik_exp(self, m, v, dm, dv, y, scale=1.0):
# real valued data, gaussian lik
sn2 = np.exp(2.0 * self.sn)
if m.ndim == 2:
term1 = -1
term2 = 1 / sn2 * (y**2 - 2 * y * m + m**2 + v)
dsn = term1 + term2
dsn = scale * np.sum(dsn)
return {'sn': dsn}
elif m.ndim == 3:
term1 = - 1
term2 = ((y - m) ** 2 + v) / sn2
dsn = term1 + term2
dsn = scale * np.sum(dsn) / m.shape[0]
return {'sn': dsn}
else:
raise RuntimeError('invalid ndim, ndim=%d' % m.ndim)
def output_probabilistic(self, mf, vf, alpha=1.0):
"""Summary
Args:
mf (TYPE): Description
vf (TYPE): Description
alpha (float, optional): Description
Returns:
TYPE: Description
"""
return mf, vf + np.exp(2.0 * self.sn) / alpha
def init_hypers(self, key_suffix=''):
"""Summary
Args:
key_suffix (str, optional): Description
Returns:
TYPE: Description
"""
self.sn = np.log(0.01)
return {'sn' + key_suffix: self.sn}
def get_hypers(self, key_suffix=''):
"""Summary
Args:
key_suffix (str, optional): Description
Returns:
TYPE: Description
"""
return {'sn' + key_suffix: self.sn}
def update_hypers(self, params, key_suffix=''):
"""Summary
Args:
params (TYPE): Description
key_suffix (str, optional): Description
"""
self.sn = params['sn' + key_suffix]
class Probit_Layer(Lik_Layer):
"""Summary
"""
__gh_points = None
def _gh_points(self, T=20):
"""Summary
Args:
T (int, optional): Description
Returns:
TYPE: Description
"""
if self.__gh_points is None:
self.__gh_points = np.polynomial.hermite.hermgauss(T)
return self.__gh_points
def compute_log_Z(self, mout, vout, y, alpha=1.0, compute_dm2=False):
"""Summary
Args:
mout (TYPE): Description
vout (TYPE): Description
y (TYPE): Description
alpha (float, optional): Description
Returns:
TYPE: Description
Raises:
RuntimeError: Description
"""
# binary data probit likelihood
if mout.ndim == 2:
if alpha == 1.0:
t = y * mout / np.sqrt(1 + vout)
Z = 0.5 * (1 + special.erf(t / np.sqrt(2)))
eps = 1e-16
logZ = np.sum(np.log(Z + eps))
dlogZ_dt = 1 / (Z + eps)
dlogZ_dt = dlogZ_dt / np.sqrt(2 * np.pi) * np.exp(-t**2.0 / 2)
dt_dm = y / np.sqrt(1 + vout)
dt_dv = -0.5 * y * mout / (1 + vout)**1.5
dlogZ_dm = dlogZ_dt * dt_dm
dlogZ_dv = dlogZ_dt * dt_dv
if compute_dm2:
beta = dlogZ_dm / y
dlogZ_dm2 = - (beta**2 + mout * y * beta / (1 + vout))
else:
gh_x, gh_w = self._gh_points(GH_DEGREE)
gh_x = gh_x[:, np.newaxis, np.newaxis]
gh_w = gh_w[:, np.newaxis, np.newaxis]
ts = gh_x * \
np.sqrt(2 * vout[np.newaxis, :, :]) + \
mout[np.newaxis, :, :]
eps = 1e-8
pdfs = 0.5 * (1 + special.erf(y * ts / np.sqrt(2))) + eps
Ztilted = np.sum(pdfs**alpha * gh_w, axis=0) / np.sqrt(np.pi)
logZ = np.sum(np.log(Ztilted))
a = pdfs**(alpha - 1.0) * np.exp(-ts**2 / 2)
dZdm = np.sum(gh_w * a, axis=0) * y * \
alpha / np.pi / np.sqrt(2)
dlogZ_dm = dZdm / Ztilted + eps
dZdv = np.sum(gh_w * (a * gh_x), axis=0) * y * \
alpha / np.pi / np.sqrt(2) / np.sqrt(2 * vout)
dlogZ_dv = dZdv / Ztilted + eps
if compute_dm2:
b = (alpha-1)*pdfs**(alpha-2)*np.exp(-ts**2)/np.sqrt(2*np.pi) \
- pdfs**(alpha-1) * y * ts * np.exp(-ts**2/2)
dZdm2 = np.sum(gh_w * b, axis=0) * alpha / np.pi / np.sqrt(2)
dlogZ_dm2 = -dZdm**2 / Ztilted**2 + dZdm2 / Ztilted + eps
elif mout.ndim == 3:
if alpha == 1.0:
t = y * mout / np.sqrt(1 + vout)
Z = 0.5 * (1 + special.erf(t / np.sqrt(2)))
eps = 1e-16
logZ_term = np.log(Z + eps)
logZ_max = np.max(logZ_term, axis=0)
exp_term = np.exp(logZ_term - logZ_max)
sumexp = np.sum(exp_term, axis=0)
logZ_lse = logZ_max + np.log(sumexp)
logZ_lse -= np.log(mout.shape[0])
logZ = np.sum(logZ_lse)
dlogZ = exp_term / sumexp
dlogZ_dt = 1 / (Z + eps)
dlogZ_dt = dlogZ_dt / np.sqrt(2 * np.pi) *
|
np.exp(-t**2.0 / 2)
|
numpy.exp
|
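For reference, the truncated line above multiplies 1/Z by the standard normal density exp(-t**2 / 2) / sqrt(2*pi), i.e. the derivative of log Phi(t) used in the probit likelihood. A minimal sketch with illustrative t values:
import numpy as np
from scipy import special

t = np.array([-1.0, 0.0, 1.0])                     # illustrative inputs
Phi = 0.5 * (1 + special.erf(t / np.sqrt(2)))      # standard normal CDF (probit)
pdf = np.exp(-t**2.0 / 2) / np.sqrt(2 * np.pi)     # standard normal PDF
print(pdf / Phi)                                   # d/dt log Phi(t) at each t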
import numpy as np
from pylie import SO3
class SE3:
"""Represents an element of the SE(3) Lie group (poses in 3D)."""
def __init__(self, pose_tuple=(SO3(), np.zeros((3, 1)))):
"""Constructs an SE(3) element.
The default is the identity element.
:param pose_tuple: A tuple (rot3, t) (optional).
"""
self.rotation, self.translation = pose_tuple
@classmethod
def from_matrix(cls, T):
"""Construct an SE(3) element from a pose matrix.
The rotation is fitted to the closest rotation matrix, the bottom row of a 4x4 matrix is ignored.
:param T: 4x4 or 3x4 pose matrix.
:return: The SE(3) element.
"""
return cls((SO3(T[:3, :3]), T[:3, 3:4]))
@property
def rotation(self):
""" The so3 rotation, an element of SO(3)
:return: An SO3 object corresponding to the orientation.
"""
return self._rotation
@rotation.setter
def rotation(self, so3):
"""Sets the rotation
:param so3: An SO3
"""
if not isinstance(so3, SO3):
raise TypeError('Rotation must be a SO3')
self._rotation = so3
@property
def translation(self):
"""The translation, a 3D column vector
:return: A 3D column vector corresponding to the translation.
"""
return self._translation
@translation.setter
def translation(self, t):
"""Sets the translation
:param t: 3D column vector
"""
if not (isinstance(t, np.ndarray) and t.shape == (3, 1)):
raise TypeError('Translation must be a 3D column vector')
self._translation = t
def to_matrix(self):
"""Return the matrix representation of this pose.
:return: 4x4 SE(3) matrix
"""
T = np.identity(4)
T[0:3, 0:3] = self.rotation.matrix
T[0:3, 3] = self.translation.T
return T
def to_tuple(self):
"""Return the tuple representation of this pose
:return: (R (3x3 matrix), t (3D column vector))
"""
return (self.rotation.matrix, self.translation)
def compose(X, Y):
"""Compose this element X with another element Y on the right
:param Y: The other Pose3 element
:return: This element X composed with Y
"""
return SE3((X.rotation.__matmul__(Y.rotation), X.rotation * Y.translation + X.translation))
def inverse(self):
"""Compute the inverse of the current element X.
:return: The inverse of the current element.
"""
rot_inv = self.rotation.inverse()
return SE3((rot_inv, -(rot_inv * self.translation)))
def action(self, x):
"""Perform the action of the SE(3) element on the 3D column vector x.
:param x: 3D column vector to be transformed (or a matrix of 3D column vectors)
:return: The resulting rotated and translated 3D column vectors
"""
return self.rotation * x + self.translation
def adjoint(self):
"""The adjoint at the element.
:return: The adjoint, a 6x6 matrix.
"""
R = self.rotation.matrix
return np.block([[R, np.matmul(SO3.hat(self.translation), R)],
[np.zeros((3, 3)), R]])
def oplus(X, xi_vec):
"""Computes the right perturbation of Exp(xi_vec) on the element X.
:param xi_vec: The tangent space vector, a 6D column vector xi_vec = [rho_vec, theta_vec]^T.
:return: The perturbed SE3 element Y = X :math:`\\oplus` xi_vec.
"""
if not (isinstance(xi_vec, np.ndarray) and xi_vec.shape == (6, 1)):
raise TypeError('Argument must be a 6D column vector')
return X.__matmul__(SE3.Exp(xi_vec))
def ominus(Y, X):
"""Computes the tangent space vector at X between X and this element Y.
:param X: The other element
:return: The difference xi_vec = Y :math:`\\ominus` X
"""
if not isinstance(X, SE3):
raise TypeError('Argument must be an SE3')
return (X.inverse().__matmul__(Y)).Log()
def Log(self):
"""Computes the tangent space vector xi_vec at the current element X.
:return: The tangent space vector xi_vec = [rho_vec, theta_vec]^T.
"""
theta, u_vec = self.rotation.Log(split_angle_axis=True)
if theta == 0:
return np.vstack((self.translation, np.zeros((3, 1))))
theta_vec = theta * u_vec
a = np.sin(theta) / theta
b = (1 - np.cos(theta)) / (theta ** 2)
theta_hat = SO3.hat(theta_vec)
V_inv = np.identity(3) - 0.5 * theta_hat + np.linalg.matrix_power(theta_hat, 2) * (
1 - a / (2 * b)) / (theta ** 2)
rho_vec = np.matmul(V_inv, self.translation)
return np.vstack((rho_vec, theta_vec))
def jac_inverse_X_wrt_X(X):
"""Computes the Jacobian of the inverse operation X.inverse() with respect to the element X.
:return: The Jacobian (6x6 matrix)
"""
return -X.adjoint()
def jac_action_Xx_wrt_X(X, x):
"""Computes the Jacobian of the action X.action(x) with respect to the element X.
:param x: The 3D column vector x.
:return: The Jacobian (3x6 matrix)
"""
return np.block([[X.rotation.matrix, -(np.matmul(X.rotation.matrix, SO3.hat(x)))]])
def jac_action_Xx_wrt_x(X):
"""Computes the Jacobian of the action X.action(x) with respect to the vector x.
:return: The Jacobian (3x3 matrix)
"""
return X.rotation.matrix
def jac_Y_ominus_X_wrt_X(Y, X):
"""Compute the Jacobian of Y.ominus(X) with respect to the element X.
:param X: The SE(3) element X.
:return: The Jacobian (6x6 matrix)
"""
return -SE3.jac_left_inverse(Y - X)
def jac_Y_ominus_X_wrt_Y(Y, X):
"""Compute the Jacobian of Y.ominus(X) with respect to the element Y.
:param X: The SE(3) element X.
:return: The Jacobian (6x6 matrix)
"""
return SE3.jac_right_inverse(Y - X)
def __mul__(self, other):
"""Multiplication operator performs action on vectors.
:param other: 3D column vector, or a matrix of 3D column vectors.
:return: Transformed 3D column vector
"""
if isinstance(other, np.ndarray) and other.shape[0] == 3:
# Other is matrix of 3D column vectors, perform action on vectors.
return self.action(other)
else:
raise TypeError('Argument must be a matrix of 3D column vectors')
def __matmul__(self, other):
"""Matrix multiplication operator performs composition on elements of SE(3).
:param other: Other SE3
:return: Composed SE3
"""
if isinstance(other, SE3):
# Other is SE3, perform composition.
return self.compose(other)
else:
raise TypeError('Argument must be an SE3')
def __add__(self, xi_vec):
"""Add operator performs the "oplus" operation on the element X.
:param xi_vec: The tangent space vector, a 6D column vector xi_vec = [rho_vec, theta_vec]^T..
:return: The perturbed SE3 element Y = X :math:`\\oplus` xi_vec.
"""
return self.oplus(xi_vec)
def __sub__(self, X):
"""Subtract operator performs the "ominus" operation at X between X and this element Y.
:param X: The other element
:return: The difference xi_vec = Y :math:`\\ominus` X
"""
return self.ominus(X)
def __len__(self):
"""Length operator returns the dimension of the tangent vector space,
which is equal to the number of degrees of freedom (DOF).
:return: The DOF for poses (6)
"""
return 6
@staticmethod
def hat(xi_vec):
"""Performs the hat operator on the tangent space vector xi_vec,
which returns the corresponding Lie Algebra matrix xi_hat.
:param xi_vec: 6d tangent space column vector xi_vec = [rho_vec, theta_vec]^T.
:return: The Lie Algebra (4x4 matrix).
"""
return np.block([[SO3.hat(xi_vec[3:]), xi_vec[:3]],
[np.zeros((1, 4))]])
@staticmethod
def vee(xi_hat):
"""Performs the vee operator on the Lie Algebra matrix xi_hat,
which returns the corresponding tangent space vector.
:param xi_hat: The Lie Algebra (4x4 matrix)
:return: 6d tangent space column vector xi_vec = [rho_vec, theta_vec]^T.
"""
return np.vstack((xi_hat[:3, 3:4], SO3.vee(xi_hat[:3, :3])))
@staticmethod
def Exp(xi_vec):
"""Computes the Exp-map on the Lie algebra vector xi_vec,
which transfers it to the corresponding Lie group element.
:param xi_vec: 6d tangent space column vector xi_vec = [rho_vec, theta_vec]^T.
:return: Corresponding SE(3) element
"""
xi_hat = SE3.hat(xi_vec)
theta = np.linalg.norm(xi_vec[3:])
if theta < 1e-10:
return SE3.from_matrix(np.identity(4) + xi_hat)
else:
return SE3.from_matrix(
np.identity(4) + xi_hat + ((1 - np.cos(theta)) / (theta ** 2)) * np.linalg.matrix_power(xi_hat, 2) +
((theta - np.sin(theta)) / (theta ** 3)) * np.linalg.matrix_power(xi_hat, 3))
@staticmethod
def jac_composition_XY_wrt_X(Y):
"""Computes the Jacobian of the composition X.compose(Y) with respect to the element X.
:param Y: SE3 element Y
:return: The Jacobian (6x6 matrix)
"""
R_Y_inv = Y.rotation.inverse().matrix
return np.block([[R_Y_inv, -(np.matmul(R_Y_inv, SO3.hat(Y.translation)))],
[np.zeros((3, 3)), R_Y_inv]])
@staticmethod
def jac_composition_XY_wrt_Y():
"""Computes the Jacobian of the composition X.compose(Y) with respect to the element Y.
:return: The Jacobian (6x6 identity matrix)
"""
return np.identity(6)
@staticmethod
def _Q_left(xi_vec):
rho_vec = xi_vec[:3]
theta_vec = xi_vec[3:]
theta = np.linalg.norm(theta_vec)
if theta < 1e-10:
return np.zeros((3, 3))
rho_hat = SO3.hat(rho_vec)
theta_hat = SO3.hat(theta_vec)
return 0.5 * rho_hat + ((theta - np.sin(theta)) / theta ** 3) * \
(np.matmul(theta_hat, rho_hat) + np.matmul(rho_hat, theta_hat) + np.matmul(np.matmul(theta_hat, rho_hat), theta_hat)) - \
((1 - 0.5 * theta ** 2 - np.cos(theta)) / theta ** 4) * \
(np.matmul(np.matmul(theta_hat, theta_hat), rho_hat) + np.matmul(np.matmul(rho_hat, theta_hat), theta_hat) -
3 * np.matmul(np.matmul(theta_hat, rho_hat), theta_hat)) - \
0.5 * ((1 - 0.5 * theta ** 2 - np.cos(theta)) / theta ** 4 - 3 *
((theta - np.sin(theta) - (theta ** 3 / 6)) / theta ** 5)) * \
(np.matmul(np.matmul(np.matmul(theta_hat, rho_hat), theta_hat), theta_hat) + np.matmul(np.matmul(np.matmul(theta_hat, theta_hat), rho_hat), theta_hat))
@staticmethod
def _Q_right(xi_vec):
return SE3._Q_left(-xi_vec)
@staticmethod
def jac_right(xi_vec):
"""Compute the right derivative of Exp(xi_vec) with respect to xi_vec.
:param xi_vec: The tangent space 6D column vector xi_vec = [rho_vec, theta_vec]^T.
:return: The Jacobian (6x6 matrix)
"""
theta_vec = xi_vec[3:]
J_r_theta = SO3.jac_right(theta_vec)
Q_r = SE3._Q_right(xi_vec)
return np.block([[J_r_theta, Q_r],
[
|
np.zeros((3, 3))
|
numpy.zeros
|
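For reference, the completion above supplies the zero lower-left block of the 6x6 right Jacobian being assembled with np.block; the lower-right block (cut off in the prompt) is typically J_r_theta again. A minimal sketch with stand-in 3x3 blocks, not the real SO(3) Jacobian:
import numpy as np

J_r_theta = np.eye(3)            # stand-in block (illustrative)
Q_r = np.ones((3, 3))            # stand-in block (illustrative)
J = np.block([[J_r_theta, Q_r],
              [np.zeros((3, 3)), J_r_theta]])
print(J.shape)                   # (6, 6)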
"""
Implements GPs for Euclidean spaces.
-- <EMAIL>
-- <EMAIL>
"""
from __future__ import division
# pylint: disable=invalid-name
# pylint: disable=attribute-defined-outside-init
# pylint: disable=no-member
from argparse import Namespace
import numpy as np
# Local imports
from . import gp_core, mf_gp
from . import kernel as gp_kernel
from ..utils.ancillary_utils import get_list_of_floats_as_str
from ..utils.general_utils import get_sublist_from_indices, map_to_bounds
from ..utils.option_handler import get_option_specs, load_options
from ..utils.oper_utils import random_sample_from_discrete_domain
from ..utils.reporters import get_reporter
_DFLT_KERNEL_TYPE = 'se'
# Some basic parameters for Euclidean GPs.
basic_euc_gp_args = [ \
get_option_specs('kernel_type', False, 'default',
'Specify type of kernel. This depends on the application.'),
get_option_specs('use_same_bandwidth', False, False,
('If true, will use same bandwidth on all dimensions. Applies only '
'when kernel_type is se or matern. Default=False.')), \
]
# Parameters for the SE kernel.
se_gp_args = [ \
]
# Parameters for the matern kernel
matern_gp_args = [ \
get_option_specs('matern_nu', False, -1.0, \
('Specify the nu value for the matern kernel. If negative, will fit.')),
]
# Parameters for the Polynomial kernel.
poly_gp_args = [ \
get_option_specs('use_same_scalings', False, False,
'If true uses same scalings on all dimensions. Default is False.'),
get_option_specs('poly_order', False, 1,
'Order of the polynomial to be used. Default is 1 (linear kernel).')
]
# Parameters for an additive kernel
add_gp_args = [ \
get_option_specs('use_additive_gp', False, False,
'Whether or not to use an additive GP. '),
get_option_specs('add_max_group_size', False, 6,
'The maximum number of groups in the additive grouping. '),
get_option_specs('add_grouping_criterion', False, 'randomised_ml',
'Specify the grouping algorithm, should be one of {randomised_ml}'),
get_option_specs('num_groups_per_group_size', False, -1,
'The number of groups to try per group size.'),
get_option_specs('add_group_size_criterion', False, 'sampled',
'Specify how to pick the group size, should be one of {max, sampled}.')
]
# Parameters for an esp kernel
esp_gp_args = [ \
get_option_specs('esp_order', False, -1,
'Order of the esp kernel. '),
get_option_specs('esp_kernel_type', False, 'se',
'Specify type of kernel. This depends on the application.'),
get_option_specs('esp_matern_nu', False, -1.0, \
('Specify the nu value for matern kernel. If negative, will fit.')),
]
# All parameters for a basic GP
euclidean_gp_args = gp_core.mandatory_gp_args + basic_euc_gp_args + se_gp_args + \
matern_gp_args + poly_gp_args + add_gp_args + esp_gp_args
# Hyper-parameters for Euclidean Multi-fidelity GPs
basic_mf_euc_gp_args = [ \
# Fidelity kernel ------------------------------------------------------------
get_option_specs('fidel_kernel_type', False, 'se', \
'Type of kernel for the fidelity space. Should be se, matern, poly or expdecay'),
# Secondary parameters for the fidelity kernel
get_option_specs('fidel_matern_nu', False, 2.5, \
('Specify the nu value for the matern kernel. If negative, will fit.')),
get_option_specs('fidel_use_same_bandwidth', False, False, \
('If true, will use same bandwidth on all fidelity dimensions. Applies only when ' \
'fidel_kernel_type is se or matern. Default=False.')),
get_option_specs('fidel_use_same_scalings', False, False, \
('If true, will use same scaling on all fidelity dimensions. Applies only when ' \
'fidel_kernel_type is poly. Default=False.')),
get_option_specs('fidel_poly_order', False, -1, \
('Order of the polynomial for fidelity kernel. Default = -1 (means will tune)')),
# Domain kernel --------------------------------------------------------------
get_option_specs('domain_kernel_type', False, 'se',
'Type of kernel for the domain space. Should be se, matern or poly'),
# Secondary parameters for the domain kernel
get_option_specs('domain_matern_nu', False, 2.5, \
('Specify the nu value for the matern kernel. If negative, will fit.')),
get_option_specs('domain_use_same_bandwidth', False, False, \
('If true, will use same bandwidth on all domain dimensions. Applies only when ' \
'domain_kernel_type is se or matern. Default=False.')),
get_option_specs('domain_use_same_scalings', False, False, \
('If true, will use same scaling on all domain dimensions. Applies only when ' \
'domain_kernel_type is poly. Default=False.')),
get_option_specs('domain_poly_order', False, -1, \
('Order of the polynomial for domain kernel. Default = -1 (means will fit)')),
# Additive models for the domain kernel
get_option_specs('domain_use_additive_gp', False, False,
'Whether or not to use an additive GP. '),
get_option_specs('domain_add_max_group_size', False, 6,
'The maximum number of groups in the additive grouping. '),
get_option_specs('domain_add_grouping_criterion', False, 'randomised_ml',
'Specify the grouping algorithm, should be one of {randomised_ml}'),
get_option_specs('domain_num_groups_per_group_size', False, -1,
'The number of groups to try per group size.'),
get_option_specs('domain_add_group_size_criterion', False, 'sampled', \
'Specify how to pick the group size, should be one of {max, sampled}.'), \
get_option_specs('domain_esp_order', False, -1,
'Order of the esp kernel. '),
get_option_specs('domain_esp_kernel_type', False, 'se',
'Specify type of kernel. This depends on the application.'),
get_option_specs('domain_esp_matern_nu', False, -1.0, \
('Specify the nu value for matern kernel. If negative, will fit.')),
get_option_specs('fidel_esp_order', False, -1,
'Order of the esp kernel. '),
get_option_specs('fidel_esp_kernel_type', False, 'se',
'Specify type of kernel. This depends on the application.'),
get_option_specs('fidel_esp_matern_nu', False, -1.0, \
('Specify the nu value for matern kernel. If negative, will fit.')),\
]
# Define this which includes mandatory_gp_args and basic_gp_args
euclidean_mf_gp_args = gp_core.mandatory_gp_args + basic_mf_euc_gp_args
# Part I: EuclideanGP and EuclideanGPFitter
# ======================================================================================
class EuclideanGP(gp_core.GP):
""" euclidean GP factory """
# pylint: disable=attribute-defined-outside-init
def __init__(self, X, Y, kernel, mean_func, noise_var,
kernel_hyperparams=None, build_posterior=True, reporter=None):
"""
X, Y: data
kern: could be an object or one of the following strings, 'se', 'poly', 'matern'
kernel_hyperparams: dictionary specifying the hyper-parameters for the kernel.
'se' : 'dim', 'scale' (optional), 'dim_bandwidths' (optional)
'poly' : 'dim', 'order', 'scale', 'dim_scalings' (optional)
'matern' : 'dim', 'nu' (optional), 'scale' (optional),
'dim_bandwidths' (optional)
"""
if isinstance(kernel, str):
kernel = self._get_kernel_from_type(kernel, kernel_hyperparams)
super(EuclideanGP, self).__init__(X, Y, kernel, mean_func, noise_var,
build_posterior, reporter)
@classmethod
def _get_kernel_from_type(cls, kernel_type, kernel_hyperparams):
""" Get different euclidean kernels based on kernel_type"""
if kernel_type in ['se']:
return gp_kernel.SEKernel(kernel_hyperparams['dim'], kernel_hyperparams['scale'],
kernel_hyperparams['dim_bandwidths'])
elif kernel_type in ['poly']:
return gp_kernel.PolyKernel(kernel_hyperparams['dim'], kernel_hyperparams['order'],
kernel_hyperparams['scale'],
kernel_hyperparams['dim_scalings'])
elif kernel_type in ['matern']:
return gp_kernel.MaternKernel(kernel_hyperparams['dim'],
kernel_hyperparams['nu'], kernel_hyperparams['scale'],
kernel_hyperparams['dim_bandwidths'])
elif kernel_type in ['esp']:
return gp_kernel.ESPKernelSE(kernel_hyperparams['dim'], kernel_hyperparams['scale'],
kernel_hyperparams['order'],
kernel_hyperparams['dim_bandwidths'])
else:
raise ValueError('Cannot construct kernel from kernel_type %s.' % (kernel_type))
def _child_str(self):
""" String representation for child GP. """
ke_str = self._get_kernel_str(self.kernel)
dim = 0 if len(self.X) == 0 else len(self.X[0])
mean_str = 'mu(0)=%0.3f'%(self.mean_func([np.zeros(dim,)])[0])
ret = 'scale: %0.3f, %s, %s' % (self.kernel.hyperparams['scale'], ke_str, mean_str)
return ret
@classmethod
def _get_kernel_str(cls, kern):
""" Gets a string format of the kernel depending on whether it is SE/Poly."""
if isinstance(kern, gp_kernel.AdditiveKernel):
return str(kern)
if isinstance(kern, gp_kernel.SEKernel) or isinstance(kern, gp_kernel.MaternKernel):
hp_name = 'dim_bandwidths'
kern_name = 'se' if isinstance(kern, gp_kernel.SEKernel) else \
'matern(%0.1f)' % (kern.hyperparams['nu'])
elif isinstance(kern, gp_kernel.PolyKernel):
hp_name = 'dim_scalings'
kern_name = 'poly'
else: # Return an empty string.
return ''
if kern.dim > 6:
ret = '%0.4f(avg)' % (kern.hyperparams[hp_name].mean())
else:
ret = get_list_of_floats_as_str(kern.hyperparams[hp_name])
ret = kern_name + '-' + ret
return ret
class EuclideanGPFitter(gp_core.GPFitter):
""" Fits a GP by tuning the kernel hyper-params. """
# pylint: disable=attribute-defined-outside-init
def __init__(self, X, Y, options=None, reporter=None):
""" Constructor. """
self.dim = len(X[0])
reporter = get_reporter(reporter)
if options is None:
options = load_options(euclidean_gp_args, 'EuclideanGPFitter', reporter=reporter)
super(EuclideanGPFitter, self).__init__(X, Y, options, reporter)
def _child_set_up(self):
""" Sets parameters for GPFitter. """
# IMPORTANT: Keep this order when tuning for the hyper-parameters.
# Continuous: Mean value, GP noise, kernel scale, then the remaining Kernel params.
# Discrete: add_group_size, then the remaining kernel params.
# Check args - so that we don't have to keep doing this all the time
if self.options.kernel_type not in ['se', 'matern', 'poly', 'esp', 'default']:
raise ValueError('Unknown kernel_type. Should be either se, matern or poly.')
if self.options.noise_var_type not in ['tune', 'label', 'value']:
raise ValueError('Unknown noise_var_type. Should be either tune, label or value.')
if self.options.mean_func_type not in ['mean', 'median', 'const', 'zero', 'tune']:
raise ValueError('Unknown mean_func_type. Should be mean/median/const/zero/tune.')
# Set kernel type
if self.options.kernel_type == 'default':
self.kernel_type = _DFLT_KERNEL_TYPE
else:
self.kernel_type = self.options.kernel_type
# 1 & 2: mean value and noise variance - done in parent class.
# 3. Kernel parameters
if self.kernel_type == 'se':
self._se_kernel_set_up()
elif self.kernel_type == 'matern':
self._matern_kernel_set_up()
elif self.kernel_type in ['poly']:
self._poly_kernel_set_up()
elif self.kernel_type == 'esp':
self._esp_kernel_set_up()
# 4. Additive grouping
if self.options.use_additive_gp and self.kernel_type != 'esp':
self.add_group_size_idx_in_dscr_hp_vals = len(self.dscr_hp_vals)
self.add_max_group_size = min(self.options.add_max_group_size, self.dim)
self.dscr_hp_vals.append([x+1 for x in range(self.add_max_group_size)])
self.param_order.append(["additive_grp", "dscr"])
elif self.kernel_type == 'esp' and self.options.esp_order == -1:
self.dscr_hp_vals.append(list(range(1, max(self.dim, self.options.esp_order) + 1)))
self.param_order.append(["esp_order", "dscr"])
def _se_kernel_set_up(self):
""" Set up for the SE kernel. """
# Scale
self.scale_log_bounds = [np.log(0.1 * self.Y_var), np.log(10 * self.Y_var)]
self.param_order.append(["scale", "cts"])
# Bandwidths
X_std_norm = np.linalg.norm(self.X, 'fro') + 1e-4
single_bandwidth_log_bounds = [np.log(0.01 * X_std_norm), np.log(10 * X_std_norm)]
if self.options.use_same_bandwidth:
self.bandwidth_log_bounds = [single_bandwidth_log_bounds]
self.param_order.append(["same_dim_bandwidths", "cts"])
else:
self.bandwidth_log_bounds = [single_bandwidth_log_bounds] * self.dim
for _ in range(self.dim):
self.param_order.append(["dim_bandwidths", "cts"])
self.cts_hp_bounds += [self.scale_log_bounds] + self.bandwidth_log_bounds
def _matern_kernel_set_up(self):
""" Set up for the Matern kernel. """
# Set up scale and bandwidth - which is identical to the SE kernel.
self._se_kernel_set_up()
# Set up optimisation values for the nu parameter.
if self.options.matern_nu < 0:
self.dscr_hp_vals.append([0.5, 1.5, 2.5])
self.param_order.append(["nu", "dscr"])
def _poly_kernel_set_up(self):
""" Set up for the Poly kernel. """
raise NotImplementedError('Not implemented Poly kernel yet.')
def _esp_kernel_set_up(self):
""" Set up for the ESP kernel. """
if self.options.esp_kernel_type not in ['se', 'matern']:
raise NotImplementedError('Not implemented yet.')
# Scale
self.scale_log_bounds = [np.log(0.1 * self.Y_var), np.log(10 * self.Y_var)]
self.param_order.append(["scale", "cts"])
# Bandwidths
X_std_norm = np.linalg.norm(self.X, 'fro') + 1e-4
single_bandwidth_log_bounds = [np.log(0.01 * X_std_norm), np.log(10 * X_std_norm)]
self.bandwidth_log_bounds = [single_bandwidth_log_bounds] * self.dim
for _ in range(self.dim):
self.param_order.append(["dim_bandwidths", "cts"])
self.cts_hp_bounds += [self.scale_log_bounds] + self.bandwidth_log_bounds
if self.options.esp_kernel_type == 'matern' and self.options.esp_matern_nu < 0:
self.dscr_hp_vals.append([0.5, 1.5, 2.5])
self.param_order.append(["nu", "dscr"])
def _prep_init_kernel_hyperparams(self, kernel_type):
""" Wrapper to pack the kernel hyper-parameters into a dictionary. """
return prep_euclidean_integral_kernel_hyperparams(kernel_type, self.options, self.dim)
def _optimise_cts_hps_for_given_dscr_hps(self, given_dscr_hps):
""" Optimises the continuous hyper-parameters for the given discrete hyper-params.
"""
if not self.options.use_additive_gp:
return super(EuclideanGPFitter, self)._optimise_cts_hps_for_given_dscr_hps( \
given_dscr_hps)
else:
return optimise_cts_hps_for_given_dscr_hps_in_add_model(given_dscr_hps, \
self.options.num_groups_per_group_size, self.dim, self.hp_tune_max_evals, \
self.cts_hp_optimise, self._tuning_objective)
def _sample_cts_dscr_hps_for_rand_exp_sampling(self):
""" Samples continous and discrete hyper-parameters for rand_exp_sampling. """
if not self.options.use_additive_gp:
return super(EuclideanGPFitter, self)._sample_cts_dscr_hps_for_rand_exp_sampling()
else:
return sample_cts_dscr_hps_for_rand_exp_sampling_in_add_model( \
self.hp_tune_max_evals, self.cts_hp_bounds, self.dim, self.dscr_hp_vals, \
self.add_group_size_idx_in_dscr_hp_vals, self._tuning_objective)
def _child_build_gp(self, mean_func, noise_var, gp_cts_hps, gp_dscr_hps,
other_gp_params=None, *args, **kwargs):
""" Builds the GP. """
# Domain kernel --------------------------------------
kernel_hyperparams = self._prep_init_kernel_hyperparams(self.kernel_type)
add_gp_groupings = None
if self.options.use_additive_gp:
gp_dscr_hps = gp_dscr_hps[:-1] # The first element is the group size
add_gp_groupings = other_gp_params.add_gp_groupings
kernel, gp_cts_hps, gp_dscr_hps = \
get_euclidean_integral_gp_kernel(self.kernel_type, kernel_hyperparams, gp_cts_hps,
gp_dscr_hps, self.options.use_same_bandwidth,
add_gp_groupings, self.options.esp_kernel_type)
ret_gp = EuclideanGP(self.X, self.Y, kernel, mean_func, noise_var, *args, **kwargs)
return ret_gp, gp_cts_hps, gp_dscr_hps
# EuclideanGPFitter ends here -------------------------------------------------------
# Part II: EuclideanMFGP and EuclideanMFGPFitter
# ======================================================================================
# MFGP and Fitter in Euclidean spaces: An instantiation of MFGP when both fidel_space and
# domain are euclidean.
class EuclideanMFGP(mf_gp.MFGP):
""" An MFGP for Euclidean spaces. """
def __init__(self, ZZ, XX, YY, mf_kernel,
kernel_scale, fidel_kernel, domain_kernel,
mean_func, noise_var, *args, **kwargs):
""" Constructor. ZZ, XX, YY are the fidelity points, domain points and labels
respectively.
"""
if len(ZZ) != 0:
self.fidel_dim = len(ZZ[0])
self.domain_dim = len(XX[0])
if fidel_kernel is not None and domain_kernel is not None:
self.fidel_kernel = fidel_kernel
self.domain_kernel = domain_kernel
self.fidel_dim = fidel_kernel.dim
self.domain_dim = domain_kernel.dim
elif 'fidel_dim' in kwargs and 'domain_dim' in kwargs:
self.fidel_dim = kwargs['fidel_dim']
self.domain_dim = kwargs['domain_dim']
else:
raise Exception('Specify fidel_dim and domain_dim.')
self.fidel_coords = list(range(self.fidel_dim))
self.domain_coords = list(range(self.fidel_dim, self.fidel_dim + self.domain_dim))
if mf_kernel is None:
mf_kernel = gp_kernel.CoordinateProductKernel(self.fidel_dim + self.domain_dim, \
kernel_scale, [fidel_kernel, domain_kernel], \
[self.fidel_coords, self.domain_coords],)
# Otherwise, we assume mf_kernel is already an appropriate kernel
super(EuclideanMFGP, self).__init__(ZZ, XX, YY, mf_kernel, mean_func, noise_var,
*args, **kwargs)
def _test_fidel_domain_dims(self, test_fidel_dim, test_domain_dim):
""" Tests if test_fidel_dim and test_domain_dim are equal to self.fidel_dim and
self.domain_dim respectively and if not raises an error.
Mostly for internal use. """
if test_fidel_dim != self.fidel_dim or test_domain_dim != self.domain_dim:
raise ValueError('ZZ, XX dimensions should be (%d, %d). Given (%d, %d)'%( \
self.fidel_dim, self.domain_dim, test_fidel_dim, test_domain_dim))
def get_ZX_from_ZZ_XX(self, ZZ, XX):
""" Gets the coordinates in the joint space from the individual fidelity and
domain spaces. """
ordering = np.argsort(self.fidel_coords + self.domain_coords)
if hasattr(ZZ, '__iter__') and len(ZZ) == 0:
return []
elif hasattr(ZZ[0], '__iter__'):
# A list of points
self._test_fidel_domain_dims(len(ZZ[0]), len(XX[0]))
ZX_unordered = np.concatenate((np.array(ZZ), np.array(XX)), axis=1)
ZX = ZX_unordered[:, ordering]
return list(ZX)
else:
# A single new point
self._test_fidel_domain_dims(len(ZZ), len(XX))
zx_unordered = np.concatenate((ZZ, XX))
return zx_unordered[ordering]
def get_domain_pts(self, data_idxs=None):
""" Returns only the domain points. """
data_idxs = data_idxs if data_idxs is not None else range(self.num_tr_data)
return [self.XX[i] for i in data_idxs]
def get_fidel_pts(self, data_idxs=None):
""" Returns only the fidelity points. """
data_idxs = data_idxs if data_idxs is not None else range(self.num_tr_data)
return [self.ZZ[i] for i in data_idxs]
# A GPFitter for EuclideanMFGP objects. For now, this only considers product kernels
# between the fidel_space and domain.
class EuclideanMFGPFitter(mf_gp.MFGPFitter):
""" A fitter for GPs in multi-fidelity optimisation. """
# pylint: disable=attribute-defined-outside-init
def __init__(self, ZZ, XX, YY, options=None, reporter=None):
""" Constructor. options should either be a Namespace, a list or None. """
reporter = get_reporter(reporter)
if options is None:
options = load_options(euclidean_mf_gp_args, 'MF-GP', reporter)
self.fidel_dim = len(ZZ[0])
self.domain_dim = len(XX[0])
self.input_dim = self.fidel_dim + self.domain_dim
super(EuclideanMFGPFitter, self).__init__(ZZ, XX, YY, options, reporter)
# Child set up Methods
# ===================================================================================
def _child_set_up(self):
""" Sets parameters fro GPFitter. """
# pylint: disable=too-many-branches
# Check args - so that we don't have to keep doing this all the time
if self.options.fidel_kernel_type not in ['se', 'matern', 'poly', 'expdecay', 'esp']:
raise ValueError('Unknown fidel_kernel_type. Should be one of {se, matern, poly, ' +
'expdecay, esp}.')
if self.options.domain_kernel_type not in ['se', 'matern', 'poly', 'esp']:
raise ValueError('Unknown domain_kernel_type. Should be one of {se, matern, poly, esp}.')
if self.options.noise_var_type not in ['tune', 'label', 'value']:
raise ValueError('Unknown noise_var_type. Should be either tune, label or value.')
if self.options.mean_func_type not in ['mean', 'median', 'const', 'zero',
'upper_bound', 'tune']:
raise ValueError('Unknown mean_func_type. Should be one of '
'mean/median/const/zero/upper_bound/tune.')
# Set some parameters we will be using often.
self.ZZ_std_norm = np.linalg.norm(self.ZZ, 'fro') + 5e-5
self.XX_std_norm = np.linalg.norm(self.XX, 'fro') + 5e-5
self.ZX_std_norm = np.sqrt(self.ZZ_std_norm**2 + self.XX_std_norm**2)
# Bounds for the hyper parameters
# -------------------------------
# Kernel scale
self.scale_log_bounds = [np.log(0.1 * self.Y_var), np.log(10 * self.Y_var)]
self.cts_hp_bounds.append(self.scale_log_bounds)
self.param_order.append(["scale", "cts"])
# Fidelity kernel
if self.options.fidel_kernel_type == 'se':
self._fidel_se_kernel_setup()
elif self.options.fidel_kernel_type == 'matern':
self._fidel_matern_kernel_setup()
elif self.options.fidel_kernel_type == 'poly':
self._fidel_poly_kernel_setup()
elif self.options.fidel_kernel_type == 'expdecay':
self._fidel_expdecay_kernel_setup()
elif self.options.fidel_kernel_type == 'esp':
self._fidel_esp_kernel_setup()
# Domain kernel
if self.options.domain_kernel_type == 'se':
self._domain_se_kernel_setup()
elif self.options.domain_kernel_type == 'matern':
self._domain_matern_kernel_setup()
elif self.options.domain_kernel_type == 'poly':
self._domain_poly_kernel_setup()
elif self.options.domain_kernel_type == 'esp':
self._domain_esp_kernel_setup()
# Additive grouping for domain kernel (this has to come after fidelity kernel set up).
if self.options.domain_use_additive_gp:
self.domain_add_group_size_idx_in_dscr_hp_vals = len(self.dscr_hp_vals)
self.domain_add_max_group_size = min(self.options.domain_add_max_group_size,
self.domain_dim)
self.dscr_hp_vals.append([x+1 for x in range(self.domain_add_max_group_size)])
self.param_order.append(["additive_grp", "dscr"])
# Functions to set up each fidelity kernel -------------------------------------------
def _fidel_se_kernel_setup(self):
""" Sets up the fidelity kernel as an SE kernel. """
self._fidel_se_matern_kernel_setup_common()
def _fidel_matern_kernel_setup(self):
""" Sets up the fidelity kernel as a Matern kernel. """
self._fidel_se_matern_kernel_setup_common()
# Set optimisation values for the nu parameter
if self.options.fidel_matern_nu < 0:
self.dscr_hp_vals.append([0.5, 1.5, 2.5])
self.param_order.append(["nu", "dscr"])
def _fidel_se_matern_kernel_setup_common(self):
""" Common operators for setting up as a SE or Matern kernel. """
if (hasattr(self.options, 'fidel_bandwidth_log_bounds') and
self.options.fidel_bandwidth_log_bounds is not None):
self.fidel_bandwidth_log_bounds = self.options.fidel_bandwidth_log_bounds
else:
self.fidel_bandwidth_log_bounds = self._get_bandwidth_log_bounds( \
self.fidel_dim, self.ZX_std_norm, self.options.fidel_use_same_bandwidth)
self.cts_hp_bounds.extend(self.fidel_bandwidth_log_bounds)
if self.options.fidel_use_same_bandwidth:
self.param_order.append(["same_dim_bandwidths", "cts"])
else:
for _ in range(self.fidel_dim):
self.param_order.append(["dim_bandwidths", "cts"])
def _fidel_poly_kernel_setup(self):
""" Sets up the fidelity kernel as a Poly kernel. """
self.fidel_scaling_log_bounds = self._get_poly_kernel_bounds(self.ZZ, self.XX, \
self.options.fidel_use_same_scalings)
self.cts_hp_bounds.extend(self.fidel_scaling_log_bounds)
def _fidel_expdecay_kernel_setup(self):
""" Sets up the fidelity kernel as an exponential decay kernel. """
# offset
if (hasattr(self.options, 'fidel_expdecay_offset_log_bounds') and
self.options.fidel_expdecay_offset_log_bounds is not None):
self.fidel_expdecay_offset_log_bounds = \
self.options.fidel_expdecay_offset_log_bounds
else:
scale_range = self.Y_var / np.sqrt(self.num_tr_data)
self.fidel_expdecay_offset_log_bounds = \
[np.log(0.1 * scale_range), np.log(10 * scale_range)]
# power log bounds
if (hasattr(self.options, 'fidel_expdecay_power_log_bounds') and
self.options.fidel_expdecay_power_log_bounds is not None):
self.fidel_expdecay_power_log_bounds = \
self.options.fidel_expdecay_power_log_bounds
else:
self.fidel_expdecay_power_log_bounds = \
[[np.log(1e-1), np.log(50)]] * self.fidel_dim
self.cts_hp_bounds.append(self.fidel_expdecay_offset_log_bounds)
self.cts_hp_bounds.extend(self.fidel_expdecay_power_log_bounds)
def _fidel_esp_kernel_setup(self):
""" Sets up the fidelity kernel as ESP kernel. """
if (hasattr(self.options, 'fidel_bandwidth_log_bounds') and
self.options.fidel_bandwidth_log_bounds is not None):
self.fidel_bandwidth_log_bounds = self.options.fidel_bandwidth_log_bounds
else:
self.fidel_bandwidth_log_bounds = self._get_bandwidth_log_bounds( \
self.fidel_dim, self.ZX_std_norm, False)
self.cts_hp_bounds.extend(self.fidel_bandwidth_log_bounds)
for _ in range(self.fidel_dim):
self.param_order.append(["dim_bandwidths", "cts"])
if self.options.fidel_esp_kernel_type == 'matern' and \
self.options.fidel_esp_matern_nu < 0:
self.dscr_hp_vals.append([0.5, 1.5, 2.5])
self.param_order.append(["nu", "dscr"])
# Functions to set up each domain kernel -------------------------------------------
def _domain_se_kernel_setup(self):
""" Sets up the domainity kernel as an SE kernel. """
self._domain_se_matern_kernel_setup_common()
def _domain_matern_kernel_setup(self):
""" Sets up the domainity kernel as a Matern kernel. """
self._domain_se_matern_kernel_setup_common()
# Set optimisation values for the nu parameter
if self.options.domain_matern_nu < 0:
self.dscr_hp_vals.append([0.5, 1.5, 2.5])
self.param_order.append(["nu", "dscr"])
def _domain_se_matern_kernel_setup_common(self):
""" Sets up the domain kernel as a SE kernel. """
if (hasattr(self.options, 'domain_bandwidth_log_bounds') and
self.options.domain_bandwidth_log_bounds is not None):
self.domain_bandwidth_log_bounds = self.options.domain_bandwidth_log_bounds
else:
self.domain_bandwidth_log_bounds = self._get_bandwidth_log_bounds( \
self.domain_dim, self.ZX_std_norm, False)
self.cts_hp_bounds.extend(self.domain_bandwidth_log_bounds)
if self.options.domain_use_same_bandwidth:
self.param_order.append(["same_dim_bandwidths", "cts"])
else:
for _ in range(self.domain_dim):
self.param_order.append(["dim_bandwidths", "cts"])
def _domain_poly_kernel_setup(self):
""" Sets up the domain kernel as a Poly kernel. """
self.domain_scaling_log_bounds = self._get_poly_kernel_bounds(self.ZZ, self.XX, \
self.options.domain_use_same_scalings)
self.cts_hp_bounds.extend(self.domain_scaling_log_bounds)
def _domain_esp_kernel_setup(self):
""" Sets up the domain kernel as ESP kernel. """
if (hasattr(self.options, 'domain_bandwidth_log_bounds') and
self.options.domain_bandwidth_log_bounds is not None):
self.domain_bandwidth_log_bounds = self.options.domain_bandwidth_log_bounds
else:
self.domain_bandwidth_log_bounds = self._get_bandwidth_log_bounds( \
self.domain_dim, self.ZX_std_norm, self.options.domain_use_same_bandwidth)
self.cts_hp_bounds.extend(self.domain_bandwidth_log_bounds)
for _ in range(self.domain_dim):
self.param_order.append(["dim_bandwidths", "cts"])
if self.options.domain_esp_kernel_type == 'matern' and \
self.options.domain_esp_matern_nu < 0:
self.dscr_hp_vals.append([0.5, 1.5, 2.5])
self.param_order.append(["nu", "dscr"])
@classmethod
def _get_bandwidth_log_bounds(cls, dim, single_bw_bounds, use_same_bandwidth):
""" Gets bandwidths for the SE kernel. """
if isinstance(single_bw_bounds, float) or isinstance(single_bw_bounds, int):
single_bw_bounds = [0.01*single_bw_bounds, 10*single_bw_bounds]
single_bandwidth_log_bounds = [np.log(x) for x in single_bw_bounds]
bandwidth_log_bounds = ([single_bandwidth_log_bounds] if use_same_bandwidth
else [single_bandwidth_log_bounds] * dim)
return bandwidth_log_bounds
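# Worked example (illustration only) of the arithmetic above: a scalar bound b
# is widened to [0.01*b, 10*b] and then moved to log space; with
# use_same_bandwidth=False the same interval is repeated once per dimension.
# >>> import numpy as np
# >>> b = 2.0
# >>> single = [np.log(0.01 * b), np.log(10 * b)]   # approx [-3.91, 3.00]
# >>> [single]        # use_same_bandwidth=True  -> one shared bound
# >>> [single] * 3    # use_same_bandwidth=False -> one bound per dimension (dim=3)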
def _get_poly_kernel_bounds(self, ZZ, XX, use_same_scalings):
""" Gets bandwidths for the Polynomial kerne. """
raise NotImplementedError('Yet to implement polynomial kernel.')
# _child_set_up methods end here -----------------------------------------------------
# fit_gp Methods
# ====================================================================================
def _optimise_cts_hps_for_given_dscr_hps(self, given_dscr_hps):
""" Optimises the continuous hyper-parameters for the given discrete hyper-params.
Overrides the method from GPFitter.
"""
if not self.options.domain_use_additive_gp:
return super(EuclideanMFGPFitter, self)._optimise_cts_hps_for_given_dscr_hps( \
given_dscr_hps)
else:
return optimise_cts_hps_for_given_dscr_hps_in_add_model(given_dscr_hps, \
self.options.domain_num_groups_per_group_size, self.domain_dim, \
self.hp_tune_max_evals, self.cts_hp_optimise, self._tuning_objective)
def _sample_cts_dscr_hps_for_rand_exp_sampling(self):
""" Samples continous and discrete hyper-parameters for rand_exp_sampling. """
if not self.options.domain_use_additive_gp:
return super(EuclideanMFGPFitter, self)._sample_cts_dscr_hps_for_rand_exp_sampling()
else:
return sample_cts_dscr_hps_for_rand_exp_sampling_in_add_model( \
self.hp_tune_max_evals, self.cts_hp_bounds, self.domain_dim, self.dscr_hp_vals, \
self.domain_add_group_size_idx_in_dscr_hp_vals, self._tuning_objective)
# build_gp Methods
# ====================================================================================
@classmethod
def _prep_init_fidel_domain_kernel_hyperparams(cls, kernel_type, dim, matern_nu,
poly_order, esp_order, esp_matern_nu):
""" Wrapper to pack the kernel hyper-parameters into a dictionary. """
hyperparams = {}
hyperparams['dim'] = dim
if kernel_type == 'matern' and matern_nu > 0:
hyperparams['nu'] = matern_nu
elif kernel_type == 'poly':
hyperparams['order'] = poly_order
elif kernel_type == 'esp':
if esp_order > 0:
hyperparams['esp_order'] = esp_order
if esp_matern_nu > 0:
hyperparams['esp_matern_nu'] = esp_matern_nu
return hyperparams
def _prep_init_fidel_kernel_hyperparams(self):
""" Wrapper to pack the fidelity kernel hyper-parameters into a dictionary. """
options = self.options
return self._prep_init_fidel_domain_kernel_hyperparams(options.fidel_kernel_type, \
self.fidel_dim, options.fidel_matern_nu, options.fidel_poly_order, \
options.fidel_esp_order, options.fidel_esp_matern_nu)
def _prep_init_domain_kernel_hyperparams(self):
""" Wrapper to pack the domain kernel hyper-parameters into a dictionary. """
options = self.options
return self._prep_init_fidel_domain_kernel_hyperparams(options.domain_kernel_type, \
self.domain_dim, options.domain_matern_nu, options.domain_poly_order, \
options.domain_esp_order, options.domain_esp_matern_nu)
def _child_build_gp(self, mean_func, noise_var, gp_cts_hps, gp_dscr_hps,
other_gp_params=None, *args, **kwargs):
""" Builds a Multi-fidelity GP from the hyper-parameters. """
# IMPORTANT: The order of the code matters in this function. Do not change.
# Kernel scale ---------------------------------------
ke_scale = np.exp(gp_cts_hps[0])
gp_cts_hps = gp_cts_hps[1:]
# Fidelity kernel ------------------------------------
fidel_kernel_hyperparams = self._prep_init_fidel_kernel_hyperparams()
fidel_kernel, gp_cts_hps, gp_dscr_hps = \
get_euclidean_integral_gp_kernel_with_scale(self.options.fidel_kernel_type, 1.0, \
fidel_kernel_hyperparams, gp_cts_hps, gp_dscr_hps, \
self.options.fidel_use_same_bandwidth, None, self.options.fidel_esp_kernel_type)
# Domain kernel --------------------------------------
# The code for the domain kernel must come after the fidelity kernel, since the
# hyper-parameters are consumed in that order.
domain_kernel_hyperparams = self._prep_init_domain_kernel_hyperparams()
if self.options.domain_use_additive_gp:
gp_dscr_hps = gp_dscr_hps[:-1] # The last element is the group size
add_gp_groupings = other_gp_params.add_gp_groupings
else:
add_gp_groupings = None
domain_kernel, gp_cts_hps, gp_dscr_hps = \
get_euclidean_integral_gp_kernel_with_scale(self.options.domain_kernel_type, 1.0, \
domain_kernel_hyperparams, gp_cts_hps, gp_dscr_hps, \
self.options.domain_use_same_bandwidth, add_gp_groupings, \
self.options.domain_esp_kernel_type)
# Construct and return MF GP
ret_gp = EuclideanMFGP(self.ZZ, self.XX, self.YY, None, ke_scale, fidel_kernel,
domain_kernel, mean_func, noise_var, reporter=self.reporter)
return ret_gp, gp_cts_hps, gp_dscr_hps
# _child_build_gp methods end here ---------------------------------------------------
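# Conceptual sketch (standalone, not the library's kernel objects) of the two
# conventions _child_build_gp relies on: (1) gp_cts_hps is consumed sequentially
# -- kernel scale first, then the fidelity-kernel bandwidths, then the domain
# ones -- which is why the order of the code matters; (2) the resulting
# multi-fidelity kernel is a product of a fidelity kernel and a domain kernel,
# scaled by ke_scale. Squared-exponential stand-ins are used below.
# >>> import numpy as np
# >>> se = lambda a, b, bw: np.exp(-0.5 * ((a - b) / bw) ** 2)
# >>> hps = [np.log(1.5), np.log(0.3), np.log(0.7)]  # [scale, fidel bw, domain bw]
# >>> ke_scale, fidel_bw, domain_bw = np.exp(hps[0]), np.exp(hps[1]), np.exp(hps[2])
# >>> k_joint = ke_scale * se(0.2, 0.4, fidel_bw) * se(1.0, 1.3, domain_bw)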
# Part III: Ancillary Functions
# ===============================================
# A function to optimise continous hyperparams in an additive model-----------------------
# Used by EuclideanGPFitter and EuclideanMFGPFitter.
def optimise_cts_hps_for_given_dscr_hps_in_add_model(given_dscr_hps, \
num_groups_per_group_size, dim, hp_tune_max_evals, cts_hp_optimise, \
tuning_objective):
""" Optimises the continuous hyper-parameters for an additive model. """
group_size = given_dscr_hps[-1] # The last element is the group size
if num_groups_per_group_size < 0:
if group_size == 1:
num_groups_per_group_size = 1
else:
num_groups_per_group_size = max(5, min(2 * dim, 25))
grp_best_hps = None
grp_best_val = -np.inf
grp_best_other_params = None
# Now try out different groups picking a random grouping each time.
for _ in range(num_groups_per_group_size):
rand_perm = list(np.random.permutation(dim))
groupings = [rand_perm[i:i+group_size]
for i in range(0, dim, group_size)]
other_gp_params = Namespace(add_gp_groupings=groupings)
# _tuning_objective is usually defined in gp_core.py
cts_tuning_objective = lambda arg: tuning_objective(arg, given_dscr_hps[:],
other_gp_params=other_gp_params)
max_evals = int(max(500, hp_tune_max_evals/num_groups_per_group_size))
opt_cts_val, opt_cts_hps, _ = cts_hp_optimise(cts_tuning_objective, max_evals)
if opt_cts_val > grp_best_val:
grp_best_val = opt_cts_val
grp_best_hps = opt_cts_hps
grp_best_other_params = other_gp_params
return grp_best_val, grp_best_hps, grp_best_other_params
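# Small standalone illustration of the random additive grouping constructed in
# the loop above: the dimensions are permuted and then chopped into consecutive
# chunks of size group_size (the last group may be smaller).
# >>> import numpy as np
# >>> dim, group_size = 7, 3
# >>> rand_perm = list(np.random.permutation(dim))
# >>> [rand_perm[i:i + group_size] for i in range(0, dim, group_size)]
# [[4, 0, 6], [2, 5, 1], [3]]   # one possible outcome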
def sample_cts_dscr_hps_for_rand_exp_sampling_in_add_model(num_evals, cts_hp_bounds, \
dim, dscr_hp_vals, add_group_size_idx_in_dscr_hp_vals, tuning_objective):
""" Samples continuous and discrete hyper-parameters for an additive model. """
# IMPORTANT: We are assuming that add_group_size_idx_in_dscr_hp_vals indexes the
# group-size entry in dscr_hp_vals.
agsidhp = add_group_size_idx_in_dscr_hp_vals
sample_cts_hps = []
sample_dscr_hps = []
sample_other_gp_params = []
sample_obj_vals = []
for _ in range(num_evals):
group_size = np.random.choice(dscr_hp_vals[agsidhp])
rand_perm = list(np.random.permutation(dim))
groupings = [rand_perm[i:i+group_size] for i in range(0, dim, group_size)]
curr_other_gp_params = Namespace(add_gp_groupings=groupings)
curr_dscr_hps = random_sample_from_discrete_domain(dscr_hp_vals)
curr_dscr_hps[agsidhp] = group_size
curr_cts_hps = map_to_bounds(np.random.random((len(cts_hp_bounds),)), cts_hp_bounds)
curr_obj_val = tuning_objective(curr_cts_hps, curr_dscr_hps, curr_other_gp_params)
# Now add to the lists
sample_cts_hps.append(curr_cts_hps)
sample_dscr_hps.append(curr_dscr_hps)
sample_other_gp_params.append(curr_other_gp_params)
sample_obj_vals.append(curr_obj_val)
sample_probs =
|
np.exp(sample_obj_vals)
|
numpy.exp
|
import json
import os
import pathlib
import pytest
import numpy as np
import gouda
def test_ensure_dir():
test_dir = gouda.GoudaPath("ScratchFiles/test_dir", use_absolute=False, ensure_dir=True)
test_dir2 = gouda.ensure_dir("ScratchFiles/test_dir")
assert str(test_dir) == str(test_dir2)
assert os.path.isdir("ScratchFiles/test_dir")
assert test_dir.path == "ScratchFiles/test_dir"
test_dir_path = gouda.ensure_dir("ScratchFiles/test_dir", "check1")
# Have to use test_dir.path. os.path.join uses the __fspath__, which is always absolute
assert test_dir_path == os.path.join(test_dir.path, "check1")
assert os.path.isdir(test_dir_path)
pathlib.Path(os.path.join(test_dir_path, 'check2')).touch()
with pytest.raises(ValueError):
assert gouda.ensure_dir(test_dir_path, "check2")
# Cleanup
os.remove(os.path.join(test_dir_path, 'check2'))
os.rmdir(test_dir_path)
os.rmdir('ScratchFiles/test_dir')
def test_next_filename():
assert gouda.next_filename("ScratchFiles/test.txt") == "ScratchFiles/test.txt"
open('ScratchFiles/test.txt', 'w').close()
assert gouda.next_filename("ScratchFiles/test.txt") == "ScratchFiles/test_2.txt"
open("ScratchFiles/test_2.txt", 'w').close()
assert gouda.next_filename("ScratchFiles/test.txt") == "ScratchFiles/test_3.txt"
assert gouda.next_filename("ScratchFiles/test_2.txt") == "ScratchFiles/test_2_2.txt"
# Cleanup
os.remove("ScratchFiles/test.txt")
os.remove("ScratchFiles/test_2.txt")
def test_basicname():
assert gouda.basicname('anypath/morepath/test_item-here.jpg') == 'test_item-here'
def test_get_sorted_filenames():
src_dir = gouda.GoudaPath('../ScratchFiles/sortedfiles')
if not src_dir('test.txt').exists():
gouda.ensure_dir(src_dir)
for i in range(5):
with open(gouda.next_filename(src_dir('test.txt')), 'w'):
pass
for i in range(3):
with open(gouda.next_filename(src_dir('btest.txt')), 'w'):
pass
filenames = gouda.get_sorted_filenames(src_dir / '*.txt')
assert [gouda.basicname(item) for item in filenames] == ['btest', 'btest_2', 'btest_3', 'test', 'test_2', 'test_3', 'test_4', 'test_5']
os.remove(src_dir('test.txt'))
for i in range(2, 6):
os.remove(src_dir('test_{}.txt'.format(i)))
os.remove(src_dir('btest.txt'))
for i in range(2, 4):
os.remove(src_dir('btest_{}.txt'.format(i)))
def test_save_load_json_dict():
temp_data = {'a': 1, 'b': 2, 'c': 3}
gouda.save_json(temp_data, 'ScratchFiles/test.json')
assert os.path.isfile('ScratchFiles/test.json')
check_data = gouda.load_json('ScratchFiles/test.json')
for key in temp_data.keys():
assert temp_data[key] == check_data[key]
os.remove('ScratchFiles/test.json')
def test_save_load_json_list():
temp_data = ['a', 'b', 'c']
gouda.save_json(temp_data, 'ScratchFiles/test.json')
assert os.path.isfile('ScratchFiles/test.json')
check_data = gouda.load_json('ScratchFiles/test.json')
for i in range(len(temp_data)):
assert temp_data[i] == check_data[i]
os.remove('ScratchFiles/test.json')
def test_save_load_json_nested():
temp_data = [{'a': 1}, {'a': 2}, {'a': 3}]
gouda.save_json(temp_data, 'ScratchFiles/test.json')
assert os.path.isfile('ScratchFiles/test.json')
check_data = gouda.load_json('ScratchFiles/test.json')
for i in range(len(temp_data)):
assert temp_data[i]['a'] == check_data[i]['a']
os.remove('ScratchFiles/test.json')
def test_save_json_load_json_numpy():
temp_data = np.arange(5, dtype=np.uint8)
gouda.save_json(temp_data, 'ScratchFiles/testx.json', embed_arrays=True, compressed=False)
gouda.save_json(temp_data, 'ScratchFiles/test2.json', embed_arrays=False, compressed=False)
gouda.save_json(temp_data, 'ScratchFiles/test3.json', embed_arrays=False, compressed=True)
assert os.path.isfile('ScratchFiles/testx.json')
assert not os.path.isfile('ScratchFiles/testx_array.npz')
assert not os.path.isfile('ScratchFiles/testx_arrayzip.npz')
assert os.path.isfile('ScratchFiles/test2.json')
assert os.path.isfile('ScratchFiles/test2_array.npz')
assert os.path.isfile('ScratchFiles/test3.json')
assert os.path.isfile('ScratchFiles/test3_arrayzip.npz')
with open('ScratchFiles/testx.json', 'r') as f:
data = json.load(f)
assert data[-1] == 'numpy_embed'
with open('ScratchFiles/test2.json', 'r') as f:
data = json.load(f)
assert data[-1] == 'numpy'
with open('ScratchFiles/test3.json', 'r') as f:
data = json.load(f)
assert data[-1] == 'numpy_zip'
check_data = gouda.load_json('ScratchFiles/testx.json')
check_data2 = gouda.load_json('ScratchFiles/test2.json')
check_data3 = gouda.load_json('ScratchFiles/test3.json')
np.testing.assert_array_equal(temp_data, check_data)
np.testing.assert_array_equal(temp_data, check_data2)
np.testing.assert_array_equal(temp_data, check_data3)
os.remove('ScratchFiles/testx.json')
os.remove('ScratchFiles/test2.json')
os.remove('ScratchFiles/test3.json')
os.remove('ScratchFiles/test2_array.npz')
os.remove('ScratchFiles/test3_arrayzip.npz')
def test_save_load_json_set():
test_data = {'a': set([1, 2, 3])}
gouda.save_json(test_data, 'ScratchFiles/testset.json')
check = gouda.load_json('ScratchFiles/testset.json')
assert check['a'] == set([1, 2, 3])
def test_save_load_json_list_numpy():
temp_data = np.arange(5, dtype=np.uint8)
gouda.save_json([temp_data], 'ScratchFiles/testn1.json', embed_arrays=False, compressed=False)
gouda.save_json([temp_data], 'ScratchFiles/testn2.json', embed_arrays=True, compressed=False)
gouda.save_json([temp_data], 'ScratchFiles/testn3.json', embed_arrays=False, compressed=True)
check1 = gouda.load_json('ScratchFiles/testn1.json')
check2 = gouda.load_json('ScratchFiles/testn2.json')
check3 = gouda.load_json('ScratchFiles/testn3.json')
assert isinstance(check1, list)
assert isinstance(check2, list)
assert isinstance(check3, list)
np.testing.assert_array_equal(temp_data, check1[0])
np.testing.assert_array_equal(temp_data, check2[0])
np.testing.assert_array_equal(temp_data, check3[0])
os.remove('ScratchFiles/testn1.json')
os.remove('ScratchFiles/testn2.json')
os.remove('ScratchFiles/testn3.json')
def test_save_load_json_numpy_list():
temp_data = [np.arange(3, dtype=np.uint8), np.arange(4, 6, dtype=np.float32)]
gouda.save_json(temp_data, 'ScratchFiles/testnl.json', embed_arrays=True, compressed=False)
check_data = gouda.load_json('ScratchFiles/testnl.json')
assert len(check_data) == 2
np.testing.assert_array_equal(temp_data[0], check_data[0])
np.testing.assert_array_equal(temp_data[1], check_data[1])
gouda.save_json(temp_data, 'ScratchFiles/testnl2.json', embed_arrays=False, compressed=False)
check_data2 = gouda.load_json('ScratchFiles/testnl2.json')
assert len(check_data2) == 2
np.testing.assert_array_equal(temp_data[0], check_data2[0])
np.testing.assert_array_equal(temp_data[1], check_data2[1])
os.remove('ScratchFiles/testnl.json')
os.remove('ScratchFiles/testnl2.json')
os.remove('ScratchFiles/testnl2_array.npz')
def test_save_load_json_numpy_dict():
temp_data = {'a': np.arange(3, dtype=np.uint8)}
gouda.save_json(temp_data, 'ScratchFiles/testnd.json', embed_arrays=True, compressed=False)
check_data = gouda.load_json('ScratchFiles/testnd.json')
assert len(temp_data) == len(check_data)
np.testing.assert_array_equal(temp_data['a'], check_data['a'])
gouda.save_json(temp_data, 'ScratchFiles/testnd2.json', embed_arrays=False, compressed=False)
check_data2 = gouda.load_json('ScratchFiles/testnd2.json')
assert len(temp_data) == len(check_data2)
np.testing.assert_array_equal(temp_data['a'], check_data2['a'])
os.remove('ScratchFiles/testnd.json')
os.remove('ScratchFiles/testnd2.json')
os.remove('ScratchFiles/testnd2_array.npz')
def test_save_load_json_numpy_mixed():
temp_data = [np.arange(3), 3]
gouda.save_json(temp_data, 'ScratchFiles/testm.json', embed_arrays=True, compressed=False)
check_data = gouda.load_json('ScratchFiles/testm.json')
np.testing.assert_array_equal(temp_data[0], check_data[0])
assert check_data[1] == 3
gouda.save_json(temp_data, 'ScratchFiles/testm2.json', embed_arrays=False, compressed=False)
check_data2 = gouda.load_json('ScratchFiles/testm2.json')
np.testing.assert_array_equal(temp_data[0], check_data2[0])
assert check_data2[1] == 3
os.remove('ScratchFiles/testm.json')
os.remove('ScratchFiles/testm2.json')
os.remove('ScratchFiles/testm2_array.npz')
data = [np.int64(32), 'a', np.float32(18.32)]
gouda.save_json(data, 'ScratchFiles/testm3.json')
check = gouda.load_json('ScratchFiles/testm3.json')
assert check[0] == 32
assert np.dtype(check[0]) == 'int64'
assert data[1] == 'a'
assert isinstance(data[1], str)
np.testing.assert_almost_equal(check[2], 18.32, decimal=5)
assert
|
np.dtype(check[2])
|
numpy.dtype
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def ccrelu(x, name):
return (
flow.user_op_builder(name)
.Op("ccrelu")
.Input("in", [x])
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
def fixed_tensor_def_test(test_case, func_config):
func_config.default_data_type(flow.float)
@flow.global_function(func_config)
def ReluJob(a: oft.Numpy.Placeholder((5, 2))):
return ccrelu(a, "my_cc_relu_op")
x = np.random.rand(5, 2).astype(np.float32)
y = ReluJob(x).get().numpy()
test_case.assertTrue(np.array_equal(y, np.maximum(x, 0)))
def mirrored_tensor_def_test(test_case, func_config):
func_config.default_data_type(flow.float)
@flow.global_function(func_config)
def ReluJob(a: oft.ListNumpy.Placeholder((5, 2))):
return ccrelu(a, "my_cc_relu_op")
x = np.random.rand(3, 1).astype(np.float32)
y = ReluJob([x]).get().numpy_list()[0]
test_case.assertTrue(np.array_equal(y,
|
np.maximum(x, 0)
|
numpy.maximum
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
class QrOpTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testWrongDimensions(self):
# The input to qr should be a tensor of at least rank 2.
scalar = constant_op.constant(1.)
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"rank.* 2.*0"):
linalg_ops.qr(scalar)
vector = constant_op.constant([1., 2.])
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"rank.* 2.*1"):
linalg_ops.qr(vector)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testConcurrentExecutesWithoutError(self):
seed = [42, 24]
all_ops = []
for full_matrices_ in True, False:
for rows_ in 4, 5:
for cols_ in 4, 5:
matrix_shape = [rows_, cols_]
matrix1 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed)
matrix2 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed)
self.assertAllEqual(matrix1, matrix2)
q1, r1 = linalg_ops.qr(matrix1, full_matrices=full_matrices_)
q2, r2 = linalg_ops.qr(matrix2, full_matrices=full_matrices_)
all_ops += [q1, q2, r1, r2]
val = self.evaluate(all_ops)
for i in range(0, len(val), 2):
self.assertAllClose(val[i], val[i + 1])
def _GetQrOpTest(dtype_, shape_, full_matrices_, use_static_shape_):
is_complex = dtype_ in (np.complex64, np.complex128)
is_single = dtype_ in (np.float32, np.complex64)
def CompareOrthogonal(self, x, y, rank):
if is_single:
atol = 5e-4
else:
atol = 5e-14
# We only compare the first 'rank' orthogonal vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Q is only unique up to sign (complex phase factor for complex matrices),
# so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios,
|
np.abs(sum_of_ratios)
|
numpy.abs
|
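# Standalone numpy illustration (not part of the TensorFlow test above) of the
# sign/phase normalisation described in CompareOrthogonal: two Q factors that
# differ only by a per-column sign flip agree once one of them is divided by the
# phase of the column-wise mean ratio.
# >>> import numpy as np
# >>> x = np.linalg.qr(np.random.randn(5, 3))[0]
# >>> y = x * np.array([1.0, -1.0, 1.0])                     # sign-flipped copy
# >>> sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
# >>> phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
# >>> np.allclose(x * phases, y)
# True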
import sys
import os
import math
import copy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import rankdata
import multiprocessing as mp
import logging
import scanpy as sc
import anndata as ad
from scipy.io import mmread,mmwrite
from scipy.sparse import csr_matrix,issparse
import matplotlib as mpl
from functools import reduce
from sklearn.decomposition import PCA
import umap
from sctriangulate.colors import *
# for publication ready figure
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
mpl.rcParams['font.family'] = 'Arial'
def sctriangulate_preprocessing_setting(backend='Agg',png=False):
# change the backend
mpl.use(backend)
if png:
# for publication and super large dataset
mpl.rcParams['savefig.dpi'] = 600
mpl.rcParams['figure.dpi'] = 600
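# Usage sketch (illustrative): on a headless cluster, keep the 'Agg' backend and
# set png=True so that figures saved as PNG come out at publication resolution.
# >>> sctriangulate_preprocessing_setting(backend='Agg', png=True)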
def small_txt_to_adata(int_file,gene_is_index=True):
'''
given a small dense expression (<2GB) txt file, load it into memory as an AnnData, and also make sure the X is a sparse matrix.
:param int_file: string, path to the input txt file, delimited by tab
:param gene_is_index: boolean, whether the gene/features are the index.
:return: AnnData
Examples::
from sctriangulate.preprocessing import small_txt_to_adata
adata = small_txt_to_adata('./input.txt',gene_is_index=True)
'''
df = pd.read_csv(int_file,sep='\t',index_col=0)
if gene_is_index:
adata = ad.AnnData(X=csr_matrix(df.values.T),var=pd.DataFrame(index=df.index.values),obs=pd.DataFrame(index=df.columns.values))
else:
adata = ad.AnnData(X=csr_matrix(df.values),var=pd.DataFrame(index=df.columns.values),obs=pd.DataFrame(index=df.index.values))
adata.var_names_make_unique()
adata.X = csr_matrix(adata.X)
return adata
def large_txt_to_mtx(int_file,out_folder,gene_is_index=True,type_convert_to='int16'): # whether the txt is gene * cell
'''
Given a large dense txt expression file, convert it to an mtx file (for example on a cluster) to facilitate future I/O
:param int_file: string, path to the input txt file, delimited by tab
:param out_folder: string, path to the output folder where the mtx file will be stored
:param gene_is_index: boolean, whether the gene/features are the index in the int_file.
:param type_convert_to: string, since it is a large dataframe it is read in chunks; to accelerate this and reduce the memory footprint,
we convert it to either 'int16' if the original data are counts, or 'float32' if the original data are normalized.
Examples::
from sctriangulate.preprocessing import large_txt_to_mtx
large_txt_to_mtx(int_file='input.txt',out_folder='./data',gene_is_index=False,type_convert_to='float32')
'''
reader = pd.read_csv(int_file,sep='\t',index_col=0,chunksize=1000)
store = []
for chunk in reader:
tmp = chunk.astype(type_convert_to)
store.append(tmp)
data = pd.concat(store)
print(data.shape)
# save as mtx now
if not os.path.exists(out_folder):
os.mkdir(out_folder)
if gene_is_index:
data.index.to_series().to_csv(os.path.join(out_folder,'genes.tsv'),sep='\t',header=None,index=None)
data.columns.to_series().to_csv(os.path.join(out_folder,'barcodes.tsv'),sep='\t',header=None,index=None)
mmwrite(os.path.join(out_folder,'matrix.mtx'),csr_matrix(data.values))
else:
data.columns.to_series().to_csv(os.path.join(out_folder,'genes.tsv'),sep='\t',header=None,index=None)
data.index.to_series().to_csv(os.path.join(out_folder,'barcodes.tsv'),sep='\t',header=None,index=None)
mmwrite(os.path.join(out_folder,'matrix.mtx'),csr_matrix(data.values.T))
def mtx_to_adata(int_folder,gene_is_index=True,feature='genes',feature_col='index',barcode_col='index'): # whether the mtx file is gene * cell
'''
convert mtx file to adata in RAM, make sure the X is sparse.
:param int_folder: string, folder where the mtx files are stored.
:param gene_is_index: boolean, whether the gene is index.
:param feature: string, the name of the feature tsv file, if rna, it will be genes.tsv.
:param feature_col: 'index' to use the index, or an int (which column, zero-based) to use in your feature tsv as the feature
:param barcode_col: 'index' to use the index, or an int (which column, zero-based) to use in your barcodes.tsv as the barcode
:return: AnnData
Examples::
from sctriangulate.preprocessing import mtx_to_adata
mtx_to_adata(int_folder='./data',gene_is_index=False,feature='genes')
'''
if feature_col == 'index':
gene = pd.read_csv(os.path.join(int_folder,'{}.tsv'.format(feature)),sep='\t',index_col=0,header=None).index
else:
gene = pd.read_csv(os.path.join(int_folder,'{}.tsv'.format(feature)),sep='\t',index_col=0,header=None)[feature_col]
if barcode_col == 'index':
cell = pd.read_csv(os.path.join(int_folder,'barcodes.tsv'),sep='\t',index_col=0,header=None).index
else:
cell = pd.read_csv(os.path.join(int_folder,'barcodes.tsv'),sep='\t',index_col=0,header=None)[barcode_col]
value = csr_matrix(mmread(os.path.join(int_folder,'matrix.mtx')))
if gene_is_index:
value = value.T
adata = ad.AnnData(X=value,obs=pd.DataFrame(index=cell),var=pd.DataFrame(index=gene))
else:
adata = ad.AnnData(X=value,obs=pd.DataFrame(index=cell),var=pd.DataFrame(index=gene))
adata.var.index.name = None
adata.var_names_make_unique()
return adata
def mtx_to_large_txt(int_folder,out_file,gene_is_index=False):
'''
convert mtx back to large dense txt expression dataframe.
:param int_folder: string, path to the input mtx folder.
:param out_file: string, path to the output txt file.
:param gene_is_index: boolean, whether the gene is the index.
Examples::
from sctriangulate.preprocessing import mtx_to_large_txt
mtx_to_large_txt(int_folder='./data',out_file='input.txt',gene_is_index=False)
'''
gene = pd.read_csv(os.path.join(int_folder,'genes.tsv'),sep='\t',index_col=0,header=None).index
cell = pd.read_csv(os.path.join(int_folder,'barcodes.tsv'),sep='\t',index_col=0,header=None).index
value = mmread(os.path.join(int_folder,'matrix.mtx')).toarray()
if gene_is_index:
data = pd.DataFrame(data=value,index=gene,columns=cell)
else:
data = pd.DataFrame(data=value.T,index=cell,columns=gene)
data.to_csv(out_file,sep='\t',chunksize=1000)
def adata_to_mtx(adata,gene_is_index=True,var_column=None,obs_column=None,outdir='data'):
# create folder if not exist
if not os.path.exists(outdir):
os.mkdir(outdir)
# write genes.tsv
if var_column is None:
var = adata.var_names.to_series()
else:
var = adata.var[var_column]
var.to_csv(os.path.join(outdir,'genes.tsv'),sep='\t',header=None,index=None)
# write barcodes.tsv
if obs_column is None:
obs = adata.obs_names.to_series()
else:
obs = adata.obs[obs_column]
obs.to_csv(os.path.join(outdir,'barcodes.tsv'),sep='\t',header=None,index=None)
# write matrix.mtx
if not gene_is_index:
mmwrite(os.path.join(outdir,'matrix.mtx'),make_sure_mat_sparse(adata.X))
else:
mmwrite(os.path.join(outdir,'matrix.mtx'),make_sure_mat_sparse(adata.X).transpose())
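# Usage sketch for adata_to_mtx above (paths are placeholders): write an AnnData
# to a 10x-style triplet (genes.tsv / barcodes.tsv / matrix.mtx) and read it
# back with mtx_to_adata defined earlier in this module.
# >>> adata_to_mtx(adata, gene_is_index=True, outdir='data')
# >>> adata_back = mtx_to_adata('data', gene_is_index=True, feature='genes')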
def add_azimuth(adata,result,name='predicted.celltype.l2'):
'''
a convenient function if you have azimuth predicted labels in hand, and want to add the label to the adata.
:param adata: AnnData
:param result: string, the path to the 'azimuth_predict.tsv' file
:param name: string, the column name that the user wants to transfer to the adata.
Examples::
from sctriangulate.preprocessing import add_azimuth
add_azimuth(adata,result='./azimuth_predict.tsv',name='predicted.celltype.l2')
'''
azimuth = pd.read_csv(result,sep='\t',index_col=0)
azimuth_map = azimuth[name].to_dict()
azimuth_prediction = azimuth['{}.score'.format(name)].to_dict()
azimuth_mapping = azimuth['mapping.score'].to_dict()
adata.obs['azimuth'] = adata.obs_names.map(azimuth_map).values
adata.obs['prediction_score'] = adata.obs_names.map(azimuth_prediction).values
adata.obs['mapping_score'] = adata.obs_names.map(azimuth_mapping).values
def add_annotations(adata,inputs,cols_input,index_col=0,cols_output=None,kind='disk'):
'''
Adding annotations from external sources to the adata
:param adata: Anndata
:param inputs: string, path to the txt file where the barcode to cluster label information is stored.
:param cols_input: list, what columns the users want to transfer to the adata.
:param index_col: int, for the input, which column will serve as the index column
:param cols_output: list, corresponding to the cols_input, how these columns will be named in the adata.obs columns
:param kind: a string, either 'disk', or 'memory', disk means the input is the path to the text file, 'memory' means the input is the
variable name in the RAM that represents the dataframe
Examples::
from sctriangulate.preprocessing import add_annotations
add_annotations(adata,inputs='./annotation.txt',cols_input=['col1','col2'],index_col=0,cols_output=['annotation1','annontation2'],kind='disk')
add_annotations(adata,inputs=df,cols_input=['col1','col2'],index_col=0,cols_output=['annotation1','annontation2'],kind='memory')
'''
# means a single file such that one column is barcodes, annotations are within other columns
if kind == 'disk':
annotations = pd.read_csv(inputs,sep='\t',index_col=index_col).loc[:,cols_input]
elif kind == 'memory': # index_col will be ignored
annotations = inputs.loc[:,cols_input]
mappings = []
for col in cols_input:
mapping = annotations[col].to_dict()
mappings.append(mapping)
if cols_output is None:
for i,col in enumerate(cols_input):
adata.obs[col] = adata.obs_names.map(mappings[i]).fillna('Unknown').values
adata.obs[col] = adata.obs[col].astype('str').astype('category')
else:
for i in range(len(cols_input)):
adata.obs[cols_output[i]] = adata.obs_names.map(mappings[i]).fillna('Unknown').values
adata.obs[cols_output[i]] = adata.obs[cols_output[i]].astype('str').astype('category')
def add_umap(adata,inputs,mode,cols=None,index_col=0):
'''
if umap embedding is pre-computed, add it back to adata object.
:param adata: Anndata
:param inputs: string, path to the txt file where the umap embedding is stored.
:param mode: string, valid value 'pandas_disk', 'pandas_memory', 'numpy'
* **pandas_disk**: the `inputs` argument should be the path to the txt file
* **pandas_memory**: the `inputs` argument should be the name of the pandas dataframe in the program, inputs=df
* **numpy**, the `inputs` argument should be a 2D ndarray contains pre-sorted (same order as barcodes in adata) umap coordinates
:param cols: list, what columns contain umap embeddings
:param index_col: int, which column will serve as the index column.
Examples::
from sctriangulate.preprocessing import add_umap
add_umap(adata,inputs='umap.txt',mode='pandas_disk',cols=['umap1','umap2'],index_col=0)
'''
# make sure cols are [umap_x, umap_y]
if mode == 'pandas_disk':
df = pd.read_csv(inputs,sep='\t',index_col=index_col)
umap_x = df[cols[0]].to_dict()
umap_y = df[cols[1]].to_dict()
adata.obs['umap_x'] = adata.obs_names.map(umap_x).values
adata.obs['umap_y'] = adata.obs_names.map(umap_y).values
adata.obsm['X_umap'] = adata.obs.loc[:,['umap_x','umap_y']].values
adata.obs.drop(columns=['umap_x','umap_y'],inplace=True)
elif mode == 'pandas_memory':
df = inputs
umap_x = df[cols[0]].to_dict()
umap_y = df[cols[1]].to_dict()
adata.obs['umap_x'] = adata.obs_names.map(umap_x).values
adata.obs['umap_y'] = adata.obs_names.map(umap_y).values
adata.obsm['X_umap'] = adata.obs.loc[:,['umap_x','umap_y']].values
adata.obs.drop(columns=['umap_x','umap_y'],inplace=True)
elif mode == 'numpy': # assume the order is correct
adata.obsm['X_umap'] = inputs
def doublet_predict(adata): # gave RNA count or log matrix
'''
wrapper function for running scrublet; a new column named 'doublet_scores' will be added to the adata
:param adata: Anndata
:return: dict
Examples::
from sctriangulate.preprocessing import doublet_predict
mapping = doublet_predict(old_adata)
'''
from scipy.sparse import issparse
import scrublet as scr
if issparse(adata.X):
adata.X = adata.X.toarray()
counts_matrix = adata.X
scrub = scr.Scrublet(counts_matrix)
doublet_scores, predicted_doublets = scrub.scrub_doublets(min_counts=1, min_cells=1)
adata.obs['doublet_scores'] = doublet_scores
return adata.obs['doublet_scores'].to_dict()
def make_sure_adata_writable(adata,delete=False):
'''
makes sure the adata is able to be written to disk; since the h5 file is strictly typed, no mixed dtype is allowed.
this function detects the columns of obs/var that are of mixed types and deletes them.
:param adata: Anndata
:param delete: boolean, False will just print out what columns are mixed type, True will automatically delete those columns
:return: Anndata
Examples::
from sctriangulate.preprocessing import make_sure_adata_writable
make_sure_adata_writable(adata,delete=True)
'''
# check index, can not have name
var_names = adata.var_names
obs_names = adata.obs_names
var_names.name = None
obs_names.name = None
adata.var_names = var_names
adata.obs_names = obs_names
# make sure each column is a pure type; if a column has mixed types, delete it and print out the deleted columns
# go to: https://github.com/theislab/scanpy/issues/1866
var = adata.var
obs = adata.obs
for col in var.columns:
if var[col].dtypes == 'O':
all_type = np.array([type(item) for item in var[col]])
first = all_type[0]
if (first==all_type).all() and first == str: # object, but every item is str
continue
else: # mixed type
print('column {} in var will be deleted, because mixed types'.format(col))
if delete:
adata.var.drop(columns=[col],inplace=True)
for col in obs.columns:
if obs[col].dtypes == 'O':
all_type = np.array([type(item) for item in obs[col]])
first = all_type[0]
if (first==all_type).all() and first == str: # object, but every item is str
continue
else: # mixed type
print('column {} in obs will be deleted, because mixed types'.format(col))
if delete:
adata.obs.drop(columns=[col],inplace=True)
return adata
def scanpy_recipe(adata,species='human',is_log=False,resolutions=[1,2,3],modality='rna',umap=True,save=True,pca_n_comps=None,n_top_genes=3000):
'''
Main preprocessing function. Run Scanpy normal pipeline to achieve Leiden clustering with various resolutions across multiple modalities.
:param adata: Anndata
:param species: string, 'human' or 'mouse'
:param is_log: boolean, whether the adata.X is count or normalized data.
:param resolutions: list, what leiden resolutions the users want to obtain.
:param modality: string, valid values: 'rna','adt','atac', 'binary'[mutation data, TCR data, etc]
:param umap: boolean, whether to compute umap embedding.
:param save: boolean, whether to save the obtained adata object with cluster label information in it.
:param pca_n_comps: int, how many PCs to keep when running PCA. Suggestion: RNA (30-50), ADT (15), ATAC (100)
:param n_top_genes: int, how many features to keep when selecting highly_variable_genes. Suggestion: RNA (3000), ADT (ignored), ATAC (50000-100000)
:return: Anndata
Examples::
from sctriangulate.preprocessing import scanpy_recipe
# rna
adata = scanpy_recipe(adata,is_log=False,resolutions=[1,2,3],modality='rna',pca_n_comps=50,n_top_genes=3000)
# adt
adata = scanpy_recipe(adata,is_log=False,resolutions=[1,2,3],modality='adt',pca_n_comps=15)
# atac
adata = scanpy_recipe(adata,is_log=False,resolutions=[1,2,3],modality='atac',pca_n_comps=100,n_top_genes=100000)
# binary
adata = scanpy_recipe(adata,resolutions=[1,2,3],modality='binary')
'''
adata.var_names_make_unique()
# normal analysis
if modality == 'rna':
if not is_log: # count data
if species == 'human':
adata.var['mt'] = adata.var_names.str.startswith('MT-')
elif species == 'mouse':
adata.var['mt'] = adata.var_names.str.startswith('mt-')
sc.pp.calculate_qc_metrics(adata,qc_vars=['mt'],percent_top=None,inplace=True,log1p=False)
sc.pp.normalize_total(adata,target_sum=1e4)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes)
adata.raw = adata
adata = adata[:,adata.var['highly_variable']]
sc.pp.regress_out(adata,['total_counts','pct_counts_mt'])
sc.pp.scale(adata,max_value=10)
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
# put raw back to X, and make sure it is sparse matrix
adata = adata.raw.to_adata()
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
else: # log(1+x) and depth normalized data
if species == 'human':
adata.var['mt'] = adata.var_names.str.startswith('MT-')
elif species == 'mouse':
adata.var['mt'] = adata.var_names.str.startswith('mt-')
sc.pp.calculate_qc_metrics(adata,qc_vars=['mt'],percent_top=None,inplace=True,log1p=False)
sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes)
adata.raw = adata
adata = adata[:,adata.var['highly_variable']]
sc.pp.regress_out(adata,['total_counts','pct_counts_mt'])
sc.pp.scale(adata,max_value=10)
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
# put raw back to X, and make sure it is sparse matrix
adata = adata.raw.to_adata()
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
elif modality == 'atac':
if not is_log:
sc.pp.calculate_qc_metrics(adata,percent_top=None,inplace=True,log1p=False)
sc.pp.normalize_total(adata,target_sum=1e4)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes)
adata.raw = adata
adata = adata[:,adata.var['highly_variable']]
#sc.pp.scale(adata,max_value=10) # because in the episcanpy tutorial, it seems to be ignored
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
adata = adata.raw.to_adata()
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
else:
sc.pp.calculate_qc_metrics(adata,percent_top=None,inplace=True,log1p=False)
sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes)
adata.raw = adata
adata = adata[:,adata.var['highly_variable']]
#sc.pp.scale(adata,max_value=10)
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
adata = adata.raw.to_adata()
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
elif modality == 'adt':
if not is_log:
sc.pp.calculate_qc_metrics(adata,percent_top=None,inplace=True,log1p=False)
adata.X = make_sure_mat_sparse(Normalization.CLR_normalization(make_sure_mat_dense(adata.X)))
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
else:
sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
elif modality == 'binary': # mutation
#sc.tl.pca(adata,n_comps=pca_n_comps)
sc.pp.neighbors(adata,use_rep='X',metric='jaccard')
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if umap:
sc.tl.umap(adata)
if not issparse(adata.X):
adata.X = csr_matrix(adata.X)
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,umap))
elif modality == 'spatial':
sc.pp.scale(adata)
sc.pp.neighbors(adata)
for resolution in resolutions:
sc.tl.leiden(adata,resolution=resolution,key_added='sctri_{}_leiden_{}'.format(modality,resolution))
if save:
resolutions = '_'.join([str(item) for item in resolutions])
adata.write('adata_after_scanpy_recipe_{}_{}_umap_{}.h5ad'.format(modality,resolutions,False))
return adata
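# Note on the ADT branch above: Normalization.CLR_normalization is used but not
# defined in this excerpt. As a rough standalone sketch of centered log-ratio
# (CLR) normalisation -- an assumption about its behaviour, not the package's
# exact implementation -- each cell's counts are log1p-transformed and then
# centred by that cell's mean log1p value:
# >>> import numpy as np
# >>> def clr_sketch(mat):                      # mat: cells x proteins, dense
# ...     logged = np.log1p(mat)
# ...     return logged - logged.mean(axis=1, keepdims=True)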
def concat_rna_and_other(adata_rna,adata_other,umap,name,prefix):
'''
concatenate rna adata and another modality's adata object
:param adata_rna: AnnData
:param adata_other: Anndata
:param umap: string, whose umap to use, either 'rna' or 'other'
:param name: string, the name of other modality, for example, 'adt' or 'atac'
:param prefix: string, the prefix added in front of features from the other modality; by scTriangulate convention, adt will be 'AB_', atac will be ''.
:return adata_combine: Anndata
Examples::
from sctriangulate.preprocessing import concat_rna_and_other
concat_rna_and_other(adata_rna,adata_adt,umap='rna',name='adt',prefix='AB_')
'''
adata_rna = adata_rna.copy()
adata_other = adata_other.copy()
# remove layers, [!obsm], varm, obsp, varp, raw
for adata in [adata_rna,adata_other]:
del adata.layers
del adata.varm
del adata.obsp
del adata.varp
del adata.raw
adata_other = adata_other[adata_rna.obs_names,:] # make sure the obs order is the same
adata_other.var_names = [prefix + item for item in adata_other.var_names]
adata_combine = ad.concat([adata_rna,adata_other],axis=1,join='outer',merge='first',label='modality',keys=['rna','{}'.format(name)])
if umap == 'rna':
adata_combine.obsm['X_umap'] = adata_rna.obsm['X_umap']
elif umap == 'other':
adata_combine.obsm['X_umap'] = adata_other.obsm['X_umap']
if not issparse(adata_combine.X):
adata_combine.X = csr_matrix(adata_combine.X)
return adata_combine
def nca_embedding(adata,nca_n_components,label,method,max_iter=50,plot=True,save=True,format='pdf',legend_loc='on data',n_top_genes=None,hv_features=None,add_features=None):
'''
Neighborhood components analysis (NCA) is a supervised analogue of PCA: it takes the labels from the annotation and tries to generate a UMAP
embedding that cleanly separates the labelled clusters.
:param adata: the Anndata
:param nca_n_components: recommend to be 10 based on `Ref <https://www.nature.com/articles/s41586-021-03969-3>`_
:param label: string, the column name which contains the label information
:param method: either 'umap' or 'tsne'
:param max_iter: for the NCA, default is 50, it is generally good enough
:param plot: whether to plot the umap/tsne or not
:param save: whether to save the plot or not
:param format: the saved format, default is 'pdf'
:param legend_loc: 'on data' or 'right margin'
:param n_top_genes: how many hypervariable genes to choose for NCA, recommended 3000 or 5000; default is None, meaning other features will be added (multimodal setting)
:param hv_features: a list containing the user-supplied hypervariable genes/features; in a multimodal setting, this can be [rna genes] + [ADT proteins]
:param add_features: another adata containing features from other modalities, or None meaning RNA only
Example::
from sctriangulate.preprocessing import nca_embedding
# only RNA
nca_embedding(adata,nca_n_components=10,label='annotation1',method='umap',n_top_genes=3000)
# RNA + ADT
# list1 contains [gene features that are variable] and [ADT features that are variable]
nca_embedding(adata_rna,nca_n_components=10,label='annotation1',method='umap',n_top_genes=3000,hv_features=list1, add_features=adata_adt)
'''
from sklearn.neighbors import NeighborhoodComponentsAnalysis
adata = adata
if n_top_genes is not None:
sc.pp.highly_variable_genes(adata,flavor='seurat',n_top_genes=n_top_genes)
else:
if add_features is not None: # first add the features, input should be anndata
adata = concat_rna_and_other(adata,add_features,umap=None,name='add_features',prefix='add_features_')
if hv_features is not None: # custom hv
tmp = pd.Series(index=adata.var_names,data=np.full(len(adata.var_names),fill_value=False))
tmp.loc[hv_features] = True
adata.var['highly_variable'] = tmp.values
adata.raw = adata
adata = adata[:,adata.var['highly_variable']]
X = make_sure_mat_dense(adata.X)
y = adata.obs[label].values
nca = NeighborhoodComponentsAnalysis(n_components=nca_n_components,max_iter=max_iter)
embed = nca.fit_transform(X,y) # (n_cells,n_components)
adata.obsm['X_nca'] = embed
adata = adata.raw.to_adata()
if method == 'umap':
sc.pp.neighbors(adata,use_rep='X_nca')
sc.tl.umap(adata)
sc.pl.umap(adata,color=label,frameon=False,legend_loc=legend_loc)
if save:
plt.savefig(os.path.join('.','nca_embedding_{}_{}.{}'.format(label,method,format)),bbox_inches='tight')
plt.close()
elif method == 'tsne':
sc.tl.tsne(adata,use_rep='X_nca')
sc.pl.tsne(adata,color=label,frameon=False,legend_loc=legend_loc)
if save:
plt.savefig(os.path.join('.','nca_embedding_{}_{}.{}'.format(label,method,format)),bbox_inches='tight')
plt.close()
adata.X = make_sure_mat_sparse(adata.X)
return adata
def umap_dual_view_save(adata,cols):
'''
generate a pdf file with two umaps stacked vertically: one with the legend on the side, the other with the legend on the data.
More importantly, this allows you to generate multiple columns iteratively.
:param adata: Anndata
:param cols: list, all columns from which we want to draw umap.
Examples::
from sctriangulate.preprocessing import umap_dual_view_save
umap_dual_view_save(adata,cols=['annotation1','annotation2','total_counts'])
'''
for col in cols:
fig,ax = plt.subplots(nrows=2,ncols=1,figsize=(8,20),gridspec_kw={'hspace':0.3}) # for final_annotation
sc.pl.umap(adata,color=col,frameon=False,ax=ax[0])
sc.pl.umap(adata,color=col,frameon=False,legend_loc='on data',legend_fontsize=5,ax=ax[1])
plt.savefig('./umap_dual_view_{}.pdf'.format(col),bbox_inches='tight')
plt.close()
def just_log_norm(adata):
sc.pp.normalize_total(adata,target_sum=1e4)
sc.pp.log1p(adata)
return adata
def format_find_concat(adata,canonical_chr_only=True,gtf_file='gencode.v38.annotation.gtf',key_added='gene_annotation',**kwargs):
'''
This is a wrapper function that adds the nearest genes to your ATAC peaks or bins. For instance, if the peak is chr1:55555-55566,
it will be annotated as chr1:55555-55566_gene1;gene2
:param adata: the AnnData whose var_names are the peaks/bins; please make sure the format is like chr1:55555-55566
:param canonical_chr_only: boolean, default True, meaning only features on canonical chromosomes are kept; for human, that is chr1-22, X and Y
:param gtf_file: the path to the GTF file; we provide the hg38 annotation at this `google drive link <https://drive.google.com/file/d/11gbJl2-wZr3LbpWaU9RiUAGPebqWYi1z/view?usp=sharing>`_ for download
:param key_added: string, the column in adata.var where the gene annotation will be stored, default is 'gene_annotation'
:return adata: AnnData; the gene annotation is added to var and the var_names are suffixed with the gene annotation. If canonical_chr_only is True, only features on canonical
chromosomes are retained.
Example::
adata = format_find_concat(adata)
'''
adata= reformat_peak(adata,canonical_chr_only=canonical_chr_only)
find_genes(adata,gtf_file=gtf_file,key_added=key_added,**kwargs)
adata.var_names = [name + '_' + gene for name,gene in zip(adata.var_names,adata.var[key_added])]
return adata
class GeneConvert(object):
'''
A collection of gene symbol conversion functions.
Currently supports:
1. Ensembl gene ID to gene symbol.
'''
@staticmethod
def ensemblgene_to_symbol(query,species):
'''
Examples::
from sctriangulate.preprocessing import GeneConvert
converted_list = GeneConvert.ensemblgene_to_symbol(['ENSG00000010404','ENSG00000010505'],species='human')
'''
# assume query is a list, will also return a list
import mygene
mg = mygene.MyGeneInfo()
out = mg.querymany(query,scopes='ensemblgene',fields='symbol',species=species,returnall=True,as_dataframe=True,df_index=True)
result = out['out']['symbol'].fillna('unknown_gene').tolist()
try:
assert len(query) == len(result)
except AssertionError: # have duplicate results
df = out['out']
df_unique = df.loc[~df.index.duplicated(),:]
result = df_unique['symbol'].fillna('unknown_gene').tolist()
return result
def dual_gene_plot(adata,gene1,gene2,s=8,save=True,format='pdf',dir='.',umap_lim=None):
from scipy.sparse import issparse
if issparse(adata.X):
adata.X = adata.X.toarray()
index1 = np.where(adata.var_names == gene1)[0][0]
index2 = np.where(adata.var_names == gene2)[0][0]
exp1 = adata.X[:,index1]
exp2 = adata.X[:,index2]
color = []
for i in range(len(exp1)):
if exp1[i] > 0 and exp2[i] > 0:
color.append('#F2DE77')
elif exp1[i] > 0 and exp2[i] == 0:
color.append('#5ABF9A')
elif exp1[i] == 0 and exp2[i] > 0:
color.append('#F25C69')
else:
color.append('lightgrey')
fig, ax = plt.subplots()
if umap_lim is not None:
ax.set_xlim(umap_lim[0])
ax.set_ylim(umap_lim[1])
ax.scatter(x=adata.obsm['X_umap'][:,0],y=adata.obsm['X_umap'][:,1],s=s,c=color)
import matplotlib.lines as mlines
ax.legend(handles=[mlines.Line2D([],[],marker='o',color=i,linestyle='') for i in ['#F2DE77','#5ABF9A','#F25C69','lightgrey']],
labels=['Both','{}'.format(gene1),'{}'.format(gene2),'None'],frameon=False,loc='upper left',bbox_to_anchor=[1,1])
if save:
plt.savefig(os.path.join(dir,'sctri_dual_gene_plot_{}_{}.{}'.format(gene1,gene2,format)),bbox_inches='tight')
plt.close()
return ax
def multi_gene_plot(adata,genes,s=8,save=True,format='pdf',dir='.',umap_lim=None):
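# Note: the color palette `c` below is only defined for 3, 4, or 5 genes;
# passing fewer or more genes would leave `c` unset and raise a NameError.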
from scipy.sparse import issparse
if issparse(adata.X):
adata.X = adata.X.toarray()
exp_list = []
for gene in genes:
index_gene = np.where(adata.var_names == gene)[0][0]
exp_gene = adata.X[:,index_gene]
exp_list.append(exp_gene)
color = []
for i in range(len(exp_list[0])):
if len(genes) == 3:
c = ['#04BFBF','#83A603','#F7766D']
elif len(genes) == 4:
c = ['#04BFBF','#83A603','#F7766D','#E36DF2']
elif len(genes) == 5:
c = ['#04BFBF','#83A603','#F7766D','#E36DF2','#A69B03']
b = '#BABABA'
l_exp = np.array([exp[i] for exp in exp_list])
n_exp = np.count_nonzero(l_exp > 0)
if n_exp > 1:
color.append(c[np.where(l_exp==l_exp.max())[0][0]])
elif n_exp == 1:
color.append(c[np.where(l_exp>0)[0][0]])
elif n_exp == 0:
color.append(b)
fig, ax = plt.subplots()
if umap_lim is not None:
ax.set_xlim(umap_lim[0])
ax.set_ylim(umap_lim[1])
ax.scatter(x=adata.obsm['X_umap'][:,0],y=adata.obsm['X_umap'][:,1],s=s,c=color)
import matplotlib.lines as mlines
ax.legend(handles=[mlines.Line2D([],[],marker='o',color=i,linestyle='') for i in c+[b]],
labels=genes + ['None'],frameon=False,
loc='upper left',bbox_to_anchor=[1,1])
if save:
output = '_'.join(genes)
plt.savefig(os.path.join(dir,'sctri_multi_gene_plot_{}.{}'.format(output,format)),bbox_inches='tight')
plt.close()
return ax
def make_sure_mat_dense(mat):
'''
make sure a matrix is dense
:param mat: ndarray or sparse matrix
:return mat: ndarray (dense)
Examples::
mat = make_sure_mat_dense(mat)
'''
if not issparse(mat):
pass
else:
mat = mat.toarray()
return mat
def make_sure_mat_sparse(mat): # will be csr if the input mat is a dense array
'''
make sure a matrix is sparse
:param mat: ndarray or sparse matrix
:return mat: csr_matrix (sparse)
Examples::
mat = make_sure_mat_sparse(mat)
'''
if not issparse(mat):
mat = csr_matrix(mat)
else:
pass
return mat
class Normalization(object):
'''
a series of normalization functions
Currently supports:
1. CLR normalization
2. total count normalization (CPTT, CPM)
3. GMM normalization
'''
# matrix should be cell x feature, expecting a ndarray
@staticmethod
def CLR_normalization(mat):
'''
Examples::
from sctriangulate.preprocessing import Normalization
post_mat = Normalization.CLR_normalization(pre_mat)
'''
from scipy.stats import gmean
gmeans = gmean(mat+1,axis=1).reshape(-1,1)
post = np.log(mat/gmeans + 1)
return post
@staticmethod
def total_normalization(mat,target=1e4):
'''
Examples::
from sctriangulate.preprocessing import Normalization
post_mat = Normalization.total_normalization(pre_mat)
'''
total = np.sum(mat,axis=1).reshape(-1,1)
sf = total/target
post =
|
np.log(mat/sf + 1)
|
numpy.log
|
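# --- Illustrative sketch (not part of the library above) ---------------------
# The completed line combines total-count normalization with a log transform.
# A minimal standalone numpy version, assuming a dense cell x feature matrix:
import numpy as np

def _total_norm_log_sketch(mat, target=1e4):
    total = np.sum(mat, axis=1).reshape(-1, 1)  # per-cell library size
    sf = total / target                         # per-cell size factor
    return np.log(mat / sf + 1)                 # same as np.log1p(mat / sf)

# usage: _total_norm_log_sketch(np.random.poisson(5, size=(4, 10)).astype(float))
# ------------------------------------------------------------------------------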
import os
import sys
import warnings
import pydicom as dicom
import numpy as np
import sqlalchemy as sq
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from ._Base import Base
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, CheckButtons
from scipy.spatial.distance import cdist
from scipy.sparse.csgraph import connected_components
from .annotation_distance_metrics import metrics
from scipy.stats import mode
try:
import configparser
except ImportError:
import ConfigParser
configparser = ConfigParser
def _get_config_filename():
"""
Returns the platform-specific configuration filename
"""
return 'pylidc.conf' if sys.platform.startswith('win') else '.pylidcrc'
def _get_config_path():
"""
Returns the path to the configuration file
"""
return os.path.join(os.path.expanduser('~'))
def _get_config_file():
return os.path.join(_get_config_path(),
_get_config_filename())
def _get_dicom_file_path_from_config_file():
"""
Loads the dicom section of the configuration file
"""
conf_file = _get_config_file()
parser = configparser.SafeConfigParser()
if os.path.exists(conf_file):
parser.read(conf_file)
try:
return parser.get(section='dicom', option='path')
except (configparser.NoSectionError,
configparser.NoOptionError):
msg = ("Could not find `dicom` configuration section or "
" `path` configuration option under that section."
"A template config file will be written to {}.")
warnings.warn(msg.format(conf_file))
parser.add_section('dicom')
parser.set('dicom', 'path', '')
with open(conf_file, 'w') as f:
parser.write(f)
return parser.get(section='dicom', option='path')
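# For reference, the configuration file read above is a plain INI file with a
# single `dicom` section; based on the parser calls, it looks like:
#
#   [dicom]
#   path = /path/to/your/LIDC-IDRI/download
#
# (the path value is user-specific; the one shown is only a placeholder)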
_off_limits = ['id','study_instance_uid','series_instance_uid',
'patient_id','slice_thickness','pixel_spacing',
'contrast_used','is_from_initial','sorted_dicom_file_names']
class Scan(Base):
"""
The Scan model class refers to the top-level XML file from the LIDC.
A scan has many :class:`pylidc.Annotation` objects, which correspond
to the `unblindedReadNodule` XML attributes for the scan.
Attributes
==========
study_instance_uid: string
DICOM attribute (0020,000D).
series_instance_uid: string
DICOM attribute (0020,000E).
patient_id: string
Identifier of the form "LIDC-IDRI-dddd" where dddd is a string of
integers.
slice_thickness: float
DICOM attribute (0018,0050). Note that this may not be
equal to the `slice_spacing` attribute (see below).
slice_zvals: ndarray
The "z-values" for the slices of the scan (i.e.,
the last coordinate of the ImagePositionPatient DICOM attribute)
as a NumPy array sorted in increasing order.
slice_spacing: float
This computed property is the median of the difference
between the slice coordinates, i.e., `scan.slice_zvals`.
Note
----
This attribute is typically (but not always!) the
same as the `slice_thickness` attribute. Furthermore,
the `slice_spacing` does NOT necessarily imply that all the
slices are spaced `slice_spacing` apart (although they often are).
pixel_spacing: float
DICOM attribute (0028,0030). This attribute normally holds two
values. All scans in the LIDC have equal resolution
in the transverse plane, so only one value is used here.
contrast_used: bool
If the DICOM file for the scan had any Contrast tag,
this is marked as `True`.
is_from_initial: bool
Indicates whether or not this PatientID was tagged as
part of the initial 399 release.
sorted_dicom_file_names: string
This attribute is no longer used, and can be ignored.
Example
-------
A short example of `Scan` class usage::
import pylidc as pl
scans = pl.query(pl.Scan).filter(pl.Scan.slice_thickness <= 1)
print(scans.count())
# => 97
scan = scans.first()
print(scan.patient_id,
scan.pixel_spacing,
scan.slice_thickness,
scan.slice_spacing)
# => LIDC-IDRI-0066, 0.63671875, 0.6, 0.5
print(len(scan.annotations))
# => 11
"""
__tablename__ = 'scans'
id = sq.Column('id', sq.Integer, primary_key=True)
study_instance_uid = sq.Column('study_instance_uid', sq.String)
series_instance_uid = sq.Column('series_instance_uid', sq.String)
patient_id = sq.Column('patient_id', sq.String)
slice_thickness = sq.Column('slice_thickness', sq.Float)
pixel_spacing = sq.Column('pixel_spacing', sq.Float)
contrast_used = sq.Column('contrast_used', sq.Boolean)
is_from_initial = sq.Column('is_from_initial', sq.Boolean)
sorted_dicom_file_names = sq.Column('sorted_dicom_file_names', sq.String)
def __repr__(self):
return "Scan(id=%d,patient_id=%s)" % (self.id,self.patient_id)
def __setattr__(self, name, value):
if name in _off_limits:
msg = "Trying to assign read-only Scan object attribute \
`%s` a value of `%s`." % (name,value)
raise ValueError(msg)
else:
super(Scan, self).__setattr__(name,value)
def get_path_to_dicom_files(self):
"""
Get the path to where the DICOM files are stored for this scan,
relative to the root path set in the pylidc configuration file (i.e.,
`~/.pylidcrc` on Mac and Linux).
1. In older downloads, the DICOM data would download as::
[...]/LIDC-IDRI/LIDC-IDRI-dddd/uid1/uid2/dicom_file.dcm
where [...] is the base path set in the pylidc configuration
file; uid1 is `Scan.study_instance_uid`; and uid2
is `Scan.series_instance_uid`.
2. However, in more recent downloads, the data is downloaded like::
[...]/LIDC-IDRI/LIDC-IDRI-dddd/???
where "???" is some unknown folder hierarchy convention used
by TCIA.
We first check option 1. Otherwise, we check if the
"LIDC-IDRI-dddd" folder exists in the root path. If so, then we
recursively search the "LIDC-IDRI-dddd" directory until we find
the correct subfolder that contains a DICOM file with the correct
`study_instance_uid` and `series_instance_uid`.
Option 2 is less efficient than 1; however, option 2 is robust.
"""
dicompath = _get_dicom_file_path_from_config_file()
if not os.path.exists(dicompath):
msg = ("Could not establish path to dicom files. Have you "
"specified the `path` option in the configuration "
"file {}?")
raise RuntimeError(msg.format(_get_config_file()))
base = os.path.join(dicompath, self.patient_id)
if not os.path.exists(base):
msg = "Couldn't find DICOM files for {} in {}"
raise RuntimeError(msg.format(self, base))
path = os.path.join(base,
self.study_instance_uid,
self.series_instance_uid)
# Check if old path first. If not found, do recursive search.
if not os.path.exists(path): # and base exists
found = False
for dpath,dnames,fnames in os.walk(base):
# Skip if no files in current dir.
if len(fnames) == 0: continue
# Gather up DICOM files in dir (if any).
dicom_file = [d for d in fnames if d.endswith(".dcm")]
# Skip if no DICOM files.
if len(dicom_file) == 0: continue
# Grab the first DICOM file in the dir since they should
# all have the same series/study ids.
dicom_file = dicom_file[0]
dimage = dicom.dcmread(os.path.join(dpath, dicom_file))
seid = str(dimage.SeriesInstanceUID).strip()
stid = str(dimage.StudyInstanceUID).strip()
if seid == self.series_instance_uid and \
stid == self.study_instance_uid:
path = dpath
found = True
break
if not found:
raise IOError("Couldn't find DICOM files for %s."%self)
return path
def load_all_dicom_images(self, verbose=True):
"""
Load all the DICOM images associated with this scan and return them as a list.
Parameters
----------
verbose: bool
Turn the loading message on/off.
Example
-------
An example::
import pylidc as pl
import matplotlib.pyplot as plt
scan = pl.query(pl.Scan).first()
images = scan.load_all_dicom_images()
zs = [float(img.ImagePositionPatient[2]) for img in images]
print(zs[1] - zs[0], images[0].SliceThickness, scan.slice_thickness)
plt.imshow( images[0].pixel_array, cmap=plt.cm.gray )
plt.show()
"""
if verbose: print("Loading dicom files ... This may take a moment.")
path = self.get_path_to_dicom_files()
fnames = [fname for fname in os.listdir(path)
if fname.endswith('.dcm')]
images = []
for fname in fnames:
image = dicom.dcmread(os.path.join(path,fname))
seid = str(image.SeriesInstanceUID).strip()
stid = str(image.StudyInstanceUID).strip()
if seid == self.series_instance_uid and\
stid == self.study_instance_uid:
images.append(image)
# ##############################################
# Clean multiple z scans.
#
# Some scans contain multiple slices with the same `z` coordinate
# from the `ImagePositionPatient` tag.
# The arbitrary choice to take the slice with lesser
# `InstanceNumber` tag is made.
# This takes some work to accomplish...
zs = [float(img.ImagePositionPatient[-1]) for img in images]
inums = [float(img.InstanceNumber) for img in images]
inds = list(range(len(zs)))
while np.unique(zs).shape[0] != len(inds):
for i in inds:
for j in inds:
if i!=j and zs[i] == zs[j]:
k = i if inums[i] > inums[j] else j
inds.pop(inds.index(k))
# Prune the duplicates found in the loops above.
zs = [zs[i] for i in range(len(zs)) if i in inds]
images = [images[i] for i in range(len(images)) if i in inds]
# Sort everything by (now unique) ImagePositionPatient z coordinate.
sort_inds = np.argsort(zs)
images = [images[s] for s in sort_inds]
# End multiple z clean.
# ##############################################
return images
def cluster_annotations(self, metric='min', tol=None, factor=0.9,
min_tol=1e-1, return_distance_matrix=False,
verbose=True):
"""
Estimate which annotations refer to the same physical
nodule in the CT scan. This method clusters all nodule Annotations for
a Scan by computing a distance measure between the annotations.
Parameters
----------
metric: string or callable, default 'min'
If string, see::
from pylidc.annotation_distance_metrics import metrics
print(metrics.keys())
for available metrics. If callable, the provided function,
should take two Annotation objects and return a float, i.e.,
`isinstance( metric(ann1, ann2), float )`.
tol: float, default=None
A distance in millimeters. Annotations are grouped when
the minimum distance between their boundary contour points
is less than `tol`. If `tol = None` (the default), then
`tol = scan.slice_thickness` is used.
factor: float, default=0.9
If `tol` resulted in any group of annotations with more than
4 Annotations, then `tol` is multiplied by `factor` and the
grouping is performed again.
min_tol: float, default=0.1
If `tol` is reduced below `min_tol` (see the `factor` parameter),
then the routine exits because we conclude that the annotation
groups cannot be automatically reduced so that each group
has at most 4 Annotations (as expected
with LIDC data).
return_distance_matrix: bool, default False
Optionally return the distance matrix that was used
to produce the clusters.
verbose: bool, default=True
If True, a warning message is printed when `tol < min_tol` occurs.
Return
------
clusters: list of lists.
`clusters[i]` is a list of :class:`pylidc.Annotation` objects
that refer to the same physical nodule in the Scan. `len(clusters)`
estimates the number of unique physical nodules in the Scan.
Note
----
The "distance" matrix, `d[i,j]`, between all Annotations for
the Scan is first computed using the provided `metric` parameter.
Annotations are said to be adjacent when `d[i,j]<=tol`.
Annotation groups are determined by finding the connected components
of the graph associated with this adjacency matrix.
Example
-------
An example::
import pylidc as pl
scan = pl.query(pl.Scan).first()
nodules = scan.cluster_annotations()
print("This can has %d nodules." % len(nodules))
# => This can has 4 nodules.
for i,nod in enumerate(nodules):
print("Nodule %d has %d annotations." % (i+1,len(nod)))
# => Nodule 1 has 4 annotations.
# => Nodule 2 has 4 annotations.
# => Nodule 3 has 1 annotations.
# => Nodule 4 has 4 annotations.
"""
assert 0 < factor < 1, "`factor` must be in the interval (0,1)."
if isinstance(metric, str) and metric not in metrics.keys():
msg = 'Invalid `metric` string. See \n\n'
msg += '`from pylidc.annotation_distance_metrics import metrics`\n'
msg += '`print(metrics.keys())`\n\n'
msg += 'for valid `metric` strings.'
raise ValueError(msg)
elif not callable(metric):
metric = metrics[metric]
N = len(self.annotations)
tol = self.slice_thickness if tol is None else tol
assert tol >= 0, "`tol` should be >= 0."
# Some special cases.
if N == 0:
return []
elif N == 1:
return [[self.annotations[0]]]
D =
|
np.zeros((N,N))
|
numpy.zeros
|
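# --- Illustrative sketch (not the pylidc implementation above) ---------------
# cluster_annotations() fills the N x N distance matrix allocated by the
# completed `np.zeros((N, N))`, thresholds it at `tol`, and takes connected
# components of the resulting adjacency graph. A generic standalone version,
# assuming `annotations` is any list and `metric(a, b)` returns a float:
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

def _cluster_by_distance_sketch(annotations, metric, tol):
    N = len(annotations)
    D = np.zeros((N, N))
    for i in range(N):
        for j in range(i + 1, N):
            D[i, j] = D[j, i] = metric(annotations[i], annotations[j])
    adjacency = csr_matrix(D <= tol)              # edge where distance <= tol
    n_groups, labels = connected_components(adjacency, directed=False)
    return [[a for a, g in zip(annotations, labels) if g == k] for k in range(n_groups)]
# ------------------------------------------------------------------------------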
from . import ProgressiveTest
import datashape as ds
from collections import OrderedDict
from progressivis import Scheduler
from progressivis.table.table import Table, BaseTable
from progressivis.io.csv_loader import CSVLoader
from progressivis.datasets import get_dataset
from progressivis.storage import Group
from progressivis.core import aio
import numpy as np
import pandas as pd
class TestTable(ProgressiveTest):
# pylint: disable=protected-access
def setUp(self):
super(TestTable, self).setUp()
self.scheduler = Scheduler.default
self.storagegroup = Group.default()
def test_steps(self):
self.create_table()
self.fill_table()
self.update_table()
self.delete_table()
self.examine_table()
self.append_dataframe()
self.append_direct()
self.load_csv()
self.fill_table()
def test_loc_tableview(self):
t = Table('table_loc', dshape="{a: int, b: float32}", create=True)
t.resize(10)
ivalues = np.random.randint(100, size=20)
t['a'] = ivalues[:10]
fvalues = np.random.rand(20)
t['b'] = fvalues[:10]
t.append({'a': ivalues[10:], 'b': fvalues[10:]})
view = t.loc[2:11]
self.assertEqual(type(view), BaseTable)
self.assertTrue(np.array_equal(view._column(0)[:], ivalues[2:12]))
view_view = view.loc[3:7]
self.assertTrue(np.array_equal(view_view._column(0)[:], view._column(0)[3:7]))
view_view = view.loc[3:6]
self.assertTrue(np.array_equal(view_view._column(0)[:], view._column(0)[view.id_to_index(slice(3,6))]))
table_view = view.loc[[3,4,6,9]]
self.assertEqual(type(table_view),BaseTable)
self.assertTrue(np.array_equal(table_view._column(0).values, view._column(0)[[3,4,6,9]]))
table_view = view.loc[[3,4,6,9]]
self.assertEqual(type(table_view),BaseTable)
self.assertTrue(np.array_equal(table_view._column(0).values, view._column(0)[view.id_to_index([3,4,6,9])]))
def test_set_loc(self):
t = Table('table_set_loc', dshape="{a: int, b: float32}", create=True)
t.resize(20)
ivalues = np.random.randint(100,size=20)
t['a'] = ivalues
fvalues = np.random.rand(20)
t['b'] = fvalues
t.loc[3:6] = [1001, 1002]
self.assertTrue(np.array_equal(t._column(0)[3:7], np.repeat(1001, 4)))
self.assertTrue(np.array_equal(t._column(1)[3:7], np.repeat(1002, 4)))
t.loc[3:7] = 1003
self.assertTrue(np.array_equal(t._column(0)[3:8], np.repeat(1003, 5)))
self.assertTrue(np.array_equal(t._column(1)[3:8], np.repeat(1003, 5)))
t.loc[3:7,['a','b']] = [1004, 1005]
self.assertTrue(np.array_equal(t._column(0)[3:8], np.repeat(1004, 5)))
self.assertTrue(np.array_equal(t._column(1)[3:8], np.repeat(1005, 5)))
t.loc[3:7,['a','b']] = [1006, 1007] # previous iloc test
self.assertTrue(np.array_equal(t._column(0)[3:7], np.repeat(1006, 4)))
self.assertTrue(np.array_equal(t._column(1)[3:7], np.repeat(1007, 4)))
view = t.loc[2:11]
view.loc[3:6] = [1008, 1009]
self.assertTrue(np.array_equal(view._column(0)[
view.id_to_index(slice(3,6))], np.repeat(1008, 4)))
self.assertTrue(np.array_equal(view._column(1)[
view.id_to_index(slice(3,6))], np.repeat(1009, 4)))
self.assertTrue(np.array_equal(t._column(0)[3:7],
np.repeat(1008, 4)))
self.assertTrue(np.array_equal(t._column(1)[3:7],
np.repeat(1009, 4)))
view_view = view.loc[3:6]
view_view.loc[3:6] = [1010, 1011]
self.assertTrue(np.array_equal(view_view._column(0)[
view_view.id_to_index(slice(3,6))], np.repeat(1010, 4)))
self.assertTrue(np.array_equal(view_view._column(1)[
view_view.id_to_index(slice(3,6))], np.repeat(1011, 4)))
self.assertTrue(np.array_equal(t._column(0)[3:7], np.repeat(1010, 4)))
self.assertTrue(np.array_equal(t._column(1)[3:7], np.repeat(1011, 4)))
def test_at(self):
t = Table('table_at', dshape="{a: int, b: float32}", create=True)
t.resize(20)
ivalues = np.random.randint(100,size=20)
t['a'] = ivalues
fvalues = np.random.rand(20)
t['b'] = fvalues
at_ = t.at[3,'a']
self.assertEqual(at_, t._column(0)[3])
iat_ = t.at[3, 1]
self.assertEqual(iat_, t._column(1)[3])
view = t.loc[2:11]
at_ = view.at[3,'a']
self.assertEqual(at_, view._column(0)[view.id_to_index(3)])
iat_ = view.at[3, 1]
self.assertEqual(iat_, view._column(1)[3])
def test_set_at(self):
t = Table('table_set_at', dshape="{a: int, b: float32}", create=True)
t.resize(20)
ivalues = np.random.randint(100,size=20)
t['a'] = ivalues
fvalues = np.random.rand(20)
t['b'] = fvalues
t.at[3, 'a'] = 1001
self.assertEqual(t._column(0)[3], 1001)
t.at[3, 'a'] = 1001
self.assertEqual(t._column(0)[3], 1001)
t.at[3, 0] = 1002
self.assertEqual(t._column(0)[3], 1002)
view = t.loc[2:11]
view.loc[3, 'a'] = 1003
self.assertEqual(view._column(0)[view.id_to_index(3)], 1003)
self.assertEqual(t._column(0)[3], 1003)
view_view = view.loc[3:6]
view_view.at[3, 'a'] = 1004
self.assertEqual(view_view._column(0)[view_view.id_to_index(3)], 1004)
self.assertEqual(t._column(0)[3], 1004)
view_view.at[2, 0] = 1005
self.assertEqual(view_view._column(0)[2], 1005)
self.assertEqual(t._column(0)[t.id_to_index(view_view.index_to_id(2))], 1005)
def test_last(self):
t = Table('table_last', dshape="{a: int, b: float32}", create=True)
t.resize(10)
ivalues = np.random.randint(100,size=10)
t['a'] = ivalues
fvalues = np.random.rand(10)
t['b'] = fvalues
last_ = list(t.last().values())
self.assertEqual(last_, [t._column(0)[-1],t._column(1)[-1]])
last_a = t.last('a')
self.assertEqual(last_a, t._column(0)[-1])
last_a_b = t.last(['a','b'])
self.assertEqual(list(last_a_b),last_)
def create_table(self):
t = Table('table',
storagegroup=self.storagegroup,
dshape="{a: int, b: float32, c: string, d: 10*int}", create=True)
self.assertTrue(t is not None)
self.assertEqual(t.ncol, 4)
col1 = t['a']
col2 = t[0]
self.assertTrue(col1 is col2)
t = Table('table',
storagegroup=self.storagegroup,
dshape="{a: int, b: float32, c: string, d: 10*int}")
self.assertTrue(t is not None)
t = Table('table', storagegroup=self.storagegroup)
self.assertEqual(t.dshape, ds.dshape("{a: int, b: float32, c: string, d: 10 * int}"))
t2 = Table('bar_table',
dshape="{a: int64, b: float64}",
fillvalues={'a': -1}, create=True)
self.assertEqual(t2.dshape, ds.dshape("{a: int64, b: float64}"))
self.assertEqual(t2[0].fillvalue, -1)
def fill_table(self):
t = Table('table', storagegroup=self.storagegroup)
self._fill_table(t)
def _fill_table(self, t):
# Try with a 10-element Table
t.resize(10)
# Fill one column with a simple list
ivalues = range(10)
t['a'] = ivalues # Table._setitem_key
icol = t['a'].value
for i in range(len(ivalues)):
self.assertEqual(ivalues[i], icol[i])
ivalues = np.random.randint(100, size=10)
t['a'] = ivalues
icol = t['a'].value
for i in range(len(ivalues)):
self.assertEqual(ivalues[i], icol[i])
t['b'] = ivalues
fcol = t['b'].value
for i in range(len(ivalues)):
self.assertEqual(ivalues[i], fcol[i])
fvalues = np.random.rand(10)
t['b'] = fvalues
fcol = t['b'].value
for i in range(len(fvalues)):
self.assertAlmostEqual(fvalues[i], fcol[i])
try:
t['a'] = ivalues[1:]
except ValueError:
pass
else:
self.fail('ExpectedException not raised')
# Fill multiple columns with a list of arrays
ivalues = np.random.randint(100, size=10)
fvalues = np.random.rand(10)
t[['a', 'b']] = [ivalues, fvalues]
icol = t['a'].value
fcol = t['b'].value
for i in range(len(fvalues)):
self.assertEqual(ivalues[i], icol[i])
self.assertAlmostEqual(fvalues[i], fcol[i])
values = np.random.randint(100, size=(10, 2))
t[['a', 'b']] = values
icol = t['a'].value
fcol = t['b'].value
for i in range(len(fvalues)):
self.assertEqual(values[i, 0], icol[i])
self.assertEqual(values[i, 1], fcol[i])
try:
t[['a', 'b']] = values[:, 1:]
except TypeError: # h5py raises a TypeError
pass
except ValueError: # numpy would raise a ValueError
pass
# pylint: disable=broad-except
except Exception as e:
self.fail('Unexpected exception raised: %s' % e)
else:
self.fail('ExpectedException not raised')
def update_table(self):
t = Table('table', storagegroup=self.storagegroup)
self._update_table(t)
def delete_table(self):
t = Table('table', storagegroup=self.storagegroup)
self._delete_table(t)
def examine_table(self):
t = Table('table', storagegroup=self.storagegroup)
pass
def _update_table(self, t):
#pylint: disable=protected-access
self.assertEqual(len(t),10)
#t.scheduler._run_number = 1
t['a'] = np.arange(10)
#t.scheduler._run_number = 2
t.loc[2:3, 'a'] = np.arange(2) # loc is inclusive
v1 = t.loc[2:3, 'a']
v11 = v1.loc[2,'a']
v12 = v1.loc[2,:]
v2 = t.loc[:, 'a']
v3 = t.loc[:]
def _delete_table(self, t):
self.assertEqual(t.index_to_id(2), 2)
a = t['a']
self.assertEqual(a[2], a.fillvalue)
del t.loc[2]
with self.assertRaises(KeyError):
c = t.loc[2]
print(c)
self.assertEqual(len(t), a.size-1)
cnt = 0
for row in t.iterrows():
self.assertTrue('a' in row)
cnt += 1
self.assertEqual(len(t), cnt)
def _delete_table2(self, t):
with self.assertRaises(KeyError):
c = t.loc[2]
print(c)
def append_dataframe(self):
#pylint: disable=protected-access
#self.scheduler._run_number = 1
df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3], 'c': ['a', 'b', 'cd']})
t = Table('table_2', data=df)
self.assertEqual(len(t),len(df))
for colname in df:
coldf = df[colname]
colt = t[colname]
self.assertEqual(len(coldf), len(colt))
self.assertTrue(np.all(coldf.values==colt.values))
#self.scheduler._run_number = 2
t.append(df)
self.assertEqual(len(t),2*len(df))
for colname in df:
coldf = df[colname]
colt = t[colname]
self.assertEqual(2*len(coldf), len(colt))
self.assertTrue(np.all(coldf==colt[len(df):len(t)]))
#self.scheduler._run_number = 3
t.append(t) # fun test
self.assertEqual(len(t),4*len(df))
for colname in df:
coldf = df[colname]
colt = t[colname]
self.assertEqual(4*len(coldf), len(colt))
self.assertTrue(np.all(colt[0:2*len(df)]==colt[2*len(df):len(t)]))
def append_direct(self):
#pylint: disable=protected-access
d = OrderedDict([('a', [1, 2, 3]), ('b', [0.1, 0.2, 0.3]), ('c', ['a', 'b', 'cd'])])
#print(dshape_extract(d))
df = pd.DataFrame(d)
#self.scheduler._run_number = 1
t = Table('table_3', data=d)
self.assertEqual(len(t),len(df))
for colname in df:
coldf = df[colname]
colt = t[colname]
self.assertEqual(len(coldf), len(colt))
self.assertTrue(np.all(coldf==colt.values))
#self.scheduler._run_number = 2
t.append(d)
self.assertEqual(len(t),2*len(df))
for colname in df:
coldf = df[colname]
colt = t[colname]
self.assertEqual(2*len(coldf), len(colt))
self.assertTrue(np.all(coldf==colt[len(df):len(t)]))
#self.scheduler._run_number = 3
t.append(t) # fun test
self.assertEqual(len(t),4*len(df))
for colname in df:
coldf = df[colname]
colt = t[colname]
self.assertEqual(4*len(coldf), len(colt))
self.assertTrue(np.all(colt[0:2*len(df)]==colt[2*len(df):len(t)]))
def load_csv(self):
module=CSVLoader(filepath_or_buffer=get_dataset('smallfile'),
force_valid_ids=True,
index_col=False,
header=None,
scheduler=self.scheduler)
self.assertTrue(module.result is None)
aio.run(self.scheduler.start(persist=True))
t = module.result
self.assertFalse(t is None)
self.assertEqual(len(t), 30000)
df = pd.read_csv(filepath_or_buffer=get_dataset('smallfile'),
index_col=False,
header=None)
for col in range(t.ncol):
coldf = df[col]
colt = t[col]
self.assertTrue(np.all(coldf==colt.values))
#print(t)
def test_read_direct(self):
t = Table('table_read_direct', dshape="{a: int, b: float32}", create=True)
t.resize(10)
ivalues = np.random.randint(100,size=10)
t['a'] = ivalues
fvalues = np.random.rand(10)
t['b'] = fvalues
a = t['a']
jvalues = np.empty(10, dtype=a.dtype)
a.read_direct(jvalues, np.s_[0:10], np.s_[0:10])
self.assertTrue(np.all(ivalues==jvalues))
b = t['b']
gvalues = np.empty(10, dtype=b.dtype)
b.read_direct(gvalues, np.s_[0:10], np.s_[0:10])
self.assertTrue(np.allclose(fvalues, gvalues))
a.read_direct(jvalues, np.s_[2:7], np.s_[5:10])
self.assertTrue(np.all(ivalues[2:7]==jvalues[5:10]))
b.read_direct(gvalues, np.s_[2:7], np.s_[5:10])
self.assertTrue(np.allclose(fvalues[2:7], gvalues[5:10]))
def test_to_array(self):
t = Table('table_to_array', dshape="{a: int, b: float32, c: real}", create=True)
t.resize(10)
ivalues = np.random.randint(100,size=10)
t['a'] = ivalues
fvalues = np.random.rand(10)
t['b'] = fvalues
dvalues = np.random.rand(10)
t['c'] = dvalues
a = t['a']
b = t['b']
c = t['c']
arr = t.to_array()
self.assertEqual(arr.dtype, np.float64)
self.assertEqual(arr.shape[0], t.nrow)
self.assertEqual(arr.shape[1], t.ncol)
self.assertTrue(np.allclose(a[:], arr[:, 0]))
self.assertTrue(np.allclose(b[:], arr[:, 1]))
self.assertTrue(np.allclose(c[:], arr[:, 2]))
# Columns
arr = t.to_array(columns=['a', 'b'])
self.assertEqual(arr.dtype, np.float64)
self.assertEqual(arr.shape[0], t.nrow)
self.assertEqual(arr.shape[1], 2)
self.assertTrue(np.allclose(a[:], arr[:, 0]))
self.assertTrue(np.allclose(b[:], arr[:, 1]))
# Keys
key = slice(2,7)
arr = t.to_array(key)
key = t.id_to_index(key).to_slice_maybe() # slices contain their bounds
self.assertEqual(arr.dtype, np.float64)
self.assertEqual(arr.shape[0], key.stop-key.start)
self.assertEqual(arr.shape[1], 3)
self.assertTrue(np.allclose(a[key], arr[:, 0]))
self.assertTrue(np.allclose(b[key], arr[:, 1]))
self.assertTrue(np.allclose(c[key], arr[:, 2]))
# Keys with fancy indexing
key = [2,4,6,8]
arr = t.to_array(key)
indices = t.id_to_index(key) # slices contain their bounds
self.assertEqual(arr.dtype, np.float64)
self.assertEqual(arr.shape[0], len(indices))
self.assertEqual(arr.shape[1], 3)
self.assertTrue(np.allclose(a[indices], arr[:, 0]))
self.assertTrue(np.allclose(b[indices], arr[:, 1]))
self.assertTrue(np.allclose(c[indices], arr[:, 2]))
#TODO more tests multidimensional columns and deleted rows
def test_convert(self):
arr = np.random.rand(10,5)
t = Table.from_array(arr)
self.assertIsNotNone(t)
self.assertEqual(len(t.columns), arr.shape[1])
self.assertEqual(t.columns, ['_1', '_2', '_3', '_4', '_5'])
arr2 = t.to_array()
self.assertTrue(
|
np.allclose(arr, arr2)
|
numpy.allclose
|
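# --- Illustrative note (not part of the test suite above) --------------------
# The completed `np.allclose(arr, arr2)` is the right round-trip check here:
# float32 columns and dtype promotion make exact equality brittle, so an
# elementwise tolerance test (defaults rtol=1e-05, atol=1e-08) is used instead.
import numpy as np

_a = np.random.rand(10)
_b = (_a / 3.0) * 3.0            # round-off can break exact equality
assert np.allclose(_a, _b)       # still equal within tolerance
# ------------------------------------------------------------------------------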
import os
from bisect import bisect_right
import sympy as sp
import numpy as np
from numpy import dot
from common.ffi_wrapper import compile_code, wrap_compiled
from common.sympy_helpers import sympy_into_c
from .chi2_lookup import chi2_ppf
EXTERNAL_PATH = os.path.dirname(os.path.abspath(__file__))
def solve(a, b):
if a.shape[0] == 1 and a.shape[1] == 1:
#assert np.allclose(b/a[0][0], np.linalg.solve(a, b))
return b/a[0][0]
else:
return np.linalg.solve(a, b)
def null(H, eps=1e-12):
u, s, vh = np.linalg.svd(H)
padding = max(0,np.shape(H)[1]-np.shape(s)[0])
null_mask = np.concatenate(((s <= eps), np.ones((padding,),dtype=bool)),axis=0)
null_space = np.compress(null_mask, vh, axis=0)
return np.transpose(null_space)
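# Quick standalone check (illustrative only) of the null() helper above:
# columns of null(H) span the null space of H, so H.dot(null(H)) should be ~0.
def _null_space_sketch():
    H = np.array([[1.0, 2.0, 3.0],
                  [2.0, 4.0, 6.0]])     # rank-1 matrix, 2-dimensional null space
    Z = null(H)
    return np.allclose(H.dot(Z), 0.0)   # True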
def gen_code(name, f_sym, dt_sym, x_sym, obs_eqs, dim_x, dim_err, eskf_params=None, msckf_params=None, maha_test_kinds=[]):
# optional state transition matrix, H modifier
# and err_function if an error-state kalman filter (ESKF)
# is desired. Best described in "Quaternion kinematics
# for the error-state Kalman filter" by <NAME>
if eskf_params:
err_eqs = eskf_params[0]
inv_err_eqs = eskf_params[1]
H_mod_sym = eskf_params[2]
f_err_sym = eskf_params[3]
x_err_sym = eskf_params[4]
else:
nom_x = sp.MatrixSymbol('nom_x',dim_x,1)
true_x = sp.MatrixSymbol('true_x',dim_x,1)
delta_x = sp.MatrixSymbol('delta_x',dim_x,1)
err_function_sym = sp.Matrix(nom_x + delta_x)
inv_err_function_sym = sp.Matrix(true_x - nom_x)
err_eqs = [err_function_sym, nom_x, delta_x]
inv_err_eqs = [inv_err_function_sym, nom_x, true_x]
H_mod_sym = sp.Matrix(np.eye(dim_x))
f_err_sym = f_sym
x_err_sym = x_sym
# This configures the multi-state augmentation
# needed for EKF-SLAM with MSCKF (Mourikis et al 2007)
if msckf_params:
msckf = True
dim_main = msckf_params[0] # size of the main state
dim_augment = msckf_params[1] # size of one augment state chunk
dim_main_err = msckf_params[2]
dim_augment_err = msckf_params[3]
N = msckf_params[4]
feature_track_kinds = msckf_params[5]
assert dim_main + dim_augment*N == dim_x
assert dim_main_err + dim_augment_err*N == dim_err
else:
msckf = False
dim_main = dim_x
dim_augment = 0
dim_main_err = dim_err
dim_augment_err = 0
N = 0
# linearize with jacobians
F_sym = f_err_sym.jacobian(x_err_sym)
for sym in x_err_sym:
F_sym = F_sym.subs(sym, 0)
for i in range(len(obs_eqs)):
obs_eqs[i].append(obs_eqs[i][0].jacobian(x_sym))
if msckf and obs_eqs[i][1] in feature_track_kinds:
obs_eqs[i].append(obs_eqs[i][0].jacobian(obs_eqs[i][2]))
else:
obs_eqs[i].append(None)
# collect sympy functions
sympy_functions = []
# error functions
sympy_functions.append(('err_fun', err_eqs[0], [err_eqs[1], err_eqs[2]]))
sympy_functions.append(('inv_err_fun', inv_err_eqs[0], [inv_err_eqs[1], inv_err_eqs[2]]))
# H modifier for ESKF updates
sympy_functions.append(('H_mod_fun', H_mod_sym, [x_sym]))
# state propagation function
sympy_functions.append(('f_fun', f_sym, [x_sym, dt_sym]))
sympy_functions.append(('F_fun', F_sym, [x_sym, dt_sym]))
# observation functions
for h_sym, kind, ea_sym, H_sym, He_sym in obs_eqs:
sympy_functions.append(('h_%d' % kind, h_sym, [x_sym, ea_sym]))
sympy_functions.append(('H_%d' % kind, H_sym, [x_sym, ea_sym]))
if msckf and kind in feature_track_kinds:
sympy_functions.append(('He_%d' % kind, He_sym, [x_sym, ea_sym]))
# Generate and wrap all the C code
header, code = sympy_into_c(sympy_functions)
extra_header = "#define DIM %d\n" % dim_x
extra_header += "#define EDIM %d\n" % dim_err
extra_header += "#define MEDIM %d\n" % dim_main_err
extra_header += "typedef void (*Hfun)(double *, double *, double *);\n"
extra_header += "\nvoid predict(double *x, double *P, double *Q, double dt);"
extra_post = ""
for h_sym, kind, ea_sym, H_sym, He_sym in obs_eqs:
if msckf and kind in feature_track_kinds:
He_str = 'He_%d' % kind
# ea_dim = ea_sym.shape[0]
else:
He_str = 'NULL'
# ea_dim = 1 # not really dim of ea but makes c function work
maha_thresh = chi2_ppf(0.95, int(h_sym.shape[0])) # mahalanobis distance for outlier detection
maha_test = kind in maha_test_kinds
extra_post += """
void update_%d(double *in_x, double *in_P, double *in_z, double *in_R, double *in_ea) {
update<%d,%d,%d>(in_x, in_P, h_%d, H_%d, %s, in_z, in_R, in_ea, MAHA_THRESH_%d);
}
""" % (kind, h_sym.shape[0], 3, maha_test, kind, kind, He_str, kind)
extra_header += "\nconst static double MAHA_THRESH_%d = %f;" % (kind, maha_thresh)
extra_header += "\nvoid update_%d(double *, double *, double *, double *, double *);" % kind
code += "\n" + extra_header
code += "\n" + open(os.path.join(EXTERNAL_PATH, "ekf_c.c")).read()
code += "\n" + extra_post
header += "\n" + extra_header
compile_code(name, code, header, EXTERNAL_PATH)
class EKF_sym():
def __init__(self, name, Q, x_initial, P_initial, dim_main, dim_main_err,
N=0, dim_augment=0, dim_augment_err=0, maha_test_kinds=[]):
'''
Generates the process function and all
observation functions for the Kalman
filter.
'''
if N > 0:
self.msckf = True
else:
self.msckf = False
self.N = N
self.dim_augment = dim_augment
self.dim_augment_err = dim_augment_err
self.dim_main = dim_main
self.dim_main_err = dim_main_err
# state
x_initial = x_initial.reshape((-1, 1))
self.dim_x = x_initial.shape[0]
self.dim_err = P_initial.shape[0]
assert dim_main + dim_augment*N == self.dim_x
assert dim_main_err + dim_augment_err*N == self.dim_err
# kinds that should get mahalanobis distance
# tested for outlier rejection
self.maha_test_kinds = maha_test_kinds
# process noise
self.Q = Q
# rewind stuff
self.rewind_t = []
self.rewind_states = []
self.rewind_obscache = []
self.init_state(x_initial, P_initial, None)
ffi, lib = wrap_compiled(name, EXTERNAL_PATH)
kinds, self.feature_track_kinds = [], []
for func in dir(lib):
if func[:2] == 'h_':
kinds.append(int(func[2:]))
if func[:3] == 'He_':
self.feature_track_kinds.append(int(func[3:]))
# wrap all the sympy functions
def wrap_1lists(name):
func = eval("lib.%s" % name, {"lib":lib})
def ret(lst1, out):
func(ffi.cast("double *", lst1.ctypes.data),
ffi.cast("double *", out.ctypes.data))
return ret
def wrap_2lists(name):
func = eval("lib.%s" % name, {"lib":lib})
def ret(lst1, lst2, out):
func(ffi.cast("double *", lst1.ctypes.data),
ffi.cast("double *", lst2.ctypes.data),
ffi.cast("double *", out.ctypes.data))
return ret
def wrap_1list_1float(name):
func = eval("lib.%s" % name, {"lib":lib})
def ret(lst1, fl, out):
func(ffi.cast("double *", lst1.ctypes.data),
ffi.cast("double", fl),
ffi.cast("double *", out.ctypes.data))
return ret
self.f = wrap_1list_1float("f_fun")
self.F = wrap_1list_1float("F_fun")
self.err_function = wrap_2lists("err_fun")
self.inv_err_function = wrap_2lists("inv_err_fun")
self.H_mod = wrap_1lists("H_mod_fun")
self.hs, self.Hs, self.Hes = {}, {}, {}
for kind in kinds:
self.hs[kind] = wrap_2lists("h_%d" % kind)
self.Hs[kind] = wrap_2lists("H_%d" % kind)
if self.msckf and kind in self.feature_track_kinds:
self.Hes[kind] = wrap_2lists("He_%d" % kind)
# wrap the C++ predict function
def _predict_blas(x, P, dt):
lib.predict(ffi.cast("double *", x.ctypes.data),
ffi.cast("double *", P.ctypes.data),
ffi.cast("double *", self.Q.ctypes.data),
ffi.cast("double", dt))
return x, P
# wrap the C++ update function
def fun_wrapper(f, kind):
f = eval("lib.%s" % f, {"lib": lib})
def _update_inner_blas(x, P, z, R, extra_args):
f(ffi.cast("double *", x.ctypes.data),
ffi.cast("double *", P.ctypes.data),
ffi.cast("double *", z.ctypes.data),
ffi.cast("double *", R.ctypes.data),
ffi.cast("double *", extra_args.ctypes.data))
if self.msckf and kind in self.feature_track_kinds:
y = z[:-len(extra_args)]
else:
y = z
return x, P, y
return _update_inner_blas
self._updates = {}
for kind in kinds:
self._updates[kind] = fun_wrapper("update_%d" % kind, kind)
def _update_blas(x, P, kind, z, R, extra_args=[]):
return self._updates[kind](x, P, z, R, extra_args)
# assign the functions
self._predict = _predict_blas
#self._predict = self._predict_python
self._update = _update_blas
#self._update = self._update_python
def init_state(self, state, covs, filter_time):
self.x = np.array(state.reshape((-1, 1))).astype(np.float64)
self.P =
|
np.array(covs)
|
numpy.array
|
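# --- Generic numpy sketch (not the compiled C implementation above) ----------
# EKF_sym wraps compiled predict/update routines; a textbook linear-Kalman
# sketch of the predict/update pattern they implement (in error-state/EKF form),
# including the Mahalanobis gate that the MAHA_THRESH_* constants feed, is:
import numpy as np

def _kf_predict_sketch(x, P, F, Q):
    x = F @ x                                   # state propagation
    P = F @ P @ F.T + Q                         # covariance propagation
    return x, P

def _kf_update_sketch(x, P, z, H, R, maha_thresh=None):
    y = z - H @ x                               # innovation
    S = H @ P @ H.T + R                         # innovation covariance
    if maha_thresh is not None:
        if float(y.T @ np.linalg.solve(S, y)) > maha_thresh:
            return x, P                         # reject outlier measurement
    K = np.linalg.solve(S.T, H @ P.T).T         # Kalman gain K = P H^T S^-1
    x = x + K @ y
    P = (np.eye(P.shape[0]) - K @ H) @ P
    return x, P
# ------------------------------------------------------------------------------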
"""Calculates variable values
Variable Classifications:
* Base variable: A non-gradient variable that is either used as input to MMM,
or is needed to calculate an MMM input variable. Base variables do not
depend on values of gradient or additional variables.
* Gradient variable: A variable that is a normalized gradient, and may be used
as an input to MMM. These variables depend on values of base variables,
but not on values of additional variables.
* Additional variable: A variable that is not needed for MMM input nor input
calculations. Additional variables can depend on both base and gradient
values.
Calculations for base variables try to match the definition used by TRANSP,
whenever possible. In some cases, perfect matches cannot be obtained, since
TRANSP doesn't save every variable we need for calculations. For example,
our vpar calculation is just an approximation for the TRANSP value of vpar,
although the difference should be fairly negligible.
Calculations for additional variables come directly from the MMM source files,
and these sources will be noted as necessary in the calculation functions
below. The names of variables used here will either directly match or
closely resemble the names of variables in MMM.
TODO:
* Consider replacing interp1d with Akima1DInterpolator, since TRANSP
apparently uses this method of interpolation.
"""
# Standard Packages
import sys
import inspect
import functools
# 3rd Party Packages
import numpy as np
from scipy.interpolate import interp1d
# Local Packages
import modules.constants as constants
import modules.datahelper as datahelper
_gradients = set() # Stores the names of calculated gradient variables
def gradient(gvar_name, var_name, drmin, calc_vars):
'''
Calculates the normalized gradient
After the gradient value is calculated, optional smoothing is applied, and
then the gradient is checked for min and nan values. The overall sign of
the gradient equation is determined by the sign given for drmin.
Parameters:
* gvar_name (str): The name of the variable to store the gradient result in
* var_name (str): The name of the variable to take the gradient of
* drmin (np.ndarray): Differential rmin
* calc_vars (InputVariables): Object containing variable data
'''
_gradients.add(gvar_name)
rmaj = calc_vars.rmaj.values
x = calc_vars.x.values[:, 0]
xb = calc_vars.xb.values[:, 0] # includes origin
# get variables related to the gradient from variable names
gvar = getattr(calc_vars, gvar_name)
var = getattr(calc_vars, var_name)
# partial derivative along radial dimension
dxvar = np.diff(var.values, axis=0) / drmin
# interpolate from x grid to xb grid
set_interp = interp1d(x, dxvar, kind='cubic', fill_value="extrapolate", axis=0)
dxvar = set_interp(xb)
# take gradient
gradient_values = rmaj * dxvar / var.values
gvar.set(values=gradient_values, units='')
if calc_vars.options.apply_smoothing:
gvar.apply_smoothing()
gvar.set_origin_to_zero()
gvar.clamp_values(constants.MAX_GRADIENT)
gvar.set_minvalue(ignore_exceptions=calc_vars.options.ignore_exceptions)
gvar.check_for_nan(ignore_exceptions=calc_vars.options.ignore_exceptions)
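# Minimal standalone sketch (hypothetical 1-D profile, not TRANSP data) of the
# normalized-gradient definition implemented above: g = rmaj * d(var)/dr / var.
# The real gradient() additionally interpolates from the x grid onto the xb grid
# and applies smoothing and clamping; this only shows the core formula.
def _normalized_gradient_sketch():
    r = np.linspace(0.01, 1.0, 50)      # hypothetical minor-radius grid (m)
    var = np.exp(-3 * r)                # hypothetical profile (e.g. a temperature)
    rmaj = 3.0                          # hypothetical major radius (m)
    dvar_dr = np.gradient(var, r)       # finite-difference radial derivative
    return rmaj * dvar_dr / var         # normalized gradient profile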
def calculation(func):
'''
Decorator function that wraps each non-gradient variable calculation
In addition to storing the result of each calculation function to the
corresponding variable object, the calculation decorator adds additional
functionality at the end of each variable calculation as well. In
particular, optional smoothing is applied to the variable, and then the
variable is checked for min and nan values. The units of each calculation
are as specified for each corresponding variable in the InputVariables
class.
Note: The name of the variable functions must match the name of the
variable in the InputVariables class in order for the calculations to
work.
Parameters:
* func (function): Function of the variable to calculate
'''
@functools.wraps(func) # Preserves the name of functions decorated with @calculation
def wrapper(calc_vars):
var = getattr(calc_vars, func.__name__) # Get the variable corresponding to func
var.values = func(calc_vars) # Do the calculation
if calc_vars.options.apply_smoothing:
var.apply_smoothing()
var.set_minvalue(ignore_exceptions=calc_vars.options.ignore_exceptions)
var.check_for_nan(ignore_exceptions=calc_vars.options.ignore_exceptions)
return func
wrapper.calculation = True
return wrapper
def calculation_output(func):
'''
Decorator function that wraps each non-gradient variable calculation
Same as the calculation wrapper, but calculations are made with and stored
in output variables. Output variables will be 1D arrays compared with
the 2D arrays of calc_vars.
Note: The name of the variable functions must match the name of the
variable in the OutputVariables class in order for the calculations to
work.
Parameters:
* func (function): Function of the variable to calculate
'''
@functools.wraps(func) # Preserves the name of functions decorated with @calculation_output
def wrapper(calc_vars, output_vars):
var = getattr(output_vars, func.__name__) # Get the variable corresponding to func
var.values = func(calc_vars, output_vars) # Do the calculation
if output_vars.options.apply_smoothing:
var.apply_smoothing()
var.set_minvalue(ignore_exceptions=calc_vars.options.ignore_exceptions)
var.check_for_nan(ignore_exceptions=calc_vars.options.ignore_exceptions)
return func
wrapper.calculation = True
return wrapper
@calculation
def ahyd(calc_vars):
'''Mean Atomic Mass of Hydrogenic Ions (Hydrogen + Deuterium)'''
nh0 = calc_vars.nh0.values
nd = calc_vars.nd.values
return (nh0 + 2 * nd) / (nh0 + nd)
@calculation
def aimass(calc_vars):
'''Mean Atomic Mass of Thermal Ions'''
ahyd = calc_vars.ahyd.values
aimp = calc_vars.aimp.values
nh = calc_vars.nh.values
nz = calc_vars.nz.values
return (ahyd * nh + aimp * nz) / (nh + nz)
@calculation
def alphamhd(calc_vars):
'''Alpha MHD (Weiland Definition)'''
betae = calc_vars.betae.values
gne = calc_vars.gne.values
gni = calc_vars.gni.values
gte = calc_vars.gte.values
gti = calc_vars.gti.values
q = calc_vars.q.values
te = calc_vars.te.values
ti = calc_vars.ti.values
return q**2 * betae * (gne + gte + ti / te * (gni + gti))
@calculation
def alphamhdunit(calc_vars):
'''Alpha MHD (Weiland Definition)'''
betaeunit = calc_vars.betaeunit.values
gne = calc_vars.gne.values
gni = calc_vars.gni.values
gte = calc_vars.gte.values
gti = calc_vars.gti.values
q = calc_vars.q.values
te = calc_vars.te.values
ti = calc_vars.ti.values
return q**2 * betaeunit * (gne + gte + ti / te * (gni + gti))
@calculation
def beta(calc_vars):
'''Beta'''
zcmu0 = constants.ZCMU0
btor = calc_vars.btor.values
p = calc_vars.p.values
return 2 * zcmu0 * p / btor**2
@calculation
def betae(calc_vars):
'''Electron Beta'''
zckb = constants.ZCKB
zcmu0 = constants.ZCMU0
btor = calc_vars.btor.values
ne = calc_vars.ne.values
te = calc_vars.te.values
return 2 * zcmu0 * ne * te * zckb / btor**2
@calculation
def betanorm(calc_vars):
'''Normalized Beta ???'''
pcur = calc_vars.pcur.values
rmin = calc_vars.rmin.values
bz = calc_vars.bz.values
# bt0,"vacuum field at Rmajmp",Tesla = bzxr/Rmajmp
# pav,"vol avg total pressure",Pascals = %time_trace(I1.0,volavg(Ptowb))
# beta1,"Beta-total","" = 2*mu0*pav/(Bt0*Bt0)
# betan,"Beta-N-total","" = beta1/((pcur/1.0e6)/(aminmp*Bt0))
return pcur / rmin / bz / 1e6
@calculation
def betaepunit(calc_vars):
'''Electron Beta Prime'''
alphamhdunit = calc_vars.alphamhdunit.values
q = calc_vars.q.values
return 2.54 * alphamhdunit / (2 * q**2)
@calculation
def betaeunit(calc_vars):
'''Electron Beta'''
zckb = constants.ZCKB
zcmu0 = constants.ZCMU0
bunit = calc_vars.bunit.values
ne = calc_vars.ne.values
te = calc_vars.te.values
return 2 * zcmu0 * ne * te * zckb / bunit**2
@calculation
def bpol(calc_vars):
'''Poloidal Magnetic Field'''
btor = calc_vars.btor.values
q = calc_vars.q.values
rmaj = calc_vars.rmaj.values[-1, :]
rmin = calc_vars.rmin.values
return rmin / rmaj * btor / q
@calculation
def btor(calc_vars):
'''Toroidal Magnetic Field'''
bzxr = calc_vars.bzxr.values
rmaj = calc_vars.rmaj.values
return bzxr / rmaj
@calculation
def bunit(calc_vars):
'''Magnetic Field (unit)'''
btor0 = calc_vars.btor.values[0, :]
rhochi = calc_vars.rhochi.values
rmin = calc_vars.rmin.values
x = calc_vars.x.values[:, 0] # same for all time values
xb = calc_vars.xb.values[:, 0] # same for all time values
drho_drmin = np.diff(rhochi, axis=0) / np.diff(rmin, axis=0)
# interpolate from x grid to xb grid
set_interp = interp1d(x, drho_drmin, kind='cubic', fill_value="extrapolate", axis=0)
dxrho = set_interp(xb)
bunit = np.empty_like(dxrho)
bunit[1:, :] = btor0 * rhochi[1:, :] / rmin[1:, :] * dxrho[1:, :]
bunit[0, :] = bunit[1, :]
return bunit
@calculation
def bunit_btor(calc_vars):
'''Toroidal Magnetic Field'''
bunit = calc_vars.bunit.values
btor = calc_vars.btor.values
return bunit / btor
@calculation
def csound(calc_vars):
'''Sound Speed'''
zckb = constants.ZCKB
zcmp = constants.ZCMP
aimass = calc_vars.aimass.values
te = calc_vars.te.values
return (zckb * te / (zcmp * aimass))**(1 / 2)
@calculation
def csound_a(calc_vars):
'''Sound Frequency'''
csound = calc_vars.csound.values
amin = calc_vars.rmin.values[-1, :]
return csound / amin
@calculation
def curlh(calc_vars):
'''LH Current'''
curdlh = calc_vars.curdlh.values
area = calc_vars.surf.values
rmin = calc_vars.rmin.values
# return curdlh * area
return curdlh * constants.PI * rmin**2
@calculation
def curoh(calc_vars):
'''OH Current'''
curdoh = calc_vars.curdoh.values
area = calc_vars.darea.values
return curdoh * area
@calculation
def e_r_grp(calc_vars):
'''Radial Electric Field (Pressure Term)'''
zce = constants.ZCE
zckb = constants.ZCKB
ni = calc_vars.ni.values
rmin = calc_vars.rmin.values
ti = calc_vars.ti.values
x = calc_vars.x.values[:, 0]
xb = calc_vars.xb.values[:, 0]
zeff = calc_vars.zeff.values
p_i = ti * ni * zckb
drmin = np.diff(rmin, axis=0)
dpdr_x = np.diff(p_i, axis=0) / drmin
# interpolate from x grid to xb grid
set_interp = interp1d(x, dpdr_x, kind='cubic', fill_value="extrapolate", axis=0)
dpdr = set_interp(xb)
# From pt_vflows_mod.f90:
# zE_r_grp(lcentr:lep1) = 1.0 / ( xzeffp(lcentr:lep1,1) * ze * rhoth(lcentr:lep1,2) * 1.0E6 ) * zgrp(lcentr:lep1)
return dpdr / (zeff * zce * ni)
@calculation
def e_r_phi(calc_vars):
'''Radial Electric Field (vtor Term)'''
bpol = calc_vars.bpol.values
omega = calc_vars.omega.values
rmaj = calc_vars.rmaj.values
# From pt_vflows_mod.f90:
# zE_r_phi(lcentr:lep1) = omega(lcentr:lep1,1) * rmjrmp(lcentr:lep1,1) * zcm_to_m * bpol(lcentr:lep1)
return omega * rmaj * bpol
@calculation
def e_r_tht(calc_vars):
'''Radial Electric Field (vpol Term)'''
vpol = calc_vars.vpol.values
bzxr = calc_vars.bzxr.values
rmaj = calc_vars.rmaj.values
# From pt_vflows_mod.f90:
# zE_r_tht(lcentr:lep1) = -1.0 * zvpol(lcentr:lep1) * zcm_to_m * bzxr / rmjrmp(lcentr:lep1,1)
return -vpol * bzxr / rmaj
@calculation
def eps(calc_vars):
'''Inverse Aspect Ratio'''
arat = calc_vars.arat.values
return 1 / arat
@calculation
def epsilonne(calc_vars):
'''Pinch Term'''
gbunit = calc_vars.gbunit.values
gne = calc_vars.gne.values
return 2 * gbunit / gne
@calculation
def etae(calc_vars):
'''etae = gte / gne'''
gte = calc_vars.gte.values
gne = calc_vars.gne.values
return gte / gne
@calculation
def etai(calc_vars):
'''
etai = gti / gni
Note that TRANSP appears to use an entirely different definition of ni
when calculating gni, than it uses for the values of ni itself. As such,
our calculation of etai will generally not match with TRANSP values.
'''
gti = calc_vars.gti.values
gni = calc_vars.gni.values
return gti / gni
@calculation
def gmax(calc_vars):
'''Upper bound for ne, nh, te, and ti gradients in DRBM model (modmmm.f90)'''
eps = calc_vars.eps.values
q = calc_vars.q.values
rmaj = calc_vars.rmaj.values
gyrfi = calc_vars.gyrfi.values
vthi = calc_vars.vthi.values
return rmaj / (vthi / gyrfi * q / eps)
@calculation
def gmaxunit(calc_vars):
'''Upper bound for ne, nh, te, and ti gradients in DRBM model (modmmm.f90)'''
eps = calc_vars.eps.values
q = calc_vars.q.values
rmaj = calc_vars.rmaj.values
gyrfiunit = calc_vars.gyrfiunit.values
vthi = calc_vars.vthi.values
return 2 * rmaj / (vthi / gyrfiunit * q / eps)
@calculation
def gyrfi(calc_vars):
'''Ion Gyrofrequency'''
zce = constants.ZCE
zcmp = constants.ZCMP
aimass = calc_vars.aimass.values
btor = calc_vars.btor.values
return zce * btor / (zcmp * aimass)
@calculation
def gyrfiunit(calc_vars):
'''Ion Gyrofrequency'''
zce = constants.ZCE
zcmp = constants.ZCMP
aimass = calc_vars.aimass.values
bunit = calc_vars.bunit.values
return zce * bunit / (zcmp * aimass)
@calculation
def gyrfe(calc_vars):
'''Electron Gyrofrequency'''
zce = constants.ZCE
zcme = constants.ZCME
btor = calc_vars.btor.values
return zce * btor / zcme
@calculation
def gyrfeunit(calc_vars):
'''Electron Gyrofrequency'''
zce = constants.ZCE
zcme = constants.ZCME
bunit = calc_vars.bunit.values
return zce * bunit / zcme
@calculation
def gxi(calc_vars):
rhochi = calc_vars.rhochi.values
elong = calc_vars.elong.values
rmin = calc_vars.rmin.values
x = calc_vars.x.values[:, 0]
xb = calc_vars.xb.values[:, 0]
drmin = np.diff(calc_vars.rmin.values, axis=0)
dxvar = np.diff(rhochi, axis=0) / drmin
# interpolate from x grid to xb grid
set_interp = interp1d(x, dxvar, kind='cubic', fill_value="extrapolate", axis=0)
dxvar2 = set_interp(xb)
return dxvar2 * rmin[-1, :] * elong[-1, :]**0.5
# return (1 + elong**2 / (2 * elong**2))**0.5
@calculation
def lare(calc_vars):
'''Electron Gyroradius'''
vthe = calc_vars.vthe.values
gyrfe = calc_vars.gyrfe.values
return vthe / gyrfe
@calculation
def lareunit(calc_vars):
'''Electron Gyroradius'''
vthe = calc_vars.vthe.values
gyrfeunit = calc_vars.gyrfeunit.values
return vthe / gyrfeunit
@calculation
def loge(calc_vars):
'''Electron Coulomb Logarithm'''
# TODO: Need to add equations for different TE ranges
ne = calc_vars.ne.values
te = calc_vars.te.values
# NRL Plasma Formulary Definition
# return 37.8 - np.log(ne**(1 / 2) / te)
# TRANSP definition (equivalent)
zeff = calc_vars.zeff.values
return 39.23 - np.log(zeff * ne**(1 / 2) / te)
@calculation
def nh0(calc_vars):
'''Hydrogen Ion Density'''
nd = calc_vars.nd.values
ne = calc_vars.ne.values
nf = calc_vars.nf.values
nz = calc_vars.nz.values
zimp = calc_vars.zimp.values
return ne - zimp * nz - nf - nd
@calculation
def nh(calc_vars):
'''Total Hydrogenic Ion Density'''
nh0 = calc_vars.nh0.values
nd = calc_vars.nd.values
return nh0 + nd
@calculation
def ni(calc_vars):
'''Thermal Ion Density'''
nh = calc_vars.nh.values
nz = calc_vars.nz.values
# TRANSP Definition
return nh + nz
@calculation
def ni2(calc_vars):
'''Thermal Ion Density v2'''
nh = calc_vars.nh.values
nz = calc_vars.nz.values
nf = calc_vars.nf.values
zimp = calc_vars.zimp.values
# TRANSP likely uses this for taking ion density gradients
return nh + zimp**2 * nz + nf
@calculation
def nuei(calc_vars):
'''Collision Frequency'''
zcf = constants.ZCF
ne = calc_vars.ne.values
te = calc_vars.te.values
zeff = calc_vars.zeff.values
loge = calc_vars.loge.values
return zcf * 2**(1 / 2) * ne * loge * zeff / te**(3 / 2)
@calculation
def nuei2(calc_vars):
'''OLD NOTE: Not sure what to call this, but it leads to the approx the correct NUSTI'''
zcf = constants.ZCF
ni = calc_vars.ni.values
ti = calc_vars.ti.values
zeff = calc_vars.zeff.values
loge = calc_vars.loge.values
return zcf * 2**(1 / 2) * ni * loge * zeff / ti**(3 / 2)
@calculation
def nuste(calc_vars):
'''
Electron Collisionality
This is in approximate agreement with NUSTE in TRANSP. One source of the
disagreement is likely because the modmmm7_1.f90 Coulomb logarithm
(loge) does not match perfectly with the TRANSP version (CLOGE). However,
our Coulomb logarithm definition follows from the NRL plasma formulary,
and we feel that its use is correct here.
'''
eps = calc_vars.eps.values
nuei = calc_vars.nuei.values
q = calc_vars.q.values
rmaj = calc_vars.rmaj.values
vthe = calc_vars.vthe.values
return nuei * eps**(-3 / 2) * q * rmaj / vthe
@calculation
def nusti(calc_vars):
'''
Ion Collisionality
OLD NOTE: This is approximately correct, but agreement is also somewhat
time-dependent. We likely need to use the Coulomb logarithm for ions in
nuei instead of the Coulomb logarithm for electrons.
'''
zcme = constants.ZCME
zcmp = constants.ZCMP
eps = calc_vars.eps.values
q = calc_vars.q.values
nuei2 = calc_vars.nuei2.values
rmaj = calc_vars.rmaj.values
vthi = calc_vars.vthi.values
return nuei2 * eps**(-3 / 2) * q * rmaj / (2 * vthi) * (zcme / zcmp)**(1 / 2)
@calculation
def p(calc_vars):
'''Plasma Pressure'''
zckb = constants.ZCKB
ne = calc_vars.ne.values
ni = calc_vars.ni.values
te = calc_vars.te.values
ti = calc_vars.ti.values
return (ne * te + ni * ti) * zckb
@calculation
def rhochi(calc_vars):
'''
Rho, derived from magnetic flux
'''
pi = constants.PI
btor0 = calc_vars.btor.values[0, :]
bftor = calc_vars.bftor.values
return (bftor / (pi * btor0))**(1 / 2)
@calculation
def rhosunit(calc_vars):
'''
Rhos (Unit)
'''
zckb = constants.ZCKB
zcmp = constants.ZCMP
aimass = calc_vars.aimass.values
te = calc_vars.te.values
gyrfiunit = calc_vars.gyrfiunit.values
zsound = (zckb * te / (zcmp * aimass))**(1 / 2)
return zsound / gyrfiunit
@calculation
def shat(calc_vars):
'''Effective Magnetic Shear'''
elong = calc_vars.elong.values
shear = calc_vars.shear.values
return np.maximum(2 * shear - 1 + (elong * (shear - 1))**2, 0)**(1 / 2)
@calculation
def shat_gxi(calc_vars):
'''Effective Magnetic Shear'''
a = calc_vars.rmin.values[-1, :]
gxi = calc_vars.gxi.values
shear = calc_vars.shear.values
signs = np.ones_like(shear)
signs[shear < 0] = -1
return np.maximum(2 * shear - 1 + ((a * gxi) * (shear - 1))**2, 0)**(1 / 2) * signs
@calculation
def shat_gxi_q(calc_vars):
'''Effective Magnetic Shear'''
q = calc_vars.q.values
shat_gxi = calc_vars.shat_gxi.values
return shat_gxi / q
@calculation
def shear(calc_vars):
'''Magnetic Shear'''
gq = calc_vars.gq.values
rmaj = calc_vars.rmaj.values
rmin = calc_vars.rmin.values
return gq * rmin / rmaj
@calculation
def tau(calc_vars):
'''Temperature Ratio te / ti'''
te = calc_vars.te.values
ti = calc_vars.ti.values
return te / ti
@calculation
def tauh(calc_vars):
'''Temperature Ratio ti / te'''
tau = calc_vars.tau.values
return 1 / tau
@calculation
def vthe(calc_vars):
'''Thermal Velocity of Electrons'''
zckb = constants.ZCKB
zcme = constants.ZCME
te = calc_vars.te.values
return (2 * zckb * te / zcme)**(1 / 2)
@calculation
def vthi(calc_vars):
'''Thermal Velocity of Ions'''
zckb = constants.ZCKB
zcmp = constants.ZCMP
aimass = calc_vars.aimass.values
ti = calc_vars.ti.values
return (2 * zckb * ti / (zcmp * aimass))**(1 / 2)
@calculation
def vpar(calc_vars):
'''Parallel Velocity'''
bpol = calc_vars.bpol.values
btor = calc_vars.btor.values
vpol = calc_vars.vpol.values
vtor = calc_vars.vtor.values
return vtor + vpol * bpol / btor
@calculation
def wbounce(calc_vars):
'''Bounce Frequency'''
rmaj = calc_vars.rmaj.values
rmin = calc_vars.rmin.values
wtransit = calc_vars.wtransit.values
return (rmin / (2 * rmaj))**(1 / 2) * wtransit
@calculation
def wtransit(calc_vars):
'''Transit Frequency'''
vthe = calc_vars.vthe.values
q = calc_vars.q.values
rmaj = calc_vars.rmaj.values
return vthe / (q * rmaj)
@calculation
def wexbs(calc_vars):
'''ExB Shear Rate (adapted from pt_vflows_mod.f90)'''
def dfdr(E_r):
# zE_r(lcp1:lep1)/ ( bpol(lcp1:lep1) * rmjrmp(lcp1:lep1,1) * zcm_to_m )
f = E_r / (bpol * rmaj)
drmin = np.diff(rmin, axis=0)
dfdr_x = np.diff(f, axis=0) / drmin
# interpolate from x grid to xb grid
set_interp = interp1d(x, dfdr_x, kind='cubic', fill_value="extrapolate", axis=0)
return set_interp(xb)
x = calc_vars.x.values[:, 0] # same for all time values
xb = calc_vars.xb.values[:, 0] # same for all time values
bpol = calc_vars.bpol.values
bzxr = calc_vars.bzxr.values
rmaj = calc_vars.rmaj.values
rmin = calc_vars.rmin.values
# Electric Field Components
E_r_phi = calc_vars.e_r_phi.values
E_r_tht = calc_vars.e_r_tht.values
E_r_grp = calc_vars.e_r_grp.values
bratio = rmaj * bpol / (bzxr / rmaj[0, :])
wexbs_phi = np.minimum(bratio * dfdr(E_r_phi), 1e6)
wexbs_tht = np.minimum(bratio * dfdr(E_r_tht), 1e6) * 1e-2
wexbs_grp = np.minimum(bratio * dfdr(E_r_grp), 1e6)
return np.absolute(wexbs_phi + wexbs_tht + wexbs_grp)
@calculation
def xetgm_const(calc_vars):
'''ETGM Diffusivity Factor'''
lareunit = calc_vars.lareunit.values
vthe = calc_vars.vthe.values
gmaxunit = calc_vars.gmaxunit.values
gte = np.maximum(
|
np.minimum(calc_vars.gte.values, gmaxunit)
|
numpy.minimum
|
"""
Define functions needed for the demos.
"""
import numpy as np
from scipy.fftpack import fft2, ifft2, fftshift, ifftshift
from scipy.signal import fftconvolve
from bm3d import gaussian_kernel
def get_psnr(y_est: np.ndarray, y_ref: np.ndarray) -> float:
"""
Return PSNR value for y_est and y_ref presuming the noise-free maximum is 1.
:param y_est: Estimate array
:param y_ref: Noise-free reference
:return: PSNR value
"""
return 10 * np.log10(1 / np.mean(((y_est - y_ref).ravel()) ** 2))
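# Illustrative sanity check for get_psnr (not part of the original demo code,
# shown only as a hedged example): a constant error of 0.1 on every pixel gives
# MSE = 0.01, hence PSNR = 10 * log10(1 / 0.01) = 20 dB, e.g.
#   y_ref = np.zeros((8, 8)); y_est = y_ref + 0.1
#   assert abs(get_psnr(y_est, y_ref) - 20.0) < 1e-6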
def get_cropped_psnr(y_est: np.ndarray, y_ref: np.ndarray, crop: tuple) -> float:
"""
Return PSNR value for y_est and y_ref presuming the noise-free maximum is 1.
Crop the images before calculating the value by crop.
:param y_est: Estimate array
:param y_ref: Noise-free reference
:param crop: Tuple of crop-x and crop-y applied from both sides
:return: PSNR value
"""
return get_psnr(
np.atleast_3d(y_est)[crop[0] : -crop[0], crop[1] : -crop[1], :],
np.atleast_3d(y_ref)[crop[0] : -crop[0], crop[1] : -crop[1], :],
)
def get_experiment_kernel(
noise_type: str, noise_var: float, sz: tuple = np.array((101, 101))
):
"""
Get kernel for generating noise from specific experiment from the paper.
:param noise_type: Noise type string, g[0-4](w|)
:param noise_var: noise variance
:param sz: size of image, used only for g4 and g4w
:return: experiment kernel with the l2-norm equal to variance
"""
# Default kernel; used as-is if noiseType == gw / g0
kernel = np.array([[1]])
noise_types = ['gw', 'g0', 'g1', 'g2', 'g3', 'g4', 'g1w', 'g2w', 'g3w', 'g4w']
if noise_type not in noise_types:
raise ValueError("Noise type must be one of " + str(noise_types))
if noise_type != "g4" and noise_type != "g4w":
# Crop the kernel to this size when generating,
# unless pink noise (g4 / g4w), in which case
# we want to use the full image size
sz = np.array([101, 101])
else:
sz = np.array(sz)
# Sizes for meshgrids
sz2 = -(1 - (sz % 2)) * 1 + np.floor(sz / 2)
sz1 = np.floor(sz / 2)
uu, vv = np.meshgrid(
[i for i in range(-int(sz1[0]), int(sz2[0]) + 1)],
[i for i in range(-int(sz1[1]), int(sz2[1]) + 1)],
)
beta = 0.8
if noise_type[0:2] == 'g1':
# Horizontal line
kernel = np.atleast_2d(16 - abs(np.linspace(1, 31, 31) - 16))
elif noise_type[0:2] == 'g2':
# Circular repeating pattern
scale = 1
dist = uu ** 2 + vv ** 2
kernel = np.cos(
|
np.sqrt(dist)
|
numpy.sqrt
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
#!/usr/bin/env python
#
#The MIT CorrelX Correlator
#
#https://github.com/MITHaystack/CorrelX
#Contact: <EMAIL>
#Project leads: <NAME>, <NAME>. Project developer: <NAME>
#
#Copyright 2017 MIT Haystack Observatory
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
#------------------------------
#------------------------------
#Project: CorrelX.
#File: lib_fx_stack.py.
#Author: <NAME> (<EMAIL>)
#Description:
"""
CorrelX FX correlation and samples-stack routines.
"""
#History:
#initial version: 2016.10 ajva
#MIT Haystack Observatory
from __future__ import print_function,division
import scipy.fftpack as scfft
import numpy as np
from copy import copy
import os
if os.environ.get("is_legacy"):
from const_mapred import *
from lib_pcal import *
from lib_delay_model import *
from lib_debug import *
from const_performance import *
else:
from cxs.app.helpers.const_debug import DEBUG_HSTACK, DEBUG_LIB_DELAY, DEBUG_FRAC_OVER, DEBUG_GENERAL_R, DEBUG_DELAYS
from cxs.app.helpers.lib_debug import print_debug_r_hstack, print_debug_r_hstack_arrow, print_debug_r_hstack_separator,\
print_debug_r_delays_d, print_debug_r_delays_f
from cxs.app.base.const_mapred import KEY_SEP
from cxs.config.const_ini_files import C_INI_CR_WINDOW_HANNING
from cxs.computation.base.const_performance import USE_FFTW, THREADS_FFTW, USE_NE, USE_MP, THREADS_NE, USE_NE_EXP, \
USE_NE_FRINGE, FULL_TIMESCALE, SAVE_TIME_ROTATIONS, MP_THREADS
from cxs.computation.delay.lib_delay_model import DIFF_POLY, get_delay_val, get_full_frac_val
from cxs.computation.pcal.lib_pcal import accumulate_pcal_all
if USE_FFTW:
import pyfftw
pyfftw.interfaces.cache.enable()
#pyfftw.interfaces.cache.set_keepalive_time(20)
# use numexpr
if USE_NE:
import numexpr as ne
ne.set_num_threads(THREADS_NE)
# multiprocessing
if USE_MP:
import multiprocessing
###########################################
# Samples stacking
###########################################
def hstack_new_samples(F1_partial,F_ind_partial,F_ind,F1,F_adj_shift_partial,F_stack_shift,F_lti_in,F_first_sample,\
mode_str="",F_frac_over_ind=[]):
"""
Concatenation of new samples with previously saved.
Parameters
----------
F1_partial
previously saved samples.
F_ind_partial
identifiers for each row of saved samples.
F_ind
identifiers for rows of new samples.
F1
new samples.
F_adj_shift_partial
[unused] previously used for keeping track of the number of samples to add/drop, superseded by F_frac_over.
F_refs
indices for F1 based on F1_partial, used for other lists obtained in update_stored_samples(): (F_delay,F_rates,etc).
F_stack_shift
[unused] previously used to keep track of added/dropped samples.
F_lti_in
list with last sample (l), total number of samples processed (t), invalid samples (i), and adjusted samples for each stream.
F_first_sample
list with first sample corresponding to the streams in F1.
mode_str
"f" for normal operation model, "pcal" for saving the samples for phase calibration.
F_frac_over_ind
list with the number and positions of the samples added/dropped due to fractional sample correction overflow,
see get_frac_over_ind() and fix_frac_over() for more details.
Returns
-------
F1_partial_out
F1_partial with samples from F1 added.
F_ind_partial_out
station-polarization identifiers for the streams in F1_partial_out (same format as in keys).
F_refs_out
indices to F_frac etc structures based on F1_partial.
F_stack_shift_out
[unused] previously used to keep track of added/dropped samples.
F_lti_out
F_lti updated.
F1_out
Empty list of arrays.
Notes
-----
|
| **Procedure:**
|
| If there are stored samples, iterate over list of stored samples:
| 1. [currently disabled] Add zero padding if first sample does not match the expected first sample number.
| 2. Concatenate new samples (accessed at F1 via F_refs) with stored samples.
| Otherwise initialize structures and store new samples.
|
|
| **Approximations:**
|
| -Zero-padding is currently disabled and new samples are simply stored, since the offsets in the update of
| added/dropped samples from fix_frac_over() still need to be debugged. Although these numbers are obtained in the
| mapper, there may be invalid frames that leave a gap.
| -It is not checked whether the first sample number is less than the last sample stored, but this should not be necessary.
|
|
| **Debugging:**
|
| Activate DEBUG_HSTACK for tabulated output of the concatenations.
|
|
| **TO DO:**
|
| Group repeated code, and create functions for managing F_lti.
"""
F_lti=np.copy(F_lti_in)
failed_hstack=0
F_stack_shift_out=F_stack_shift
reset_structures=0
F_ind_partial_out=[]
F1_partial_out=[]
F_lti_out=[]
F_refs_out=list(range(len(F1))) # This is for indices to other structures, to avoid moving data
adjusted=0
for i in range(max(len(F_ind),len(F_ind_partial))):
F_ind_partial_out.append(-1)
F1_partial_out.append(np.array([]).reshape(0))
if i<len(F_lti):
F_lti_out.append(F_lti[i])
else:
F_lti_out.append([-1,0,0,0]) # last, total, invalid, adjusted
if (len(F_ind_partial)==0):
reset_structures=1
else:
#TO DO: resort if necessary based on F_ind and F_ind_partial, and check for errors
# Currently simply checking that stored and new are equal; if not, dismiss old data and take new
if F_ind_partial!=[]:
if F_ind_partial!=[]:
for i in range(len(F_ind)):
# Check if missing data
tot_samples=len(F1[i])
last_sample=F_first_sample[i]+tot_samples
# There may be a different sorting due to delay correction...
if F_ind[i] in F_ind_partial:
index_in_partial=F_ind_partial.index(F_ind[i])
diff_first=F_first_sample[i]-(F_lti_out[index_in_partial][0])
# TO DO: they should be equal, add check for other inequality
offset_frac=0
if F_frac_over_ind!=[]:
offset_frac=F_frac_over_ind[i][0]
#diff_first-=offset_frac
#diff_first-=F_lti[index_in_partial][3]
F_lti_out[index_in_partial][3]+=offset_frac
##if diff_first>0:
## TO DO: disabled padding for missing samples...
# # Missing samples
# F1_partial_out[index_in_partial]=np.hstack((F1_partial[index_in_partial],np.zeros(diff_first,dtype=complex),F1[i]))
# F_lti_out[index_in_partial][2]+=diff_first
# print("zR\Warning: Inserted "+str(diff_first)+" samples at ls "+str(last_sample)+" for st "+str(F_ind[i]))
#else:
try:
F1_partial_out[index_in_partial]=np.hstack((F1_partial[index_in_partial],F1[i]))
except IndexError:
print("Failed hstack "+str(F_first_sample[i]))
failed_hstack=1
F_ind_partial_out[index_in_partial]=F_ind[i]
F_refs_out[index_in_partial]=i
if DEBUG_HSTACK:
print_debug_r_hstack(mode_str,index_in_partial,F_ind_partial[index_in_partial],\
F_ind_partial_out[index_in_partial],i,F_ind[i])
F_lti_out[index_in_partial][0]=last_sample
F_lti_out[index_in_partial][1]+=tot_samples
else:
# new record
diff_first=F_first_sample[i]-(F_lti_out[i][0])
offset_frac=0
if F_frac_over_ind!=[]:
offset_frac=F_frac_over_ind[i][0]
#diff_first-=offset_frac
# This record does not exist yet
#diff_first-=F_lti[i][3]
F_lti_out[i][3]+=offset_frac
##if diff_first>0:
## HARDCODED: disabled padding...
# F1_partial_out[i]=np.hstack((np.zeros(diff_first,dtype=complex),F1[i]))
# F_lti_out[i][2]+=diff_first
# #F_lti_out[i][3]+=offset_frac
# print("zR\Warning: Inserted "+str(diff_first)+" samples at ls "+str(last_sample)+" for st "+str(F_ind[i]))
#else:
F1_partial_out[i]=np.copy(F1[i])
F_ind_partial_out[i]=F_ind[i]
F_refs_out[i]=i
if DEBUG_HSTACK:
print_debug_r_hstack(mode_str,i,F_ind_partial[i],F_ind_partial_out[i],i,F_ind[i])
F_lti_out[i][0]=last_sample
F_lti_out[i][1]+=tot_samples
#if F_frac_over_ind!=[]:
# F_lti_out[index_in_partial][3]=offset_frac
else:
reset_structures=1
else:
reset_structures=1
tried=0
if DEBUG_FRAC_OVER:
print("zR"+KEY_SEP+"oai"+str(len(F_stack_shift)).rjust(10)+str(len(F_adj_shift_partial)).rjust(10))
print("zR"+KEY_SEP+"oi".ljust(8)+" "+','.join(map(str,F_stack_shift))+" "+','.join(map(str,F_adj_shift_partial)))
# TO DO: this needs to be checked
if len(F_stack_shift)<len(F_adj_shift_partial):
F_stack_shift_out=[0]*len(F_adj_shift_partial)
if DEBUG_FRAC_OVER:
print("zR"+KEY_SEP+"o T="+str(tried)+",A="+str(adjusted))
if reset_structures:
#F1_partial_out=F1[:]
F_lti_out=[]
for i in range(len(F1)):
F_lti_out.append([0,0,0,0])
offset_frac=0
if F_frac_over_ind!=[]:
offset_frac=F_frac_over_ind[i][0]
F_lti_out[i][3]=offset_frac
tot_samples=len(F1[i])
last_sample=F_first_sample[i]+tot_samples
diff_first=F_first_sample[i]-(F_lti_out[i][0])
##if diff_first>0:
#if 1==0:
# F1_partial_out[i]=np.hstack((np.zeros(diff_first,dtype=complex),F1[i]))
# F_lti_out[i][2]+=diff_first
#else:
F1_partial_out[i]=np.copy(F1[i])
F_ind_partial_out[i]=F_ind[i]
F_refs_out[i]=i
F_lti_out[i][0]=last_sample
F_lti_out[i][1]+=tot_samples
if DEBUG_HSTACK:
print_debug_r_hstack(mode_str,i,None,F_ind_partial_out[i],i,F_ind[i])
F_stack_shift_out=[0]*len(F_ind_partial_out)
#F_ind_partial_out=F_ind[:]
#F_refs_out=list(range(len(F1)))
#if (F_stack_shift!=[])and(F_adj_shift_partial!=[]):
# F_stack_shift_out=F_adj_shift_partial[:]
#else:
# F_stack_shift_out=[0]*len(F_adj_shift_partial)
if DEBUG_FRAC_OVER:
print("zR"+KEY_SEP+"oao"+str(len(F_stack_shift_out)).rjust(10)+str(len(F_adj_shift_partial)).rjust(10))
if DEBUG_HSTACK:
print_debug_r_hstack_arrow(F1_partial,F1,F1_partial_out,F_lti_out)
if DEBUG_HSTACK:
print_debug_r_hstack_separator()
#if mode_str=="f":
# reset F1
len_F1=len(F1)
F1_out=[]
for i in range(len_F1):
F1_out.append(np.array([]))
return([F1_partial_out,F_ind_partial_out,F_refs_out,F_stack_shift_out,F_lti_out,F1_out])
###########################################
# Fractional sample overflow
###########################################
def get_frac_over_ind(F_first_sample,F1,F_rates,F_fs,F_ind,F_side):
"""
Get locations of samples to be added/dropped due to fractional bit shift.
Parameters
----------
F_first_sample
list with the first sample number for each vector of data.
F1
list of vectors with new data for each stpol (only lengths are read).
F_rates
model/clock delay information for computing delays.
F_fs
sampling frequency for each vector with new data.
F_ind
list with identifiers for stations (for logging).
F_side
list with sidebands
Returns
-------
F_frac_over_ind
list of [number_of_samples_to_be_added_or_dropped,[list_of_locations_for_these_changes]] for each element in F1.
Notes
-----
|
| **Algorithm:**
|
| Compute integer+fractional sample delay at both extremes of the vector with data, then find intersection
| with changes in fractional delay (given a fractional sample correction between 0 and 1, it checks for crossing at 0.5).
|
|
| **Debugging:**
|
| Activate DEBUG_FRAC_OVER for tabulated output of the fractional overflow corrections.
|
|
| **TO DO:**
|
| Add support for multiple samples.
"""
F_frac_over_ind=[]
for i in range(len(F1)):
fs=F_fs[i]
Ts=1/fs
[sideband,data_type]=F_side[i]
#sample0=F_first_sample[F_refs[stpol]]-len(F1[F_refs[stpol]])
first_sample=float(F_first_sample[i])
num_samples=len(F1[i])
timescale=Ts*np.array([first_sample,first_sample+num_samples])
[delay_rate_0,delay_rate_1,delay_rate_2,delay_rate_ref,clock_rate_0,\
clock_rate_1,clock_abs_rate_0,clock_abs_rate_1,clock_rate_ref,\
model_only_delay,clock_only_delay,diff_frac]=F_rates[i]
#diff_frac=0
clock_diff = [clock_rate_0,clock_rate_1]
poly_diff = [delay_rate_0,delay_rate_1,delay_rate_2]
clock_abs = [clock_abs_rate_0,clock_abs_rate_1]
seconds_offset=0
[r_recalc,m_unused,c_recalc,r_unused,a_unused] = get_delay_val(\
clock_diff=clock_diff,\
poly_diff=poly_diff,\
seconds_ref_clock=clock_rate_ref,\
seconds_ref_poly=delay_rate_ref,\
seconds=timescale,\
seconds_offset=seconds_offset,\
v=DEBUG_LIB_DELAY,diff_pol=DIFF_POLY)
[full_fractional_recalc_f,fractional_recalc_f] = get_full_frac_val(r_recalc[0],fs,bypass_correction=1)
[full_fractional_recalc_l,fractional_recalc_l] = get_full_frac_val(r_recalc[1],fs,bypass_correction=1)
r_shift_v=[fractional_recalc_f,fractional_recalc_l]
# TO DO: fix this
r_shift_v_int=[0]*len(r_shift_v)
delta_shift=0
delta_shift_frac=0
# TO DO: check this
if r_shift_v[0]<=0.5 and r_shift_v[1]>0.5 and poly_diff[1]>0:
delta_shift=1
#delta_shift=-1
elif r_shift_v[0]>=0.5 and r_shift_v[1]<0.5 and poly_diff[1]<0:
delta_shift=-1
#delta_shift=1
#elif full_fractional_recalc_f//1 != full_fractional_recalc_l//1:
# print("Warning: change in sample not taken into account!")
#if delta_shift!=0:
# if sideband=='L':
# delta_shift=-delta_shift
delta_shift_frac=r_shift_v[1]-r_shift_v[0]
if delta_shift>num_samples:
print("zR\Warning: frac - delta shift too high: "+str(delta_shift)+" for "+str(num_samples)+\
" samples, skipping frac shift at first sample "+str(first_sample))
delta_shift=0
shift_v=[]
if delta_shift==1 or delta_shift==-1:
shift_v.append(int(np.round((0.5-r_shift_v[0])*float(num_samples)/(float(delta_shift_frac)))))
elif delta_shift!=0:
for i_d in range(abs(delta_shift)):
# Linear approx., intersection with 0.5
x=(0.5-r_shift_v[0])*float(num_samples)/(float(delta_shift_frac))
x=int(np.round(x))
shift_v.append(x)
if DIFF_POLY==0:
delta_shift=-delta_shift
F_frac_over_ind.append([delta_shift,shift_v])
fractional_sample_correction=0
#if DEBUG_FRAC_OVER:
# print("zR"+KEY_SEP+"getfr: "+str(F_first_sample)+" "+str(F_frac_over_ind))
return(F_frac_over_ind)
def fix_frac_over(F1,F_frac_over_ind,F_ind,F_first_sample):
"""
Add or drop samples for overflow in fractional sample correction based on info from get_frac_over_ind().
Parameters
----------
F1
list of vectors with new data.
F_frac_over
structure obtained in get_frac_over_ind() with locations of samples to be added/dropped.
F_ind
list of stpols corresponding to F1 (only for logging).
F_first_sample
list of first sample number to F1 (only for logging).
Returns
-------
F1
modified list of vectors with new data.
"""
fixed_frac=0
for i in range(len(F_frac_over_ind)):
update_samp=F_frac_over_ind[i][0]
v_ind=F_frac_over_ind[i][1]
if update_samp!=0:
if fixed_frac==0:
if DEBUG_GENERAL_R:
print("zR\tFixing frac: "+str(F_ind)+" "+str(F_first_sample)+" "+str(F_frac_over_ind))
fixed_frac=1
if update_samp>0:
if len(v_ind)==1:
F1[i]=np.delete(F1[i],v_ind[0])
else:
F1[i]=np.delete(F1[i],v_ind)
else:
if len(v_ind)==1:
F1[i]=np.insert(F1[i],v_ind[0],0)
else:
F1[i]=np.insert(F1[i],v_ind,[0]*len(v_ind))
#if fixed_frac:
# print("zR\tFixed frac: "+str(F_ind)+" "+str(F_first_sample)+" "+str(F_frac_over_ind))
return(F1)
###########################################
# Exponential
###########################################
def get_exp(x):
"""
Get exponential based on the fractional part of the input, see Returns below for details.
Parameters
----------
x : numpy array of float.
Returns
-------
y : 1D numpy array of complex
complex rotation (exponential of 2*j*pi*fractional_part(x))
nr : bool
do not rotate (1 if all elements in y are 1, 0 otherwise)
Notes
-----
|
| **Precision:**
|
| The integer part is removed to avoid precision problems in the rotation (exp(j*2*pi*x)).
|
|
| **Approximations:**
|
| -numpy.exp is not called in the trivial cases.
| -IMPORTANT!: For arrays of more than 1 element, if the first, second and last elements are equal, then
| all the elements are assumed to be equal too (and no rotation is applied if they are zero). Need to replace this by some check on the polynomial.
"""
nr=0
# Check if it is only one element
if len(x)==1:
# Check if no rotation
if x==0:
y=1+0j
nr=1
else:
if USE_NE_EXP:
modf_val= np.modf(x)[0]
pi_val = np.pi
y=ne.evaluate("exp(1j*2*pi_val*modf_val)")
else:
y=np.exp(1j*2*np.pi*np.modf(x)[0])
# (!) Check if first, second and last sample are equal, if so, compute only once and repeat
# TO DO: check this approach
elif x[0]==x[-1] and x[0]==x[1]:
# Check if no rotation
if x[0]==0:
y=1+0j
nr=1
else:
if USE_NE_EXP:
modf_val= np.modf(x[0])[0]
pi_val = np.pi
y=ne.evaluate("exp(1j*2*pi_val*modf_val)")
else:
y=np.exp(1j*2*np.pi*np.modf(x[0])[0])
# Otherwise compute for all samples
#return(np.exp(1j*2*np.pi*np.modf(np.float64(x))[0]))
else:
if USE_NE_EXP:
modf_val= np.modf(x)[0]
pi_val = np.pi
y=ne.evaluate("exp(1j*2*pi_val*modf_val)")
else:
y=np.exp(1j*2*np.pi*np.modf(x)[0])
nr=0
return([y,nr])
#return(np.exp(1j*2*np.pi*np.modf(x)[0]))
def get_rotator(x):
"""
Get a single complex rotator from a list of rotators.
Parameters
----------
x : list of complex.
Returns
-------
rotator
product of the elements in x.
Notes
-----
TO DO: consider removing, devised to apply many rotators, but no longer needed.
"""
rotator=1+0j
for i in x:
rotator=
|
np.multiply(rotator,i)
|
numpy.multiply
|
r"""
Utilities for consistent data preprocessing
"""
from typing import Callable, Mapping, Optional, Tuple, Union
import anndata as ad
import numpy as np
import pandas as pd
import scanpy as sc
import scipy.sparse
import torch
import torch.utils.data
import matplotlib.pyplot as plt
from matplotlib import collections as mc
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.utils.extmath import randomized_svd
import scglue
class Preprocessing:
def __init__(self) -> None:
self.params = {}
def fit_transform(self, adata: ad.AnnData) -> None:
raise NotImplementedError
def transform(self, adata: ad.AnnData) -> None:
raise NotImplementedError
class GEXPreprocessing(Preprocessing):
r"""
hvgs + scale + pca
"""
MERGE_ADT = {
"ADGRG1", "ANPEP", "B3GAT1", "BTLA", "C5AR1", "CCR2", "CCR4", "CCR5",
"CCR6", "CD101", "CD109", "CD14", "CD151", "CD163", "CD19", "CD1C",
"CD1D", "CD2", "CD200", "CD22", "CD226", "CD24", "CD244", "CD27",
"CD274", "CD28", "CD33", "CD36", "CD37", "CD38", "CD3D", "CD4", "CD40",
"CD40LG", "CD44", "CD47", "CD48", "CD5", "CD52", "CD55", "CD58", "CD63",
"CD69", "CD7", "CD72", "CD74", "CD79B", "CD81", "CD82", "CD83", "CD84",
"CD86", "CD8A", "CD9", "CD93", "CD99", "CLEC12A", "CLEC1B", "CLEC4C",
"CR1", "CR2", "CSF1R", "CSF2RA", "CSF2RB", "CTLA4", "CX3CR1", "CXCR3",
"CXCR5", "DPP4", "ENG", "ENTPD1", "F3", "FAS", "FCER1A", "FCER2",
"FCGR1A", "FCGR2A", "FCGR3A", "GGT1", "GP1BB", "HLA-A", "HLA-DRA",
"HLA-E", "ICAM1", "ICOS", "ICOSLG", "IFNGR1", "IGHD", "IGHE", "IGHM",
"IGKC", "IL2RA", "IL2RB", "IL3RA", "IL4R", "IL7R", "ITGA1", "ITGA2",
"ITGA2B", "ITGA4", "ITGA6", "ITGAE", "ITGAL", "ITGAM", "ITGAX", "ITGB1",
"ITGB2", "ITGB3", "ITGB7", "KIR2DL1", "KIR2DL3", "KIR3DL1", "KLRB1",
"KLRD1", "KLRF1", "KLRG1", "KLRK1", "LAG3", "LAIR1", "LAMP1", "LILRB1",
"MCAM", "MS4A1", "NCAM1", "NCR1", "NECTIN2", "NRP1", "NT5E", "OLR1",
"PDCD1", "PDPN", "PECAM1", "PTPRC", "PVR", "SELL", "SELP", "SELPLG",
"SIGLEC1", "SIGLEC7", "SIRPA", "SLAMF1", "SLAMF6", "SLAMF7", "SPN",
"TFRC", "THBD", "TIGIT", "TNFRSF13B", "TNFRSF13C", "TNFRSF14",
"TNFRSF4", "TNFRSF9", "TRAV7", "TRDV2", "TREM1"
}
def __init__(
self, n_comps: int = 100, n_genes: int = 2000,
merge_adt: bool = False
) -> None:
super().__init__()
self.n_comps = n_comps
self.n_genes = n_genes
self.merge_adt = merge_adt
def fit_transform(self, adata: ad.AnnData) -> None:
sc.pp.highly_variable_genes(
adata, layer="counts", n_top_genes=self.n_genes,
flavor="seurat_v3", batch_key="batch"
)
if self.merge_adt:
adata.var["highly_variable"] = [
highly_variable or var_name in self.MERGE_ADT
for var_name, highly_variable in
zip(adata.var_names, adata.var["highly_variable"])
]
features = adata.var.query("highly_variable").index.tolist()
hvg = set(features)
X = adata[:, features].X
if scipy.sparse.issparse(X):
mean = X.mean(axis=0).A1
std = np.sqrt(X.power(2).mean(axis=0).A1 - np.square(mean))
X = (X.toarray() - mean) / std
else:
mean = X.mean(axis=0)
std = np.sqrt(X.square().mean(axis=0) - np.square(mean))
X = (X - mean) / std
X = X.clip(-10, 10)
u, s, vh = randomized_svd(X.T @ X, self.n_comps, n_iter=15, random_state=0)
adata.obsm["X_pca"] = X @ vh.T
self.params["features"] = features
self.params["hvg"] = hvg
self.params["mean"] = mean
self.params["std"] = std
self.params["vh"] = vh
def transform(self, adata: ad.AnnData) -> None:
features = self.params["features"]
hvg = self.params["hvg"]
mean = self.params["mean"]
std = self.params["std"]
vh = self.params["vh"]
adata.var["highly_variable"] = [i in hvg for i in adata.var_names]
X = adata[:, features].X
if scipy.sparse.issparse(X):
X = (X.toarray() - mean) / std
else:
X = (X - mean) / std
X = X.clip(-10, 10)
adata.obsm["X_pca"] = X @ vh.T
class ATACPreprocessing(Preprocessing):
r"""
tfidf + normalize + log1p + svd + standardize
"""
def __init__(self, n_comps: int = 100, n_peaks: int = 30000) -> None:
super().__init__()
self.n_comps = n_comps
self.n_peaks = n_peaks
def fit_transform(self, adata: ad.AnnData) -> None:
top_idx = set(np.argsort(adata.X.sum(axis=0).A1)[-self.n_peaks:])
adata.var["highly_variable"] = [i in top_idx for i in range(adata.n_vars)]
features = adata.var_names.tolist()
hvg = set(adata.var.query("highly_variable").index.tolist())
X = adata[:, features].layers["counts"]
idf = X.shape[0] / X.sum(axis=0).A1
if scipy.sparse.issparse(X):
tf = X.multiply(1 / X.sum(axis=1))
X = tf.multiply(idf)
X = X.multiply(1e4 / X.sum(axis=1))
else:
tf = X / X.sum(axis=1, keepdims=True)
X = tf * idf
X = X * (1e4 / X.sum(axis=1, keepdims=True))
X = np.log1p(X)
u, s, vh = randomized_svd(X, self.n_comps, n_iter=15, random_state=0)
X_lsi = X @ vh.T / s
X_lsi -= X_lsi.mean(axis=1, keepdims=True)
X_lsi /= X_lsi.std(axis=1, ddof=1, keepdims=True)
adata.obsm["X_lsi"] = X_lsi
self.params["features"] = features
self.params["hvg"] = hvg
self.params["idf"] = idf
self.params["vh"] = vh
self.params["s"] = s
def transform(self, adata: ad.AnnData) -> None:
features = self.params["features"]
hvg = self.params["hvg"]
idf = self.params["idf"]
vh = self.params["vh"]
s = self.params["s"]
adata.var["highly_variable"] = [i in hvg for i in adata.var_names]
X = adata[:, features].layers["counts"]
if scipy.sparse.issparse(X):
tf = X.multiply(1 / X.sum(axis=1))
X = tf.multiply(idf)
X = X.multiply(1e4 / X.sum(axis=1))
else:
tf = X / X.sum(axis=1, keepdims=True)
X = tf * idf
X = X * (1e4 / X.sum(axis=1))
X =
|
np.log1p(X)
|
numpy.log1p
|
import numpy as np
import scipy.sparse as sp
from scipy.constants import mu_0
# from dask.distributed import Client, LocalCluster
from .. import Utils
from .. import Problem
from .. import Props
import multiprocessing
import properties
from ..Utils import mkvc, matutils, sdiag
from . import BaseMag as MAG
from .MagAnalytics import spheremodel, CongruousMagBC
import dask
import dask.array as da
from dask.diagnostics import ProgressBar
from scipy.sparse import csr_matrix as csr
import os
class MagneticIntegral(Problem.LinearProblem):
chi, chiMap, chiDeriv = Props.Invertible(
"Magnetic Susceptibility (SI)", default=1.0
)
forwardOnly = False # If false, matrix is stored in memory (watch your RAM)
actInd = None #: Active cell indices provided
M = None #: Magnetization matrix provided, otherwise all induced
magType = "H0"
verbose = True # Display progress on screen
W = None
gtgdiag = None
n_cpu = None
parallelized = True
max_chunk_size = None
chunk_by_rows = False
coordinate_system = properties.StringChoice(
"Type of coordinate system we are regularizing in",
choices=["cartesian", "spherical"],
default="cartesian",
)
Jpath = "./sensitivity.zarr"
maxRAM = 1 # Maximum memory usage
modelType = properties.StringChoice(
"Type of magnetization model",
choices=["susceptibility", "vector", "amplitude"],
default="susceptibility",
)
def __init__(self, mesh, **kwargs):
assert mesh.dim == 3, "Integral formulation only available for 3D mesh"
Problem.BaseProblem.__init__(self, mesh, **kwargs)
if self.modelType == "vector":
self.magType = "full"
# Find non-zero cells
if getattr(self, "actInd", None) is not None:
if self.actInd.dtype == "bool":
inds = (
np.asarray(
[inds for inds, elem in enumerate(self.actInd, 1) if elem],
dtype=int,
)
- 1
)
else:
inds = self.actInd
else:
inds = np.asarray(range(self.mesh.nC))
self.nC = len(inds)
# Create active cell projector
P = csr(
(np.ones(self.nC), (inds, range(self.nC))), shape=(self.mesh.nC, self.nC)
)
# Create vectors of nodal location
# (lower and upper corners for each cell)
# if isinstance(self.mesh, Mesh.TreeMesh):
# Get upper and lower corners of each cell
bsw = self.mesh.gridCC - self.mesh.h_gridded / 2.0
tne = self.mesh.gridCC + self.mesh.h_gridded / 2.0
xn1, xn2 = bsw[:, 0], tne[:, 0]
yn1, yn2 = bsw[:, 1], tne[:, 1]
self.Yn = P.T * np.c_[mkvc(yn1), mkvc(yn2)]
self.Xn = P.T * np.c_[mkvc(xn1), mkvc(xn2)]
if self.mesh.dim > 2:
zn1, zn2 = bsw[:, 2], tne[:, 2]
self.Zn = P.T * np.c_[mkvc(zn1), mkvc(zn2)]
# else:
# xn = self.mesh.vectorNx
# yn = self.mesh.vectorNy
# zn = self.mesh.vectorNz
# yn2, xn2, zn2 = np.meshgrid(yn[1:], xn[1:], zn[1:])
# yn1, xn1, zn1 = np.meshgrid(yn[:-1], xn[:-1], zn[:-1])
# If equivalent source, use semi-infinite prism
# if self.equiSourceLayer:
# zn1 -= 1000.
def fields(self, m):
if self.coordinate_system == "cartesian":
m = self.chiMap * m
else:
m = self.chiMap * (
matutils.atp2xyz(m.reshape((int(len(m) / 3), 3), order="F"))
)
if self.forwardOnly:
# Compute the linear operation without forming the full dense F
return np.array(
self.Intrgl_Fwr_Op(m=m, magType=self.magType), dtype="float"
)
# else:
if getattr(self, "_Mxyz", None) is not None:
vec = dask.delayed(csr.dot)(self.Mxyz, m)
M = da.from_delayed(vec, dtype=float, shape=[m.shape[0]])
fields = da.dot(self.G, M)
else:
fields = da.dot(self.G, m.astype(np.float32))
if self.modelType == "amplitude":
fields = self.calcAmpData(fields)
return fields
def calcAmpData(self, Bxyz):
"""
Compute amplitude of the field
"""
amplitude = da.sum(Bxyz.reshape((3, self.nD), order="F") ** 2.0, axis=0) ** 0.5
return amplitude
@property
def G(self):
if not self.ispaired:
raise Exception("Need to pair!")
if getattr(self, "_G", None) is None:
self._G = self.Intrgl_Fwr_Op(magType=self.magType)
return self._G
@property
def nD(self):
"""
Number of data
"""
self._nD = self.survey.srcField.rxList[0].locs.shape[0]
return self._nD
@property
def ProjTMI(self):
if not self.ispaired:
raise Exception("Need to pair!")
if getattr(self, "_ProjTMI", None) is None:
# Convert B declination from north to cartesian
self._ProjTMI = Utils.matutils.dipazm_2_xyz(
self.survey.srcField.param[1], self.survey.srcField.param[2]
)
return self._ProjTMI
def getJtJdiag(self, m, W=None):
"""
Return the diagonal of JtJ
"""
dmudm = self.chiMap.deriv(m)
self._dSdm = None
self._dfdm = None
self.model = m
if (self.gtgdiag is None) and (self.modelType != "amplitude"):
if W is None:
W = np.ones(self.G.shape[1])
self.gtgdiag = np.array(
da.sum(da.power(W[:, None].astype(np.float32) * self.G, 2), axis=0)
)
if self.coordinate_system == "cartesian":
if self.modelType == "amplitude":
return np.sum(
(self.dfdm * sdiag(mkvc(self.gtgdiag) ** 0.5) * dmudm).power(2.0),
axis=0,
)
else:
return mkvc(
np.sum(
(sdiag(mkvc(self.gtgdiag) ** 0.5) * dmudm).power(2.0), axis=0
)
)
else: # spherical
if self.modelType == "amplitude":
return mkvc(
np.sum(
(
(self.dfdm)
* sdiag(mkvc(self.gtgdiag) ** 0.5)
* (self.dSdm * dmudm)
).power(2.0),
axis=0,
)
)
else:
# Japprox = sdiag(mkvc(self.gtgdiag)**0.5*dmudm) * (self.dSdm * dmudm)
return mkvc(
np.sum(
(sdiag(mkvc(self.gtgdiag) ** 0.5) * self.dSdm * dmudm).power(2),
axis=0,
)
)
def getJ(self, m, f=None):
"""
Sensitivity matrix
"""
if self.coordinate_system == "cartesian":
dmudm = self.chiMap.deriv(m)
else: # spherical
dmudm = self.dSdm * self.chiMap.deriv(m)
if self.modelType == "amplitude":
return self.dfdm * da.dot(self.G, dmudm)
else:
prod = dask.delayed(csr.dot)(self.G, dmudm)
return da.from_delayed(
prod, dtype=float, shape=(self.G.shape[0], dmudm.shape[1])
)
def Jvec(self, m, v, f=None):
if self.coordinate_system == "cartesian":
dmudm = self.chiMap.deriv(m)
else:
dmudm = self.dSdm * self.chiMap.deriv(m)
if getattr(self, "_Mxyz", None) is not None:
# dmudm_v = dask.delayed(csr.dot)(dmudm, v)
# vec = dask.delayed(csr.dot)(self.Mxyz, dmudm_v)
M_dmudm_v = da.from_array(self.Mxyz * (dmudm * v), chunks=self.G.chunks[1])
Jvec = da.dot(self.G, M_dmudm_v.astype(np.float32))
else:
dmudm_v = da.from_array(dmudm * v, chunks=self.G.chunks[1])
Jvec = da.dot(self.G, dmudm_v.astype(np.float32))
if self.modelType == "amplitude":
dfdm_Jvec = dask.delayed(csr.dot)(self.dfdm, Jvec)
return da.from_delayed(dfdm_Jvec, dtype=float, shape=[self.dfdm.shape[0]])
else:
return Jvec
def Jtvec(self, m, v, f=None):
if self.coordinate_system == "cartesian":
dmudm = self.chiMap.deriv(m)
else:
dmudm = self.dSdm * self.chiMap.deriv(m)
if self.modelType == "amplitude":
dfdm_v = dask.delayed(csr.dot)(v, self.dfdm)
vec = da.from_delayed(dfdm_v, dtype=float, shape=[self.dfdm.shape[0]])
if getattr(self, "_Mxyz", None) is not None:
jtvec = da.dot(vec.astype(np.float32), self.G)
Jtvec = dask.delayed(csr.dot)(jtvec, self.Mxyz)
else:
Jtvec = da.dot(vec.astype(np.float32), self.G)
else:
Jtvec = da.dot(v.astype(np.float32), self.G)
dmudm_v = dask.delayed(csr.dot)(Jtvec, dmudm)
return da.from_delayed(dmudm_v, dtype=float, shape=[dmudm.shape[1]])
@property
def dSdm(self):
if getattr(self, "_dSdm", None) is None:
if self.model is None:
raise Exception("Requires a chi")
nC = int(len(self.model) / 3)
m_xyz = self.chiMap * matutils.atp2xyz(
self.model.reshape((nC, 3), order="F")
)
nC = int(m_xyz.shape[0] / 3.0)
m_atp = matutils.xyz2atp(m_xyz.reshape((nC, 3), order="F"))
a = m_atp[:nC]
t = m_atp[nC : 2 * nC]
p = m_atp[2 * nC :]
Sx = sp.hstack(
[
sp.diags(np.cos(t) * np.cos(p), 0),
sp.diags(-a * np.sin(t) * np.cos(p), 0),
sp.diags(-a * np.cos(t) * np.sin(p), 0),
]
)
Sy = sp.hstack(
[
sp.diags(np.cos(t) * np.sin(p), 0),
sp.diags(-a * np.sin(t) * np.sin(p), 0),
sp.diags(a * np.cos(t) * np.cos(p), 0),
]
)
Sz = sp.hstack(
[sp.diags(np.sin(t), 0), sp.diags(a * np.cos(t), 0), csr((nC, nC))]
)
self._dSdm = sp.vstack([Sx, Sy, Sz])
return self._dSdm
@property
def modelMap(self):
"""
Call for general mapping of the problem
"""
return self.chiMap
@property
def dfdm(self):
if self.model is None:
self.model = np.zeros(self.G.shape[1])
if getattr(self, "_dfdm", None) is None:
Bxyz = self.Bxyz_a(self.chiMap * self.model)
# Bx = sp.spdiags(Bxyz[:, 0], 0, self.nD, self.nD)
# By = sp.spdiags(Bxyz[:, 1], 0, self.nD, self.nD)
# Bz = sp.spdiags(Bxyz[:, 2], 0, self.nD, self.nD)
ii = np.kron(np.asarray(range(self.survey.nD), dtype="int"), np.ones(3))
jj = np.asarray(range(3 * self.survey.nD), dtype="int")
# (data, (row, col)), shape=(3, 3))
# P = s
self._dfdm = csr(
(mkvc(Bxyz), (ii, jj)), shape=(self.survey.nD, 3 * self.survey.nD)
)
return self._dfdm
def Bxyz_a(self, m):
"""
Return the normalized B fields
"""
# Get field data
if self.coordinate_system == "spherical":
m = matutils.atp2xyz(m)
if getattr(self, "_Mxyz", None) is not None:
Bxyz = da.dot(self.G, (self.Mxyz * m).astype(np.float32))
else:
Bxyz = da.dot(self.G, m.astype(np.float32))
amp = self.calcAmpData(Bxyz.astype(np.float64))
Bamp = sp.spdiags(1.0 / amp, 0, self.nD, self.nD)
return Bxyz.reshape((3, self.nD), order="F") * Bamp
def Intrgl_Fwr_Op(self, m=None, magType="H0"):
"""
Magnetic forward operator in integral form
magType = 'H0' | 'full'
components = 'tmi' | 'x' | 'y' | 'z'
Return
_G = Linear forward operator | (forwardOnly)=data
"""
# if m is not None:
# self.model = self.chiMap * m
# survey = self.survey
self.rxLoc = self.survey.srcField.rxList[0].locs
if magType == "H0":
if getattr(self, "M", None) is None:
self.M = matutils.dipazm_2_xyz(
np.ones(self.nC) * self.survey.srcField.param[1],
np.ones(self.nC) * self.survey.srcField.param[2],
)
Mx = sdiag(self.M[:, 0] * self.survey.srcField.param[0])
My = sdiag(self.M[:, 1] * self.survey.srcField.param[0])
Mz = sdiag(self.M[:, 2] * self.survey.srcField.param[0])
self.Mxyz = sp.vstack((Mx, My, Mz))
elif magType == "full":
self.Mxyz = sp.identity(3 * self.nC) * self.survey.srcField.param[0]
else:
raise Exception('magType must be: "H0" or "full"')
# Loop through all observations and create forward operator (nD-by-self.nC)
if self.verbose:
print(
"Begin forward: M=" + magType + ", Rx type= %s" % self.survey.components
)
# Switch to determine if the process has to be run in parallel
job = Forward(
rxLoc=self.rxLoc,
Xn=self.Xn,
Yn=self.Yn,
Zn=self.Zn,
n_cpu=self.n_cpu,
forwardOnly=self.forwardOnly,
model=m,
components=self.survey.components,
Mxyz=self.Mxyz,
P=self.ProjTMI,
parallelized=self.parallelized,
verbose=self.verbose,
Jpath=self.Jpath,
maxRAM=self.maxRAM,
max_chunk_size=self.max_chunk_size,
chunk_by_rows=self.chunk_by_rows,
)
G = job.calculate()
return G
class Forward:
progressIndex = -1
parallelized = True
rxLoc = None
Xn, Yn, Zn = None, None, None
n_cpu = None
forwardOnly = False
components = ["tmi"]
model = None
Mxyz = None
P = None
verbose = True
maxRAM = 1
chunk_by_rows = False
max_chunk_size = None
Jpath = "./sensitivity.zarr"
def __init__(self, **kwargs):
super().__init__()
Utils.setKwargs(self, **kwargs)
def calculate(self):
self.nD = self.rxLoc.shape[0]
self.nC = self.Mxyz.shape[1]
if self.n_cpu is None:
self.n_cpu = int(multiprocessing.cpu_count())
# Set this early so we can get a better memory estimate for dask chunking
# if self.components == 'xyz':
# nDataComps = 3
# else:
nDataComps = len(self.components)
if self.parallelized:
row = dask.delayed(self.calcTrow, pure=True)
makeRows = [row(self.rxLoc[ii, :]) for ii in range(self.nD)]
buildMat = [
da.from_delayed(makeRow, dtype=np.float32, shape=(nDataComps, self.nC))
for makeRow in makeRows
]
stack = da.vstack(buildMat)
# Auto rechunk
# To customise memory use set Dask config in calling scripts: dask.config.set({'array.chunk-size': '128MiB'})
if self.forwardOnly or self.chunk_by_rows:
label = "DASK: Chunking by rows"
# Autochunking by rows is faster and more memory efficient for
# sensitivity and forward calculations on very large problems
target_size = dask.config.get("array.chunk-size").replace("MiB", " MB")
stack = stack.rechunk({0: "auto", 1: -1})
elif self.max_chunk_size:
label = "DASK: Chunking using parameters"
# Manual chunking is less sensitive to chunk sizes for some problems
target_size = f"{self.max_chunk_size:.0f} MB"
nChunks_col = 1
nChunks_row = 1
rowChunk = int(np.ceil(stack.shape[0] / nChunks_row))
colChunk = int(np.ceil(stack.shape[1] / nChunks_col))
chunk_size = rowChunk * colChunk * 8 * 1e-6 # in Mb
# Add more chunks until memory falls below target
while chunk_size >= self.max_chunk_size:
if rowChunk > colChunk:
nChunks_row += 1
else:
nChunks_col += 1
rowChunk = int(np.ceil(stack.shape[0] / nChunks_row))
colChunk = int(np.ceil(stack.shape[1] / nChunks_col))
chunk_size = rowChunk * colChunk * 8 * 1e-6 # in Mb
stack = stack.rechunk((rowChunk, colChunk))
else:
label = "DASK: Chunking by columns"
# Autochunking by columns is faster for Inversions
target_size = dask.config.get("array.chunk-size").replace("MiB", " MB")
stack = stack.rechunk({0: -1, 1: "auto"})
if self.verbose:
print(label)
print("Tile size (nD, nC): ", stack.shape)
# print('Chunk sizes (nD, nC): ', stack.chunks) # For debugging only
print(
"Number of chunks: %.0f x %.0f = %.0f"
% (
len(stack.chunks[0]),
len(stack.chunks[1]),
len(stack.chunks[0]) * len(stack.chunks[1]),
)
)
print("Target chunk size: %s" % target_size)
print(
"Max chunk size %.0f x %.0f = %.3f MB"
% (
max(stack.chunks[0]),
max(stack.chunks[1]),
max(stack.chunks[0]) * max(stack.chunks[1]) * 8 * 1e-6,
)
)
print(
"Min chunk size %.0f x %.0f = %.3f MB"
% (
min(stack.chunks[0]),
min(stack.chunks[1]),
min(stack.chunks[0]) * min(stack.chunks[1]) * 8 * 1e-6,
)
)
print(
"Max RAM (GB x %.0f CPU): %.6f"
% (
self.n_cpu,
max(stack.chunks[0])
* max(stack.chunks[1])
* 8
* 1e-9
* self.n_cpu,
)
)
print(
"Tile size (GB): %.3f"
% (stack.shape[0] * stack.shape[1] * 8 * 1e-9)
)
if self.forwardOnly:
with ProgressBar():
print("Forward calculation: ")
pred = da.dot(stack, self.model).compute()
return pred
else:
if os.path.exists(self.Jpath):
G = da.from_zarr(self.Jpath)
if np.all(
np.r_[
np.any(np.r_[G.chunks[0]] == stack.chunks[0]),
np.any(np.r_[G.chunks[1]] == stack.chunks[1]),
np.r_[G.shape] == np.r_[stack.shape],
]
):
# Check that loaded G matches supplied data and mesh
print(
"Zarr file detected with same shape and chunksize ... re-loading"
)
return G
else:
print(
"Zarr file detected with wrong shape and chunksize ... over-writing"
)
with ProgressBar():
print("Saving G to zarr: " + self.Jpath)
G = da.to_zarr(
stack,
self.Jpath,
compute=True,
return_stored=True,
overwrite=True,
)
else:
result = []
for ii in range(self.nD):
if self.forwardOnly:
result += [
np.c_[np.dot(self.calcTrow(self.rxLoc[ii, :]), self.model)]
]
else:
result += [self.calcTrow(self.rxLoc[ii, :])]
self.progress(ii, self.nD)
G = np.vstack(result)
return G
def calcTrow(self, xyzLoc):
"""
Load in the active nodes of a tensor mesh and computes the magnetic
forward relation between a cuboid and a given observation
location outside the Earth [obsx, obsy, obsz]
INPUT:
xyzLoc: [obsx, obsy, obsz] nC x 3 Array
OUTPUT:
Tx = [Txx Txy Txz]
Ty = [Tyx Tyy Tyz]
Tz = [Tzx Tzy Tzz]
"""
rows = calcRow(
self.Xn, self.Yn, self.Zn, xyzLoc, self.P, components=self.components
)
return rows * self.Mxyz
def progress(self, ind, total):
"""
progress(ind, total)
Function measuring the progress of a process and printing the percentage to screen.
Useful to estimate the remaining runtime of a large problem.
Created on Dec, 20th 2015
@author: dominiquef
"""
arg = np.floor(ind / total * 10.0)
if arg > self.progressIndex:
if self.verbose:
print("Done " + str(arg * 10) + " %")
self.progressIndex = arg
class Problem3D_DiffSecondary(Problem.BaseProblem):
"""
Secondary field approach using differential equations!
"""
surveyPair = MAG.BaseMagSurvey
modelPair = MAG.BaseMagMap
mu, muMap, muDeriv = Props.Invertible("Magnetic Permeability (H/m)", default=mu_0)
mui, muiMap, muiDeriv = Props.Invertible("Inverse Magnetic Permeability (m/H)")
Props.Reciprocal(mu, mui)
def __init__(self, mesh, **kwargs):
Problem.BaseProblem.__init__(self, mesh, **kwargs)
Pbc, Pin, self._Pout = self.mesh.getBCProjWF("neumann", discretization="CC")
Dface = self.mesh.faceDiv
Mc = sdiag(self.mesh.vol)
self._Div = Mc * Dface * Pin.T * Pin
@property
def MfMuI(self):
return self._MfMuI
@property
def MfMui(self):
return self._MfMui
@property
def MfMu0(self):
return self._MfMu0
def makeMassMatrices(self, m):
mu = self.muMap * m
self._MfMui = self.mesh.getFaceInnerProduct(1.0 / mu) / self.mesh.dim
# self._MfMui = self.mesh.getFaceInnerProduct(1./mu)
# TODO: this will break if tensor mu
self._MfMuI = sdiag(1.0 / self._MfMui.diagonal())
self._MfMu0 = self.mesh.getFaceInnerProduct(1.0 / mu_0) / self.mesh.dim
# self._MfMu0 = self.mesh.getFaceInnerProduct(1/mu_0)
@Utils.requires("survey")
def getB0(self):
b0 = self.survey.B0
B0 = np.r_[
b0[0] * np.ones(self.mesh.nFx),
b0[1] * np.ones(self.mesh.nFy),
b0[2] * np.ones(self.mesh.nFz),
]
return B0
def getRHS(self, m):
r"""
.. math ::
\mathbf{rhs} = \Div(\MfMui)^{-1}\mathbf{M}^f_{\mu_0^{-1}}\mathbf{B}_0 - \Div\mathbf{B}_0+\diag(v)\mathbf{D} \mathbf{P}_{out}^T \mathbf{B}_{sBC}
"""
B0 = self.getB0()
Dface = self.mesh.faceDiv
# Mc = sdiag(self.mesh.vol)
mu = self.muMap * m
chi = mu / mu_0 - 1
# Temporary fix
Bbc, Bbc_const = CongruousMagBC(self.mesh, self.survey.B0, chi)
self.Bbc = Bbc
self.Bbc_const = Bbc_const
# return self._Div*self.MfMuI*self.MfMu0*B0 - self._Div*B0 +
# Mc*Dface*self._Pout.T*Bbc
return self._Div * self.MfMuI * self.MfMu0 * B0 - self._Div * B0
def getA(self, m):
r"""
GetA creates and returns the A matrix for the Magnetics problem
The A matrix has the form:
.. math ::
\mathbf{A} = \Div(\MfMui)^{-1}\Div^{T}
"""
return self._Div * self.MfMuI * self._Div.T
def fields(self, m):
r"""
Return magnetic potential (u) and flux (B)
u: defined on the cell center [nC x 1]
B: defined on the cell center [nG x 1]
After we compute u, then we update B.
.. math ::
\mathbf{B}_s = (\MfMui)^{-1}\mathbf{M}^f_{\mu_0^{-1}}\mathbf{B}_0-\mathbf{B}_0 -(\MfMui)^{-1}\Div^T \mathbf{u}
"""
self.makeMassMatrices(m)
A = self.getA(m)
rhs = self.getRHS(m)
m1 = sp.linalg.interface.aslinearoperator(sdiag(1 / A.diagonal()))
u, info = sp.linalg.bicgstab(A, rhs, tol=1e-6, maxiter=1000, M=m1)
B0 = self.getB0()
B = self.MfMuI * self.MfMu0 * B0 - B0 - self.MfMuI * self._Div.T * u
return {"B": B, "u": u}
@Utils.timeIt
def Jvec(self, m, v, u=None):
"""
Computing Jacobian multiplied by vector
By setting our problem as
.. math ::
\\mathbf{C}(\\mathbf{m}, \\mathbf{u}) = \\mathbf{A}\\mathbf{u} - \\mathbf{rhs} = 0
And taking derivative w.r.t m
.. math ::
\\nabla \\mathbf{C}(\\mathbf{m}, \\mathbf{u}) = \\nabla_m \\mathbf{C}(\\mathbf{m}) \\delta \\mathbf{m} +
\\nabla_u \\mathbf{C}(\\mathbf{u}) \\delta \\mathbf{u} = 0
\\frac{\\delta \\mathbf{u}}{\\delta \\mathbf{m}} = - [\\nabla_u \\mathbf{C}(\\mathbf{u})]^{-1}\\nabla_m \\mathbf{C}(\\mathbf{m})
With some linear algebra we can have
.. math ::
\\nabla_u \\mathbf{C}(\\mathbf{u}) = \\mathbf{A}
\\nabla_m \\mathbf{C}(\\mathbf{m}) =
\\frac{\\partial \\mathbf{A}}{\\partial \\mathbf{m}}(\\mathbf{m})\\mathbf{u} - \\frac{\\partial \\mathbf{rhs}(\\mathbf{m})}{\\partial \\mathbf{m}}
.. math ::
\\frac{\\partial \\mathbf{A}}{\\partial \\mathbf{m}}(\\mathbf{m})\\mathbf{u} =
\\frac{\\partial \\mathbf{\\mu}}{\\partial \\mathbf{m}} \\left[\\Div \\diag (\\Div^T \\mathbf{u}) \\dMfMuI \\right]
\\dMfMuI = \\diag(\\MfMui)^{-1}_{vec} \\mathbf{Av}_{F2CC}^T\\diag(\\mathbf{v})\\diag(\\frac{1}{\\mu^2})
\\frac{\\partial \\mathbf{rhs}(\\mathbf{m})}{\\partial \\mathbf{m}} = \\frac{\\partial \\mathbf{\\mu}}{\\partial \\mathbf{m}} \\left[
\\Div \\diag(\\M^f_{\\mu_{0}^{-1}}\\mathbf{B}_0) \\dMfMuI \\right] - \\diag(\\mathbf{v})\\mathbf{D} \\mathbf{P}_{out}^T\\frac{\\partial B_{sBC}}{\\partial \\mathbf{m}}
In the end,
.. math ::
\\frac{\\delta \\mathbf{u}}{\\delta \\mathbf{m}} =
- [ \\mathbf{A} ]^{-1}\\left[ \\frac{\\partial \\mathbf{A}}{\\partial \\mathbf{m}}(\\mathbf{m})\\mathbf{u}
- \\frac{\\partial \\mathbf{rhs}(\\mathbf{m})}{\\partial \\mathbf{m}} \\right]
A slightly tricky point here is that we are not interested in the potential (u), but in the magnetic flux (B).
Thus, we need sensitivity for B. Now we take derivative of B w.r.t m and have
.. math ::
\\frac{\\delta \\mathbf{B}} {\\delta \\mathbf{m}} = \\frac{\\partial \\mathbf{\\mu} } {\\partial \\mathbf{m} }
\\left[
\\diag(\\M^f_{\\mu_{0}^{-1} } \\mathbf{B}_0) \\dMfMuI \\
- \\diag (\\Div^T\\mathbf{u})\\dMfMuI
\\right ]
- (\\MfMui)^{-1}\\Div^T\\frac{\\delta\\mathbf{u}}{\\delta \\mathbf{m}}
Finally we evaluate the above, but we should remember that
.. note ::
We only want to evaluate
.. math ::
\\mathbf{J}\\mathbf{v} = \\frac{\\delta \\mathbf{P}\\mathbf{B}} {\\delta \\mathbf{m}}\\mathbf{v}
Since forming the sensitivity matrix is very expensive: this monster is a "big" and "dense" matrix!
"""
if u is None:
u = self.fields(m)
B, u = u["B"], u["u"]
mu = self.muMap * (m)
dmudm = self.muDeriv
# dchidmu = sdiag(1 / mu_0 * np.ones(self.mesh.nC))
vol = self.mesh.vol
Div = self._Div
Dface = self.mesh.faceDiv
P = self.survey.projectFieldsDeriv(B) # Projection matrix
B0 = self.getB0()
MfMuIvec = 1 / self.MfMui.diagonal()
dMfMuI = sdiag(MfMuIvec ** 2) * self.mesh.aveF2CC.T * sdiag(vol * 1.0 / mu ** 2)
# A = self._Div*self.MfMuI*self._Div.T
# RHS = Div*MfMuI*MfMu0*B0 - Div*B0 + Mc*Dface*Pout.T*Bbc
# C(m,u) = A*m-rhs
# dudm = -(dCdu)^(-1)dCdm
dCdu = self.getA(m)
dCdm_A = Div * (sdiag(Div.T * u) * dMfMuI * dmudm)
dCdm_RHS1 = Div * (sdiag(self.MfMu0 * B0) * dMfMuI)
# temp1 = (Dface * (self._Pout.T * self.Bbc_const * self.Bbc))
# dCdm_RHS2v = (sdiag(vol) * temp1) * \
# np.inner(vol, dchidmu * dmudm * v)
# dCdm_RHSv = dCdm_RHS1*(dmudm*v) + dCdm_RHS2v
dCdm_RHSv = dCdm_RHS1 * (dmudm * v)
dCdm_v = dCdm_A * v - dCdm_RHSv
m1 = sp.linalg.interface.aslinearoperator(sdiag(1 / dCdu.diagonal()))
sol, info = sp.linalg.bicgstab(dCdu, dCdm_v, tol=1e-6, maxiter=1000, M=m1)
if info > 0:
print("Iterative solver did not work well (Jvec)")
# raise Exception ("Iterative solver did not work well")
# B = self.MfMuI*self.MfMu0*B0-B0-self.MfMuI*self._Div.T*u
# dBdm = d\mudm*dBd\mu
dudm = -sol
dBdmv = (
sdiag(self.MfMu0 * B0) * (dMfMuI * (dmudm * v))
- sdiag(Div.T * u) * (dMfMuI * (dmudm * v))
- self.MfMuI * (Div.T * (dudm))
)
return mkvc(P * dBdmv)
@Utils.timeIt
def Jtvec(self, m, v, u=None):
"""
Computing Jacobian^T multiplied by vector.
.. math ::
(\\frac{\\delta \\mathbf{P}\\mathbf{B}} {\\delta \\mathbf{m}})^{T} = \\left[ \\mathbf{P}_{deriv}\\frac{\\partial \\mathbf{\\mu} } {\\partial \\mathbf{m} }
\\left[
\\diag(\\M^f_{\\mu_{0}^{-1} } \\mathbf{B}_0) \\dMfMuI \\
- \\diag (\\Div^T\\mathbf{u})\\dMfMuI
\\right ]\\right]^{T}
- \\left[\\mathbf{P}_{deriv}(\\MfMui)^{-1}\\Div^T\\frac{\\delta\\mathbf{u}}{\\delta \\mathbf{m}} \\right]^{T}
where
.. math ::
\\mathbf{P}_{deriv} = \\frac{\\partial \\mathbf{P}}{\\partial\\mathbf{B}}
.. note ::
Here we only want to compute
.. math ::
\\mathbf{J}^{T}\\mathbf{v} = (\\frac{\\delta \\mathbf{P}\\mathbf{B}} {\\delta \\mathbf{m}})^{T} \\mathbf{v}
"""
if u is None:
u = self.fields(m)
B, u = u["B"], u["u"]
mu = self.mapping * (m)
dmudm = self.mapping.deriv(m)
# dchidmu = sdiag(1 / mu_0 * np.ones(self.mesh.nC))
vol = self.mesh.vol
Div = self._Div
Dface = self.mesh.faceDiv
P = self.survey.projectFieldsDeriv(B) # Projection matrix
B0 = self.getB0()
MfMuIvec = 1 / self.MfMui.diagonal()
dMfMuI = sdiag(MfMuIvec ** 2) * self.mesh.aveF2CC.T * sdiag(vol * 1.0 / mu ** 2)
# A = self._Div*self.MfMuI*self._Div.T
# RHS = Div*MfMuI*MfMu0*B0 - Div*B0 + Mc*Dface*Pout.T*Bbc
# C(m,u) = A*m-rhs
# dudm = -(dCdu)^(-1)dCdm
dCdu = self.getA(m)
s = Div * (self.MfMuI.T * (P.T * v))
m1 = sp.linalg.interface.aslinearoperator(sdiag(1 / (dCdu.T).diagonal()))
sol, info = sp.linalg.bicgstab(dCdu.T, s, tol=1e-6, maxiter=1000, M=m1)
if info > 0:
print("Iterative solver did not work well (Jtvec)")
# raise Exception ("Iterative solver did not work well")
# dCdm_A = Div * ( sdiag( Div.T * u )* dMfMuI *dmudm )
# dCdm_Atsol = ( dMfMuI.T*( sdiag( Div.T * u ) * (Div.T * dmudm)) ) * sol
dCdm_Atsol = (dmudm.T * dMfMuI.T * (sdiag(Div.T * u) * Div.T)) * sol
# dCdm_RHS1 = Div * (sdiag( self.MfMu0*B0 ) * dMfMuI)
# dCdm_RHS1tsol = (dMfMuI.T*( sdiag( self.MfMu0*B0 ) ) * Div.T * dmudm) * sol
dCdm_RHS1tsol = (dmudm.T * dMfMuI.T * (sdiag(self.MfMu0 * B0)) * Div.T) * sol
# temp1 = (Dface*(self._Pout.T*self.Bbc_const*self.Bbc))
# temp1sol = (Dface.T * (sdiag(vol) * sol))
# temp2 = self.Bbc_const * (self._Pout.T * self.Bbc).T
# dCdm_RHS2v = (sdiag(vol)*temp1)*np.inner(vol, dchidmu*dmudm*v)
# dCdm_RHS2tsol = (dmudm.T * dchidmu.T * vol) * np.inner(temp2, temp1sol)
# dCdm_RHSv = dCdm_RHS1*(dmudm*v) + dCdm_RHS2v
# temporary fix
# dCdm_RHStsol = dCdm_RHS1tsol - dCdm_RHS2tsol
dCdm_RHStsol = dCdm_RHS1tsol
# dCdm_RHSv = dCdm_RHS1*(dmudm*v) + dCdm_RHS2v
# dCdm_v = dCdm_A*v - dCdm_RHSv
Ctv = dCdm_Atsol - dCdm_RHStsol
# B = self.MfMuI*self.MfMu0*B0-B0-self.MfMuI*self._Div.T*u
# dBdm = d\mudm*dBd\mu
# dPBdm^T*v = Atemp^T*P^T*v - Btemp^T*P^T*v - Ctv
Atemp = sdiag(self.MfMu0 * B0) * (dMfMuI * (dmudm))
Btemp = sdiag(Div.T * u) * (dMfMuI * (dmudm))
Jtv = Atemp.T * (P.T * v) - Btemp.T * (P.T * v) - Ctv
return mkvc(Jtv)
def MagneticsDiffSecondaryInv(mesh, model, data, **kwargs):
"""
Inversion module for MagneticsDiffSecondary
"""
from .. import Optimization, Regularization, Parameters, ObjFunction, Inversion
prob = MagneticsDiffSecondary(mesh, model)
miter = kwargs.get("maxIter", 10)
if prob.ispaired:
prob.unpair()
if data.ispaired:
data.unpair()
prob.pair(data)
# Create an optimization program
opt = Optimization.InexactGaussNewton(maxIter=miter)
opt.bfgsH0 = Solver(sp.identity(model.nP), flag="D")
# Create a regularization program
reg = Regularization.Tikhonov(model)
# Create an objective function
beta = Parameters.BetaSchedule(beta0=1e0)
obj = ObjFunction.BaseObjFunction(data, reg, beta=beta)
# Create an inversion object
inv = Inversion.BaseInversion(obj, opt)
return inv, reg
def calcRow(
Xn,
Yn,
Zn,
rxlocation,
P,
components=["bxx", "bxy", "bxz", "byy", "byz", "bzz", "bx", "by", "bz"],
):
"""
calcRow
Takes in the lower SW and upper NE nodes of a tensor mesh,
observation location rxLoc[obsx, obsy, obsz] and computes the
magnetic tensor for the integral of a each prisms
INPUT:
Xn, Yn, Zn: Node location matrix for the lower and upper most corners of
all cells in the mesh shape[nC,2]
OUTPUT:
"""
eps = 1e-16 # add a small value to the locations to avoid /0
# number of cells in mesh
nC = Xn.shape[0]
# comp. pos. differences for tne, bsw nodes
dz2 = Zn[:, 1] - rxlocation[2] + eps
dz1 = Zn[:, 0] - rxlocation[2] + eps
dy2 = Yn[:, 1] - rxlocation[1] + eps
dy1 = Yn[:, 0] - rxlocation[1] + eps
dx2 = Xn[:, 1] - rxlocation[0] + eps
dx1 = Xn[:, 0] - rxlocation[0] + eps
# comp. squared diff
dx2dx2 = dx2 ** 2.0
dx1dx1 = dx1 ** 2.0
dy2dy2 = dy2 ** 2.0
dy1dy1 = dy1 ** 2.0
dz2dz2 = dz2 ** 2.0
dz1dz1 = dz1 ** 2.0
# 2D radius component squared of corner nodes
R1 = dy2dy2 + dx2dx2
R2 = dy2dy2 + dx1dx1
R3 = dy1dy1 + dx2dx2
R4 = dy1dy1 + dx1dx1
# radius to each cell node
r1 = np.sqrt(dz2dz2 + R2) + eps
r2 = np.sqrt(dz2dz2 + R1) + eps
r3 = np.sqrt(dz1dz1 + R1) + eps
r4 =
|
np.sqrt(dz1dz1 + R2)
|
numpy.sqrt
|
import os
import argparse
import subprocess
import numpy as np
class LPSolver(object):
def __init__(self, args=None, B=None, u=None, e=None, d=None,
Z=None, C_ans=None, C_ans_sgn=None, eps=1e-5, alpha=0.99,
save_dir=None, prob_name=None):
self.args = args
self.backend = self.get_lp()
self.B = B
self.u = u
self.e = e
self.d = d
self.Z = Z
self.C_ans = C_ans
self.C_ans_sgn = C_ans_sgn
self.eps = eps
self.alpha = alpha
self.save_dir = save_dir
self.prob_name = prob_name
def get_lp(self):
from milp import milp
lp = milp
return lp
def get_lp_results(self):
solution = self.model.solution
obj_val = solution.get_objective_value()
C = np.array([[solution.get_values('c_%s_%s'%(i, p)) \
for p in range(self.d.shape[0])] for i in range(self.u.shape[0])])
C_delta = dict()
for symbol in ['minus', 'zero', 'plus']:
C_delta[symbol] = np.array([[solution.get_values('c_%s_%s_%s'%(symbol, i, p)) \
for p in range(self.d.shape[0])] for i in range(self.u.shape[0])])
C_delta[symbol] = (C_delta[symbol] > 0.5).astype(int)
C_sign = (C_delta['plus'] - C_delta['minus']).astype(int)
return obj_val, C, C_delta, C_sign
def save_solution(self):
subprocess.call(['mkdir', '-p', self.save_dir])
fout = open('%s/output_summary.txt'%(self.save_dir), 'w')
fout.write('Input:\n')
fout.write('B = \n%s\n'%(self.B))
fout.write('u = %s\n'%(self.u))
fout.write('d = %s\n'%(self.d))
fout.write('e = %s\n'%(self.e))
if self.C_ans is not None:
fout.write('C = \n%s\n'%(self.C_ans))
if self.C_ans_sgn is not None:
fout.write('C_sgn = \n%s\n'%(self.C_ans_sgn.astype(int)))
fout.write('eps = %s\n'%(self.eps))
fout.write('alpha = %s\n'%(self.alpha))
fout.write('\nSolution:\n')
fout.write('Objective value = %s\n'%(self.obj_val))
fout.write('solution = \n%s\n'%(np.array2string(self.C, suppress_small=True)))
fout.write('solution_sgn = \n%s\n'%(self.C_sign))
if self.C_ans is not None:
fout.write('norm = %s\n'%(
|
np.linalg.norm(self.C - self.C_ans)
|
numpy.linalg.norm
|
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os.path as op
import inspect
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from scipy.io import loadmat
from mne import pick_types
from mne.datasets import testing
from mne.externals.six import iterbytes
from mne.utils import run_tests_if_main, requires_pandas, _TempDir
from mne.io import read_raw_edf
from mne.io.base import _RawShell
from mne.io.meas_info import _empty_info
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.pick import channel_type
from mne.io.edf.edf import find_edf_events, _read_annot, _read_annotations_edf
from mne.io.edf.edf import read_annotations_edf, _get_edf_default_event_id
from mne.io.edf.edf import _read_edf_header
from mne.event import find_events
from mne.annotations import events_from_annotations
FILE = inspect.getfile(inspect.currentframe())
data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
montage_path = op.join(data_dir, 'biosemi.hpts')
bdf_path = op.join(data_dir, 'test.bdf')
edf_path = op.join(data_dir, 'test.edf')
edf_uneven_path = op.join(data_dir, 'test_uneven_samp.edf')
bdf_eeglab_path = op.join(data_dir, 'test_bdf_eeglab.mat')
edf_eeglab_path = op.join(data_dir, 'test_edf_eeglab.mat')
edf_uneven_eeglab_path = op.join(data_dir, 'test_uneven_samp.mat')
edf_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.edf')
edf_txt_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.txt')
data_path = testing.data_path(download=False)
edf_stim_resamp_path = op.join(data_path, 'EDF', 'test_edf_stim_resamp.edf')
edf_overlap_annot_path = op.join(data_path, 'EDF',
'test_edf_overlapping_annotations.edf')
edf_reduced = op.join(data_path, 'EDF', 'test_reduced.edf')
bdf_stim_channel_path = op.join(data_path, 'BDF', 'test_bdf_stim_channel.bdf')
eog = ['REOG', 'LEOG', 'IEOG']
misc = ['EXG1', 'EXG5', 'EXG8', 'M1', 'M2']
def test_bdf_data():
"""Test reading raw bdf files."""
raw_py = _test_raw_reader(read_raw_edf, input_fname=bdf_path,
montage=montage_path, eog=eog, misc=misc,
exclude=['M2', 'IEOG'], stim_channel=-1)
assert 'RawEDF' in repr(raw_py)
picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
data_py, _ = raw_py[picks]
# this .mat was generated using the EEG Lab Biosemi Reader
raw_eeglab = loadmat(bdf_eeglab_path)
raw_eeglab = raw_eeglab['data'] * 1e-6 # data are stored in microvolts
data_eeglab = raw_eeglab[picks]
# BDF saved as single precision; resolution to seven decimal points in MATLAB
assert_array_almost_equal(data_py, data_eeglab, 8)
# Manually checking that float coordinates are imported
assert (raw_py.info['chs'][0]['loc']).any()
assert (raw_py.info['chs'][25]['loc']).any()
assert (raw_py.info['chs'][63]['loc']).any()
@testing.requires_testing_data
def test_bdf_stim_channel():
"""Test BDF stim channel."""
# test if last channel is detected as STIM by default
raw_py = _test_raw_reader(read_raw_edf, input_fname=bdf_path)
assert channel_type(raw_py.info, raw_py.info["nchan"] - 1) == 'stim'
# test BDF file with wrong scaling info in header - this should be ignored
# for BDF stim channels
events = [[242, 0, 4],
[310, 0, 2],
[952, 0, 1],
[1606, 0, 1],
[2249, 0, 1],
[2900, 0, 1],
[3537, 0, 1],
[4162, 0, 1],
[4790, 0, 1]]
raw = read_raw_edf(bdf_stim_channel_path, preload=True)
bdf_events = find_events(raw)
assert_array_equal(events, bdf_events)
raw = read_raw_edf(bdf_stim_channel_path, preload=False)
bdf_events = find_events(raw)
assert_array_equal(events, bdf_events)
@testing.requires_testing_data
def test_edf_overlapping_annotations():
"""Test EDF with overlapping annotations."""
with pytest.warns(RuntimeWarning, match='overlapping.* not fully support'):
read_raw_edf(edf_overlap_annot_path, preload=True, stim_channel='auto',
verbose=True)
@testing.requires_testing_data
def test_edf_reduced():
"""Test EDF with various sampling rates."""
_test_raw_reader(read_raw_edf, input_fname=edf_reduced, stim_channel=None,
verbose='error')
def test_edf_data():
"""Test edf files."""
raw = _test_raw_reader(read_raw_edf, input_fname=edf_path,
stim_channel=None, exclude=['Ergo-Left', 'H10'],
verbose='error')
raw_py = read_raw_edf(edf_path, stim_channel='auto', preload=True)
assert_equal(len(raw.ch_names) + 2, len(raw_py.ch_names))
# Test saving and loading when annotations were parsed.
edf_events = find_events(raw_py, output='step', shortest_event=0,
stim_channel='STI 014')
# onset, duration, id
events = [[0.1344, 0.2560, 2],
[0.3904, 1.0000, 2],
[2.0000, 0.0000, 3],
[2.5000, 2.5000, 2]]
events = np.array(events)
events[:, :2] *= 512 # convert time to samples
events = np.array(events, dtype=int)
events[:, 1] -= 1
events[events[:, 1] <= 0, 1] = 1
events[:, 1] += events[:, 0]
onsets = events[:, [0, 2]]
offsets = events[:, [1, 2]]
events = np.zeros((2 * events.shape[0], 3), dtype=int)
events[0::2, [0, 2]] = onsets
events[1::2, [0, 1]] = offsets
assert_array_equal(edf_events, events)
# Test with number of records not in header (-1).
tempdir = _TempDir()
broken_fname = op.join(tempdir, 'broken.edf')
with open(edf_path, 'rb') as fid_in:
fid_in.seek(0, 2)
n_bytes = fid_in.tell()
fid_in.seek(0, 0)
rbytes = fid_in.read(int(n_bytes * 0.4))
with open(broken_fname, 'wb') as fid_out:
fid_out.write(rbytes[:236])
fid_out.write(bytes('-1 '.encode()))
fid_out.write(rbytes[244:])
with pytest.warns(RuntimeWarning,
match='records .* not match the file size'):
raw = read_raw_edf(broken_fname, preload=True, stim_channel='auto')
read_raw_edf(broken_fname, exclude=raw.ch_names[:132], preload=True,
stim_channel='auto')
@testing.requires_testing_data
def test_stim_channel():
"""Test reading raw edf files with stim channel."""
raw_py = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
preload=True)
picks = pick_types(raw_py.info, meg=False, eeg=True,
exclude=['EDF Annotations'])
data_py, _ = raw_py[picks]
print(raw_py) # to test repr
print(raw_py.info) # to test Info repr
# this .mat was generated using the EEG Lab Biosemi Reader
raw_eeglab = loadmat(edf_eeglab_path)
raw_eeglab = raw_eeglab['data'] * 1e-6 # data are stored in microvolts
data_eeglab = raw_eeglab[picks]
assert_array_almost_equal(data_py, data_eeglab, 10)
events = find_edf_events(raw_py)
assert len(events) - 1 == len(find_events(raw_py)) # start not found
# Test uneven sampling
raw_py = read_raw_edf(edf_uneven_path, stim_channel=None)
data_py, _ = raw_py[0]
# this .mat was generated using the EEG Lab Biosemi Reader
raw_eeglab = loadmat(edf_uneven_eeglab_path)
raw_eeglab = raw_eeglab['data']
data_eeglab = raw_eeglab[0]
# match upsampling
upsample = len(data_eeglab) / len(raw_py)
data_py = np.repeat(data_py, repeats=upsample)
assert_array_equal(data_py, data_eeglab)
pytest.raises(RuntimeError, read_raw_edf, edf_path, preload=False,
stim_channel=-1)
with pytest.warns(RuntimeWarning,
match='Interpolating stim .* Events may jitter'):
raw = read_raw_edf(edf_stim_resamp_path, verbose=True, stim_channel=-1)
with pytest.warns(None) as w:
raw[:]
assert len(w) == 0
events = raw_py.find_edf_events()
assert len(events) == 0
def test_parse_annotation():
"""Test parsing the tal channel."""
# test the parser
annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00'
b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00'
b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00'
b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00'
b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00')
annot = [a for a in iterbytes(annot)]
annot[1::2] = [a * 256 for a in annot[1::2]]
tal_channel = map(sum, zip(annot[0::2], annot[1::2]))
onset, duration, description = _read_annotations_edf([tal_channel])
assert_equal(np.column_stack((onset, duration, description)),
[[180., 0., 'Lights off'], [180., 0., 'Close door'],
[180., 0., 'Lights off'], [180., 0., 'Close door'],
[3.14, 4.2, 'nothing'], [1800.2, 25.5, 'Apnea']])
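# Added note (not part of the original test): the byte strings above follow the
# EDF+ TAL (time-stamped annotation list) layout -- each annotation starts with
# "+<onset>", optionally followed by "\x15<duration>", then one or more
# "\x14<description>" fields, and the record is terminated by "\x14" and NUL padding.
# The expected triples in the assert above therefore read (onset, duration, description).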
def test_edf_annotations():
"""Test if events are detected correctly in a typical MNE workflow."""
# test an actual file
raw = read_raw_edf(edf_path, preload=True, stim_channel='auto')
edf_events = find_events(raw, output='step', shortest_event=0,
stim_channel='STI 014')
# onset, duration, id
events = [[0.1344, 0.2560, 2],
[0.3904, 1.0000, 2],
[2.0000, 0.0000, 3],
[2.5000, 2.5000, 2]]
events = np.array(events)
events[:, :2] *= 512 # convert time to samples
events = np.array(events, dtype=int)
events[:, 1] -= 1
events[events[:, 1] <= 0, 1] = 1
events[:, 1] += events[:, 0]
onsets = events[:, [0, 2]]
offsets = events[:, [1, 2]]
events = np.zeros((2 * events.shape[0], 3), dtype=int)
events[0::2, [0, 2]] = onsets
events[1::2, [0, 1]] = offsets
assert_array_equal(edf_events, events)
def test_edf_stim_channel():
"""Test stim channel for edf file."""
# test if stim channel is automatically detected
raw = read_raw_edf(edf_path, preload=True)
assert channel_type(raw.info, raw.info["nchan"] - 1) == 'stim'
raw = read_raw_edf(edf_stim_channel_path, preload=True,
stim_channel=-1)
true_data = np.loadtxt(edf_txt_stim_channel_path).T
# the EDF writer pads the data if the file is too small
_, ns = true_data.shape
edf_data = raw._data[:, :ns]
# assert stim channels are equal
assert_array_equal(true_data[-1], edf_data[-1])
# assert data are equal
assert_array_almost_equal(true_data[0:-1] * 1e-6, edf_data[0:-1])
@requires_pandas
def test_to_data_frame():
"""Test edf Raw Pandas exporter."""
for path in [edf_path, bdf_path]:
raw = read_raw_edf(path, stim_channel=None, preload=True,
verbose='error')
_, times = raw[0, :10]
df = raw.to_data_frame()
assert (df.columns == raw.ch_names).all()
assert_array_equal(np.round(times * 1e3), df.index.values[:10])
df = raw.to_data_frame(index=None, scalings={'eeg': 1e13})
assert 'time' in df.index.names
assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
def test_read_annot(tmpdir):
"""Test parsing the tal channel."""
EXPECTED_ANNOTATIONS = [[180.0, 0, 'Lights off'], [180.0, 0, 'Close door'],
[180.0, 0, 'Lights off'], [180.0, 0, 'Close door'],
[3.14, 4.2, 'nothing'], [1800.2, 25.5, 'Apnea']]
SFREQ = 100
DATA_LENGTH = int(EXPECTED_ANNOTATIONS[-1][0] * SFREQ) + 1
annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00'
b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00'
b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00'
b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00'
b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00')
annot_file = tmpdir.join('annotations.txt')
annot_file.write(annot)
annotmap_file = tmpdir.join('annotations_map.txt')
annotmap_file.write('Lights off:1,nothing:2,Apnea:3,Close door:4')
stim_ch = _read_annot(annot=str(annot_file), annotmap=str(annotmap_file),
sfreq=SFREQ, data_length=DATA_LENGTH)
assert stim_ch.shape == (DATA_LENGTH,)
assert_array_equal(
|
np.bincount(stim_ch)
|
numpy.bincount
|
#! /usr/bin/env python3
"""Parse through the subsample counts of group specific kmer counts."""
import argparse as ap
import glob
import gzip
import os
import numpy as np
SAMPLES = {'ba': [], 'bcg': [], 'lef': []}
SAMPLE_COLS = [
'sample', 'replicate', 'group', 'coverage', 'is_bcg', 'is_ba',
'has_lethal', 'total_kmers', 'total_count', 'tp', 'tn', 'fp', 'fn',
'kmer_cov_min', 'kmer_cov_mean', 'kmer_cov_median', 'kmer_cov_max',
'non_zero_kmer_cov_min', 'non_zero_kmer_cov_mean',
'non_zero_kmer_cov_median', 'non_zero_kmer_cov_max'
]
KMERS = {}
def get_coverage_stats(coverage):
"""Return summary stats of a set of coverages."""
non_zero = [c for c in coverage if c]
np_array =
|
np.array(coverage)
|
numpy.array
|
import pytest
import numpy as np
from ..run_gp import read_data
def test_read_data():
filename = "test_data.csv"
datadir = "data/test_data/"
read_data(filename, datadir)
def test_outputs():
filename = "test_data.csv"
datadir = "data/test_data/"
time, flux, flux_err = read_data(filename, datadir)
assert len(time) == 10
assert len(time) == len(flux) == len(flux_err)
time_true = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert np.all(time == time_true)
assert np.all(flux == 5)
assert
|
np.all(flux_err == 1)
|
numpy.all
|
"""
This module implements several environments, i.e., the simulators in which agents will interact and learn.
Any environment is characterized by the following two methods:
* step : receives the actions taken by the agents, and returns the new state of the simulator and the rewards
perceived by each agent, amongst other things.
* reset : sets the simulator at the initial state.
"""
import numpy as np
class RMG():
"""
A two-agent environment for a repeated matrix (symmetric) game.
Possible actions for each agent are (C)ooperate (0) and (D)efect (1).
The state is s_t = (a_{t-1}, b_{t-1}) with a_{t-1} and b_{t-1} the actions of the two players in the last turn,
plus an initial state s_0.
"""
# Possible actions
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = NUM_AGENTS*NUM_ACTIONS + 1 # we add the initial state.
def __init__(self, max_steps, payouts, batch_size=1):
self.max_steps = max_steps
self.batch_size = batch_size
self.payout_mat = payouts
self.available_actions = [
np.ones((batch_size, self.NUM_ACTIONS), dtype=int)
for _ in range(self.NUM_AGENTS)
]
self.step_count = None
def reset(self):
self.step_count = 0
init_state = np.zeros((self.batch_size, self.NUM_STATES))
init_state[:, -1] = 1
observations = [init_state, init_state]
info = [{'available_actions': aa} for aa in self.available_actions]
return observations, info
def step(self, action):
ac0, ac1 = action
self.step_count += 1
rewards = []
# The state is a one-hot (OHE) vector indicating [CC, CD, DC, DD, initial] (if NUM_STATES = 5)
state0 = np.zeros((self.batch_size, self.NUM_STATES))
state1 = np.zeros((self.batch_size, self.NUM_STATES))
for i, (a0, a1) in enumerate(zip(ac0, ac1)): # iterates over batch dimension
rewards.append([self.payout_mat[a1][a0], self.payout_mat[a0][a1]])
state0[i, a0 * 2 + a1] = 1
state1[i, a1 * 2 + a0] = 1
rewards = list(map(np.asarray, zip(*rewards)))
observations = [state0, state1]
done = (self.step_count == self.max_steps)
info = [{'available_actions': aa} for aa in self.available_actions]
return observations, rewards, done, info
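# A minimal usage sketch (added for illustration, assuming the RMG class above):
# two agents play an iterated prisoner's dilemma for max_steps turns with
# uniformly random actions. The payoff matrix and the helper name _demo_rmg are
# illustrative, not part of the original module.
def _demo_rmg(max_steps=5, batch_size=1):
    pd_payouts = [[-1, -3], [0, -2]]  # row player's payoffs for (C, D) x (C, D)
    env = RMG(max_steps=max_steps, payouts=pd_payouts, batch_size=batch_size)
    observations, info = env.reset()
    done = False
    while not done:
        actions = [np.random.randint(2, size=batch_size),
                   np.random.randint(2, size=batch_size)]
        observations, rewards, done, info = env.step(actions)
    return observations, rewards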
class AdvRw():
"""
A two-action stateless environment in which an adversary controls the reward
"""
def __init__(self, mode='friend', p=0.5):
self._mode = mode
# adversary estimation of our action
self._policy = np.asarray([0.5, 0.5])
self._learning_rate = 0.25
self._p = p # probability for the neutral environment
def reset(self):
# self._policy = np.asarray([0.5, 0.5])
return
def step(self, action):
if self._mode == 'friend':
if np.argmax(self._policy) == action:
reward = +50
else:
reward = -50
elif self._mode == 'adversary':
if np.argmax(self._policy) == action:
reward = -50
else:
reward = +50
elif self._mode == 'neutral':
box = np.random.rand() < self._p
if int(box) == action:
reward = +50
else:
reward = -50
self._policy = (self._learning_rate * np.array([1.0-action, action])
+ (1.0-self._learning_rate) * self._policy)
self._policy /= np.sum(self._policy)
# print('---')
#print('r', reward)
#print('p', self._policy)
# print('---')
return None, (reward, -reward), True, None
class AdvRw2():
"""
Friend-or-Foe environment modified to model the adversary separately.
"""
def __init__(self, max_steps, payout=50, batch_size=1):
self.max_steps = max_steps
self.batch_size = batch_size
self.payout = payout
self.available_actions =
|
np.array([0, 1])
|
numpy.array
|
import gym
import numpy as np
import matplotlib.pyplot as plt
from itertools import product
import random
from tqdm import tqdm
'''
Implement n-step Sarsa and evaluate it on the 8x8 env. Evaluate the performance for different choices of n and alpha.
Visualize your results: plot the performance over alpha for different choices of n, similar to lecture 8 slide 9.
'''
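# A minimal sketch of the core n-step Sarsa quantities (added for illustration; the
# helper names nstep_return and nstep_sarsa_update are not part of the exercise code).
# The n-step return bootstraps from Q(s_{tau+n}, a_{tau+n}) and the tabular update
# moves Q(s_tau, a_tau) towards it with step size alpha.
def nstep_return(rewards, gamma, bootstrap_q):
    """Discounted sum of the stored rewards plus the bootstrapped tail value."""
    G = bootstrap_q
    for r in reversed(rewards):
        G = r + gamma * G
    return G

def nstep_sarsa_update(Q, s_tau, a_tau, G, alpha):
    """Tabular TD update towards the n-step return G."""
    Q[s_tau][a_tau] += alpha * (G - Q[s_tau][a_tau])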
def print_policy(Q, env):
""" This is a helper function to print a nice policy from the Q function"""
moves = [u'←', u'↓',u'→', u'↑']
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
policy = np.chararray(dims, unicode=True)
policy[:] = ' '
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
policy[idx] = moves[np.argmax(Q[s])]
if env.desc[idx] in ['H', 'G']:
policy[idx] = u'·'
print('\n'.join([''.join([u'{:2}'.format(item) for item in row])
for row in policy]))
def plot_V(Q, env):
""" This is a helper function to plot the state values from the Q function"""
fig = plt.figure()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
V = np.zeros(dims)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
V[idx] = np.max(Q[s])
if env.desc[idx] in ['H', 'G']:
V[idx] = 0.
plt.imshow(V, origin='upper',
extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6,
cmap=plt.cm.RdYlGn, interpolation='none')
for x, y in product(range(dims[0]), range(dims[1])):
plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]),
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
def plot_Q(Q, env):
""" This is a helper function to plot the Q function """
from matplotlib import colors, patches
fig = plt.figure()
ax = fig.gca()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
up = np.array([[0, 1], [0.5, 0.5], [1,1]])
down = np.array([[0, 0], [0.5, 0.5], [1,0]])
left = np.array([[0, 0], [0.5, 0.5], [0,1]])
right = np.array([[1, 0], [0.5, 0.5], [1,1]])
tri = [left, down, right, up]
pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]]
cmap = plt.cm.RdYlGn
norm = colors.Normalize(vmin=.0,vmax=.6)
ax.imshow(
|
np.zeros(dims)
|
numpy.zeros
|
import math
import numpy as np
import matplotlib.pyplot as plt
import random
## simulation
s0 = 100.0 # initial price
x0 = 4000.0 # starting cash value
sigma = 0.05 # variance
M = 4000 # steps
Sim = 100 # number of simulations
A = 0.5 # arrival intensity (lambda)
k = 1.5 # intensity decay parameter (k)
## parameters
Delta = 0.1
Give = 0.5
def simulate():
s = -1
while
|
np.min(s)
|
numpy.min
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: traversal_perceptual_length.py
# --- Creation Date: 12-05-2020
# --- Last Modified: Tue 16 Mar 2021 17:43:20 AEDT
# --- Author: <NAME>
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""Traversal Perceptual Length (TPL)."""
import os
import numpy as np
import pdb
import time
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from metrics import metric_base
from metrics.perceptual_path_length import normalize, slerp
from training import misc
from training.utils import get_return_v
#----------------------------------------------------------------------------
class TPL(metric_base.MetricBase):
def __init__(self, n_samples_per_dim, crop, Gs_overrides, n_traversals, no_mapping, no_convert=False, active_thresh=0.1, use_bound_4=True, **kwargs):
super().__init__(**kwargs)
self.crop = crop
self.Gs_overrides = Gs_overrides
self.n_samples_per_dim = n_samples_per_dim
self.n_traversals = n_traversals
self.no_mapping = no_mapping
self.no_convert = no_convert
self.active_thresh = active_thresh
self.use_bound_4 = use_bound_4
def _evaluate(self, Gs, Gs_kwargs, num_gpus, **kwargs):
Gs_kwargs = dict(Gs_kwargs)
Gs_kwargs.update(self.Gs_overrides)
minibatch_per_gpu = (self.n_samples_per_dim - 1) // num_gpus + 1
if (not self.no_mapping) and (not self.no_convert):
Gs = Gs.convert(new_func_name='training.ps_sc_networks2.G_main_ps_sc')
# Construct TensorFlow graph.
n_continuous = Gs.input_shape[1]
distance_expr = []
eval_dim_phs = []
lat_start_alpha_phs = []
lat_end_alpha_phs = []
lat_sample_phs = []
lerps_expr = []
for gpu_idx in range(num_gpus):
with tf.device('/gpu:%d' % gpu_idx):
Gs_clone = Gs.clone()
if self.no_mapping:
noise_vars = [var for name, var in Gs_clone.vars.items() if name.startswith('noise')]
else:
noise_vars = [var for name, var in Gs_clone.components.synthesis.vars.items() if name.startswith('noise')]
# Latent pairs placeholder
eval_dim = tf.placeholder(tf.int32)
lat_start_alpha = tf.placeholder(tf.float32) # should be in [0, 1]
lat_end_alpha = tf.placeholder(tf.float32) # should be in [0, 1]
eval_dim_phs.append(eval_dim)
lat_start_alpha_phs.append(lat_start_alpha)
lat_end_alpha_phs.append(lat_end_alpha)
eval_dim_mask = tf.tile(tf.one_hot(eval_dim, n_continuous)[tf.newaxis, :] > 0, [minibatch_per_gpu, 1])
lerp_t = tf.linspace(lat_start_alpha, lat_end_alpha, minibatch_per_gpu) # [b]
lerps_expr.append(lerp_t)
lat_sample = tf.placeholder(tf.float32, shape=Gs_clone.input_shape[1:])
lat_sample_phs.append(lat_sample)
# lat_t0 = tf.zeros([minibatch_per_gpu] + Gs_clone.input_shape[1:])
lat_t0 = tf.tile(lat_sample[tf.newaxis, :], [minibatch_per_gpu, 1])
if self.use_bound_4:
lat_t0_min2 = tf.zeros_like(lat_t0) - 4
else:
lat_t0_min2 = lat_t0 - 2
lat_t0 = tf.where(eval_dim_mask, lat_t0_min2, lat_t0) # [b, n_continuous]
lat_t1 = tf.tile(lat_sample[tf.newaxis, :], [minibatch_per_gpu, 1])
if self.use_bound_4:
lat_t1_add2 = tf.zeros_like(lat_t1) + 4
else:
lat_t1_add2 = lat_t1 + 2
lat_t1 = tf.where(eval_dim_mask, lat_t1_add2, lat_t1) # [b, n_continuous]
lat_e = tflib.lerp(lat_t0, lat_t1, lerp_t[:, tf.newaxis]) # [b, n_continuous]
# labels = tf.reshape(self._get_random_labels_tf(minibatch_per_gpu), [minibatch_per_gpu, -1])
labels = tf.zeros([minibatch_per_gpu, 0], dtype=tf.float32)
if self.no_mapping:
dlat_e = lat_e
else:
dlat_e = get_return_v(Gs_clone.components.mapping.get_output_for(lat_e, labels, **Gs_kwargs), 1)
# Synthesize images.
with tf.control_dependencies([var.initializer for var in noise_vars]): # use same noise inputs for the entire minibatch
if self.no_mapping:
images = get_return_v(Gs_clone.get_output_for(dlat_e, labels, randomize_noise=False, **Gs_kwargs), 1)
else:
images = get_return_v(Gs_clone.components.synthesis.get_output_for(dlat_e, randomize_noise=False, **Gs_kwargs), 1)
# print('images.shape:', images.get_shape().as_list())
images = tf.cast(images, tf.float32)
# Crop only the face region.
if self.crop:
c = int(images.shape[2] // 8)
images = images[:, :, c*3 : c*7, c*2 : c*6]
# Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
factor = images.shape[2] // 256
if factor > 1:
images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
images = tf.reduce_mean(images, axis=[3,5])
# Scale dynamic range from [-1,1] to [0,255] for VGG.
images = (images + 1) * (255 / 2)
# Evaluate perceptual distance.
if images.get_shape().as_list()[1] == 1:
images = tf.tile(images, [1, 3, 1, 1])
img_e0 = images[:-1]
img_e1 = images[1:]
distance_measure = misc.load_pkl('http://d36zk2xti64re0.cloudfront.net/stylegan1/networks/metrics/vgg16_zhang_perceptual.pkl')
distance_tmp = distance_measure.get_output_for(img_e0, img_e1)
print('distance_tmp.shape:', distance_tmp.get_shape().as_list())
distance_expr.append(distance_tmp)
# Sampling loop
n_segs_per_dim = (self.n_samples_per_dim - 1) // ((minibatch_per_gpu - 1) * num_gpus)
self.n_samples_per_dim = n_segs_per_dim * ((minibatch_per_gpu - 1) * num_gpus) + 1
alphas = np.linspace(0., 1., num=(n_segs_per_dim * num_gpus)+1)
traversals_dim = []
for n in range(self.n_traversals):
lat_sample_np =
|
np.random.normal(size=Gs_clone.input_shape[1:])
|
numpy.random.normal
|
#!/usr/bin/env python
# coding: utf-8
import typer
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import functools
import os
from torch.optim import Adam
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torchvision.datasets import MNIST
import tqdm
from torch.utils.data import Dataset
import xarray as xr
import matplotlib.pyplot as plt
from scipy import integrate
from torchvision.utils import make_grid
import seaborn as sns
from enum import Enum
from ml_downscaling_emulator.utils import cp_model_rotated_pole
class Device(str, Enum):
cuda = "cuda"
cpu = "cpu"
def main(n_epochs: int = 10, dataset_name: str = '2.2km-coarsened-2x_london_pr_random', device: Device = Device.cuda, batch_size: int = 32, lr: float = 1e-4, sampling_steps: int = 500, snr: float = 0.1, error_tolerance: float = 1e-5, sigma:float = 25.0):
# device = 'cuda' #@param ['cuda', 'cpu'] {'type':'string'}
# ## size of a mini-batch
# batch_size = 32 #@param {'type':'integer'}
# ## learning rate
# lr=1e-4 #@param {'type':'number'}
# ## The number of sampling steps.
# num_steps = 500#@param {'type':'integer'}
# signal_to_noise_ratio = 0.16 #@param {'type':'number'}
# ## The error tolerance for the black-box ODE solver
# error_tolerance = 1e-5 #@param {'type': 'number'}
num_steps = sampling_steps
signal_to_noise_ratio = snr
class XRDataset(Dataset):
def __init__(self, ds, variables):
self.ds = ds
self.variables = variables
def __len__(self):
return len(self.ds.time)
def __getitem__(self, idx):
subds = self.ds.isel(time=idx)
X = torch.tensor(np.stack([subds[var].values for var in self.variables], axis=0))
y = torch.tensor(np.stack([subds["target_pr"].values], axis=0))
return X, y
class GaussianFourierProjection(nn.Module):
"""Gaussian random features for encoding time steps."""
def __init__(self, embed_dim, scale=30.):
super().__init__()
# Randomly sample weights during initialization. These weights are fixed
# during optimization and are not trainable.
self.W = nn.Parameter(torch.randn(embed_dim // 2) * scale, requires_grad=False)
def forward(self, x):
x_proj = x[:, None] * self.W[None, :] * 2 * np.pi
return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)
class Dense(nn.Module):
"""A fully connected layer that reshapes outputs to feature maps."""
def __init__(self, input_dim, output_dim):
super().__init__()
self.dense = nn.Linear(input_dim, output_dim)
def forward(self, x):
return self.dense(x)[..., None, None]
class ScoreNet(nn.Module):
"""A time-dependent score-based model built upon U-Net architecture."""
def __init__(self, marginal_prob_std, channels=[32, 64, 128, 256], embed_dim=256):
"""Initialize a time-dependent score-based network.
Args:
marginal_prob_std: A function that takes time t and gives the standard
deviation of the perturbation kernel p_{0t}(x(t) | x(0)).
channels: The number of channels for feature maps of each resolution.
embed_dim: The dimensionality of Gaussian random feature embeddings.
"""
super().__init__()
# Gaussian random feature embedding layer for time
self.embed = nn.Sequential(GaussianFourierProjection(embed_dim=embed_dim),
nn.Linear(embed_dim, embed_dim))
# Encoding layers where the resolution decreases
self.conv1 = nn.Conv2d(1, channels[0], 3, stride=1, bias=False)
self.dense1 = Dense(embed_dim, channels[0])
self.gnorm1 = nn.GroupNorm(4, num_channels=channels[0])
self.conv2 = nn.Conv2d(channels[0], channels[1], 3, stride=2, bias=False)
self.dense2 = Dense(embed_dim, channels[1])
self.gnorm2 = nn.GroupNorm(32, num_channels=channels[1])
self.conv3 = nn.Conv2d(channels[1], channels[2], 3, stride=2, bias=False)
self.dense3 = Dense(embed_dim, channels[2])
self.gnorm3 = nn.GroupNorm(32, num_channels=channels[2])
self.conv4 = nn.Conv2d(channels[2], channels[3], 3, stride=2, bias=False)
self.dense4 = Dense(embed_dim, channels[3])
self.gnorm4 = nn.GroupNorm(32, num_channels=channels[3])
# Decoding layers where the resolution increases
self.tconv4 = nn.ConvTranspose2d(channels[3], channels[2], 3, stride=2, bias=False)
self.dense5 = Dense(embed_dim, channels[2])
self.tgnorm4 = nn.GroupNorm(32, num_channels=channels[2])
self.tconv3 = nn.ConvTranspose2d(channels[2] + channels[2], channels[1], 3, stride=2, bias=False, output_padding=1)
self.dense6 = Dense(embed_dim, channels[1])
self.tgnorm3 = nn.GroupNorm(32, num_channels=channels[1])
self.tconv2 = nn.ConvTranspose2d(channels[1] + channels[1], channels[0], 3, stride=2, bias=False, output_padding=1)
self.dense7 = Dense(embed_dim, channels[0])
self.tgnorm2 = nn.GroupNorm(32, num_channels=channels[0])
self.tconv1 = nn.ConvTranspose2d(channels[0] + channels[0], 1, 3, stride=1)
# The swish activation function
self.act = lambda x: x * torch.sigmoid(x)
self.marginal_prob_std = marginal_prob_std
def forward(self, x, t):
# Obtain the Gaussian random feature embedding for t
embed = self.act(self.embed(t))
# Encoding path
h1 = self.conv1(x)
## Incorporate information from t
h1 += self.dense1(embed)
## Group normalization
h1 = self.gnorm1(h1)
h1 = self.act(h1)
h2 = self.conv2(h1)
h2 += self.dense2(embed)
h2 = self.gnorm2(h2)
h2 = self.act(h2)
h3 = self.conv3(h2)
h3 += self.dense3(embed)
h3 = self.gnorm3(h3)
h3 = self.act(h3)
h4 = self.conv4(h3)
h4 += self.dense4(embed)
h4 = self.gnorm4(h4)
h4 = self.act(h4)
# Decoding path
h = self.tconv4(h4)
## Skip connection from the encoding path
h += self.dense5(embed)
h = self.tgnorm4(h)
h = self.act(h)
h = self.tconv3(torch.cat([h, h3], dim=1))
h += self.dense6(embed)
h = self.tgnorm3(h)
h = self.act(h)
h = self.tconv2(torch.cat([h, h2], dim=1))
h += self.dense7(embed)
h = self.tgnorm2(h)
h = self.act(h)
h = self.tconv1(torch.cat([h, h1], dim=1))
# Normalize output
h = h / self.marginal_prob_std(t)[:, None, None, None]
return h
def marginal_prob_std(t, sigma):
"""Compute the mean and standard deviation of $p_{0t}(x(t) | x(0))$.
Args:
t: A vector of time steps.
sigma: The $\sigma$ in our SDE.
Returns:
The standard deviation.
"""
# t = torch.tensor(t, device=device)
t = t.clone().detach().to(device)
return torch.sqrt((sigma**(2 * t) - 1.) / 2. / np.log(sigma))
def diffusion_coeff(t, sigma):
"""Compute the diffusion coefficient of our SDE.
Args:
t: A vector of time steps.
sigma: The $\sigma$ in our SDE.
Returns:
The vector of diffusion coefficients.
"""
return torch.tensor(sigma**t, device=device)
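# Added note on the two helpers above (a sketch of the derivation, assuming the
# variance-exploding SDE dx = sigma^t dw used throughout this script): the
# perturbation kernel p_{0t}(x(t) | x(0)) is Gaussian with
#   Var[x(t)] = int_0^t sigma^(2s) ds = (sigma^(2t) - 1) / (2 * log(sigma)),
# so marginal_prob_std returns its square root and diffusion_coeff returns g(t) = sigma^t.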
def loss_fn(model, x, marginal_prob_std, eps=1e-5):
"""The loss function for training score-based generative models.
Args:
model: A PyTorch model instance that represents a
time-dependent score-based model.
x: A mini-batch of training data.
marginal_prob_std: A function that gives the standard deviation of
the perturbation kernel.
eps: A tolerance value for numerical stability.
"""
random_t = torch.rand(x.shape[0], device=x.device) * (1. - eps) + eps
z = torch.randn_like(x)
std = marginal_prob_std(random_t)
perturbed_x = x + z * std[:, None, None, None]
score = model(perturbed_x, random_t)
loss = torch.mean(torch.sum((score * std[:, None, None, None] + z)**2, dim=(1,2,3)))
return loss
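# Added note: the loss above is the denoising score matching objective
#   E_t E_{x(0)} E_z || sigma_t * s_theta(x(0) + sigma_t * z, t) + z ||^2,
# i.e. the network is trained so that sigma_t times its output cancels the added
# noise z; the squared sum over dims (1, 2, 3) computes exactly this residual.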
def Euler_Maruyama_sampler(score_model,
marginal_prob_std,
diffusion_coeff,
batch_size=64,
num_steps=num_steps,
device='cuda',
eps=1e-3):
"""Generate samples from score-based models with the Euler-Maruyama solver.
Args:
score_model: A PyTorch model that represents the time-dependent score-based model.
marginal_prob_std: A function that gives the standard deviation of
the perturbation kernel.
diffusion_coeff: A function that gives the diffusion coefficient of the SDE.
batch_size: The number of samplers to generate by calling this function once.
num_steps: The number of sampling steps.
Equivalent to the number of discretized time steps.
device: 'cuda' for running on GPUs, and 'cpu' for running on CPUs.
eps: The smallest time step for numerical stability.
Returns:
Samples.
"""
t = torch.ones(batch_size, device=device)
init_x = torch.randn(batch_size, 1, 28, 28, device=device) * marginal_prob_std(t)[:, None, None, None]
time_steps = torch.linspace(1., eps, num_steps, device=device)
step_size = time_steps[0] - time_steps[1]
x = init_x
with torch.no_grad():
# for time_step in tqdm.notebook.tqdm(time_steps):
for time_step in tqdm.tqdm(time_steps):
batch_time_step = torch.ones(batch_size, device=device) * time_step
g = diffusion_coeff(batch_time_step)
mean_x = x + (g**2)[:, None, None, None] * score_model(x, batch_time_step) * step_size
x = mean_x + torch.sqrt(step_size) * g[:, None, None, None] * torch.randn_like(x)
# Do not include any noise in the last sampling step.
return mean_x
def pc_sampler(score_model,
marginal_prob_std,
diffusion_coeff,
batch_size=64,
num_steps=num_steps,
snr=signal_to_noise_ratio,
device='cuda',
eps=1e-3):
"""Generate samples from score-based models with Predictor-Corrector method.
Args:
score_model: A PyTorch model that represents the time-dependent score-based model.
marginal_prob_std: A function that gives the standard deviation
of the perturbation kernel.
diffusion_coeff: A function that gives the diffusion coefficient
of the SDE.
batch_size: The number of samplers to generate by calling this function once.
num_steps: The number of sampling steps.
Equivalent to the number of discretized time steps.
device: 'cuda' for running on GPUs, and 'cpu' for running on CPUs.
eps: The smallest time step for numerical stability.
Returns:
Samples.
"""
t = torch.ones(batch_size, device=device)
init_x = torch.randn(batch_size, 1, 28, 28, device=device) * marginal_prob_std(t)[:, None, None, None]
time_steps = np.linspace(1., eps, num_steps)
step_size = time_steps[0] - time_steps[1]
x = init_x
with torch.no_grad():
# for time_step in tqdm.notebook.tqdm(time_steps):
for time_step in tqdm.tqdm(time_steps):
batch_time_step = torch.ones(batch_size, device=device) * time_step
# Corrector step (Langevin MCMC)
grad = score_model(x, batch_time_step)
grad_norm = torch.norm(grad.reshape(grad.shape[0], -1), dim=-1).mean()
noise_norm = np.sqrt(np.prod(x.shape[1:]))
langevin_step_size = 2 * (snr * noise_norm / grad_norm)**2
x = x + langevin_step_size * grad + torch.sqrt(2 * langevin_step_size) * torch.randn_like(x)
# Predictor step (Euler-Maruyama)
g = diffusion_coeff(batch_time_step)
x_mean = x + (g**2)[:, None, None, None] * score_model(x, batch_time_step) * step_size
x = x_mean + torch.sqrt(g**2 * step_size)[:, None, None, None] * torch.randn_like(x)
# The last step does not include any noise
return x_mean
def ode_sampler(score_model,
marginal_prob_std,
diffusion_coeff,
batch_size=64,
atol=error_tolerance,
rtol=error_tolerance,
device='cuda',
z=None,
eps=1e-3):
"""Generate samples from score-based models with black-box ODE solvers.
Args:
score_model: A PyTorch model that represents the time-dependent score-based model.
marginal_prob_std: A function that returns the standard deviation
of the perturbation kernel.
diffusion_coeff: A function that returns the diffusion coefficient of the SDE.
batch_size: The number of samplers to generate by calling this function once.
atol: Tolerance of absolute errors.
rtol: Tolerance of relative errors.
device: 'cuda' for running on GPUs, and 'cpu' for running on CPUs.
z: The latent code that governs the final sample. If None, we start from p_1;
otherwise, we start from the given z.
eps: The smallest time step for numerical stability.
"""
t = torch.ones(batch_size, device=device)
# Create the latent code
if z is None:
init_x = torch.randn(batch_size, 1, 28, 28, device=device) * marginal_prob_std(t)[:, None, None, None]
else:
init_x = z
shape = init_x.shape
def score_eval_wrapper(sample, time_steps):
"""A wrapper of the score-based model for use by the ODE solver."""
sample = torch.tensor(sample, device=device, dtype=torch.float32).reshape(shape)
time_steps = torch.tensor(time_steps, device=device, dtype=torch.float32).reshape((sample.shape[0], ))
with torch.no_grad():
score = score_model(sample, time_steps)
return score.cpu().numpy().reshape((-1,)).astype(np.float64)
def ode_func(t, x):
"""The ODE function for use by the ODE solver."""
time_steps = np.ones((shape[0],)) * t
g = diffusion_coeff(torch.tensor(t)).cpu().numpy()
return -0.5 * (g**2) * score_eval_wrapper(x, time_steps)
# Run the black-box ODE solver.
res = integrate.solve_ivp(ode_func, (1., eps), init_x.reshape(-1).cpu().numpy(), rtol=rtol, atol=atol, method='RK45')
print(f"Number of function evaluations: {res.nfev}")
x = torch.tensor(res.y[:, -1], device=device).reshape(shape)
return x
def prior_likelihood(z, sigma):
"""The likelihood of a Gaussian distribution with mean zero and
standard deviation sigma."""
shape = z.shape
N = np.prod(shape[1:])
return -N / 2. * torch.log(2*np.pi*sigma**2) - torch.sum(z**2, dim=(1,2,3)) / (2 * sigma**2)
def ode_likelihood(x,
score_model,
marginal_prob_std,
diffusion_coeff,
batch_size=64,
device='cuda',
eps=1e-5):
"""Compute the likelihood with probability flow ODE.
Args:
x: Input data.
score_model: A PyTorch model representing the score-based model.
marginal_prob_std: A function that gives the standard deviation of the
perturbation kernel.
diffusion_coeff: A function that gives the diffusion coefficient of the
forward SDE.
batch_size: The batch size. Equals to the leading dimension of `x`.
device: 'cuda' for evaluation on GPUs, and 'cpu' for evaluation on CPUs.
eps: A `float` number. The smallest time step for numerical stability.
Returns:
z: The latent code for `x`.
bpd: The log-likelihoods in bits/dim.
"""
# Draw the random Gaussian sample for Skilling-Hutchinson's estimator.
epsilon = torch.randn_like(x)
def divergence_eval(sample, time_steps, epsilon):
"""Compute the divergence of the score-based model with Skilling-Hutchinson."""
with torch.enable_grad():
sample.requires_grad_(True)
score_e = torch.sum(score_model(sample, time_steps) * epsilon)
grad_score_e = torch.autograd.grad(score_e, sample)[0]
return torch.sum(grad_score_e * epsilon, dim=(1, 2, 3))
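# Added note: divergence_eval implements the Skilling-Hutchinson trace estimator,
#   div(s_theta(x)) = tr(d s_theta / dx) = E_eps[ eps^T (d s_theta / dx) eps ],
# using the single fixed Gaussian probe `epsilon` drawn above: the autograd call
# yields (d s_theta / dx)^T eps, and the final sum contracts it with eps again.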
shape = x.shape
def score_eval_wrapper(sample, time_steps):
"""A wrapper for evaluating the score-based model for the black-box ODE solver."""
sample = torch.tensor(sample, device=device, dtype=torch.float32).reshape(shape)
time_steps = torch.tensor(time_steps, device=device, dtype=torch.float32).reshape((sample.shape[0], ))
with torch.no_grad():
score = score_model(sample, time_steps)
return score.cpu().numpy().reshape((-1,)).astype(np.float64)
def divergence_eval_wrapper(sample, time_steps):
"""A wrapper for evaluating the divergence of score for the black-box ODE solver."""
with torch.no_grad():
# Obtain x(t) by solving the probability flow ODE.
sample = torch.tensor(sample, device=device, dtype=torch.float32).reshape(shape)
time_steps = torch.tensor(time_steps, device=device, dtype=torch.float32).reshape((sample.shape[0], ))
# Compute likelihood.
div = divergence_eval(sample, time_steps, epsilon)
return div.cpu().numpy().reshape((-1,)).astype(np.float64)
def ode_func(t, x):
"""The ODE function for the black-box solver."""
time_steps = np.ones((shape[0],)) * t
sample = x[:-shape[0]]
logp = x[-shape[0]:]
g = diffusion_coeff(torch.tensor(t)).cpu().numpy()
sample_grad = -0.5 * g**2 * score_eval_wrapper(sample, time_steps)
logp_grad = -0.5 * g**2 * divergence_eval_wrapper(sample, time_steps)
return
|
np.concatenate([sample_grad, logp_grad], axis=0)
|
numpy.concatenate
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import lightgbm as lgb
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
import datetime
import gc
import time
from contextlib import contextmanager, redirect_stdout
# Original code from https://www.kaggle.com/aitude/ashrae-missing-weather-data-handling by @aitude
@contextmanager
def timer(name):
print(f'{datetime.datetime.now()} - [{name}] ...')
t0 = time.time()
yield
print(f'{datetime.datetime.now()} - [{name}] done in {time.time() - t0:.0f} s\n')
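# A small usage sketch of the timer helper above (added for illustration):
#
#   with timer('fill weather data'):
#       weather_df = fill_weather_dataset(weather_df)
#
# prints a start line, runs the block, then prints the elapsed time in seconds.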
def fill_weather_dataset(weather_df):
# Find Missing Dates
time_format = "%Y-%m-%d %H:%M:%S"
start_date = datetime.datetime.strptime(weather_df['timestamp'].min(),time_format)
end_date = datetime.datetime.strptime(weather_df['timestamp'].max(),time_format)
total_hours = int(((end_date - start_date).total_seconds() + 3600) / 3600)
hours_list = [(end_date - datetime.timedelta(hours=x)).strftime(time_format) for x in range(total_hours)]
missing_hours = []
for site_id in range(16):
site_hours = np.array(weather_df[weather_df['site_id'] == site_id]['timestamp'])
new_rows = pd.DataFrame(np.setdiff1d(hours_list,site_hours),columns=['timestamp'])
new_rows['site_id'] = site_id
weather_df = pd.concat([weather_df,new_rows])
weather_df = weather_df.reset_index(drop=True)
# Add new Features
weather_df["datetime"] = pd.to_datetime(weather_df["timestamp"])
weather_df["day"] = weather_df["datetime"].dt.day
weather_df["week"] = weather_df["datetime"].dt.week
weather_df["month"] = weather_df["datetime"].dt.month
# Reset Index for Fast Update
weather_df = weather_df.set_index(['site_id','day','month'])
air_temperature_filler = pd.DataFrame(weather_df.groupby(['site_id','day','month'])['air_temperature'].mean(),columns=["air_temperature"])
weather_df.update(air_temperature_filler,overwrite=False)
# Step 1: compute the group-wise (site_id, day, month) mean cloud coverage
cloud_coverage_filler = weather_df.groupby(['site_id','day','month'])['cloud_coverage'].mean()
# Step 2: forward-fill any groups that are still missing
cloud_coverage_filler = pd.DataFrame(cloud_coverage_filler.fillna(method='ffill'),columns=["cloud_coverage"])
weather_df.update(cloud_coverage_filler,overwrite=False)
due_temperature_filler = pd.DataFrame(weather_df.groupby(['site_id','day','month'])['dew_temperature'].mean(),columns=["dew_temperature"])
weather_df.update(due_temperature_filler,overwrite=False)
# Step 1: compute the group-wise mean sea-level pressure
sea_level_filler = weather_df.groupby(['site_id','day','month'])['sea_level_pressure'].mean()
# Step 2: forward-fill any groups that are still missing
sea_level_filler = pd.DataFrame(sea_level_filler.fillna(method='ffill'),columns=['sea_level_pressure'])
weather_df.update(sea_level_filler,overwrite=False)
wind_direction_filler = pd.DataFrame(weather_df.groupby(['site_id','day','month'])['wind_direction'].mean(),columns=['wind_direction'])
weather_df.update(wind_direction_filler,overwrite=False)
wind_speed_filler = pd.DataFrame(weather_df.groupby(['site_id','day','month'])['wind_speed'].mean(),columns=['wind_speed'])
weather_df.update(wind_speed_filler,overwrite=False)
# Step 1: compute the group-wise mean precipitation depth
precip_depth_filler = weather_df.groupby(['site_id','day','month'])['precip_depth_1_hr'].mean()
# Step 2: forward-fill any groups that are still missing
precip_depth_filler = pd.DataFrame(precip_depth_filler.fillna(method='ffill'),columns=['precip_depth_1_hr'])
weather_df.update(precip_depth_filler,overwrite=False)
weather_df = weather_df.reset_index()
weather_df = weather_df.drop(['datetime','day','week','month'],axis=1)
return weather_df
# Original code from https://www.kaggle.com/gemartin/load-data-reduce-memory-usage by @gemartin
from pandas.api.types import is_datetime64_any_dtype as is_datetime
from pandas.api.types import is_categorical_dtype
def reduce_mem_usage(df, use_float16=False):
"""
Iterate through all the columns of a dataframe and modify the data type to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024**2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
for col in df.columns:
if is_datetime(df[col]) or is_categorical_dtype(df[col]):
continue
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min >
|
np.iinfo(np.int16)
|
numpy.iinfo
|
import torch
import numpy as np
from models import spinal_net
import decoder
import os
from dataset import BaseDataset
import time
import cobb_evaluate
import argparse
def apply_mask(image, mask, alpha=0.5):
"""Apply the given mask to the image.
"""
color = np.random.rand(3)
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
class Network(object):
def __init__(self, args):
torch.manual_seed(317)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
heads = {'hm': args.num_classes, # cen, tl, tr, bl, br
'reg': 2*args.num_classes,
'wh': 2*4,
'mid_point': 2 * args.num_classes, }
self.model = spinal_net.SpineNet(heads=heads,
pretrained=True,
down_ratio=args.down_ratio,
final_kernel=1,
head_conv=256)
self.num_classes = args.num_classes
self.decoder = decoder.DecDecoder(K=args.K, conf_thresh=args.conf_thresh)
self.dataset = {'spinal': BaseDataset}
def load_model(self, model, resume):
checkpoint = torch.load(resume, map_location=lambda storage, loc: storage)
print('loaded weights from {}, epoch {}'.format(resume, checkpoint['epoch']))
state_dict_ = checkpoint['state_dict']
model.load_state_dict(state_dict_, strict=False)
return model
def eval(self, args, save):
save_path = args.weights_dir #+args.dataset
self.model = self.load_model(self.model, os.path.join(save_path, args.resume))
self.model = self.model.to(self.device)
self.model.eval()
dataset_module = self.dataset[args.dataset]
dsets = dataset_module(data_dir=args.data_dir,
phase='test',
input_h=args.input_h,
input_w=args.input_w,
down_ratio=args.down_ratio)
data_loader = torch.utils.data.DataLoader(dsets,
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True)
total_time = []
landmark_dist = []
pr_cobb_angles = []
gt_cobb_angles = []
for cnt, data_dict in enumerate(data_loader):
begin_time = time.time()
images = data_dict['images'][0]
img_id = data_dict['img_id'][0]
images = images.to('cuda')
print('processing {}/{} image ...'.format(cnt, len(data_loader)))
with torch.no_grad():
output = self.model(images)
hm = output['hm']
wh = output['wh']
reg = output['reg']
torch.cuda.synchronize(self.device)
pts2 = self.decoder.ctdet_decode(hm, wh, reg) # 17, 11
pts0 = pts2.copy()
pts0[:,:10] *= args.down_ratio
x_index = range(0,10,2)
y_index = range(1,10,2)
ori_image = dsets.load_image(dsets.img_ids.index(img_id)).copy()
h,w,c = ori_image.shape
pts0[:, x_index] = pts0[:, x_index]/args.input_w*w
pts0[:, y_index] = pts0[:, y_index]/args.input_h*h
# sort the y axis
sort_ind = np.argsort(pts0[:,1])
pts0 = pts0[sort_ind]
pr_landmarks = []
for i, pt in enumerate(pts0):
pr_landmarks.append(pt[2:4])
pr_landmarks.append(pt[4:6])
pr_landmarks.append(pt[6:8])
pr_landmarks.append(pt[8:10])
pr_landmarks = np.asarray(pr_landmarks, np.float32) #[68, 2]
end_time = time.time()
total_time.append(end_time-begin_time)
gt_landmarks = dsets.load_gt_pts(dsets.load_annoFolder(img_id))
pr_cobb_angles.append(cobb_evaluate.cobb_angle_calc(pr_landmarks, ori_image))
gt_cobb_angles.append(cobb_evaluate.cobb_angle_calc(gt_landmarks, ori_image))
pr_cobb_angles = np.asarray(pr_cobb_angles, np.float32)
gt_cobb_angles = np.asarray(gt_cobb_angles, np.float32)
out_abs = abs(gt_cobb_angles - pr_cobb_angles)
out_add = gt_cobb_angles + pr_cobb_angles
term1 = np.sum(out_abs, axis=1)
term2 = np.sum(out_add, axis=1)
SMAPE = np.mean(term1 / term2 * 100)
print('SMAPE is {}'.format(SMAPE))
total_time = total_time[1:]
print('avg time is {}'.format(np.mean(total_time)))
print('FPS is {}'.format(1./
|
np.mean(total_time)
|
numpy.mean
|
import cv2 as cv
import numpy as np
from picamera import PiCamera
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=['shot', 'diff'])
parser.add_argument('-name', choices=['0','1'])
args = parser.parse_args()
def normal(name):
img = cv.imread(name)
img_s = img[:, :, 0].astype(np.int32) + img[:, :, 1] + img[:, :, 2]  # cast so the channel sum does not overflow uint8
mean = np.mean(img_s)
img_n = img_s - mean
return img_n
def normal2(name):
img = cv.imread(name)
for i in range(3):
img[:, :, i] = img[:, :, i] -
|
np.mean(img[:, :, i])
|
numpy.mean
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
import matplotlib.colors as colors
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import matplotlib.cm as cm
import os
#------------------------------------------------------------------------------
# Code motivation --------------------------------------------------------------
"Code to find the percentages above which a cloudiness value is considered,"
"taking into account the changes in the pyranometer data, so as to find an average"
"threshold above which a percentage in the Fisheye data is considered cloudy."
"The data are processed at minute resolution."
################################################################################################################
## ------------------------- READING THE FISHEYE CLOUD COVER DATA -------------------------------##
################################################################################################################
df_cloud_TS = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Fish_Eye/Totales/Total_Timeseries_FishEye_TS.csv', sep=',')
df_cloud_TS.columns = ['fecha_hora', 'Porcentaje']
df_cloud_TS.index = df_cloud_TS['fecha_hora']
df_cloud_TS = df_cloud_TS.drop(['fecha_hora'], axis =1)
df_cloud_TS.index = pd.to_datetime(df_cloud_TS.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_cloud_TS.index = [df_cloud_TS.index[i].strftime("%Y-%m-%d %H:%M:00 ") for i in range(len(df_cloud_TS.index))]
df_cloud_TS.index = pd.to_datetime(df_cloud_TS.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_cloud_TS = df_cloud_TS.between_time('06:00', '17:59')
df_cloud_CI = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Fish_Eye/Totales/Total_Timeseries_FishEye_CI.csv', sep=',')
df_cloud_CI.columns = ['fecha_hora', 'Porcentaje']
df_cloud_CI.index = df_cloud_CI['fecha_hora']
df_cloud_CI = df_cloud_CI.drop(['fecha_hora'], axis =1)
df_cloud_CI.index = pd.to_datetime(df_cloud_CI.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_cloud_CI.index = [df_cloud_CI.index[i].strftime("%Y-%m-%d %H:%M:00 ") for i in range(len(df_cloud_CI.index))]
df_cloud_CI.index = pd.to_datetime(df_cloud_CI.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_cloud_CI = df_cloud_CI.between_time('06:00', '17:59')
df_cloud_AMVA = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Fish_Eye/Totales/Total_Timeseries_FishEye_AMVA.csv', sep=',')
df_cloud_AMVA.columns = ['fecha_hora', 'Porcentaje']
df_cloud_AMVA.index = df_cloud_AMVA['fecha_hora']
df_cloud_AMVA = df_cloud_AMVA.drop(['fecha_hora'], axis =1)
df_cloud_AMVA.index = pd.to_datetime(df_cloud_AMVA.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_cloud_AMVA.index = [df_cloud_AMVA.index[i].strftime("%Y-%m-%d %H:%M:00 ") for i in range(len(df_cloud_AMVA.index))]
df_cloud_AMVA.index = pd.to_datetime(df_cloud_AMVA.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_cloud_AMVA = df_cloud_AMVA.between_time('06:00', '17:59')
##########################################################################################################
##----------------------------------- READING THE PYRANOMETER DATA -------------------------------##
##########################################################################################################
df_pira_TS = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60012018.txt', parse_dates=[2])
df_pira_TS = df_pira_TS.set_index(["fecha_hora"])
df_pira_TS.index = df_pira_TS.index.tz_localize('UTC').tz_convert('America/Bogota')
df_pira_TS.index = df_pira_TS.index.tz_localize(None)
df_pira_TS = df_pira_TS[df_pira_TS['radiacion'] >=0]
df_pira_TS = df_pira_TS.between_time('06:00', '17:59')
df_pira_CI = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60022018.txt', parse_dates=[2])
df_pira_CI = df_pira_CI.set_index(["fecha_hora"])
df_pira_CI.index = df_pira_CI.index.tz_localize('UTC').tz_convert('America/Bogota')
df_pira_CI.index = df_pira_CI.index.tz_localize(None)
df_pira_CI = df_pira_CI[df_pira_CI['radiacion'] >=0]
df_pira_CI = df_pira_CI.between_time('06:00', '17:59')
df_pira_JV = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60032018.txt', parse_dates=[2])
df_pira_JV = df_pira_JV.set_index(["fecha_hora"])
df_pira_JV.index = df_pira_JV.index.tz_localize('UTC').tz_convert('America/Bogota')
df_pira_JV.index = df_pira_JV.index.tz_localize(None)
df_pira_JV = df_pira_JV[df_pira_JV['radiacion'] >=0]
df_pira_JV = df_pira_JV.between_time('06:00', '17:59')
##########################################################################################################
##------------------------------------- MERGING THE DATAFRAMES -----------------------------------##
##########################################################################################################
df_result_TS = pd.concat([df_cloud_TS, df_pira_TS], axis=1, join='inner')
df_result_CI = pd.concat([df_cloud_CI, df_pira_CI], axis=1, join='inner')  # pair the CI fisheye with the CI pyranometer (df_pira_CI was otherwise unused)
df_result_AMVA = pd.concat([df_cloud_AMVA, df_pira_TS], axis=1, join='inner')
df_result_TS = df_result_TS.drop(['Unnamed: 0', 'idestacion','temperatura'], axis=1)
df_result_CI = df_result_CI.drop(['Unnamed: 0', 'idestacion','temperatura'], axis=1)
df_result_AMVA = df_result_AMVA.drop(['Unnamed: 0', 'idestacion','temperatura'], axis=1)
##########################################################################################################
##--------------------------------------- DETERMINING THE THRESHOLDS ----------------------------------##
##########################################################################################################
"Con los gradientes de la radiacion se estableceran los umbrales correspondientes para el caso de la mañana"
"y para el caso de la tarde"
## ---- UMBRAL CASO NUBOSO :
df_result_TS['Rad_deriv'] = np.gradient(df_result_TS['radiacion'].values)
df_result_CI['Rad_deriv'] =
|
np.gradient(df_result_CI['radiacion'].values)
|
numpy.gradient
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 17 17:43:39 2019
@author: deniz
"""
import json
import os
import math
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns; sns.set() # for plot styling
os.chdir("/media/deniz/02B89600B895F301/BBD100K")
train_path = "data/labels/train/bdd100k_labels_images_train.json"
with open(train_path,"r") as ftr:
trlabel = json.load(ftr)
BBDlabeldict = {"bike":0,
"bus":1,
"car":2,
"motor":3,
"person":4,
"rider":5,
"traffic light":6,
"traffic sign":7,
"train":8,
"truck":9,
"drivable area":[],
"lane":[]}
w,h = [] , []
for ind1 in range(len(trlabel)):
for ind2 in range(len(trlabel[ind1]["labels"])):
try:
a=trlabel[ind1]["labels"][ind2]["box2d"] #traffic sign
x1,y1,x2,y2 = list(a.values())
width = abs(x1-x2)
height = abs(y1-y2)
w.append(width)
h.append(height)
except KeyError:
pass  # label has no box2d (e.g. drivable area / lane annotations)
w=np.asarray(w)
h=np.asarray(h)
x=[w,h]
x=np.asarray(x)
x=x.transpose()
########################################## K- Means
##########################################
from sklearn.cluster import KMeans
kmeans3 = KMeans(n_clusters=9)
kmeans3.fit(x)
y_kmeans3 = kmeans3.predict(x)
##########################################
centers3 = kmeans3.cluster_centers_
yolo_anchor_average=[]
for ind in range (9):
yolo_anchor_average.append(np.mean(x[y_kmeans3==ind],axis=0))
yolo_anchor_average=np.array(yolo_anchor_average)
plt.scatter(x[:, 0], x[:, 1], c=y_kmeans3, s=2, cmap='viridis')
plt.scatter(yolo_anchor_average[:, 0], yolo_anchor_average[:, 1], c='red', s=50);
yoloV3anchors = yolo_anchor_average
yoloV3anchors[:, 0] =yolo_anchor_average[:, 0] /1280 *608
yoloV3anchors[:, 1] =yolo_anchor_average[:, 1] /720 *608
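# Added note: the two lines above rescale the averaged anchors from the 1280x720
# BDD100K image resolution to the 608x608 YOLOv3 network input, so the anchor
# widths and heights below are expressed in network-input pixels.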
yoloV3anchors =
|
np.rint(yoloV3anchors)
|
numpy.rint
|
'''
Classes and functions to implement lipid COM gridding and analysis for lipid bilayers. Acts on MemSys objects.
The gridding and analysis procedures are based on
the descriptions given in Gapsys et al. J Comput Aided Mol Des (2013) 27:845-858,
which is itself a modified version of the GridMAT-MD method by Allen et al. Vol. 30, No. 12 Journal of Computational Chemistry.
However, I have currently left out several bits of the extra functionality, e.g. the handling of an embedded protein.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import object
import numpy as np
# import my running stats class
from pybilt.common.running_stats import RunningStats
from six.moves import range
class LipidGrid_2d(object):
def __init__(self, mda_frame, mda_universe, mda_frame_resids, plane, nxbins=50, nybins=50):
# store the frame and leaflet
self.frame = mda_frame
# self.leaflet = ms_leaflet
# get the x and y indices
ix = plane[0]
iy = plane[1]
iz = [i for i in [0, 1, 2] if i not in plane][0]
# get the box dimensions
box = mda_frame.box[plane]
boxx = box[ix]
boxy = box[iy]
# save the numbers of bins
self.x_nbins = nxbins
self.y_nbins = nybins
# initialize the edges and centers of the grid points
# x
self.x_min = 0.0
self.x_max = boxx
self.x_edges = np.linspace(self.x_min, self.x_max, (nxbins + 1), endpoint=True)
self.x_incr = self.x_edges[1] - self.x_edges[0]
x_incr_h = self.x_incr / 2.0
self.x_centers = np.zeros(nxbins)
self.x_nedges = len(self.x_edges)
for i in range(1, self.x_nedges):
j = i - 1
self.x_centers[j] = self.x_edges[j] + x_incr_h
# y
self.y_min = 0.0
self.y_max = boxy
self.y_edges = np.linspace(self.y_min, self.y_max, (nybins + 1), endpoint=True)
self.y_incr = self.y_edges[1] - self.y_edges[0]
y_incr_h = self.y_incr / 2.0
self.y_centers = np.zeros(nybins)
self.y_nedges = len(self.y_edges)
for i in range(1, self.y_nedges):
j = i - 1
self.y_centers[j] = self.y_edges[j] + y_incr_h
self.x_length = self.x_max - self.x_min
self.y_length = self.y_max - self.y_min
# get the lipid indices for this leaflet
resids = mda_frame_resids
#void_ind = max(indices) + 1
# now assign lipids to the gridpoints
self.lipid_grid = np.zeros((nxbins, nybins), dtype=int)  # np.int is removed in modern NumPy
self.lipid_grid_z = np.zeros((nxbins, nybins))
self.lipid_grid_resnames = []
bxh = boxx / 2.0
byh = boxy / 2.0
cx = 0
for x in self.x_centers:
self.lipid_grid_resnames.append([])
cy = 0
for y in self.y_centers:
r_min = 1.0e10
i_min = 0
z_min = 0.0
resname_min = "UNK"
# check lipid COMs
for i in resids:
res_sel_string = "resid "+str(i)
res_sel = mda_universe.select_atoms(res_sel_string)
resname = res_sel.resname
res_indices = res_sel.indices
for index in res_indices:
pos = mda_frame.positions[index]
xi = pos[ix]
yi = pos[iy]
zi = pos[iz]
# print "iz ",iz," zi ",zi
dx = x - xi
dy = y - yi
# Minimum image -- coordinates must be pre-wrapped
if np.absolute(dx) > bxh:
dx = boxx - np.absolute(x - bxh) - np.absolute(xi - bxh)
if np.absolute(dy) > byh:
dy = boxy - np.absolute(y - byh) - np.absolute(yi - byh)
rxy = np.sqrt(dx ** 2 + dy ** 2)
if rxy < r_min:
r_min = rxy
i_min = i
z_min = zi
resname_min = resname
# if embedded_protein is not None:
# print "i_min ",i_min," z_min ",z_min
# if cutoff is not None:
# else:
self.lipid_grid[cx, cy] = i_min
self.lipid_grid_z[cx, cy] = z_min
self.lipid_grid_resnames[cx].append(resname_min)
cy += 1
cx += 1
def get_index_at(self, ix, iy):
return self.lipid_grid[ix, iy]
def get_z_at(self, ix, iy):
return self.lipid_grid_z[ix, iy]
# Outputs the grid as an xyz coordinate file
def write_xyz(self, xyz_name):
# Open up the file to write to
xyz_out = open(xyz_name, "w")
npoints = self.x_nbins * self.y_nbins
comment = "Leaflet Grid " + self.leaflet.name
xyz_out.write(str(npoints))
xyz_out.write("\n")
xyz_out.write(comment)
xyz_out.write("\n")
cx = 0
for x in self.x_centers:
cy = 0
for y in self.y_centers:
# get the z coordinate
z = self.lipid_grid_z[cx, cy]
# get the lipid resname
oname = self.lipid_grid_resnames[cx][cy]
# write to file
line = str(oname) + " " + str(x) + " " + str(y) + " " + str(z)
xyz_out.write(line)
xyz_out.write("\n")
cy += 1
cx += 1
xyz_out.close()
return
class LipidGrids(object):
def __init__(self, mda_frame, mda_universe, leaflets, plane, nxbins=50, nybins=50):
# store the frame and leaflet
self.frame = mda_frame
self.leaflets = leaflets
self.plane = plane
self.norm = [i for i in [0, 1, 2] if i not in plane][0]
self.nbins_x = nxbins
self.nbins_y = nybins
self.leaf_grid = {}
self.myframe = mda_frame.frame
# initialize the grids
# upper
upper_resids = leaflets['upper'].get_member_resids()
self.leaf_grid['upper'] = LipidGrid_2d(mda_frame, mda_universe, upper_resids, plane, nxbins=nxbins, nybins=nybins)
# lower
lower_resids = leaflets['lower'].get_member_resids()
self.leaf_grid['lower'] = LipidGrid_2d(mda_frame, mda_universe, lower_resids, plane, nxbins=nxbins, nybins=nybins)
return
def thickness_grid(self):
tgrid = np.zeros((self.nbins_x, self.nbins_y))
for ix in range(self.nbins_x):
for iy in range(self.nbins_y):
zu = self.leaf_grid['upper'].get_z_at(ix, iy)
zl = self.leaf_grid['lower'].get_z_at(ix, iy)
dz = zu - zl
tgrid[ix, iy] = dz
if dz < 0.0:
print("Warning!!--MD frame number ", self.myframe, " --Value thickness less than zero (", dz, ") at grid point ", ix, " ", iy)
return tgrid
def average_thickness(self, return_grid=False):
trun = RunningStats()
tgrid = self.thickness_grid()
for ix in range(self.nbins_x):
for iy in range(self.nbins_y):
tc = tgrid[ix, iy]
trun.push(tc)
avg_out = (trun.mean(), trun.deviation())
if return_grid:
return avg_out, tgrid
else:
return avg_out
def map_to_grid(self, com_values_dict, leaflet='both'):
do_leaflet = []
if leaflet == "both":
do_leaflet.append('upper')
do_leaflet.append('lower')
elif leaflet == "upper" or leaflet == "lower":
do_leaflet.append(leaflet)
else:
# unknown option--use default "both"
print("!! Warning - request for unknown leaflet name \'", leaflet, "\' from the LeafletGrids of frame ", self.myframe)
print("!! the options are \"upper\", \"lower\", or \"both\"--using the default \"both\"")
do_leaflet.append('upper')
do_leaflet.append('lower')
out_dict = {}
for leaf in do_leaflet:
out_dict[leaf] = np.zeros((self.nbins_x, self.nbins_y))
for ix in range(self.nbins_x):
for iy in range(self.nbins_y):
com_ind = self.leaf_grid[leaf].get_index_at(ix, iy)
value = com_values_dict[com_ind]
out_dict[leaf][ix, iy] = value
return out_dict
def area_per_lipid(self):
do_leaflet = []
do_leaflet.append('upper')
do_leaflet.append('lower')
# get the unique type/resnames in the system
resnames = []
for leaf in do_leaflet:
for group in self.leaflets[leaf].groups:
gname = group.name()
if gname not in resnames:
resnames.append(gname)
# initialize counters for each residue/type
area_run_per_res_type = {}
for name in resnames:
area_run_per_res_type[name] = RunningStats()
area_per_lipid = {}
area_run = RunningStats()
for leaf in do_leaflet:
area_per_bin = self.leaf_grid[leaf].x_incr * self.leaf_grid[leaf].y_incr
lip_ind = self.leaflets[leaf].get_member_indices()
for i in lip_ind:
rname = self.frame.lipidcom[i].type
locations = np.where(self.leaf_grid[leaf].lipid_grid == i)
nlocs = len(locations[0])
# print locations
# print 'nlocs ',nlocs
area = area_per_bin * nlocs
area_per_lipid[i] = area
area_run_per_res_type[rname].push(area)
area_run.push(area)
average_per_res = {}
for name in resnames:
average = area_run_per_res_type[name].mean()
std = area_run_per_res_type[name].deviation()
average_per_res[name] = (average, std)
system_average = area_run.mean()
#system_dev = area_run.deviation()
output = (system_average, average_per_res, area_per_lipid)
return output
def curvature(self):
nxb = self.nbins_x
nyb = self.nbins_y
# first order derivatives
sx_u = np.zeros((nxb, nyb))
sy_u = np.zeros((nxb, nyb))
sx_l = np.zeros((nxb, nyb))
sy_l = np.zeros((nxb, nyb))
for ix in range(nxb):
for iy in range(nyb):
ixp = ix - 1
if ixp < 0:
ixp += nxb
ixn = ix + 1
if ixn >= nxb:
ixn -= nxb
iyp = iy - 1
if iyp < 0:
iyp += nyb
iyn = iy + 1
if iyn >= nyb:
iyn -= nyb
# upper
## using central difference for numerical first derivative
sx = self.leaf_grid['upper'].lipid_grid_z[ixn, iy] - self.leaf_grid['upper'].lipid_grid_z[ixp, iy]
sx /= (self.leaf_grid['upper'].x_incr) ** 2
sy = self.leaf_grid['upper'].lipid_grid_z[ix, iyn] - self.leaf_grid['upper'].lipid_grid_z[ix, iyp]
sy /= (self.leaf_grid['upper'].y_incr) ** 2
sx_u[ix, iy] = sx
sy_u[ix, iy] = sy
# lower
sx = self.leaf_grid['lower'].lipid_grid_z[ixn, iy] - self.leaf_grid['lower'].lipid_grid_z[ixp, iy]
sx /= (self.leaf_grid['lower'].x_incr) ** 2
sy = self.leaf_grid['lower'].lipid_grid_z[ix, iyn] - self.leaf_grid['lower'].lipid_grid_z[ix, iyp]
sy /= (self.leaf_grid['lower'].y_incr) ** 2
sx_l[ix, iy] = sx
sy_l[ix, iy] = sy
# now do second order derivatives - central difference numerical derivative of the first derivative
ssx_u = np.zeros((nxb, nyb))
ssy_u = np.zeros((nxb, nyb))
ssxy_u = np.zeros((nxb, nyb))
ssx_l = np.zeros((nxb, nyb))
ssy_l = np.zeros((nxb, nyb))
ssxy_l = np.zeros((nxb, nyb))
for ix in range(nxb):
for iy in range(nyb):
ixp = ix - 1
if ixp < 0:
ixp += nxb
ixn = ix + 1
if ixn >= nxb:
ixn -= nxb
iyp = iy - 1
if iyp < 0:
iyp += nyb
iyn = iy + 1
if iyn >= nyb:
iyn -= nyb
# upper
## using central difference for numerical first derivative
ssx = sx_u[ixn, iy] - sx_u[ixp, iy]
ssx /= (self.leaf_grid['upper'].x_incr) ** 2
ssy = sy_u[ix, iyn] - sy_u[ix, iyp]
ssy /= (self.leaf_grid['upper'].y_incr) ** 2
ssxy = sx_u[ix, iyn] - sx_u[ix, iyp]
ssxy /= (self.leaf_grid['upper'].y_incr) ** 2
ssx_u[ix, iy] = ssx
ssy_u[ix, iy] = ssy
ssxy_u[ix, iy] = ssxy
# lower
ssx = sx_l[ixn, iy] - sx_l[ixp, iy]
ssx /= (self.leaf_grid['lower'].x_incr) ** 2
ssy = sy_l[ix, iyn] - sy_l[ix, iyp]
ssy /= (self.leaf_grid['lower'].y_incr) ** 2
ssxy = sx_l[ix, iyn] - sx_l[ix, iyp]
ssxy /= (self.leaf_grid['lower'].y_incr) ** 2
ssx_l[ix, iy] = ssx
ssy_l[ix, iy] = ssy
ssxy_l[ix, iy] = ssxy
# now get curvatures
curv_mean_u = np.zeros((nxb, nyb))
curv_gauss_u = np.zeros((nxb, nyb))
curv_mean_l = np.zeros((nxb, nyb))
curv_gauss_l = np.zeros((nxb, nyb))
dx_u = self.leaf_grid['upper'].x_incr
dy_u = self.leaf_grid['upper'].y_incr
dx_l = self.leaf_grid['lower'].x_incr
dy_l = self.leaf_grid['lower'].y_incr
for ix in range(nxb):
for iy in range(nyb):
# upper
sx = sx_u[ix, iy]
sy = sy_u[ix, iy]
ssx = ssx_u[ix, iy]
ssy = ssy_u[ix, iy]
ssxy = ssxy_u[ix, iy]
sx_v = np.array([self.leaf_grid['upper'].x_centers[ix] + dx_u, 0.0, sx])
sy_v = np.array([0.0, self.leaf_grid['upper'].y_centers[iy] + dy_u, sy])
ssx_v = np.array([self.leaf_grid['upper'].x_centers[ix] + dx_u, 0.0, ssx])
ssy_v = np.array([0.0, self.leaf_grid['upper'].y_centers[iy] + dy_u, ssy])
ssxy_v = np.array([0.0, self.leaf_grid['upper'].y_centers[iy] + dy_u, ssxy])
E = np.dot(sx_v, sx_v)
F = np.dot(sx_v, sy_v)
G = np.dot(sy_v, sy_v)
n = np.cross(sx_v, sy_v)
n /= np.linalg.norm(n)
L =
|
np.dot(ssx_v, n)
|
numpy.dot
|
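To make the role of that dot product explicit: in LipidGrids.curvature() above, E, F and G are the coefficients of the first fundamental form of the leaflet surface, and L = ssx_v . n is one coefficient of the second fundamental form. The following is my own hedged sketch, not part of pybilt, of how the remaining coefficients and the mean and Gaussian curvatures follow from those quantities.

import numpy as np

def curvatures(sx_v, sy_v, ssx_v, ssy_v, ssxy_v):
    # First fundamental form from the tangent vectors of the surface.
    E = np.dot(sx_v, sx_v)
    F = np.dot(sx_v, sy_v)
    G = np.dot(sy_v, sy_v)
    # Unit normal from the tangents.
    n = np.cross(sx_v, sy_v)
    n = n / np.linalg.norm(n)
    # Second fundamental form from the second-derivative vectors.
    L = np.dot(ssx_v, n)
    M = np.dot(ssxy_v, n)
    N = np.dot(ssy_v, n)
    # Mean (H) and Gaussian (K) curvature of the parametrised surface.
    H = (E * N - 2.0 * F * M + G * L) / (2.0 * (E * G - F ** 2))
    K = (L * N - M ** 2) / (E * G - F ** 2)
    return H, K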
import numpy as np
import time
import torch
import torch.nn as nn
from torch.nn import init
class SememeSumLstm(nn.Module):
def __init__(self, sememe_dim, mem_dim):
super(SememeSumLstm, self).__init__()
self.in_dim = sememe_dim
self.mem_dim = mem_dim
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.reset_parameters()
def node_forward(self, inputs):
iou = self.ioux(inputs)# three Wx+b
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
c = torch.mul(i, u)
h = torch.mul(o, torch.tanh(c))
return c, h
def forward(self, inputs):
max_time, batch_size, _ = inputs.size()
c = []
h = []
for time in range(max_time):
new_c, new_h = self.node_forward(inputs[time])
c.append(new_c)
h.append(new_h)
return torch.stack(c, 0), torch.stack(h, 0)
def reset_parameters(self):
layers = [self.ioux]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
class SememeSumGRU(nn.Module):
def __init__(self, sememe_dim, mem_dim):
super(SememeSumGRU, self).__init__()
self.in_dim = sememe_dim
self.mem_dim = mem_dim
self.ioux = nn.Linear(self.in_dim, 2 * self.mem_dim)
self.reset_parameters()
def node_forward(self, inputs):
iou = self.ioux(inputs)  # two Wx+b projections
i, o = torch.split(iou, iou.size(1) // 2, dim=1)
i, o = torch.sigmoid(i), torch.tanh(o)
h = torch.mul(i,o)
return h
def forward(self, inputs):
max_time, batch_size, _ = inputs.size()
h = []
for time in range(max_time):
new_h = self.node_forward(inputs[time])
h.append(new_h)
return torch.stack(h, 0)
def reset_parameters(self):
layers = [self.ioux]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
class LSTM_baseline(nn.Module):
def __init__(self, config, sememe):
super(LSTM_baseline, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.emb_sememe = nn.Embedding(2186, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
# times 3: the three gate projections are stacked and later separated with split
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
# ious handles the c and h passed in from the sememe side; both are mem_dim-dimensional
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
# fs handles the c and h passed in from the sememe side
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.ious, self.fx, self.fx_s, self.fh, self.fs]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, inputs, length, sememe_data):
# hx: (child_c, child_h)
max_time, batch_size, _ = inputs.size()
output = []
hx = (inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(inputs[time], hx)
output.append(next_hx[1])
hx = next_hx
return torch.stack([output[length[i]-1][i] for i in range(len(length))], 0)
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
# update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
'''
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
'''
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return True
#return self.enc_lstm.bias_hh_l0.data.is_cuda
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
sentences = np.array(sentences)[idx_sort]
return sentences, lengths, idx_sort
def get_batch(self, batch, sememe, emb_dim=300):
embed = np.zeros((len(batch[0]), len(batch), 300))
sememe_data = np.zeros((len(batch[0]), len(batch), 2186), dtype = np.uint8)
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
for k in sememe.read_word_sememe(batch[i][j]):
sememe_data[j, i, k] = 1
return torch.from_numpy(embed).float(), torch.from_numpy(sememe_data).cuda()
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch, batch_s = self.get_batch(sentences[stidx:stidx + bsize], self.sememe)
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward(batch, lengths[stidx:stidx + bsize], batch_s).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings)/(time.time()-tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
class LSTM_concat(nn.Module):
def __init__(self, config, sememe):
super(LSTM_concat, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
# times 3: the three gate projections are stacked and later separated with split
self.ioux = nn.Linear(2 * self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(2 * self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.fx, self.fh, self.fs]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, word_emb, length, sememe_data):
emb_s_1 = self.sememe_sum(sememe_data)
inputs = torch.cat([word_emb, emb_s_1], dim = 2)
max_time, batch_size, _ = inputs.size()
output = []
hx = (inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(inputs[time], hx)
output.append(next_hx[1])
hx = next_hx
return torch.stack([output[length[i]-1][i] for i in range(len(length))], 0)
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
return input_sememe
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
# update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
'''
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
'''
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return True
#return self.enc_lstm.bias_hh_l0.data.is_cuda
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
sentences = np.array(sentences)[idx_sort]
return sentences, lengths, idx_sort
def get_batch(self, batch, sememe, emb_dim=300):
embed = np.zeros((len(batch[0]), len(batch), 300))
sememe_data = np.zeros((len(batch[0]), len(batch), 2186), dtype = np.uint8)
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
for k in sememe.read_word_sememe(batch[i][j]):
sememe_data[j, i, k] = 1
return torch.from_numpy(embed).float(), torch.from_numpy(sememe_data).cuda()
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch, batch_s = self.get_batch(sentences[stidx:stidx + bsize], self.sememe)
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward(batch, lengths[stidx:stidx + bsize], batch_s).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings)/(time.time()-tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
class LSTM_gate(nn.Module):
def __init__(self, config, sememe):
super(LSTM_gate, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
# stacked gate projections, later separated with split (four gates in this variant)
self.ioux = nn.Linear(self.in_dim, 4 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 4 * self.mem_dim)
# ious handles the c and h passed in from the sememe side; both are mem_dim-dimensional
self.ious = nn.Linear(self.in_dim, 4 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
#self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
self.W_c = nn.Linear(self.in_dim, self.mem_dim)
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.ious, self.fx, self.fh, self.W_c]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, sememe_h, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
f, i, o, o_c = torch.split(iou, iou.size(1) // 4, dim=1)
f, i, o, o_c = torch.sigmoid(f), torch.sigmoid(i), torch.sigmoid(o), torch.sigmoid(o_c)
c_telta = self.fx(inputs) + self.fh(child_h)
c_telta = torch.tanh(c_telta)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, c_telta) + fc #sum means sigma
h = torch.mul(o, torch.tanh(c)) + torch.mul(o_c, torch.tanh(self.W_c(sememe_h)))
return (c, h)
def forward(self, inputs, length, sememe_data):
sememe_h = self.sememe_sum(sememe_data)
max_time, batch_size, _ = inputs.size()
output = []
hx = (inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(inputs[time], sememe_h[time], hx)
output.append(next_hx[1])
hx = next_hx
return torch.stack([output[length[i]-1][i] for i in range(len(length))], 0)
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
return input_sememe
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
# update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
'''
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
'''
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return True
#return self.enc_lstm.bias_hh_l0.data.is_cuda
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
sentences = np.array(sentences)[idx_sort]
return sentences, lengths, idx_sort
def get_batch(self, batch, sememe, emb_dim=300):
embed = np.zeros((len(batch[0]), len(batch), 300))
sememe_data = np.zeros((len(batch[0]), len(batch), 2186), dtype = np.uint8)
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
for k in sememe.read_word_sememe(batch[i][j]):
sememe_data[j, i, k] = 1
return torch.from_numpy(embed).float(), torch.from_numpy(sememe_data).cuda()
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch, batch_s = self.get_batch(sentences[stidx:stidx + bsize], self.sememe)
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward(batch, lengths[stidx:stidx + bsize], batch_s).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings)/(time.time()-tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
class LSTM_cell_bert_baseline(nn.Module):
def __init__(self, config, ):
super(LSTM_cell_bert_baseline, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememesumlstm = SememeSumLstm(512, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(512, self.enc_lstm_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
# times 3: the three gate projections are stacked and later separated with split
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
# ious handles the c and h passed in from the sememe side; both are mem_dim-dimensional
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
# fs handles the c and h passed in from the sememe side
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
self.dic_lemma = self.read_lemmatization('../../NLI/dataset/lemmatization.txt')
self.sense_tensor_dict = np.load('../../PrepareSememeDict/sense_tensor_dict.npy', allow_pickle=True).item()
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.ious, self.fx, self.fh, self.fs, self.fx_s]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, sememe_c, sememe_h, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
f_s = torch.sigmoid(
self.fs(sememe_h) + self.fx_s(inputs)
)
fc = torch.mul(f, child_c) # part of memory cell induced by word-child
fc_s = torch.mul(f_s, sememe_c) # part of memory cell induced by sememe-child
c = torch.mul(i, u) + fc + fc_s # sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, inputs, length, def_vec):
sememe_c, sememe_h = self.sememe_sum(def_vec)
max_time, batch_size, _ = inputs.size()
output = []
hx = (inputs[0][0].detach().new(batch_size, sememe_c.size()[2]).fill_(0.).requires_grad_(),
inputs[0][0].detach().new(batch_size, sememe_h.size()[2]).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(inputs[time], sememe_c[time], sememe_h[time], hx)
output.append(next_hx[1])
hx = next_hx
return torch.stack([output[length[i] - 1][i] for i in range(len(length))], 0)
def sememe_sum(self, input_s):
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(input_s[i].float())
input_sememe = torch.stack(input_sememe, dim=0)
sememe_c, sememe_h = self.sememesumlstm(input_sememe)
return sememe_c, sememe_h
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
# update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)' % (len(self.word_vec), len(new_word_vec)))
'''
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
'''
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return True
# return self.enc_lstm.bias_hh_l0.data.is_cuda
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
sentences = np.array(sentences)[idx_sort]
return sentences, lengths, idx_sort
def read_lemmatization(self, lemma_dir):
dic_lemma = {}
for line in open(lemma_dir):
line = line.strip().split()
dic_lemma[line[1]] = line[0]
return dic_lemma
def get_def_vec_by_word(self, word):
word_lower = word.lower()
if word_lower in self.dic_lemma.keys():
word_lower = self.dic_lemma[word_lower]
if word_lower in self.sense_tensor_dict.keys():
tensor_list = self.sense_tensor_dict[word_lower]
base_tensor = np.zeros(512)
for pos, tensor in tensor_list:
base_tensor = np.add(base_tensor, tensor)
base_tensor = base_tensor / float(len(tensor_list))
return base_tensor
else:
return np.zeros(512)
def get_batch(self, batch, emb_dim=300, ):
embed = np.zeros((len(batch[0]), len(batch), 300))
# sememe_data = np.zeros((len(batch[0]), len(batch), size), dtype=np.uint8)
def_data = np.zeros((len(batch[0]), len(batch), 512), dtype=float)  # np.float is removed in modern NumPy
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
def_data[j, i] = self.get_def_vec_by_word(batch[i][j])
return torch.from_numpy(embed).float(), torch.from_numpy(def_data).cuda()
def encode(self, sentences, bsize=64, tokenize=True, verbose=False,):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch, batch_s = self.get_batch(sentences[stidx:stidx + bsize], 300, )
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward(batch, lengths[stidx:stidx + bsize], batch_s).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings) / (time.time() - tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
class LSTM_cell(nn.Module):
def __init__(self, config, sememe):
super(LSTM_cell, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
# times 3: the three gate projections are stacked and later separated with split
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
# ious handles the c and h passed in from the sememe side; both are mem_dim-dimensional
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
# fs handles the c and h passed in from the sememe side
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.ious, self.fx, self.fh, self.fs, self.fx_s]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, sememe_c, sememe_h, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
f_s = torch.sigmoid(
self.fs(sememe_h) + self.fx_s(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
fc_s = torch.mul(f_s, sememe_c) # part of memory cell induced by sememe-child
c = torch.mul(i, u) + fc + fc_s #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, inputs, length, sememe_data):
sememe_c, sememe_h = self.sememe_sum(sememe_data)
max_time, batch_size, _ = inputs.size()
output = []
hx = (inputs[0][0].detach().new(batch_size, sememe_c.size()[2]).fill_(0.).requires_grad_(),
inputs[0][0].detach().new(batch_size, sememe_h.size()[2]).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(inputs[time], sememe_c[time], sememe_h[time], hx)
output.append(next_hx[1])
hx = next_hx
return torch.stack([output[length[i]-1][i] for i in range(len(length))], 0)
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
sememe_c, sememe_h = self.sememesumlstm(input_sememe)
return sememe_c, sememe_h
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
# update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
'''
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
'''
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return True
#return self.enc_lstm.bias_hh_l0.data.is_cuda
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
sentences = np.array(sentences)[idx_sort]
return sentences, lengths, idx_sort
def get_batch(self, batch, sememe, emb_dim=300, size=300):
embed = np.zeros((len(batch[0]), len(batch), 300))
sememe_data = np.zeros((len(batch[0]), len(batch), size), dtype = np.uint8)
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
for k in sememe.read_word_sememe(batch[i][j]):
sememe_data[j, i, k] = 1
return torch.from_numpy(embed).float(), torch.from_numpy(sememe_data).cuda()
def encode(self, sentences, bsize=64, tokenize=True, verbose=False, size=300):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch, batch_s = self.get_batch(sentences[stidx:stidx + bsize], self.sememe, 300, size)
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward(batch, lengths[stidx:stidx + bsize], batch_s).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings)/(time.time()-tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
class LSTM_extra_void(nn.Module):
def __init__(self, config, sememe=None):
super(LSTM_extra_void, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
# times 3: the three gate projections are stacked and later separated with split
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
# ious handles the c and h passed in from the sememe side; both are mem_dim-dimensional
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
# fs handles the c and h passed in from the sememe side
self.W_s = nn.Linear(config['sememe_size'], self.mem_dim)
self.W = nn.Linear(self.mem_dim, self.mem_dim)
self.query = nn.Embedding(2*self.mem_dim, 1)
self.W_p = nn.Linear(self.mem_dim, self.mem_dim)
self.W_x = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.fx, self.fh, self.W]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, inputs, length, sememe_data):
emb_s = sememe_data.float().cuda()
max_time, batch_size, _ = inputs.size()
output = []
hx = (inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(inputs[time], hx)
output.append(next_hx[1])
hx = next_hx
new_output = []
new_output_2 = []
for i in range(len(length)):
hidden_old = torch.stack(output[0:length[i]], dim = 0)[:, i, :]
new_output_2.append(torch.index_select(output[length[i]-1], 0, torch.tensor(i, device = 'cuda')))
hidden = self.W(hidden_old)
emb_s_sum = emb_s[0:length[i], i, :]
emb_s_sum = self.W_s(emb_s_sum)
hidden = torch.cat([hidden, emb_s_sum], dim = 1)
att = torch.tanh(torch.mm(hidden, self.query.weight))
new_output.append(torch.mm(att.transpose(1,0), hidden_old))
new_output = self.W_p(torch.squeeze(torch.stack(new_output, dim = 0))) + self.W_x(torch.squeeze(torch.stack(new_output_2, dim = 0)))
new_output = torch.tanh(new_output)
return new_output
class LSTM_extra_concat(nn.Module):
def __init__(self, config, sememe=None):
super(LSTM_extra_concat, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
# times 3: the three gate projections are stacked and later separated with split
self.ioux = nn.Linear(2 * self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
# ious handles the c and h passed in from the sememe side; both are mem_dim-dimensional
self.fx = nn.Linear(2 * self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
# fs handles the c and h passed in from the sememe side
self.W_s = nn.Linear(self.in_dim, self.mem_dim)
self.W = nn.Linear(self.mem_dim, self.mem_dim)
self.query = nn.Embedding(2*self.mem_dim, 1)
self.W_p = nn.Linear(self.mem_dim, self.mem_dim)
self.W_x = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.fx, self.fh, self.W]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx, emb_s_concat):
child_c = hx[0]
child_h = hx[1]
inputs = torch.cat([inputs, emb_s_concat], dim = 1)
iou = self.ioux(inputs) + self.iouh(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, inputs, length, sememe_data):
emb_s = self.sememe_sum(sememe_data)
max_time, batch_size, _ = inputs.size()
output = []
hx = (inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(inputs[time], hx, emb_s[time])
output.append(next_hx[1])
hx = next_hx
new_output = []
new_output_2 = []
for i in range(len(length)):
hidden_old = torch.stack(output[0:length[i]], dim = 0)[:, i, :]
new_output_2.append(torch.index_select(output[length[i]-1], 0, torch.tensor(i, device = 'cuda')))
hidden = self.W(hidden_old)
emb_s_sum = emb_s[0:length[i], i, :]
emb_s_sum = self.W_s(emb_s_sum)
hidden = torch.cat([hidden, emb_s_sum], dim = 1)
att = torch.tanh(torch.mm(hidden, self.query.weight))
new_output.append(torch.mm(att.transpose(1,0), hidden_old))
new_output = self.W_p(torch.squeeze(torch.stack(new_output, dim = 0))) + self.W_x(torch.squeeze(torch.stack(new_output_2, dim = 0)))
new_output = torch.tanh(new_output)
return new_output
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
return input_sememe
class LSTM_extra_gate(nn.Module):
def __init__(self, config, sememe=None):
super(LSTM_extra_gate, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
# stacked gate projections, later separated with split (four gates in this variant)
self.ioux = nn.Linear(self.in_dim, 4 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 4 * self.mem_dim)
self.ious = nn.Linear(self.in_dim, 4 * self.mem_dim)
# ious handles the c and h passed in from the sememe side; both are mem_dim-dimensional
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
# fs handles the c and h passed in from the sememe side
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fh_s = nn.Linear(self.mem_dim, self.mem_dim)
self.fc_s = nn.Linear(self.mem_dim, self.mem_dim)
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.W_s = nn.Linear(self.in_dim, self.mem_dim)
self.W_c = nn.Linear(self.in_dim, self.mem_dim)
self.W = nn.Linear(self.mem_dim, self.mem_dim)
self.query = nn.Embedding(2*self.mem_dim, 1)
self.W_p = nn.Linear(self.mem_dim, self.mem_dim)
self.W_x = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.ious, self.fx, self.fh, self.W, self.fx_s, self.fh_s, self.fc_s, self.fs]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx, emb_s):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(emb_s)
f, i, o, o_c = torch.split(iou, iou.size(1) // 4, dim=1)
f, i, o, o_c = torch.sigmoid(f), torch.sigmoid(i), torch.sigmoid(o), torch.sigmoid(o_c)
        c_tilde = self.fx(inputs) + self.fh(child_h)
        c_tilde = torch.tanh(c_tilde)  # candidate cell state
        fc = torch.mul(f, child_c)  # part of the memory cell carried over from the previous step
        c = torch.mul(i, c_tilde) + fc
h = torch.mul(o, torch.tanh(c)) + torch.mul(o_c, torch.tanh(self.W_c(emb_s)))
return (c, h)
def forward(self, inputs, length, sememe_data):
emb_s = self.sememe_sum(sememe_data)
max_time, batch_size, _ = inputs.size()
output = []
hx = (inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(inputs[time], hx, emb_s[time])
output.append(next_hx[1])
hx = next_hx
new_output = []
new_output_2 = []
for i in range(len(length)):
hidden_old = torch.stack(output[0:length[i]], dim = 0)[:, i, :]
new_output_2.append(torch.index_select(output[length[i]-1], 0, torch.tensor(i, device = 'cuda')))
hidden = self.W(hidden_old)
emb_s_sum = emb_s[0:length[i], i, :]
emb_s_sum = self.W_s(emb_s_sum)
hidden = torch.cat([hidden, emb_s_sum], dim = 1)
att = torch.tanh(torch.mm(hidden, self.query.weight))
new_output.append(torch.mm(att.transpose(1,0), hidden_old))
new_output = self.W_p(torch.squeeze(torch.stack(new_output, dim = 0))) + self.W_x(torch.squeeze(torch.stack(new_output_2, dim = 0)))
new_output = torch.tanh(new_output)
return new_output
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
return input_sememe
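# A toy numeric sketch of the cell update in LSTM_extra_gate.node_forward
# above: besides the usual f/i/o gates, an extra output gate o_c lets the
# sememe summary flow directly into the hidden state. Random tensors stand
# in for the learned projections; shapes are illustrative only.
def _extra_gate_step_sketch():
    import torch
    B, H = 2, 4
    f, i, o, o_c = (torch.sigmoid(torch.randn(B, H)) for _ in range(4))
    c_prev = torch.randn(B, H)
    c_tilde = torch.tanh(torch.randn(B, H))      # candidate cell, tanh(W_x x + W_h h)
    sememe_proj = torch.randn(B, H)              # plays the role of self.W_c(emb_s)
    c = i * c_tilde + f * c_prev                 # standard LSTM cell update
    h = o * torch.tanh(c) + o_c * torch.tanh(sememe_proj)   # sememe-gated output
    return c, h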
class LSTM_extra_cell(nn.Module):
    def __init__(self, config, sememe=None):
super(LSTM_extra_cell, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
        # The factor of 3 packs the three gate matrices (i, o, u) into one linear layer; they are split apart later.
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        # ious is dedicated to the c and h passed over from the sememe side; both c and h are mem_dim-dimensional.
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
        # fs is dedicated to the c and h passed over from the sememe side.
self.W_s = nn.Linear(self.mem_dim, self.mem_dim)
self.W = nn.Linear(self.mem_dim, self.mem_dim)
self.query = nn.Embedding(2*self.mem_dim, 1)
self.W_p = nn.Linear(self.mem_dim, self.mem_dim)
self.W_x = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.iouh, self.ious, self.fx, self.fx_s, self.fs, self.fh, self.W]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, sememe_c, sememe_h, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
f_s = torch.sigmoid(
self.fs(sememe_h) + self.fx_s(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
fc_s = torch.mul(f_s, sememe_c) # part of memory cell induced by sememe-child
c = torch.mul(i, u) + fc + fc_s #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, inputs, length, sememe_data):
# hx: (child_c, child_h)
sememe_c, sememe_h = self.sememe_sum(sememe_data)
max_time, batch_size, _ = inputs.size()
output = []
hx = (inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
inputs[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(inputs[time], sememe_c[time], sememe_h[time], hx)
output.append(next_hx[1])
hx = next_hx
new_output = []
new_output_2 = []
for i in range(len(length)):
hidden_old = torch.stack(output[0:length[i]], dim = 0)[:, i, :]
new_output_2.append(torch.index_select(output[length[i]-1], 0, torch.tensor(i, device = 'cuda')))
hidden = self.W(hidden_old)
emb_s_sum = sememe_h[0:length[i], i, :]
emb_s_sum = self.W_s(emb_s_sum)
hidden = torch.cat([hidden, emb_s_sum], dim = 1)
att = torch.tanh(torch.mm(hidden, self.query.weight))
new_output.append(torch.mm(att.transpose(1,0), hidden_old))
new_output = self.W_p(torch.squeeze(torch.stack(new_output, dim = 0))) + self.W_x(torch.squeeze(torch.stack(new_output_2, dim = 0)))
new_output = torch.tanh(new_output)
return new_output
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
sememe_c, sememe_h = self.sememesumlstm(input_sememe)
return sememe_c, sememe_h
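# A toy sketch of the cell update in LSTM_extra_cell.node_forward above: the
# sememe LSTM contributes its own cell state, which enters the word cell
# through a dedicated forget gate f_s. Random tensors stand in for the
# learned projections.
def _extra_cell_step_sketch():
    import torch
    B, H = 2, 4
    i, o, f, f_s = (torch.sigmoid(torch.randn(B, H)) for _ in range(4))
    u = torch.tanh(torch.randn(B, H))            # candidate update
    c_prev = torch.randn(B, H)                   # previous word cell state
    c_sememe = torch.randn(B, H)                 # cell state from the sememe LSTM
    c = i * u + f * c_prev + f_s * c_sememe
    h = o * torch.tanh(c)
    return c, h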
class BILSTM_baseline(nn.Module):
def __init__(self, config, sememe):
super(BILSTM_baseline, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
#self.pool_type = config['pool_type']
        # The factor of 3 packs the three gate matrices (i, o, u) into one linear layer; they are split apart later.
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ioux_b = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        # ious is dedicated to the c and h passed over from the sememe side; both c and h are mem_dim-dimensional.
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ious_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_b = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s_b = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
self.fh_b = nn.Linear(self.mem_dim, self.mem_dim)
        # fs is dedicated to the c and h passed over from the sememe side.
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.fs_b = nn.Linear(self.mem_dim, self.mem_dim)
self.max_pad = True
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.ioux_b, self.iouh, self.iouh_b, self.ious, self.ious_b, self.fx, self.fx_b, self.fx_s, self.fx_s_b, self.fh, self.fh_b, self.fs, self.fs_b]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def node_backward(self, inputs, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux_b(inputs) + self.iouh_b(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh_b(child_h) + self.fx_b(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, sent, sent_len, sememe_data):
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda()
sent = sent.index_select(1, idx_sort)
max_time, batch_size, _ = sent.size()
        pack_length = np.zeros([max_time, 1], dtype = np.int64)
time_point = batch_size-1
last_point = 0
while(True):
pack_length[last_point: sent_len_sorted[time_point]] = time_point+1
last_point = sent_len_sorted[time_point]
if(sent_len_sorted[time_point] == max_time):
break
time_point = time_point-1
pack_length = torch.from_numpy(pack_length).cuda()
output_forward = []
hx_forward = (sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(sent[time, 0:pack_length[time]], hx_forward)
output_forward.append(torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0))
if(time < max_time-1):
hx_forward = (next_hx[0][0:pack_length[time+1]], next_hx[1][0:pack_length[time+1]])
output_backward = [[] for i in range(max_time)]
hx_backward = (sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_backward(sent[max_time-time-1, 0:pack_length[max_time-time-1]], hx_backward)
output_backward[max_time-time-1] = torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0)
if(time < max_time-1):
hx_backward = (torch.cat([next_hx[0], torch.zeros([pack_length[max_time-time-2]-next_hx[0].size()[0], self.mem_dim]).cuda()], dim = 0), \
torch.cat([next_hx[1], torch.zeros([pack_length[max_time-time-2]-next_hx[1].size()[0], self.mem_dim]).cuda()], dim = 0))
a = torch.stack(output_forward, dim = 0)
b = torch.stack(output_backward, dim = 0)
idx_unsort = torch.from_numpy(idx_unsort).cuda()
sent_output_forward = a.index_select(1, idx_unsort)
sent_output_backward = b.index_select(1, idx_unsort)
final_output_forward = torch.stack([sent_output_forward[sent_len[i]-1][i] for i in range(batch_size)], dim = 0)
final_output = torch.cat([final_output_forward, sent_output_backward[0]], dim = 1)
return final_output
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
        # update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
'''
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
'''
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return True
#return self.enc_lstm.bias_hh_l0.data.is_cuda
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
        sentences = np.array(sentences, dtype=object)[idx_sort]
return sentences, lengths, idx_sort
def get_batch(self, batch, sememe, emb_dim=300):
embed = np.zeros((len(batch[0]), len(batch), 300))
sememe_data = np.zeros((len(batch[0]), len(batch), 2186), dtype = np.uint8)
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
for k in sememe.read_word_sememe(batch[i][j]):
sememe_data[j, i, k] = 1
return torch.from_numpy(embed).float(), torch.from_numpy(sememe_data).cuda()
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch, batch_s = self.get_batch(sentences[stidx:stidx + bsize], self.sememe)
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward(batch, lengths[stidx:stidx + bsize], batch_s).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings)/(time.time()-tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
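# A small sketch of the manual "pack_length" bookkeeping used by the
# bidirectional encoders in this file: for sentences sorted by decreasing
# length, pack_length[t] is the number of sequences still active at time
# step t (the effect pack_padded_sequence would otherwise provide).
def _pack_length_sketch(sorted_lengths, max_time):
    import numpy as np
    # e.g. sorted_lengths = [5, 3, 2], max_time = 5 -> [3, 3, 2, 1, 1]
    lengths = np.asarray(sorted_lengths)
    pack_length = np.zeros(max_time, dtype=np.int64)
    for t in range(max_time):
        pack_length[t] = int(np.sum(lengths > t))
    return pack_length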
class BILSTM_concat(nn.Module):
def __init__(self, config, sememe):
super(BILSTM_concat, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
#self.pool_type = config['pool_type']
        # The factor of 3 packs the three gate matrices (i, o, u) into one linear layer; they are split apart later.
self.ioux = nn.Linear(2 * self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ioux_b = nn.Linear(2 * self.in_dim, 3 * self.mem_dim)
self.iouh_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(2 * self.in_dim, self.mem_dim)
self.fx_b = nn.Linear(2 * self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
self.fh_b = nn.Linear(self.mem_dim, self.mem_dim)
self.max_pad = True
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.ioux_b, self.iouh, self.iouh_b, self.fx, self.fx_b, self.fh, self.fh_b]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx, sememe_h):
child_c = hx[0]
child_h = hx[1]
inputs = torch.cat([inputs, sememe_h], dim = 1)
iou = self.ioux(inputs) + self.iouh(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc
h = torch.mul(o, torch.tanh(c))
return (c, h)
def node_backward(self, inputs, hx, sememe_h):
child_c = hx[0]
child_h = hx[1]
inputs = torch.cat([inputs, sememe_h], dim = 1)
iou = self.ioux_b(inputs) + self.iouh_b(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh_b(child_h) + self.fx_b(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, sent, sent_len, sememe_data):
# hx: (child_c, child_h)
sememe_h = self.sememe_sum(sememe_data)
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda()
sent = sent.index_select(1, idx_sort)
sememe_h = sememe_h.index_select(1, idx_sort)
max_time, batch_size, _ = sent.size()
        pack_length = np.zeros([max_time, 1], dtype = np.int64)
time_point = batch_size-1
last_point = 0
while(True):
pack_length[last_point: sent_len_sorted[time_point]] = time_point+1
last_point = sent_len_sorted[time_point]
if(sent_len_sorted[time_point] == max_time):
break
time_point = time_point-1
pack_length = torch.from_numpy(pack_length).cuda()
output_forward = []
hx_forward = (sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(sent[time, 0:pack_length[time]], hx_forward, sememe_h[time, 0:pack_length[time]])
output_forward.append(torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0))
if(time < max_time-1):
hx_forward = (next_hx[0][0:pack_length[time+1]], next_hx[1][0:pack_length[time+1]])
output_backward = [[] for i in range(max_time)]
hx_backward = (sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_backward(sent[max_time-time-1, 0:pack_length[max_time-time-1]], hx_backward, sememe_h[max_time-time-1, 0:pack_length[max_time-time-1]])
output_backward[max_time-time-1] = torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0)
if(time < max_time-1):
hx_backward = (torch.cat([next_hx[0], torch.zeros([pack_length[max_time-time-2]-next_hx[0].size()[0], self.mem_dim]).cuda()], dim = 0), \
torch.cat([next_hx[1], torch.zeros([pack_length[max_time-time-2]-next_hx[1].size()[0], self.mem_dim]).cuda()], dim = 0))
a = torch.stack(output_forward, dim = 0)
b = torch.stack(output_backward, dim = 0)
idx_unsort = torch.from_numpy(idx_unsort).cuda()
sent_output_forward = a.index_select(1, idx_unsort)
sent_output_backward = b.index_select(1, idx_unsort)
final_output_forward = torch.stack([sent_output_forward[sent_len[i]-1][i] for i in range(batch_size)], dim = 0)
final_output = torch.cat([final_output_forward, sent_output_backward[0]], dim = 1)
return final_output
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
return input_sememe
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
        # update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
'''
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
'''
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return True
#return self.enc_lstm.bias_hh_l0.data.is_cuda
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
        sentences = np.array(sentences, dtype=object)[idx_sort]
return sentences, lengths, idx_sort
def get_batch(self, batch, sememe, emb_dim=300):
embed = np.zeros((len(batch[0]), len(batch), 300))
sememe_data = np.zeros((len(batch[0]), len(batch), 2186), dtype = np.uint8)
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
for k in sememe.read_word_sememe(batch[i][j]):
sememe_data[j, i, k] = 1
return torch.from_numpy(embed).float(), torch.from_numpy(sememe_data).cuda()
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch, batch_s = self.get_batch(sentences[stidx:stidx + bsize], self.sememe)
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward(batch, lengths[stidx:stidx + bsize], batch_s).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings)/(time.time()-tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
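# A toy sketch of the "concat" coupling used by BILSTM_concat above: the
# sememe summary is concatenated to the word embedding before the gate
# projections, which is why ioux/fx take 2 * in_dim inputs (this assumes
# sememe_dim equals word_emb_dim). The Linear layer below is a throwaway
# stand-in, not a trained weight.
def _concat_input_sketch():
    import torch
    import torch.nn as nn
    B, D, H = 2, 6, 4
    word = torch.randn(B, D)                     # word embedding
    sememe_summary = torch.randn(B, D)           # summed sememe embeddings
    x = torch.cat([word, sememe_summary], dim=1) # (B, 2D), mirrors node_forward
    ioux = nn.Linear(2 * D, 3 * H)               # mirrors self.ioux in the class
    i, o, u = torch.split(ioux(x), H, dim=1)
    return i.shape, o.shape, u.shape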
class BILSTM_gate(nn.Module):
def __init__(self, config, sememe):
super(BILSTM_gate, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
#self.pool_type = config['pool_type']
        # The factor of 4 packs the four gate matrices (f, i, o, o_c) into one linear layer; they are split apart later.
self.ioux = nn.Linear(self.in_dim, 4 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 4 * self.mem_dim)
self.ioux_b = nn.Linear(self.in_dim, 4 * self.mem_dim)
self.iouh_b = nn.Linear(self.mem_dim, 4 * self.mem_dim)
        # ious is dedicated to the c and h passed over from the sememe side; both c and h are mem_dim-dimensional.
self.ious = nn.Linear(self.in_dim, 4 * self.mem_dim)
self.ious_b = nn.Linear(self.in_dim, 4 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_b = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s_b = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
self.fh_b = nn.Linear(self.mem_dim, self.mem_dim)
        # fs is dedicated to the c and h passed over from the sememe side.
self.fs = nn.Linear(self.in_dim, self.mem_dim)
self.fs_b = nn.Linear(self.in_dim, self.mem_dim)
self.W_c = nn.Linear(self.in_dim, self.mem_dim)
self.W_c_b = nn.Linear(self.in_dim, self.mem_dim)
self.max_pad = True
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.ioux_b, self.iouh, self.iouh_b, self.ious, self.ious_b, self.fx, self.fx_b, self.fx_s, self.fx_s_b, self.fh, self.fh_b, self.fs, self.fs_b, self.W_c, self.W_c_b]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx, sememe_h):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
f, i, o, o_c = torch.split(iou, iou.size(1) // 4, dim=1)
f, i, o, o_c = torch.sigmoid(f), torch.sigmoid(i), torch.sigmoid(o), torch.sigmoid(o_c)
        c_tilde = self.fx(inputs) + self.fh(child_h)
        c_tilde = torch.tanh(c_tilde)  # candidate cell state
        fc = torch.mul(f, child_c)  # part of the memory cell carried over from the previous step
        c = torch.mul(i, c_tilde) + fc
h = torch.mul(o, torch.tanh(c)) + torch.mul(o_c, torch.tanh(self.W_c(sememe_h)))
return (c, h)
def node_backward(self, inputs, hx, sememe_h):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux_b(inputs) + self.iouh_b(child_h) + self.ious_b(sememe_h)
f, i, o, o_c = torch.split(iou, iou.size(1) // 4, dim=1)
f, i, o, o_c = torch.sigmoid(f), torch.sigmoid(i), torch.sigmoid(o), torch.sigmoid(o_c)
        c_tilde = self.fx_b(inputs) + self.fh_b(child_h)
        c_tilde = torch.tanh(c_tilde)  # candidate cell state
        fc = torch.mul(f, child_c)  # part of the memory cell carried over from the previous step
        c = torch.mul(i, c_tilde) + fc
h = torch.mul(o, torch.tanh(c)) + torch.mul(o_c, torch.tanh(self.W_c_b(sememe_h)))
return (c, h)
def forward(self, sent, sent_len, sememe_data):
# hx: (child_c, child_h)
sememe_h = self.sememe_sum(sememe_data)
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda()
sent = sent.index_select(1, idx_sort)
sememe_h = sememe_h.index_select(1, idx_sort)
max_time, batch_size, _ = sent.size()
        pack_length = np.zeros([max_time, 1], dtype = np.int64)
time_point = batch_size-1
last_point = 0
while(True):
pack_length[last_point: sent_len_sorted[time_point]] = time_point+1
last_point = sent_len_sorted[time_point]
if(sent_len_sorted[time_point] == max_time):
break
time_point = time_point-1
pack_length = torch.from_numpy(pack_length).cuda()
output_forward = []
hx_forward = (sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(sent[time, 0:pack_length[time]], hx_forward, sememe_h[time, 0:pack_length[time]])
output_forward.append(torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0))
if(time < max_time-1):
hx_forward = (next_hx[0][0:pack_length[time+1]], next_hx[1][0:pack_length[time+1]])
output_backward = [[] for i in range(max_time)]
hx_backward = (sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_backward(sent[max_time-time-1, 0:pack_length[max_time-time-1]], hx_backward, sememe_h[max_time-time-1, 0:pack_length[max_time-time-1]])
output_backward[max_time-time-1] = torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0)
if(time < max_time-1):
hx_backward = (torch.cat([next_hx[0], torch.zeros([pack_length[max_time-time-2]-next_hx[0].size()[0], self.mem_dim]).cuda()], dim = 0), \
torch.cat([next_hx[1], torch.zeros([pack_length[max_time-time-2]-next_hx[1].size()[0], self.mem_dim]).cuda()], dim = 0))
a = torch.stack(output_forward, dim = 0)
b = torch.stack(output_backward, dim = 0)
idx_unsort = torch.from_numpy(idx_unsort).cuda()
sent_output_forward = a.index_select(1, idx_unsort)
sent_output_backward = b.index_select(1, idx_unsort)
final_output_forward = torch.stack([sent_output_forward[sent_len[i]-1][i] for i in range(batch_size)], dim = 0)
final_output = torch.cat([final_output_forward, sent_output_backward[0]], dim = 1)
return final_output
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float().cuda(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
return input_sememe
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
        # update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
'''
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
'''
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return True
#return self.enc_lstm.bias_hh_l0.data.is_cuda
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
        sentences = np.array(sentences, dtype=object)[idx_sort]
return sentences, lengths, idx_sort
def get_batch(self, batch, sememe, emb_dim=300):
embed = np.zeros((len(batch[0]), len(batch), 300))
sememe_data = np.zeros((len(batch[0]), len(batch), 2186), dtype = np.uint8)
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
for k in sememe.read_word_sememe(batch[i][j]):
sememe_data[j, i, k] = 1
return torch.from_numpy(embed).float(), torch.from_numpy(sememe_data).cuda()
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch, batch_s = self.get_batch(sentences[stidx:stidx + bsize], self.sememe)
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward(batch, lengths[stidx:stidx + bsize], batch_s).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings)/(time.time()-tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
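# A hedged end-to-end usage sketch for the encoder classes above (shown for
# BILSTM_gate): set the w2v path, build the vocabulary, then encode raw
# sentences. The path, config values and `sememe_reader` (an object exposing
# read_word_sememe, as used by get_batch) are hypothetical placeholders, and
# a CUDA device is required because forward() hard-codes cuda tensors.
def _encoder_usage_sketch(sememe_reader):
    config = {'enc_lstm_dim': 300, 'sememe_dim': 300,
              'sememe_size': 2186, 'word_emb_dim': 300}
    model = BILSTM_gate(config, sememe_reader).cuda()
    model.set_w2v_path('glove.840B.300d.txt')    # placeholder path
    model.build_vocab_k_words(100000)            # keep the 100k most frequent words
    emb = model.encode(['A man is playing a guitar .'], bsize=64, tokenize=True)
    return emb                                   # numpy array of shape (1, 2 * enc_lstm_dim)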
class BILSTM_cell(nn.Module):
def __init__(self, config, sememe):
super(BILSTM_cell, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
#self.pool_type = config['pool_type']
        # The factor of 3 packs the three gate matrices (i, o, u) into one linear layer; they are split apart later.
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ioux_b = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        # ious is dedicated to the c and h passed over from the sememe side; both c and h are mem_dim-dimensional.
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ious_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_b = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s_b = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
self.fh_b = nn.Linear(self.mem_dim, self.mem_dim)
        # fs is dedicated to the c and h passed over from the sememe side.
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.fs_b = nn.Linear(self.mem_dim, self.mem_dim)
self.max_pad = True
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.ioux_b, self.iouh, self.iouh_b, self.ious, self.ious_b, self.fx, self.fx_b, self.fx_s, self.fx_s_b, self.fh, self.fh_b, self.fs, self.fs_b]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx, sememe_c, sememe_h):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
f_s = torch.sigmoid(
self.fs(sememe_h) + self.fx_s(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
fc_s = torch.mul(f_s, sememe_c)
c = torch.mul(i, u) + fc + fc_s#sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def node_backward(self, inputs, hx, sememe_c, sememe_h):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux_b(inputs) + self.iouh_b(child_h) + self.ious_b(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh_b(child_h) + self.fx_b(inputs)
)
f_s_b = torch.sigmoid(
self.fs_b(sememe_h) + self.fx_s_b(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
fc_s = torch.mul(f_s_b, sememe_c)
c = torch.mul(i, u) + fc + fc_s #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, sent, sent_len, sememe_data):
# hx: (child_c, child_h)
sememe_c, sememe_h = self.sememe_sum(sememe_data)
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda()
sent = sent.index_select(1, idx_sort)
sememe_h = sememe_h.index_select(1, idx_sort)
sememe_c = sememe_c.index_select(1, idx_sort)
max_time, batch_size, _ = sent.size()
        pack_length = np.zeros([max_time, 1], dtype = np.int64)
time_point = batch_size-1
last_point = 0
while(True):
pack_length[last_point: sent_len_sorted[time_point]] = time_point+1
last_point = sent_len_sorted[time_point]
if(sent_len_sorted[time_point] == max_time):
break
time_point = time_point-1
pack_length = torch.from_numpy(pack_length).cuda()
output_forward = []
hx_forward = (sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(sent[time, 0:pack_length[time]], hx_forward, sememe_c[time, 0:pack_length[time]], sememe_h[time, 0:pack_length[time]])
output_forward.append(torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0))
if(time < max_time-1):
hx_forward = (next_hx[0][0:pack_length[time+1]], next_hx[1][0:pack_length[time+1]])
output_backward = [[] for i in range(max_time)]
hx_backward = (sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_backward(sent[max_time-time-1, 0:pack_length[max_time-time-1]], hx_backward, sememe_c[max_time-time-1, 0:pack_length[max_time-time-1]], sememe_h[max_time-time-1, 0:pack_length[max_time-time-1]])
output_backward[max_time-time-1] = torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0)
if(time < max_time-1):
hx_backward = (torch.cat([next_hx[0], torch.zeros([pack_length[max_time-time-2]-next_hx[0].size()[0], self.mem_dim]).cuda()], dim = 0), \
torch.cat([next_hx[1], torch.zeros([pack_length[max_time-time-2]-next_hx[1].size()[0], self.mem_dim]).cuda()], dim = 0))
a = torch.stack(output_forward, dim = 0)
b = torch.stack(output_backward, dim = 0)
idx_unsort = torch.from_numpy(idx_unsort).cuda()
sent_output_forward = a.index_select(1, idx_unsort)
sent_output_backward = b.index_select(1, idx_unsort)
final_output_forward = torch.stack([sent_output_forward[sent_len[i]-1][i] for i in range(batch_size)], dim = 0)
final_output = torch.cat([final_output_forward, sent_output_backward[0]], dim = 1)
return final_output
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
sememe_c, sememe_h = self.sememesumlstm(input_sememe)
return sememe_c, sememe_h
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
        # update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
'''
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
'''
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return True
#return self.enc_lstm.bias_hh_l0.data.is_cuda
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
        sentences = np.array(sentences, dtype=object)[idx_sort]
return sentences, lengths, idx_sort
    def get_batch(self, batch, sememe, emb_dim=300, size=300):
        embed = np.zeros((len(batch[0]), len(batch), emb_dim))
        sememe_data = np.zeros((len(batch[0]), len(batch), size), dtype = np.uint8)
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
for k in sememe.read_word_sememe(batch[i][j]):
sememe_data[j, i, k] = 1
return torch.from_numpy(embed).float(), torch.from_numpy(sememe_data).cuda()
def encode(self, sentences, bsize=64, tokenize=True, verbose=False, size=3000):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch, batch_s = self.get_batch(sentences[stidx:stidx + bsize], self.sememe, 300, size)
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward(batch, lengths[stidx:stidx + bsize], batch_s).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings)/(time.time()-tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
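# A minimal sketch of the multi-hot sememe batch built by get_batch above:
# sememe_data[t, b, k] = 1 iff sememe k is attached to the t-th word of
# sentence b. `lookup` is a stand-in for sememe.read_word_sememe.
def _sememe_multi_hot_sketch(sentences, lookup, sememe_size=2186):
    import numpy as np
    max_len = max(len(s) for s in sentences)
    data = np.zeros((max_len, len(sentences), sememe_size), dtype=np.uint8)
    for b, sent in enumerate(sentences):
        for t, word in enumerate(sent):
            for k in lookup(word):               # indices of the word's sememes
                data[t, b, k] = 1
    return data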
class BILSTM_cell_bert_baseline(nn.Module):
def __init__(self, config, ):
super(BILSTM_cell_bert_baseline, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
# self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(512, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(512, self.enc_lstm_dim)
# self.sememe_dim = config['sememe_dim']
# self.sememe_size = config['sememe_size']
# self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
# self.pool_type = config['pool_type']
        # The factor of 3 packs the three gate matrices (i, o, u) into one linear layer; they are split apart later.
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ioux_b = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        # ious is dedicated to the c and h passed over from the sememe side; both c and h are mem_dim-dimensional.
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ious_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_b = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s_b = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
self.fh_b = nn.Linear(self.mem_dim, self.mem_dim)
        # fs is dedicated to the c and h passed over from the sememe side.
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.fs_b = nn.Linear(self.mem_dim, self.mem_dim)
self.max_pad = True
self.reset_parameters()
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
self.dic_lemma = self.read_lemmatization('../../NLI/dataset/lemmatization.txt')
self.sense_tensor_dict = np.load('../../PrepareSememeDict/sense_tensor_dict.npy', allow_pickle=True).item()
def reset_parameters(self):
layers = [self.ioux, self.ioux_b, self.iouh, self.iouh_b, self.ious, self.ious_b, self.fx, self.fx_b, self.fx_s,
self.fx_s_b, self.fh, self.fh_b, self.fs, self.fs_b]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx, sememe_c, sememe_h):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h) + self.ious(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
f_s = torch.sigmoid(
self.fs(sememe_h) + self.fx_s(inputs)
)
fc = torch.mul(f, child_c) # part of memory cell induced by word-child
fc_s = torch.mul(f_s, sememe_c)
c = torch.mul(i, u) + fc + fc_s # sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def node_backward(self, inputs, hx, sememe_c, sememe_h):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux_b(inputs) + self.iouh_b(child_h) + self.ious_b(sememe_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh_b(child_h) + self.fx_b(inputs)
)
f_s_b = torch.sigmoid(
self.fs_b(sememe_h) + self.fx_s_b(inputs)
)
fc = torch.mul(f, child_c) # part of memory cell induced by word-child
fc_s = torch.mul(f_s_b, sememe_c)
c = torch.mul(i, u) + fc + fc_s # sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, sent, sent_len, def_vecs):
# hx: (child_c, child_h)
sememe_c, sememe_h = self.sememe_sum(def_vecs)
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda()
sent = sent.index_select(1, idx_sort)
sememe_h = sememe_h.index_select(1, idx_sort)
sememe_c = sememe_c.index_select(1, idx_sort)
max_time, batch_size, _ = sent.size()
        pack_length = np.zeros([max_time, 1], dtype=np.int64)
time_point = batch_size - 1
last_point = 0
while (True):
pack_length[last_point: sent_len_sorted[time_point]] = time_point + 1
last_point = sent_len_sorted[time_point]
if (sent_len_sorted[time_point] == max_time):
break
time_point = time_point - 1
pack_length = torch.from_numpy(pack_length).cuda()
output_forward = []
hx_forward = (sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(sent[time, 0:pack_length[time]], hx_forward,
sememe_c[time, 0:pack_length[time]], sememe_h[time, 0:pack_length[time]])
output_forward.append(
torch.cat([next_hx[1], torch.zeros([batch_size - next_hx[1].size()[0], self.mem_dim], device='cuda')],
dim=0))
if (time < max_time - 1):
hx_forward = (next_hx[0][0:pack_length[time + 1]], next_hx[1][0:pack_length[time + 1]])
output_backward = [[] for i in range(max_time)]
hx_backward = (sent[0][0].detach().new(pack_length[max_time - 1], self.mem_dim).fill_(0.).requires_grad_(),
sent[0][0].detach().new(pack_length[max_time - 1], self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_backward(sent[max_time - time - 1, 0:pack_length[max_time - time - 1]], hx_backward,
sememe_c[max_time - time - 1, 0:pack_length[max_time - time - 1]],
sememe_h[max_time - time - 1, 0:pack_length[max_time - time - 1]])
output_backward[max_time - time - 1] = torch.cat(
[next_hx[1], torch.zeros([batch_size - next_hx[1].size()[0], self.mem_dim], device='cuda')], dim=0)
if (time < max_time - 1):
hx_backward = (torch.cat([next_hx[0], torch.zeros(
[pack_length[max_time - time - 2] - next_hx[0].size()[0], self.mem_dim]).cuda()], dim=0), \
torch.cat([next_hx[1], torch.zeros(
[pack_length[max_time - time - 2] - next_hx[1].size()[0], self.mem_dim]).cuda()],
dim=0))
a = torch.stack(output_forward, dim=0)
b = torch.stack(output_backward, dim=0)
idx_unsort = torch.from_numpy(idx_unsort).cuda()
sent_output_forward = a.index_select(1, idx_unsort)
sent_output_backward = b.index_select(1, idx_unsort)
final_output_forward = torch.stack([sent_output_forward[sent_len[i] - 1][i] for i in range(batch_size)], dim=0)
final_output = torch.cat([final_output_forward, sent_output_backward[0]], dim=1)
return final_output
def sememe_sum(self, input_s):
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(input_s[i].float())
input_sememe = torch.stack(input_sememe, dim=0)
sememe_c, sememe_h = self.sememesumlstm(input_sememe)
return sememe_c, sememe_h
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
        # update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)' % (len(self.word_vec), len(new_word_vec)))
'''
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
'''
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return True
# return self.enc_lstm.bias_hh_l0.data.is_cuda
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
                warnings.warn('No words in "%s" (idx=%s) have w2v vectors. '
                              'Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
        sentences = np.array(sentences, dtype=object)[idx_sort]  # dtype=object: sentences are ragged lists
return sentences, lengths, idx_sort
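    # --- Editor's sketch (illustrative, not part of the original source) ---
    # prepare_samples sorts sentences by decreasing length; encode() later restores the
    # original order with idx_unsort = np.argsort(idx_sort).  A minimal round-trip check:
    #     lengths = np.array([3, 5, 2])
    #     idx_sort = np.argsort(-lengths)        # [1, 0, 2], longest sentence first
    #     idx_unsort = np.argsort(idx_sort)      # maps sorted positions back
    #     assert (lengths[idx_sort][idx_unsort] == lengths).all()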
def read_lemmatization(self, lemma_dir):
dic_lemma = {}
for line in open(lemma_dir):
line = line.strip().split()
dic_lemma[line[1]] = line[0]
return dic_lemma
def get_def_vec_by_word(self, word):
word_lower = word.lower()
if word_lower in self.dic_lemma.keys():
word_lower = self.dic_lemma[word_lower]
if word_lower in self.sense_tensor_dict.keys():
tensor_list = self.sense_tensor_dict[word_lower]
base_tensor = np.zeros(512)
for pos, tensor in tensor_list:
base_tensor = np.add(base_tensor, tensor)
base_tensor = base_tensor / float(len(tensor_list))
return base_tensor
else:
return np.zeros(512)
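    # Editor's note (illustrative): get_def_vec_by_word averages all 512-dimensional
    # sense/definition vectors stored for the (lemmatized) word and falls back to a zero
    # vector for unknown words, so get_batch below always receives a fixed-size array.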
def get_batch(self, batch, emb_dim=300, ):
embed = np.zeros((len(batch[0]), len(batch), 300))
# sememe_data = np.zeros((len(batch[0]), len(batch), size), dtype=np.uint8)
        def_data = np.zeros((len(batch[0]), len(batch), 512), dtype=float)  # np.float was removed from NumPy; builtin float is equivalent
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
def_data[j, i] = self.get_def_vec_by_word(batch[i][j])
return torch.from_numpy(embed).float(), torch.from_numpy(def_data).cuda()
def encode(self, sentences, bsize=64, tokenize=True, verbose=False, size=3000):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch, batch_s = self.get_batch(sentences[stidx:stidx + bsize], 300)
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward(batch, lengths[stidx:stidx + bsize], batch_s).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings) / (time.time() - tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
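# --- Editor's usage sketch (illustrative only; config contents and file path are assumptions) ---
# The encoders in this file are typically driven like this:
#     model = SomeEncoderClass(config)                 # e.g. one of the BILSTM_* classes below
#     model.set_w2v_path('glove.840B.300d.txt')        # hypothetical embedding file
#     model.build_vocab_k_words(K=100000)              # keep the 100k most frequent vectors
#     emb = model.encode(['A man is playing guitar .'], bsize=64, tokenize=True)
#     # emb: numpy array with one row per input sentence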
class BILSTM_extra_void(nn.Module):
def __init__(self, config):
super(BILSTM_extra_void, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
#self.pool_type = config['pool_type']
        #multiplied by 3 because there are 3 gate matrices (i, o, u); they are split apart later with torch.split
self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ioux_b = nn.Linear(self.in_dim, 3 * self.mem_dim)
self.iouh_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        #ious specifically handles the c and h passed over from the sememe branch; both c and h are mem_dim-dimensional
self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ious_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(self.in_dim, self.mem_dim)
self.fx_b = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
self.fx_s_b = nn.Linear(self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
self.fh_b = nn.Linear(self.mem_dim, self.mem_dim)
        #fs specifically handles the c and h passed over from the sememe branch
self.fs = nn.Linear(self.mem_dim, self.mem_dim)
self.fs_b = nn.Linear(self.mem_dim, self.mem_dim)
self.max_pad = True
self.W_s = nn.Linear(config['sememe_size'], self.mem_dim)
self.W = nn.Linear(self.mem_dim, self.mem_dim)
self.query = nn.Embedding(2*self.mem_dim, 1)
self.W_p = nn.Linear(self.mem_dim, self.mem_dim)
self.W_x = nn.Linear(self.mem_dim, self.mem_dim)
self.W_s_b = nn.Linear(config['sememe_size'], self.mem_dim)
self.W_b = nn.Linear(self.mem_dim, self.mem_dim)
self.query_b = nn.Embedding(2*self.mem_dim, 1)
self.W_p_b = nn.Linear(self.mem_dim, self.mem_dim)
self.W_x_b = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
self.sememe = sememe
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
def reset_parameters(self):
layers = [self.ioux, self.ioux_b, self.iouh, self.iouh_b, self.ious, self.ious_b, self.fx, self.fx_b, self.fx_s, self.fx_s_b, self.fh, self.fh_b, self.fs, self.fs_b]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux(inputs) + self.iouh(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
def node_backward(self, inputs, hx):
child_c = hx[0]
child_h = hx[1]
iou = self.ioux_b(inputs) + self.iouh_b(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh_b(child_h) + self.fx_b(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc #sum means sigma
h = torch.mul(o, torch.tanh(c))
return (c, h)
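    # Editor's note (illustrative): node_forward/node_backward implement one step of a
    # standard LSTM cell, i.e. for input x_t and previous state (c_{t-1}, h_{t-1}):
    #     i, o, u = sigmoid/sigmoid/tanh splits of  W_iou x_t + U_iou h_{t-1}
    #     f       = sigmoid(W_f x_t + U_f h_{t-1})
    #     c_t     = i * u + f * c_{t-1}
    #     h_t     = o * tanh(c_t)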
def forward(self, sent, sent_len, sememe_data):
# hx: (child_c, child_h)
emb_s = sememe_data.float().cuda()
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda()
sent = sent.index_select(1, idx_sort)
max_time, batch_size, _ = sent.size()
        pack_length = np.zeros([max_time, 1], dtype = int)  # np.int was removed from NumPy; builtin int is equivalent
time_point = batch_size-1
last_point = 0
while(True):
pack_length[last_point: sent_len_sorted[time_point]] = time_point+1
last_point = sent_len_sorted[time_point]
if(sent_len_sorted[time_point] == max_time):
break
time_point = time_point-1
pack_length = torch.from_numpy(pack_length).cuda()
output_forward = []
        hx_forward = (sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),  # 'inputs' was undefined here; 'sent' holds the embedded input
                      sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_forward(sent[time, 0:pack_length[time]], hx_forward)
output_forward.append(torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0))
if(time < max_time-1):
hx_forward = (next_hx[0][0:pack_length[time+1]], next_hx[1][0:pack_length[time+1]])
output_backward = [[] for i in range(max_time)]
        hx_backward = (sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_(),  # 'inputs' was undefined here; 'sent' holds the embedded input
                       sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_())
for time in range(max_time):
next_hx = self.node_backward(sent[max_time-time-1, 0:pack_length[max_time-time-1]], hx_backward)
output_backward[max_time-time-1] = torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0)
if(time < max_time-1):
hx_backward = (torch.cat([next_hx[0], torch.zeros([pack_length[max_time-time-2]-next_hx[0].size()[0], self.mem_dim]).cuda()], dim = 0), \
torch.cat([next_hx[1], torch.zeros([pack_length[max_time-time-2]-next_hx[1].size()[0], self.mem_dim]).cuda()], dim = 0))
a = torch.stack(output_forward, dim = 0)
b = torch.stack(output_backward, dim = 0)
idx_unsort = torch.from_numpy(idx_unsort).cuda()
sent_output_forward = a.index_select(1, idx_unsort)
sent_output_backward = b.index_select(1, idx_unsort)
new_output_forward = []
new_output_2_forward = []
new_output_backward = []
for i in range(len(sent_len)):
hidden_old_forward = sent_output_forward[0:sent_len[i], i, :]
new_output_2_forward.append(sent_output_forward[sent_len[i]-1, i])
hidden = self.W(hidden_old_forward)
emb_s_sum = emb_s[0:sent_len[i], i, :]
emb_s_sum = self.W_s(emb_s_sum)
hidden = torch.cat([hidden, emb_s_sum], dim = 1)
att = torch.tanh(torch.mm(hidden, self.query.weight))
new_output_forward.append(torch.mm(att.transpose(1,0), hidden_old_forward))
new_output_forward = self.W_p(torch.squeeze(torch.stack(new_output_forward, dim = 0))) + self.W_x(torch.squeeze(torch.stack(new_output_2_forward, dim = 0)))
new_output_forward = torch.tanh(new_output_forward)
for i in range(len(sent_len)):
hidden_old_backward = sent_output_backward[0:sent_len[i], i, :]
hidden = self.W_b(hidden_old_backward)
emb_s_sum = emb_s[0:sent_len[i], i, :]
emb_s_sum = self.W_s_b(emb_s_sum)
hidden = torch.cat([hidden, emb_s_sum], dim = 1)
att = torch.tanh(torch.mm(hidden, self.query_b.weight))
new_output_backward.append(torch.mm(att.transpose(1,0), hidden_old_backward))
new_output_backward = self.W_p_b(torch.squeeze(torch.stack(new_output_backward, dim = 0))) + self.W_x_b(sent_output_backward[0])
new_output_backward = torch.tanh(new_output_backward)
final_output = torch.cat([new_output_forward, new_output_backward], dim = 1)
return final_output
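# --- Editor's sketch of the attention pooling used in the forward pass above ---
# (illustrative only, not original code) Each hidden state is scored against a learned query
# vector after being concatenated with the transformed sememe embedding; the scores weight
# the untransformed hidden states before the W_p / W_x combination.  Schematically, per sentence:
#     hidden_cat = torch.cat([W(h), W_s(emb_s)], dim=1)      # (T, 2*mem_dim)
#     att        = torch.tanh(hidden_cat @ query.weight)     # (T, 1)
#     pooled     = att.t() @ h                               # (1, mem_dim) weighted sum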
class BILSTM_extra_concat(nn.Module):
def __init__(self, config):
super(BILSTM_extra_concat, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = config['sememe_size']
self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
self.in_dim = config['word_emb_dim']
self.mem_dim = config['enc_lstm_dim']
#self.pool_type = config['pool_type']
        #multiplied by 3 because there are 3 gate matrices (i, o, u); they are split apart later with torch.split
self.ioux = nn.Linear(2 * self.in_dim, 3 * self.mem_dim)
self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.ioux_b = nn.Linear(2 * self.in_dim, 3 * self.mem_dim)
self.iouh_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
self.fx = nn.Linear(2 * self.in_dim, self.mem_dim)
self.fx_b = nn.Linear(2 * self.in_dim, self.mem_dim)
self.fh = nn.Linear(self.mem_dim, self.mem_dim)
self.fh_b = nn.Linear(self.mem_dim, self.mem_dim)
self.max_pad = True
self.W_s = nn.Linear(self.in_dim, self.mem_dim)
self.W = nn.Linear(self.mem_dim, self.mem_dim)
self.query = nn.Embedding(2*self.mem_dim, 1)
self.W_p = nn.Linear(self.mem_dim, self.mem_dim)
self.W_x = nn.Linear(self.mem_dim, self.mem_dim)
self.W_s_b = nn.Linear(self.in_dim, self.mem_dim)
self.W_b = nn.Linear(self.mem_dim, self.mem_dim)
self.query_b = nn.Embedding(2*self.mem_dim, 1)
self.W_p_b = nn.Linear(self.mem_dim, self.mem_dim)
self.W_x_b = nn.Linear(self.mem_dim, self.mem_dim)
self.reset_parameters()
def reset_parameters(self):
layers = [self.ioux, self.ioux_b, self.iouh, self.iouh_b, self.fx, self.fx_b, self.fh, self.fh_b]
for layer in layers:
init.kaiming_normal_(layer.weight)
if layer.bias is not None:
init.constant_(layer.bias, val=0)
def node_forward(self, inputs, hx, sememe_h):
child_c = hx[0]
child_h = hx[1]
inputs = torch.cat([inputs, sememe_h], dim = 1)
iou = self.ioux(inputs) + self.iouh(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh(child_h) + self.fx(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc
h = torch.mul(o, torch.tanh(c))
return (c, h)
def node_backward(self, inputs, hx, sememe_h):
child_c = hx[0]
child_h = hx[1]
inputs = torch.cat([inputs, sememe_h], dim = 1)
iou = self.ioux_b(inputs) + self.iouh_b(child_h)
i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
f = torch.sigmoid(
self.fh_b(child_h) + self.fx_b(inputs)
)
fc = torch.mul(f, child_c) #part of memory cell induced by word-child
c = torch.mul(i, u) + fc
h = torch.mul(o, torch.tanh(c))
return (c, h)
def forward(self, sent, sent_len, sememe_data):
# hx: (child_c, child_h)
sememe_h = self.sememe_sum(sememe_data)
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1],
|
np.argsort(-sent_len)
|
numpy.argsort
|
# %%
import numpy as np
import os
from numpy import genfromtxt
import math
import matplotlib.pyplot as plt
import os.path
import csv
from math import pi
#%%
#cartesian and polar conversion
#cart2pol takes cartesian (x, y) and returns polar [rho, phi]; pol2cart does the inverse
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return np.array([rho,phi])
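# Editor's example (illustrative): cart2pol returns [rho, phi] with phi in radians, e.g.
#     cart2pol(1, 0) -> array([1.0, 0.0])
#     cart2pol(0, 1) -> array([1.0, 1.5707963...])   # pi/2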
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return
|
np.array([x,y])
|
numpy.array
|
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from scipy.sparse import rand
from scipy.sparse.linalg import lsqr
from pylops.utils import dottest
from pylops.basicoperators import Regression, LinearRegression, MatrixMult, \
Identity, Zero, Flip, Symmetrize, Roll, Sum, Real, Imag, Conj
par1 = {'ny': 11, 'nx': 11, 'imag': 0,
'dtype':'float64'} # square real
par2 = {'ny': 21, 'nx': 11, 'imag': 0,
'dtype':'float64'} # overdetermined real
par1j = {'ny': 11, 'nx': 11, 'imag': 1j,
'dtype':'complex128'} # square complex
par2j = {'ny': 21, 'nx': 11, 'imag': 1j,
'dtype':'complex128'} # overdetermined complex
par3 = {'ny': 11, 'nx': 21, 'imag': 0,
'dtype':'float64'} # underdetermined real
np.random.seed(10)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Regression(par):
"""Dot-test, inversion and apply for Regression operator
"""
np.random.seed(10)
order = 4
t = np.arange(par['ny'], dtype=np.float32)
LRop = Regression(t, order=order, dtype=par['dtype'])
assert dottest(LRop, par['ny'], order+1)
x = np.array([1., 2., 0., 3., -1.], dtype=np.float32)
xlsqr = lsqr(LRop, LRop*x, damp=1e-10, iter_lim=300, show=0)[0]
assert_array_almost_equal(x, xlsqr, decimal=3)
y = LRop * x
y1 = LRop.apply(t, x)
assert_array_almost_equal(y, y1, decimal=3)
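# Editor's note (illustrative): dottest verifies forward/adjoint consistency of an operator,
# i.e. that u^H (Op v) numerically equals (Op^H u)^H v for random u, v; the two integer
# arguments passed above are the operator's output and input dimensions.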
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_LinearRegression(par):
"""Dot-test and inversion for LinearRegression operator
"""
np.random.seed(10)
t = np.arange(par['ny'], dtype=np.float32)
LRop = LinearRegression(t, dtype=par['dtype'])
assert dottest(LRop, par['ny'], 2)
x = np.array([1., 2.], dtype=np.float32)
xlsqr = lsqr(LRop, LRop*x, damp=1e-10, iter_lim=300, show=0)[0]
assert_array_almost_equal(x, xlsqr, decimal=3)
y = LRop * x
y1 = LRop.apply(t, x)
assert_array_almost_equal(y, y1, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_MatrixMult(par):
"""Dot-test and inversion for MatrixMult operator
"""
np.random.seed(10)
G = np.random.normal(0, 10, (par['ny'],
par['nx'])).astype('float32') + \
par['imag']*np.random.normal(0, 10, (par['ny'],
par['nx'])).astype('float32')
Gop = MatrixMult(G, dtype=par['dtype'])
assert dottest(Gop, par['ny'], par['nx'],
complexflag=0 if par['imag'] == 0 else 3)
x = np.ones(par['nx']) + par['imag']*np.ones(par['nx'])
xlsqr = lsqr(Gop, Gop*x, damp=1e-20, iter_lim=300, show=0)[0]
|
assert_array_almost_equal(x, xlsqr, decimal=4)
|
numpy.testing.assert_array_almost_equal
|
#!/usr/bin/env python
# coding=utf-8
"""
Script for rescaling space heating peak load day (related to robust rescaling
in pyCity_opt
"""
from __future__ import division
import copy
import numpy as np
import matplotlib.pyplot as plt
def resc_sh_peak_load(loadcurve, timestep, resc_factor, span=1):
"""
Rescales space heating peak load day with given rescaling factor.
Assuming that loadcurve represents one year.
Parameters
----------
loadcurve : array
Space heating load curve in Watt
timestep : int
Timestep in seconds
resc_factor : float
        Rescaling factor for the peak load day (e.g. 2 means that every space
        heating power value on the peak load day is rescaled by a factor of 2)
span : int, optional
Timespan in hours, defining peak load period (default: 1).
Returns
-------
mod_loadcurve : array
Rescaled loadcurve
"""
assert resc_factor > 0
assert isinstance(span, int)
assert span > 0
# Get original space heating demand value
sp_dem_org = sum(loadcurve) * timestep / (3600 * 1000)
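    # (sum of W over all timesteps) * timestep [s] gives J; dividing by 3600*1000 converts to kWh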
# Identify peak load timestep
idx_max =
|
np.argmax(loadcurve)
|
numpy.argmax
|
# 2021-03 : Initial code [<NAME>, IGE-CNRS]
#====================================================================================================
import numpy as np
import sys
#====================================================================================================
def grid_bounds_oce(region='Amundsen'):
""" Gives minimum and maximum longitude and latitude for the common MISOMIP2 ocean grid
region: 'Amundsen' (default), 'WeddellShelf', 'WeddellGyre'
    example: [lonmin,lonmax,latmin,latmax] = grid_bounds_oce(region='Amundsen')
"""
if ( region == 'Amundsen' ):
longitude_min = -140.0
longitude_max = -90.0
latitude_min = -76.0
latitude_max = -69.0
elif ( region == 'WeddellShelf' ):
longitude_min = -90.0
longitude_max = -15.0
latitude_min = -85.0
latitude_max = -69.0
elif ( region == 'WeddellGyre' ):
longitude_min = -90.0
longitude_max = 30.0
latitude_min = -85.0
latitude_max = -60.0
else:
sys.exit("~!@#$%^* error : region is not defined, choose either 'Amundsen' or 'Weddell'")
return [longitude_min,longitude_max,latitude_min,latitude_max]
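# Editor's example (illustrative): grid_bounds_oce(region='Amundsen') returns
# [-140.0, -90.0, -76.0, -69.0], i.e. [lonmin, lonmax, latmin, latmax] in degrees.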
#====================================================================================================
def generate_3d_grid_oce(region='Amundsen'):
"""Generates (longitude, latitude, depth) of the common MISOMIP2 3d ocean grid
region: 'Amundsen' (default), 'WeddellShelf', 'WeddellGyre'
    example: [lon,lat,depth]=generate_3d_grid_oce(region='Amundsen')
"""
[lonmin,lonmax,latmin,latmax] = grid_bounds_oce(region=region)
if ( region == 'Amundsen' ):
longitude=np.arange(lonmin,lonmax+0.1,0.1)
latitude=np.arange(latmin,latmax+1./30.,1./30.)
depth=np.array([0., 100., 200., 300., 400., 500., 600., 700., 800., 900., 1000., 1500.])
elif ( region == 'WeddellShelf' ):
longitude=np.arange(lonmin,lonmax+0.15,0.15)
latitude=np.arange(latmin,latmax+1./20.,1./20.)
depth=np.array([0., 100., 200., 300., 400., 500., 600., 700., 800., 900., 1000., 1500.])
elif ( region == 'WeddellGyre' ):
longitude=np.arange(lonmin,lonmax+0.5,0.5)
latitude=np.arange(latmin,latmax+1./6.,1./6.)
depth=np.array([0., 100., 200., 300., 400., 500., 600., 700., 800., 900., 1000., 1500.])
else:
sys.exit("~!@#$%^* error : region is not defined, choose either 'Amundsen' or 'WeddellShelf' or 'WeddellGyre'")
return [longitude,latitude,depth]
#====================================================================================================
def generate_section_grid_oce(region='Amundsen',section=1):
"""Generates (longitude, latitude, depth) of the common MISOMIP2 ocean section
    region: 'Amundsen' (default), 'WeddellShelf', 'WeddellGyre'
section: 1 (default) -> Pine Island Trough for Amundsen
-> xxxxxxxxxxxxxxxxxx for Weddell
2 -> Dotson Trough for Amundsen
    example: [lon,lat,depth]=generate_section_grid_oce(region='Amundsen')
"""
if ( region == 'Amundsen' & section == 1 ):
longitude=np.array([ -97.643 , -97.994 , -98.344 , -98.694 , -99.045 , -99.395 , -99.746, -100.096,
-100.446, -100.797, -101.147, -101.497, -101.847, -102.056, -102.264, -102.473,
-102.681, -102.89 , -103.098, -103.306, -103.515, -103.723, -103.932, -104.069,
-104.150, -104.231, -104.311, -104.392, -104.472, -104.553, -104.633, -104.714,
-104.795, -104.875, -105.005, -105.147, -105.288, -105.430, -105.572, -105.713,
-105.855, -105.997, -106.138, -106.280, -106.349, -106.371, -106.393, -106.416,
-106.438, -106.46 , -106.483, -106.505, -106.528, -106.550, -106.572, -106.595,
-106.617, -106.639, -106.662, -106.684, -106.707, -106.716, -106.687, -106.659,
-106.630, -106.602, -106.573, -106.545, -106.516, -106.488, -106.460, -106.431,
-106.403, -106.374, -106.330, -106.230, -106.130, -106.030, -105.930, -105.830,
-105.730, -105.63 , -105.530, -105.430, -105.330, -105.230, -105.130, -105.030,
-104.942, -104.873, -104.803, -104.733, -104.663, -104.593, -104.523, -104.454,
-104.384, -104.314, -104.244, -104.174, -104.104, -104.017, -103.929, -103.841,
-103.753, -103.665, -103.578, -103.490, -103.402, -103.314, -103.226, -103.138,
-103.050, -103.003, -102.963, -102.923, -102.883, -102.843, -102.803, -102.763,
-102.724, -102.684, -102.644, -102.604, -102.563, -102.518, -102.472, -102.427,
-102.382, -102.338, -102.294, -102.251, -102.208, -102.164, -102.121, -102.104,
-102.093, -102.082, -102.071, -102.059, -102.048, -102.037, -102.026, -102.014,
-102.003, -101.992, -101.981, -101.969, -101.958, -101.947, -101.936, -101.942,
-101.951, -101.96 , -101.969, -101.978, -101.987, -101.996, -102.005, -102.015,
-102.024, -102.033, -102.042, -102.051, -102.060 ])
latitude=np.arange(-75.5+1./30.,-70.0+1./30.,1./30.)
depth=np.arange(10.,1510.,10.)
elif ( region == 'Amundsen' & section == 2 ):
longitude=np.array([ -114.313, -114.127, -113.94 , -113.753, -113.567, -113.380, -113.193, -113.058,
-112.975, -112.892, -112.808, -112.725, -112.642, -112.575, -112.525, -112.475,
-112.425, -112.375, -112.325, -112.318, -112.353, -112.389, -112.424, -112.460,
-112.495, -112.538, -112.587, -112.635, -112.684, -112.733, -112.781, -112.830,
-112.878, -112.927, -112.975, -113.024, -113.079, -113.177, -113.275, -113.373,
-113.471, -113.569, -113.667, -113.765, -113.863, -113.961, -114.076, -114.208,
-114.340, -114.472, -114.604, -114.735, -114.867, -114.999, -115.123, -115.247,
-115.371, -115.495, -115.619, -115.743, -115.867, -115.991, -116.115, -116.239,
-116.363, -116.487, -116.580, -116.669, -116.758, -116.847, -116.936, -117.025,
-117.114, -117.203, -117.292, -117.381, -117.470, -117.559, -117.648, -117.730,
-117.785, -117.840, -117.896, -117.951, -118.006, -118.061, -118.117, -118.172,
-118.227, -118.282, -118.338, -118.393, -118.448 ])
latitude=np.arange(-75.05+1./30.,-71.95+1./30.,1./30.)
depth=np.arange(10.,1510.,10.)
elif ( region == 'WeddellShelf' ):
longitude=np.array([-45.,-46.]) # to update
latitude=np.array([-80.,-70.]) # to update
depth=np.arange(10.,1510.,10.) # to update
elif ( region == 'WeddellGyre' ):
longitude=np.array([-45.,-46.]) # to update
latitude=np.array([-80.,-70.]) # to update
depth=np.arange(10.,1510.,10.) # to update
else:
sys.exit("~!@#$%^* error : region is not defined, choose either 'Amundsen' or 'Weddell'")
if ( np.size(latitude) != np.size(longitude) ):
sys.exit("~!@#$%^* error : section must be defined with equal longitude and latitude values")
return [longitude,latitude,depth]
#====================================================================================================
def generate_mooring_grid_oce(region='Amundsen'):
"""Generates (longitude, latitude, depth) of the common MISOMIP2 mooring
region: 'Amundsen' (default), 'WeddellShelf', 'WeddellGyre'
    example: [lon,lat,depth]=generate_mooring_grid_oce(region='Amundsen')
"""
if ( region == 'Amundsen' ):
longitude=np.array([ -102. ])
latitude=np.array([ -75. ])
depth=np.arange(10.,1210.,10.)
elif ( region == 'WeddellShelf' ):
longitude=np.array([ -45. ]) # to update
latitude=np.array([ -76. ]) # to update
depth=np.arange(10.,1210.,10.) # to update
elif ( region == 'WeddellGyre' ):
longitude=np.array([ -45. ]) # to update
latitude=np.array([ -76. ]) # to update
depth=
|
np.arange(10.,1210.,10.)
|
numpy.arange
|
from simulator.util.World import World
from simulator.util.Camera import Camera
import cv2
import os
import numpy as np
import time
from config import Config
class GUI:
mouse = (0,0, 0)
mouse_world = (0,0,0)
@staticmethod
def mouse_listener(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
pass
elif event == cv2.EVENT_LBUTTONUP:
pass
GUI.mouse = (x,y, event)
@staticmethod
def mouse_on_world( mouse, camera):
"""
Don't use the GUI.mouse variable....or maybe use it???
http://antongerdelan.net/opengl/raycasting.html
Given the mouse click, find the 3D point on the ground plane
"""
mouse_homogeneous = np.array([[mouse[0],mouse[1],1,1]]).T
ray_eye =
|
np.linalg.inv(camera.K)
|
numpy.linalg.inv
|
import os
import numpy
from deeprank.models.variant import PdbVariantSelection
from deeprank.tools import sparse
from deeprank.domain.amino_acid import amino_acids
def get_variant_group_name(variant):
"""
Args:
variant (PdbVariantSelection): a variant object
Returns (str): an unique name for a given variant object
"""
mol_name = str(variant)
return "%s-%s" % (mol_name, str(hash(variant)).replace('-', 'm'))
def store_variant(variant_group, variant):
""" Stores the variant in the HDF5 variant group
Args:
variant_group (HDF5 group): the group belonging to the variant selection
variant (PdbVariantSelection): the variant object
"""
variant_group.attrs['pdb_ac'] = variant.pdb_ac
variant_group.attrs['variant_chain_id'] = variant.chain_id
variant_group.attrs['variant_residue_number'] = variant.residue_number
if variant.insertion_code is not None:
variant_group.attrs['variant_insertion_code'] = variant.insertion_code
variant_group.attrs['variant_amino_acid_name'] = variant.variant_amino_acid.name
variant_group.attrs['wild_type_amino_acid_name'] = variant.wild_type_amino_acid.name
if variant.protein_accession is not None and variant.protein_residue_number is not None:
variant_group.attrs['variant_protein_accession'] = variant.protein_accession
variant_group.attrs['variant_protein_residue_number'] = variant.protein_residue_number
def load_variant(variant_group):
""" Loads the variant from the HDF5 variant group
Args:
variant_group (HDF5 group): the group belonging to the variant selection
Returns (PdbVariantSelection): the variant object
"""
pdb_ac = variant_group.attrs['pdb_ac']
chain_id = str(variant_group.attrs['variant_chain_id'])
residue_number = int(variant_group.attrs['variant_residue_number'])
if 'variant_insertion_code' in variant_group.attrs:
insertion_code = str(variant_group.attrs['variant_insertion_code'])
else:
insertion_code = None
if 'variant_protein_accession' in variant_group.attrs and 'variant_protein_residue_number' in variant_group.attrs:
protein_accession = str(variant_group.attrs['variant_protein_accession'])
protein_residue_number = int(variant_group.attrs['variant_protein_residue_number'])
else:
protein_accession = None
protein_residue_number = None
amino_acids_by_name = {amino_acid.name: amino_acid for amino_acid in amino_acids}
variant_amino_acid = amino_acids_by_name[variant_group.attrs['variant_amino_acid_name']]
wild_type_amino_acid = amino_acids_by_name[variant_group.attrs['wild_type_amino_acid_name']]
variant = PdbVariantSelection(pdb_ac, chain_id, residue_number, wild_type_amino_acid, variant_amino_acid, insertion_code=insertion_code,
protein_accession=protein_accession, protein_residue_number=protein_residue_number)
return variant
def store_grid_center(variant_group, center):
""" Stores the center position in the HDF5 variant group
Args:
variant_group (HDF5 group): the group belonging to the variant selection
center (float, float, float): xyz position of the center
"""
grid_group = variant_group.require_group("grid_points")
if 'center' in grid_group:
del(grid_group['center'])
grid_group.create_dataset('center', data=center, compression='lzf', chunks=True)
def load_grid_center(variant_group):
""" Loads the center position from the HDF5 variant group
Args:
variant_group (HDF5 group): the group belonging to the variant selection
Returns (float, float, float): xyz position of the center
"""
grid_group = variant_group['grid_points']
return numpy.array(grid_group['center'])
def store_grid_points(variant_group, x_coords, y_coords, z_coords):
""" Stores the grid point coordinates in the HDF5 variant group
Args:
variant_group (HDF5 group): the group belonging to the variant selection
x_coords (list(float)): the x coords of the grid points
y_coords (list(float)): the y coords of the grid points
z_coords (list(float)): the z coords of the grid points
"""
grid_group = variant_group.require_group("grid_points")
for coord in ['x', 'y', 'z']:
if coord in grid_group:
del(grid_group[coord])
grid_group.create_dataset('x', data=x_coords, compression='lzf', chunks=True)
grid_group.create_dataset('y', data=y_coords, compression='lzf', chunks=True)
grid_group.create_dataset('z', data=z_coords, compression='lzf', chunks=True)
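# --- Editor's usage sketch (illustrative; the file name and variables are assumptions) ---
# The store_*/load_* helpers above all operate on an h5py group, e.g.:
#     import h5py
#     with h5py.File('variants.hdf5', 'w') as f5:
#         group = f5.require_group(get_variant_group_name(variant))
#         store_variant(group, variant)
#         store_grid_center(group, (0.0, 0.0, 0.0))
#         store_grid_points(group, x_coords, y_coords, z_coords)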
def load_grid_points(variant_group):
""" Loads the grid point coordinates from the HDF5 variant group
Args:
variant_group (HDF5 group): the group belonging to the variant selection
Returns (list(float), list(float), list(float)): the x, y and z coordinates of the grid points
"""
grid_group = variant_group['grid_points']
x_coords = numpy.array(grid_group['x'])
y_coords =
|
numpy.array(grid_group['y'])
|
numpy.array
|
import numpy
import math
from scipy import optimize, interpolate, signal, stats, ndimage
import scipy
import re
import datetime
import copy
import sys
import importlib
import itertools
from multiprocessing import Pool, TimeoutError
from multiprocessing.pool import ThreadPool
import time
from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state papameters
from .jroproc_base import ProcessingUnit, Operation, MPDecorator
from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
from numpy import asarray as ar, exp  # these are NumPy functions; recent SciPy no longer re-exports them
from scipy.optimize import curve_fit
from schainpy.utils import log
import warnings
from numpy import NaN
from scipy.optimize import OptimizeWarning  # public import path (scipy.optimize.optimize is private/deprecated)
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
SPEED_OF_LIGHT = 299792458
'''solving pickling issue'''
def _pickle_method(method):
func_name = method.__func__.__name__
obj = method.__self__
cls = method.__self__.__class__
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
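# Editor's sketch (assumption, not shown in this excerpt): these helpers are normally
# registered with the pickling machinery so bound methods survive multiprocessing, e.g.:
#     import copyreg, types
#     copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)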
class ParametersProc(ProcessingUnit):
METHODS = {}
nSeconds = None
def __init__(self):
ProcessingUnit.__init__(self)
# self.objectDict = {}
self.buffer = None
self.firstdatatime = None
self.profIndex = 0
self.dataOut = Parameters()
        self.setupReq = False  #Add this to every processing unit
def __updateObjFromInput(self):
self.dataOut.inputUnit = self.dataIn.type
self.dataOut.timeZone = self.dataIn.timeZone
self.dataOut.dstFlag = self.dataIn.dstFlag
self.dataOut.errorCount = self.dataIn.errorCount
self.dataOut.useLocalTime = self.dataIn.useLocalTime
self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
self.dataOut.channelList = self.dataIn.channelList
self.dataOut.heightList = self.dataIn.heightList
self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
# self.dataOut.nHeights = self.dataIn.nHeights
# self.dataOut.nChannels = self.dataIn.nChannels
# self.dataOut.nBaud = self.dataIn.nBaud
# self.dataOut.nCode = self.dataIn.nCode
# self.dataOut.code = self.dataIn.code
# self.dataOut.nProfiles = self.dataOut.nFFTPoints
self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
# self.dataOut.utctime = self.firstdatatime
self.dataOut.utctime = self.dataIn.utctime
        self.dataOut.flagDecodeData = self.dataIn.flagDecodeData #assume the data is already decoded
        self.dataOut.flagDeflipData = self.dataIn.flagDeflipData #assume the data has not been flipped
self.dataOut.nCohInt = self.dataIn.nCohInt
# self.dataOut.nIncohInt = 1
# self.dataOut.ippSeconds = self.dataIn.ippSeconds
# self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
self.dataOut.timeInterval1 = self.dataIn.timeInterval
self.dataOut.heightList = self.dataIn.heightList
self.dataOut.frequency = self.dataIn.frequency
# self.dataOut.noise = self.dataIn.noise
def run(self):
#---------------------- Voltage Data ---------------------------
if self.dataIn.type == "Voltage":
self.__updateObjFromInput()
self.dataOut.data_pre = self.dataIn.data.copy()
self.dataOut.flagNoData = False
self.dataOut.utctimeInit = self.dataIn.utctime
self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds
if hasattr(self.dataIn, 'dataPP_POW'):
self.dataOut.dataPP_POW = self.dataIn.dataPP_POW
if hasattr(self.dataIn, 'dataPP_POWER'):
self.dataOut.dataPP_POWER = self.dataIn.dataPP_POWER
if hasattr(self.dataIn, 'dataPP_DOP'):
self.dataOut.dataPP_DOP = self.dataIn.dataPP_DOP
if hasattr(self.dataIn, 'dataPP_SNR'):
self.dataOut.dataPP_SNR = self.dataIn.dataPP_SNR
if hasattr(self.dataIn, 'dataPP_WIDTH'):
self.dataOut.dataPP_WIDTH = self.dataIn.dataPP_WIDTH
return
#---------------------- Spectra Data ---------------------------
if self.dataIn.type == "Spectra":
self.dataOut.data_pre = (self.dataIn.data_spc, self.dataIn.data_cspc)
self.dataOut.data_spc = self.dataIn.data_spc
self.dataOut.data_cspc = self.dataIn.data_cspc
self.dataOut.nProfiles = self.dataIn.nProfiles
self.dataOut.nIncohInt = self.dataIn.nIncohInt
self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
self.dataOut.ippFactor = self.dataIn.ippFactor
self.dataOut.abscissaList = self.dataIn.getVelRange(1)
self.dataOut.spc_noise = self.dataIn.getNoise()
self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
# self.dataOut.normFactor = self.dataIn.normFactor
self.dataOut.pairsList = self.dataIn.pairsList
self.dataOut.groupList = self.dataIn.pairsList
self.dataOut.flagNoData = False
if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
self.dataOut.ChanDist = self.dataIn.ChanDist
else: self.dataOut.ChanDist = None
#if hasattr(self.dataIn, 'VelRange'): #Velocities range
# self.dataOut.VelRange = self.dataIn.VelRange
#else: self.dataOut.VelRange = None
if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
self.dataOut.RadarConst = self.dataIn.RadarConst
if hasattr(self.dataIn, 'NPW'): #NPW
self.dataOut.NPW = self.dataIn.NPW
if hasattr(self.dataIn, 'COFA'): #COFA
self.dataOut.COFA = self.dataIn.COFA
#---------------------- Correlation Data ---------------------------
if self.dataIn.type == "Correlation":
acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()
self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
self.dataOut.groupList = (acf_pairs, ccf_pairs)
self.dataOut.abscissaList = self.dataIn.lagRange
self.dataOut.noise = self.dataIn.noise
self.dataOut.data_snr = self.dataIn.SNR
self.dataOut.flagNoData = False
self.dataOut.nAvg = self.dataIn.nAvg
#---------------------- Parameters Data ---------------------------
if self.dataIn.type == "Parameters":
self.dataOut.copy(self.dataIn)
self.dataOut.flagNoData = False
return True
self.__updateObjFromInput()
self.dataOut.utctimeInit = self.dataIn.utctime
self.dataOut.paramInterval = self.dataIn.timeInterval
return
def target(tups):
obj, args = tups
return obj.FitGau(args)
class SpectralFilters(Operation):
'''This class allows the Rainfall / Wind Selection for CLAIRE RADAR
LimitR : It is the limit in m/s of Rainfall
LimitW : It is the limit in m/s for Winds
Input:
self.dataOut.data_pre : SPC and CSPC
self.dataOut.spc_range : To select wind and rainfall velocities
Affected:
self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
self.dataOut.spcparam_range : Used in SpcParamPlot
self.dataOut.SPCparam : Used in PrecipitationProc
'''
def __init__(self):
Operation.__init__(self)
self.i=0
def run(self, dataOut, PositiveLimit=1.5, NegativeLimit=2.5):
        #Wind limits
LimitR = PositiveLimit
LimitN = NegativeLimit
self.spc = dataOut.data_pre[0].copy()
self.cspc = dataOut.data_pre[1].copy()
self.Num_Hei = self.spc.shape[2]
self.Num_Bin = self.spc.shape[1]
self.Num_Chn = self.spc.shape[0]
VelRange = dataOut.spc_range[2]
TimeRange = dataOut.spc_range[1]
FrecRange = dataOut.spc_range[0]
Vmax= 2*numpy.max(dataOut.spc_range[2])
Tmax= 2*numpy.max(dataOut.spc_range[1])
Fmax= 2*numpy.max(dataOut.spc_range[0])
Breaker1R=VelRange[numpy.abs(VelRange-(-LimitN)).argmin()]
Breaker1R=numpy.where(VelRange == Breaker1R)
Delta = self.Num_Bin/2 - Breaker1R[0]
        '''Rearranging the SPC range'''
VelRange=numpy.roll(VelRange,-(int(self.Num_Bin/2)) ,axis=0)
VelRange[-(int(self.Num_Bin/2)):]+= Vmax
FrecRange=numpy.roll(FrecRange,-(int(self.Num_Bin/2)),axis=0)
FrecRange[-(int(self.Num_Bin/2)):]+= Fmax
TimeRange=numpy.roll(TimeRange,-(int(self.Num_Bin/2)),axis=0)
TimeRange[-(int(self.Num_Bin/2)):]+= Tmax
''' ------------------ '''
Breaker2R=VelRange[numpy.abs(VelRange-(LimitR)).argmin()]
Breaker2R=numpy.where(VelRange == Breaker2R)
SPCroll = numpy.roll(self.spc,-(int(self.Num_Bin/2)) ,axis=1)
SPCcut = SPCroll.copy()
for i in range(self.Num_Chn):
SPCcut[i,0:int(Breaker2R[0]),:] = dataOut.noise[i]
SPCcut[i,-int(Delta):,:] = dataOut.noise[i]
SPCcut[i]=SPCcut[i]- dataOut.noise[i]
SPCcut[ numpy.where( SPCcut<0 ) ] = 1e-20
SPCroll[i]=SPCroll[i]-dataOut.noise[i]
SPCroll[ numpy.where( SPCroll<0 ) ] = 1e-20
SPC_ch1 = SPCroll
SPC_ch2 = SPCcut
SPCparam = (SPC_ch1, SPC_ch2, self.spc)
dataOut.SPCparam = numpy.asarray(SPCparam)
dataOut.spcparam_range=numpy.zeros([self.Num_Chn,self.Num_Bin+1])
dataOut.spcparam_range[2]=VelRange
dataOut.spcparam_range[1]=TimeRange
dataOut.spcparam_range[0]=FrecRange
return dataOut
class GaussianFit(Operation):
'''
    Function that fits one or two generalized Gaussians (gg) to the PSD
    shape, across a "power band" identified from a cumulative sum of the
    measured spectrum minus the noise.
Input:
self.dataOut.data_pre : SelfSpectra
Output:
self.dataOut.SPCparam : SPC_ch1, SPC_ch2
'''
def __init__(self):
Operation.__init__(self)
self.i=0
def run(self, dataOut, num_intg=7, pnoise=1., SNRlimit=-9): #num_intg: Incoherent integrations, pnoise: Noise, vel_arr: range of velocities, similar to the ftt points
"""This routine will find a couple of generalized Gaussians to a power spectrum
input: spc
output:
Amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1,noise
"""
self.spc = dataOut.data_pre[0].copy()
self.Num_Hei = self.spc.shape[2]
self.Num_Bin = self.spc.shape[1]
self.Num_Chn = self.spc.shape[0]
Vrange = dataOut.abscissaList
GauSPC = numpy.empty([self.Num_Chn,self.Num_Bin,self.Num_Hei])
SPC_ch1 = numpy.empty([self.Num_Bin,self.Num_Hei])
SPC_ch2 = numpy.empty([self.Num_Bin,self.Num_Hei])
SPC_ch1[:] = numpy.NaN
SPC_ch2[:] = numpy.NaN
start_time = time.time()
noise_ = dataOut.spc_noise[0].copy()
pool = Pool(processes=self.Num_Chn)
args = [(Vrange, Ch, pnoise, noise_, num_intg, SNRlimit) for Ch in range(self.Num_Chn)]
objs = [self for __ in range(self.Num_Chn)]
attrs = list(zip(objs, args))
gauSPC = pool.map(target, attrs)
        dataOut.SPCparam = numpy.asarray(gauSPC)  # 'SPCparam' was undefined here; use the pooled FitGau results
''' Parameters:
1. Amplitude
2. Shift
3. Width
4. Power
'''
def FitGau(self, X):
Vrange, ch, pnoise, noise_, num_intg, SNRlimit = X
SPCparam = []
SPC_ch1 = numpy.empty([self.Num_Bin,self.Num_Hei])
SPC_ch2 = numpy.empty([self.Num_Bin,self.Num_Hei])
SPC_ch1[:] = 0#numpy.NaN
SPC_ch2[:] = 0#numpy.NaN
for ht in range(self.Num_Hei):
spc = numpy.asarray(self.spc)[ch,:,ht]
#############################################
# normalizing spc and noise
# This part differs from gg1
spc_norm_max = max(spc)
#spc = spc / spc_norm_max
pnoise = pnoise #/ spc_norm_max
#############################################
fatspectra=1.0
wnoise = noise_ #/ spc_norm_max
#wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using <NAME>, only wnoise is used
#if wnoise>1.1*pnoise: # to be tested later
# wnoise=pnoise
noisebl=wnoise*0.9;
noisebh=wnoise*1.1
spc=spc-wnoise
minx=numpy.argmin(spc)
#spcs=spc.copy()
spcs=numpy.roll(spc,-minx)
cum=numpy.cumsum(spcs)
tot_noise=wnoise * self.Num_Bin #64;
snr = sum(spcs)/tot_noise
snrdB=10.*numpy.log10(snr)
if snrdB < SNRlimit :
snr = numpy.NaN
                SPC_ch1[:,ht] = 0#numpy.NaN
                SPC_ch2[:,ht] = 0#numpy.NaN
SPCparam = (SPC_ch1,SPC_ch2)
continue
#if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4:
# return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None
cummax=max(cum);
epsi=0.08*fatspectra # cumsum to narrow down the energy region
cumlo=cummax*epsi;
cumhi=cummax*(1-epsi)
powerindex=numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])
if len(powerindex) < 1:# case for powerindex 0
continue
powerlo=powerindex[0]
powerhi=powerindex[-1]
powerwidth=powerhi-powerlo
firstpeak=powerlo+powerwidth/10.# first gaussian energy location
secondpeak=powerhi-powerwidth/10.#second gaussian energy location
midpeak=(firstpeak+secondpeak)/2.
firstamp=spcs[int(firstpeak)]
secondamp=spcs[int(secondpeak)]
midamp=spcs[int(midpeak)]
x=numpy.arange( self.Num_Bin )
y_data=spc+wnoise
''' single Gaussian '''
shift0=numpy.mod(midpeak+minx, self.Num_Bin )
width0=powerwidth/4.#Initialization entire power of spectrum divided by 4
power0=2.
amplitude0=midamp
state0=[shift0,width0,amplitude0,power0,wnoise]
bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
lsq1=fmin_l_bfgs_b(self.misfit1,state0,args=(y_data,x,num_intg),bounds=bnds,approx_grad=True)
chiSq1=lsq1[1];
if fatspectra<1.0 and powerwidth<4:
choice=0
Amplitude0=lsq1[0][2]
shift0=lsq1[0][0]
width0=lsq1[0][1]
p0=lsq1[0][3]
Amplitude1=0.
shift1=0.
width1=0.
p1=0.
noise=lsq1[0][4]
#return (numpy.array([shift0,width0,Amplitude0,p0]),
# numpy.array([shift1,width1,Amplitude1,p1]),noise,snrdB,chiSq1,6.,sigmas1,[None,]*9,choice)
''' two gaussians '''
#shift0=numpy.mod(firstpeak+minx,64); shift1=numpy.mod(secondpeak+minx,64)
shift0=numpy.mod(firstpeak+minx, self.Num_Bin );
shift1=numpy.mod(secondpeak+minx, self.Num_Bin )
width0=powerwidth/6.;
width1=width0
power0=2.;
power1=power0
amplitude0=firstamp;
amplitude1=secondamp
state0=[shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
#bnds=((0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
#bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(0.1,0.5))
lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )
chiSq2=lsq2[1];
oneG=(chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)
if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
if oneG:
choice=0
else:
w1=lsq2[0][1]; w2=lsq2[0][5]
a1=lsq2[0][2]; a2=lsq2[0][6]
p1=lsq2[0][3]; p2=lsq2[0][7]
s1=(2**(1+1./p1))*scipy.special.gamma(1./p1)/p1;
s2=(2**(1+1./p2))*scipy.special.gamma(1./p2)/p2;
gp1=a1*w1*s1; gp2=a2*w2*s2 # power content of each ggaussian with proper p scaling
if gp1>gp2:
if a1>0.7*a2:
choice=1
else:
choice=2
elif gp2>gp1:
if a2>0.7*a1:
choice=2
else:
choice=1
else:
choice=numpy.argmax([a1,a2])+1
#else:
#choice=argmin([std2a,std2b])+1
else: # with low SNR go to the most energetic peak
choice=numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])
shift0=lsq2[0][0];
vel0=Vrange[0] + shift0*(Vrange[1]-Vrange[0])
shift1=lsq2[0][4];
vel1=Vrange[0] + shift1*(Vrange[1]-Vrange[0])
max_vel = 1.0
#first peak will be 0, second peak will be 1
if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range
shift0=lsq2[0][0]
width0=lsq2[0][1]
Amplitude0=lsq2[0][2]
p0=lsq2[0][3]
shift1=lsq2[0][4]
width1=lsq2[0][5]
Amplitude1=lsq2[0][6]
p1=lsq2[0][7]
noise=lsq2[0][8]
else:
shift1=lsq2[0][0]
width1=lsq2[0][1]
Amplitude1=lsq2[0][2]
p1=lsq2[0][3]
shift0=lsq2[0][4]
width0=lsq2[0][5]
Amplitude0=lsq2[0][6]
p0=lsq2[0][7]
noise=lsq2[0][8]
if Amplitude0<0.05: # in case the peak is noise
shift0,width0,Amplitude0,p0 = [0,0,0,0]#4*[numpy.NaN]
if Amplitude1<0.05:
shift1,width1,Amplitude1,p1 = [0,0,0,0]#4*[numpy.NaN]
SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0))/width0)**p0
SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1))/width1)**p1
SPCparam = (SPC_ch1,SPC_ch2)
        return SPCparam  # 'GauSPC' is undefined in this scope; FitGau builds (SPC_ch1, SPC_ch2) as SPCparam
def y_model1(self,x,state):
shift0,width0,amplitude0,power0,noise=state
model0=amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
model0u=amplitude0*numpy.exp(-0.5*abs((x-shift0- self.Num_Bin )/width0)**power0)
model0d=amplitude0*numpy.exp(-0.5*abs((x-shift0+ self.Num_Bin )/width0)**power0)
return model0+model0u+model0d+noise
def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,noise=state
model0=amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
model0u=amplitude0*numpy.exp(-0.5*abs((x-shift0- self.Num_Bin )/width0)**power0)
model0d=amplitude0*numpy.exp(-0.5*abs((x-shift0+ self.Num_Bin )/width0)**power0)
model1=amplitude1*numpy.exp(-0.5*abs((x-shift1)/width1)**power1)
model1u=amplitude1*numpy.exp(-0.5*abs((x-shift1- self.Num_Bin )/width1)**power1)
model1d=amplitude1*numpy.exp(-0.5*abs((x-shift1+ self.Num_Bin )/width1)**power1)
return model0+model0u+model0d+model1+model1u+model1d+noise
    def misfit1(self,state,y_data,x,num_intg): # Compares how close the real data is to the model data; the closer, the better the fit.
return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented
def misfit2(self,state,y_data,x,num_intg):
return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
class PrecipitationProc(Operation):
'''
Operator that estimates Reflectivity factor (Z), and estimates rainfall Rate (R)
Input:
self.dataOut.data_pre : SelfSpectra
Output:
self.dataOut.data_output : Reflectivity factor, rainfall Rate
Parameters affected:
'''
def __init__(self):
Operation.__init__(self)
self.i=0
def gaus(self,xSamples,Amp,Mu,Sigma):
return ( Amp / ((2*numpy.pi)**0.5 * Sigma) ) * numpy.exp( -( xSamples - Mu )**2 / ( 2 * (Sigma**2) ))
def Moments(self, ySamples, xSamples):
        Pot = numpy.nansum( ySamples ) # Power, zeroth moment
        yNorm = ySamples / Pot
        Vr = numpy.nansum( yNorm * xSamples ) # Radial velocity, mu, Doppler shift, first moment
        Sigma2 = abs(numpy.nansum( yNorm * ( xSamples - Vr )**2 )) # Second moment
        Desv = Sigma2**0.5 # Standard deviation, spectral width
return numpy.array([Pot, Vr, Desv])
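    # Editor's note (illustrative): for a spectrum that is approximately Gaussian, Moments()
    # returns roughly [total power, centre velocity, spectral width]; these values are used
    # below as the initial guess (p0) for curve_fit(self.gaus, ...).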
def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km = 0.93, Altitude=3350):
Velrange = dataOut.spcparam_range[2]
FrecRange = dataOut.spcparam_range[0]
dV= Velrange[1]-Velrange[0]
dF= FrecRange[1]-FrecRange[0]
if radar == "MIRA35C" :
self.spc = dataOut.data_pre[0].copy()
self.Num_Hei = self.spc.shape[2]
self.Num_Bin = self.spc.shape[1]
self.Num_Chn = self.spc.shape[0]
Ze = self.dBZeMODE2(dataOut)
else:
self.spc = dataOut.SPCparam[1].copy() #dataOut.data_pre[0].copy() #
"""NOTA SE DEBE REMOVER EL RANGO DEL PULSO TX"""
self.spc[:,:,0:7]= numpy.NaN
"""##########################################"""
self.Num_Hei = self.spc.shape[2]
self.Num_Bin = self.spc.shape[1]
self.Num_Chn = self.spc.shape[0]
            ''' Obtain the radar constant '''
self.Pt = Pt
self.Gt = Gt
self.Gr = Gr
self.Lambda = Lambda
self.aL = aL
self.tauW = tauW
self.ThetaT = ThetaT
self.ThetaR = ThetaR
Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
RadarConstant = 10e-26 * Numerator / Denominator #
''' ============================= '''
self.spc[0] = (self.spc[0]-dataOut.noise[0])
self.spc[1] = (self.spc[1]-dataOut.noise[1])
self.spc[2] = (self.spc[2]-dataOut.noise[2])
self.spc[ numpy.where(self.spc < 0)] = 0
SPCmean = (numpy.mean(self.spc,0) - numpy.mean(dataOut.noise))
SPCmean[ numpy.where(SPCmean < 0)] = 0
ETAn = numpy.zeros([self.Num_Bin,self.Num_Hei])
ETAv = numpy.zeros([self.Num_Bin,self.Num_Hei])
ETAd = numpy.zeros([self.Num_Bin,self.Num_Hei])
Pr = SPCmean[:,:]
VelMeteoro = numpy.mean(SPCmean,axis=0)
D_range = numpy.zeros([self.Num_Bin,self.Num_Hei])
SIGMA = numpy.zeros([self.Num_Bin,self.Num_Hei])
N_dist = numpy.zeros([self.Num_Bin,self.Num_Hei])
V_mean = numpy.zeros(self.Num_Hei)
del_V = numpy.zeros(self.Num_Hei)
Z = numpy.zeros(self.Num_Hei)
Ze = numpy.zeros(self.Num_Hei)
RR = numpy.zeros(self.Num_Hei)
Range = dataOut.heightList*1000.
for R in range(self.Num_Hei):
h = Range[R] + Altitude #Range from ground to radar pulse altitude
del_V[R] = 1 + 3.68 * 10**-5 * h + 1.71 * 10**-9 * h**2 #Density change correction for velocity
D_range[:,R] = numpy.log( (9.65 - (Velrange[0:self.Num_Bin] / del_V[R])) / 10.3 ) / -0.6 #Diameter range [m]x10**-3
                '''NOTE: ETA(n) dn = ETA(f) df
                dn = 1 (sampling differential)
                df = ETA(n) / ETA(f)
'''
ETAn[:,R] = RadarConstant * Pr[:,R] * (Range[R] )**2 #Reflectivity (ETA)
ETAv[:,R]=ETAn[:,R]/dV
ETAd[:,R]=ETAv[:,R]*6.18*exp(-0.6*D_range[:,R])
SIGMA[:,R] = Km * (D_range[:,R] * 1e-3 )**6 * numpy.pi**5 / Lambda**4 #Equivalent Section of drops (sigma)
N_dist[:,R] = ETAn[:,R] / SIGMA[:,R]
DMoments = self.Moments(Pr[:,R], Velrange[0:self.Num_Bin])
try:
popt01,pcov = curve_fit(self.gaus, Velrange[0:self.Num_Bin] , Pr[:,R] , p0=DMoments)
except:
popt01=numpy.zeros(3)
popt01[1]= DMoments[1]
if popt01[1]<0 or popt01[1]>20:
popt01[1]=numpy.NaN
V_mean[R]=popt01[1]
Z[R] = numpy.nansum( N_dist[:,R] * (D_range[:,R])**6 )#*10**-18
RR[R] = 0.0006*numpy.pi * numpy.nansum( D_range[:,R]**3 * N_dist[:,R] * Velrange[0:self.Num_Bin] ) #Rainfall rate
Ze[R] = (numpy.nansum( ETAn[:,R]) * Lambda**4) / ( 10**-18*numpy.pi**5 * Km)
RR2 = (Z/200)**(1/1.6)
dBRR = 10*numpy.log10(RR)
dBRR2 = 10*numpy.log10(RR2)
dBZe = 10*numpy.log10(Ze)
dBZ = 10*numpy.log10(Z)
dataOut.data_output = RR[8]
dataOut.data_param = numpy.ones([3,self.Num_Hei])
dataOut.channelList = [0,1,2]
dataOut.data_param[0]=dBZ
dataOut.data_param[1]=V_mean
dataOut.data_param[2]=RR
return dataOut
def dBZeMODE2(self, dataOut): # Processing for MIRA35C
NPW = dataOut.NPW
COFA = dataOut.COFA
SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
RadarConst = dataOut.RadarConst
#frequency = 34.85*10**9
ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
ETA = numpy.sum(SNR,1)
        ETA = numpy.where(ETA != 0., ETA, numpy.NaN)  # 'is not 0.' compared identity, not value
Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
for r in range(self.Num_Hei):
Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
#Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
return Ze
# def GetRadarConstant(self):
#
# """
# Constants:
#
# Pt: Transmission Power dB 5kW 5000
# Gt: Transmission Gain dB 24.7 dB 295.1209
# Gr: Reception Gain dB 18.5 dB 70.7945
# Lambda: Wavelenght m 0.6741 m 0.6741
# aL: Attenuation loses dB 4dB 2.5118
# tauW: Width of transmission pulse s 4us 4e-6
# ThetaT: Transmission antenna bean angle rad 0.1656317 rad 0.1656317
# ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
#
# """
#
# Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
# Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * TauW * numpy.pi * ThetaT * TheraR)
# RadarConstant = Numerator / Denominator
#
# return RadarConstant
class FullSpectralAnalysis(Operation):
"""
Function that implements Full Spectral Analysis technique.
Input:
self.dataOut.data_pre : SelfSpectra and CrossSpectra data
self.dataOut.groupList : Pairlist of channels
self.dataOut.ChanDist : Physical distance between receivers
Output:
self.dataOut.data_output : Zonal wind, Meridional wind and Vertical wind
Parameters affected: Winds, height range, SNR
"""
def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRlimit=7, minheight=None, maxheight=None):
self.indice=int(numpy.random.rand()*1000)
spc = dataOut.data_pre[0].copy()
cspc = dataOut.data_pre[1]
"""Erick: NOTE THE RANGE OF THE PULSE TX MUST BE REMOVED"""
SNRspc = spc.copy()
SNRspc[:,:,0:7]= numpy.NaN
"""##########################################"""
nChannel = spc.shape[0]
nProfiles = spc.shape[1]
nHeights = spc.shape[2]
# first_height = 0.75 #km (ref: data header 20170822)
# resolution_height = 0.075 #km
'''
finding height range. check this when radar parameters are changed!
'''
if maxheight is not None:
# range_max = math.ceil((maxheight - first_height) / resolution_height) # theoretical
range_max = math.ceil(13.26 * maxheight - 3) # empirical, works better
else:
range_max = nHeights
if minheight is not None:
# range_min = int((minheight - first_height) / resolution_height) # theoretical
range_min = int(13.26 * minheight - 5) # empirical, works better
if range_min < 0:
range_min = 0
else:
range_min = 0
pairsList = dataOut.groupList
if dataOut.ChanDist is not None :
ChanDist = dataOut.ChanDist
else:
ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])
FrecRange = dataOut.spc_range[0]
data_SNR=numpy.zeros([nProfiles])
noise = dataOut.noise
dataOut.data_snr = (numpy.mean(SNRspc,axis=1)- noise[0]) / noise[0]
dataOut.data_snr[numpy.where( dataOut.data_snr <0 )] = 1e-20
data_output=numpy.ones([spc.shape[0],spc.shape[2]])*numpy.NaN
velocityX=[]
velocityY=[]
velocityV=[]
dbSNR = 10*numpy.log10(dataOut.data_snr)
dbSNR = numpy.average(dbSNR,0)
'''***********************************************WIND ESTIMATION**************************************'''
for Height in range(nHeights):
if Height >= range_min and Height < range_max:
# error_code unused, yet maybe useful for future analysis.
[Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList, ChanDist, Height, noise, dataOut.spc_range, dbSNR[Height], SNRlimit)
else:
Vzon,Vmer,Vver = 0., 0., numpy.NaN
if abs(Vzon) < 100. and abs(Vzon) > 0. and abs(Vmer) < 100. and abs(Vmer) > 0.:
velocityX=numpy.append(velocityX, Vzon)
velocityY=numpy.append(velocityY, -Vmer)
else:
velocityX=numpy.append(velocityX, numpy.NaN)
velocityY=numpy.append(velocityY, numpy.NaN)
if dbSNR[Height] > SNRlimit:
velocityV=numpy.append(velocityV, -Vver) # reason for this minus sign -> convention? (taken from Ericks version)
else:
velocityV=numpy.append(velocityV, numpy.NaN)
'''Change the numpy.array (velocityX) sign when trying to process BLTR data (Erick)'''
data_output[0] = numpy.array(velocityX)
data_output[1] = numpy.array(velocityY)
data_output[2] = velocityV
dataOut.data_output = data_output
return dataOut
def moving_average(self,x, N=2):
""" convolution for smoothenig data. note that last N-1 values are convolution with zeroes """
return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
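    # Editor's example (illustrative): moving_average(numpy.array([1., 1., 1., 1.]), N=2)
    # returns [1., 1., 1., 0.5]; the last value is halved because the window runs off the end
    # of the array (implicit zero padding), as the docstring notes.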
def gaus(self,xSamples,Amp,Mu,Sigma):
return ( Amp / ((2*numpy.pi)**0.5 * Sigma) ) * numpy.exp( -( xSamples - Mu )**2 / ( 2 * (Sigma**2) ))
def Moments(self, ySamples, xSamples):
'''***
Variables corresponding to moments of distribution.
Also used as initial coefficients for curve_fit.
Vr was corrected. Only a velocity when x is velocity, of course.
***'''
        Pot = numpy.nansum( ySamples ) # Power, zeroth moment
        yNorm = ySamples / Pot
        x_range = (numpy.max(xSamples)-numpy.min(xSamples))
        Vr = numpy.nansum( yNorm * xSamples )*x_range/len(xSamples) # Radial velocity, mu, Doppler shift, first moment
        Sigma2 = abs(numpy.nansum( yNorm * ( xSamples - Vr )**2 )) # Second moment
        Desv = Sigma2**0.5 # Standard deviation, spectral width
return numpy.array([Pot, Vr, Desv])
def StopWindEstimation(self, error_code):
'''
        Stops the wind calculation and returns placeholder values (zeros for Vzon/Vmer, NaN for Vver).
'''
Vzon = 0
Vmer = 0
Vver = numpy.nan
return Vzon, Vmer, Vver, error_code
def AntiAliasing(self, interval, maxstep):
"""
function to prevent errors from aliased values when computing phaseslope
"""
antialiased = numpy.zeros(len(interval))*0.0
copyinterval = interval.copy()
antialiased[0] = copyinterval[0]
for i in range(1,len(antialiased)):
step = interval[i] - interval[i-1]
if step > maxstep:
copyinterval -= 2*numpy.pi
antialiased[i] = copyinterval[i]
elif step < maxstep*(-1):
copyinterval += 2*numpy.pi
antialiased[i] = copyinterval[i]
else:
antialiased[i] = copyinterval[i].copy()
return antialiased
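# Added illustration: the helper below is an assumption-only sketch comparing
# AntiAliasing() with numpy.unwrap on a synthetic wrapped phase ramp; both are
# expected to remove the 2*pi jumps and recover a monotonic phase.
def _antialiasing_example(self):
    true_phase = numpy.linspace(0., 6. * numpy.pi, 50)     # monotonic phase ramp
    wrapped = numpy.angle(numpy.exp(1j * true_phase))      # wrapped into (-pi, pi]
    restored = self.AntiAliasing(wrapped, maxstep=numpy.pi)
    reference = numpy.unwrap(wrapped)                      # numpy's standard unwrapping
    return restored, reference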
def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit):
"""
Function that Calculates Zonal, Meridional and Vertical wind velocities.
Initial Version by <NAME>gra updated by <NAME> until Nov. 2019.
Input:
spc, cspc : self spectra and cross spectra data. In Briggs notation something like S_i*(S_i)_conj, (S_j)_conj respectively.
pairsList : Pairlist of channels
ChanDist : array of xi_ij and eta_ij
Height : height at which data is processed
noise : noise in [channels] format for specific height
Abbsisarange : range of the frequencies or velocities
dbSNR, SNRlimit : signal to noise ratio in db, lower limit
Output:
Vzon, Vmer, Vver : wind velocities
error_code : int that states where code is terminated
0 : no error detected
1 : Gaussian of mean spc exceeds widthlimit
2 : no Gaussian of mean spc found
3 : SNR to low or velocity to high -> prec. e.g.
4 : at least one Gaussian of cspc exceeds widthlimit
5 : zero out of three cspc Gaussian fits converged
6 : phase slope fit could not be found
7 : arrays used to fit phase have different length
8 : frequency range is either too short (len <= 5) or very long (> 30% of cspc)
"""
error_code = 0
SPC_Samples = numpy.ones([spc.shape[0],spc.shape[1]]) # for normalized spc values for one height
phase = numpy.ones([spc.shape[0],spc.shape[1]]) # phase between channels
CSPC_Samples = numpy.ones([spc.shape[0],spc.shape[1]],dtype=numpy.complex_) # for normalized cspc values
PhaseSlope = numpy.zeros(spc.shape[0]) # slope of the phases, channelwise
PhaseInter = numpy.ones(spc.shape[0]) # intercept to the slope of the phases, channelwise
xFrec = AbbsisaRange[0][0:spc.shape[1]] # frequency range
xVel = AbbsisaRange[2][0:spc.shape[1]] # velocity range
SPCav = numpy.average(spc, axis=0)-numpy.average(noise) # spc[0]-noise[0]
SPCmoments_vel = self.Moments(SPCav, xVel ) # SPCmoments_vel[1] corresponds to vertical velocity and is used to determine if signal corresponds to wind (if .. <3)
CSPCmoments = []
'''Getting Eij and Nij'''
Xi01, Xi02, Xi12 = ChanDist[:,0]
Eta01, Eta02, Eta12 = ChanDist[:,1]
# update nov 19
widthlimit = 7 # maximum width in Hz of the gaussian, empirically determined. Anything above 10 is unrealistic, often values between 1 and 5 correspond to proper fits.
'''************************* SPC is normalized ********************************'''
spc_norm = spc.copy() # need copy() because untouched spc is needed for normalization of cspc below
spc_norm = numpy.where(numpy.isfinite(spc_norm), spc_norm, numpy.NAN)
for i in range(spc.shape[0]):
spc_sub = spc_norm[i,:] - noise[i] # spc not smoothed here or in previous version.
Factor_Norm = 2*numpy.max(xFrec) / numpy.count_nonzero(~numpy.isnan(spc_sub)) # usually = Freq range / nfft
normalized_spc = spc_sub / (numpy.nansum(numpy.abs(spc_sub)) * Factor_Norm)
xSamples = xFrec # the frequency range is taken
SPC_Samples[i] = normalized_spc # Normalized SPC values are taken
'''********************** FITTING MEAN SPC GAUSSIAN **********************'''
""" the gaussian of the mean: first subtract noise, then normalize. this is legal because
you only fit the curve and don't need the absolute value of height for calculation,
only for estimation of width. for normalization of cross spectra, you need initial,
unnormalized self-spectra With noise.
Technically, you don't even need to normalize the self-spectra, as you only need the
width of the peak. However, it was left this way. Note that the normalization has a flaw:
due to subtraction of the noise, some values are below zero. Raw "spc" values should be
>= 0, as it is the modulus squared of the signals (complex * it's conjugate)
"""
SPCMean = numpy.average(SPC_Samples, axis=0)
popt = [1e-10,0,1e-10]
SPCMoments = self.Moments(SPCMean, xSamples)
if dbSNR > SNRlimit and numpy.abs(SPCmoments_vel[1]) < 3:
try:
popt,pcov = curve_fit(self.gaus,xSamples,SPCMean,p0=SPCMoments)#, bounds=(-numpy.inf, [numpy.inf, numpy.inf, 10])). Setting bounds does not make the code faster but only keeps the fit from finding the minimum.
if popt[2] > widthlimit: # CONDITION
return self.StopWindEstimation(error_code = 1)
FitGauss = self.gaus(xSamples,*popt)
except :#RuntimeError:
return self.StopWindEstimation(error_code = 2)
else:
return self.StopWindEstimation(error_code = 3)
'''***************************** CSPC Normalization *************************
New section:
The spc spectra are used to normalize the cross spectra. Peaks from precipitation
influence the norm, which is not desired. First, a range is identified where the
wind peak is estimated -> sum_wind is the sum of those frequencies. Next, the area
around it is cut off and the values are replaced by a mean determined by the boundary
data -> sum_noise (spc is not normalized here, which is why the noise matters).
The sums are then added and multiplied by range/datapoints, because an integral,
not a sum, is needed for the normalization.
A norm is found according to Briggs 92.
'''
radarWavelength = 0.6741 # meters
count_limit_freq = numpy.abs(popt[1]) + widthlimit # Hz; m/s can also be used if velocity is the desired abscissa.
# count_limit_freq = numpy.max(xFrec)
channel_integrals = numpy.zeros(3)
for i in range(spc.shape[0]):
'''
find the point in array corresponding to count_limit frequency.
sum over all frequencies in the range around zero Hz @ math.ceil(N_freq/2)
'''
N_freq = numpy.count_nonzero(~numpy.isnan(spc[i,:]))
count_limit_int = int(math.ceil( count_limit_freq / numpy.max(xFrec) * (N_freq / 2) )) # gives integer point
sum_wind = numpy.nansum( spc[i, (math.ceil(N_freq/2) - count_limit_int) : (math.ceil(N_freq / 2) + count_limit_int)] ) #N_freq/2 is where frequency (velocity) is zero, i.e. middle of spectrum.
sum_noise = (numpy.mean(spc[i, :4]) + numpy.mean(spc[i, -6:-2]))/2.0 * (N_freq - 2*count_limit_int)
channel_integrals[i] = (sum_noise + sum_wind) * (2*numpy.max(xFrec) / N_freq)
cross_integrals_peak = numpy.zeros(3)
# cross_integrals_totalrange = numpy.zeros(3)
for i in range(spc.shape[0]):
cspc_norm = cspc[i,:].copy() # cspc not smoothed here or in previous version
chan_index0 = pairsList[i][0]
chan_index1 = pairsList[i][1]
cross_integrals_peak[i] = channel_integrals[chan_index0]*channel_integrals[chan_index1]
normalized_cspc = cspc_norm / numpy.sqrt(cross_integrals_peak[i])
CSPC_Samples[i] = normalized_cspc
''' Finding cross integrals without subtracting any peaks:'''
# FactorNorm0 = 2*numpy.max(xFrec) / numpy.count_nonzero(~numpy.isnan(spc[chan_index0,:]))
# FactorNorm1 = 2*numpy.max(xFrec) / numpy.count_nonzero(~numpy.isnan(spc[chan_index1,:]))
# cross_integrals_totalrange[i] = (numpy.nansum(spc[chan_index0,:])) * FactorNorm0 * (numpy.nansum(spc[chan_index1,:])) * FactorNorm1
# normalized_cspc = cspc_norm / numpy.sqrt(cross_integrals_totalrange[i])
# CSPC_Samples[i] = normalized_cspc
phase[i] = numpy.arctan2(CSPC_Samples[i].imag, CSPC_Samples[i].real)
CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPC_Samples[0]), xSamples),
self.Moments(numpy.abs(CSPC_Samples[1]), xSamples),
self.Moments(numpy.abs(CSPC_Samples[2]), xSamples)])
'''***Sorting out NaN entries***'''
CSPCMask01 = numpy.abs(CSPC_Samples[0])
CSPCMask02 = numpy.abs(CSPC_Samples[1])
CSPCMask12 = numpy.abs(CSPC_Samples[2])
mask01 = ~numpy.isnan(CSPCMask01)
mask02 = ~numpy.isnan(CSPCMask02)
mask12 = ~numpy.isnan(CSPCMask12)
CSPCMask01 = CSPCMask01[mask01]
CSPCMask02 = CSPCMask02[mask02]
CSPCMask12 = CSPCMask12[mask12]
popt01, popt02, popt12 = [1e-10,1e-10,1e-10], [1e-10,1e-10,1e-10] ,[1e-10,1e-10,1e-10]
FitGauss01, FitGauss02, FitGauss12 = numpy.empty(len(xSamples))*0, numpy.empty(len(xSamples))*0, numpy.empty(len(xSamples))*0
'''*******************************FIT GAUSS CSPC************************************'''
try:
popt01,pcov = curve_fit(self.gaus,xSamples[mask01],numpy.abs(CSPCMask01),p0=CSPCmoments[0])
if popt01[2] > widthlimit: # CONDITION
return self.StopWindEstimation(error_code = 4)
popt02,pcov = curve_fit(self.gaus,xSamples[mask02],numpy.abs(CSPCMask02),p0=CSPCmoments[1])
if popt02[2] > widthlimit: # CONDITION
return self.StopWindEstimation(error_code = 4)
popt12,pcov = curve_fit(self.gaus,xSamples[mask12],numpy.abs(CSPCMask12),p0=CSPCmoments[2])
if popt12[2] > widthlimit: # CONDITION
return self.StopWindEstimation(error_code = 4)
FitGauss01 = self.gaus(xSamples, *popt01)
FitGauss02 = self.gaus(xSamples, *popt02)
FitGauss12 = self.gaus(xSamples, *popt12)
except:
return self.StopWindEstimation(error_code = 5)
'''************* Getting Fij ***************'''
# x-axis point of the Gaussian where the center is located
# -> PointGauCenter
GaussCenter = popt[1]
ClosestCenter = xSamples[numpy.abs(xSamples-GaussCenter).argmin()]
PointGauCenter = numpy.where(xSamples==ClosestCenter)[0][0]
# point where e^-1 of the maximum is located in the Gaussian
PeMinus1 = numpy.max(FitGauss) * numpy.exp(-1)
FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # closest point to PeMinus1 within FitGauss
PointFij = numpy.where(FitGauss==FijClosest)[0][0]
Fij = numpy.abs(xSamples[PointFij] - xSamples[PointGauCenter])
'''********** Taking frequency ranges from mean SPCs **********'''
#GaussCenter = popt[1] # first moment 01
GauWidth = popt[2] * 3/2 # bandwidth of Gau01 -- TODO why *3/2?
Range = numpy.empty(2)
Range[0] = GaussCenter - GauWidth
Range[1] = GaussCenter + GauWidth
# point in the x-axis where the bandwidth is located (min:max)
ClosRangeMin = xSamples[numpy.abs(xSamples-Range[0]).argmin()]
ClosRangeMax = xSamples[numpy.abs(xSamples-Range[1]).argmin()]
PointRangeMin = numpy.where(xSamples==ClosRangeMin)[0][0]
PointRangeMax = numpy.where(xSamples==ClosRangeMax)[0][0]
Range = numpy.array([ PointRangeMin, PointRangeMax ])
FrecRange = xFrec[ Range[0] : Range[1] ]
'''************************** Getting Phase Slope ***************************'''
for i in range(1,3): # Changed to only compute two
if len(FrecRange) > 5 and len(FrecRange) < spc.shape[1] * 0.3:
# PhaseRange=self.moving_average(phase[i,Range[0]:Range[1]],N=1) #used before to smooth phase with N=3
PhaseRange = phase[i,Range[0]:Range[1]].copy()
mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
if len(FrecRange) == len(PhaseRange):
try:
slope, intercept, _, _, _ = stats.linregress(FrecRange[mask], self.AntiAliasing(PhaseRange[mask], 4.5))
PhaseSlope[i] = slope
PhaseInter[i] = intercept
except:
return self.StopWindEstimation(error_code = 6)
else:
return self.StopWindEstimation(error_code = 7)
else:
return self.StopWindEstimation(error_code = 8)
'''*** Constants A-H correspond to the convention as in Briggs and Vincent 1992 ***'''
'''Getting constant C'''
cC=(Fij*numpy.pi)**2
'''****** Getting constants F and G ******'''
MijEijNij = numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
MijResult0 = (-PhaseSlope[1] * cC) / (2*numpy.pi)
MijResult1 = (-PhaseSlope[2] * cC) / (2*numpy.pi)
MijResults = numpy.array([MijResult0,MijResult1])
(cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)
'''****** Getting constants A, B and H ******'''
W01 = numpy.nanmax( FitGauss01 )
W02 = numpy.nanmax( FitGauss02 )
W12 = numpy.nanmax( FitGauss12 )
WijResult0 = ((cF * Xi01 + cG * Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi / cC))
WijResult1 = ((cF * Xi02 + cG * Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi / cC))
WijResult2 = ((cF * Xi12 + cG * Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi / cC))
WijResults = numpy.array([WijResult0, WijResult1, WijResult2])
WijEijNij = numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
(cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)
VxVy = numpy.array([[cA,cH],[cH,cB]])
VxVyResults = numpy.array([-cF,-cG])
(Vx,Vy) = numpy.linalg.solve(VxVy, VxVyResults)
Vzon = Vy
Vmer = Vx
# Vmag=numpy.sqrt(Vzon**2+Vmer**2) # unused
# Vang=numpy.arctan2(Vmer,Vzon) # unused
''' Using frequency as the abscissa. Due to the three channels, the off-zenith angle is zero
and Vrad equals Vver. Formula taken from Briggs 92, figure 4.
'''
if numpy.abs( popt[1] ) < 3.5 and len(FrecRange) > 4:
Vver = 0.5 * radarWavelength * popt[1] * 100 # *100 to get cm (/s)
else:
Vver = numpy.NaN
error_code = 0
return Vzon, Vmer, Vver, error_code
class SpectralMoments(Operation):
'''
Function SpectralMoments()
Calculates moments (power, mean, standard deviation) and SNR of the signal
Type of dataIn: Spectra
Configuration Parameters:
dirCosx : Cosine director in X axis
dirCosy : Cosine director in Y axis
elevation :
azimuth :
Input:
channelList : simple channel list to select e.g. [2,3,7]
self.dataOut.data_pre : Spectral data
self.dataOut.abscissaList : List of frequencies
self.dataOut.noise : Noise level per channel
Affected:
self.dataOut.moments : Parameters per channel
self.dataOut.data_snr : SNR per channel
'''
def run(self, dataOut):
data = dataOut.data_pre[0]
absc = dataOut.abscissaList[:-1]
noise = dataOut.noise
nChannel = data.shape[0]
data_param = numpy.zeros((nChannel, 4, data.shape[2]))
for ind in range(nChannel):
data_param[ind,:,:] = self.__calculateMoments( data[ind,:,:] , absc , noise[ind] )
dataOut.moments = data_param[:,1:,:]
dataOut.data_snr = data_param[:,0]
dataOut.data_pow = data_param[:,1]
dataOut.data_dop = data_param[:,2]
dataOut.data_width = data_param[:,3]
return dataOut
def __calculateMoments(self, oldspec, oldfreq, n0,
nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
if (nicoh is None): nicoh = 1
if (graph is None): graph = 0
if (smooth is None): smooth = 0
elif (smooth < 3): smooth = 0
if (type1 is None): type1 = 0
if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
if (snrth is None): snrth = -3
if (dc is None): dc = 0
if (aliasing is None): aliasing = 0
if (oldfd is None): oldfd = 0
if (wwauto is None): wwauto = 0
if (n0 < 1.e-20): n0 = 1.e-20
freq = oldfreq
vec_power = numpy.zeros(oldspec.shape[1])
vec_fd = numpy.zeros(oldspec.shape[1])
vec_w = numpy.zeros(oldspec.shape[1])
vec_snr = numpy.zeros(oldspec.shape[1])
# oldspec = numpy.ma.masked_invalid(oldspec)
for ind in range(oldspec.shape[1]):
spec = oldspec[:,ind]
aux = spec*fwindow
max_spec = aux.max()
m = aux.tolist().index(max_spec)
#Smooth
if (smooth == 0):
spec2 = spec
else:
spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
# Moments calculation
bb = spec2[numpy.arange(m,spec2.size)]
bb = (bb<n0).nonzero()
bb = bb[0]
ss = spec2[numpy.arange(0,m + 1)]
ss = (ss<n0).nonzero()
ss = ss[0]
if (bb.size == 0):
bb0 = spec.size - 1 - m
else:
bb0 = bb[0] - 1
if (bb0 < 0):
bb0 = 0
if (ss.size == 0):
ss1 = 1
else:
ss1 = max(ss) + 1
if (ss1 > m):
ss1 = m
valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
power = ((spec2[valid] - n0) * fwindow[valid]).sum()
fd = ((spec2[valid]- n0)*freq[valid] * fwindow[valid]).sum() / power
w = numpy.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum() / power)
snr = (spec2.mean()-n0)/n0
if (snr < 1.e-20) :
snr = 1.e-20
vec_power[ind] = power
vec_fd[ind] = fd
vec_w[ind] = w
vec_snr[ind] = snr
return numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
#------------------ Get SA Parameters --------------------------
def GetSAParameters(self):
#SA in frequency
pairslist = self.dataOut.groupList
num_pairs = len(pairslist)
vel = self.dataOut.abscissaList
spectra = self.dataOut.data_pre
cspectra = self.dataIn.data_cspc
delta_v = vel[1] - vel[0]
#Calculating the power spectrum
spc_pow = numpy.sum(spectra, 3)*delta_v
#Normalizing Spectra
norm_spectra = spectra/spc_pow
#Calculating the norm_spectra at peak
max_spectra = numpy.max(norm_spectra, 3)
#Normalizing Cross Spectra
norm_cspectra = numpy.zeros(cspectra.shape)
for i in range(num_pairs):
norm_cspectra[i,:,:] = cspectra[i,:,:]/numpy.sqrt(spc_pow[pairslist[i][0],:]*spc_pow[pairslist[i][1],:])
max_cspectra = numpy.max(norm_cspectra,2)
max_cspectra_index = numpy.argmax(norm_cspectra, 2)
for i in range(num_pairs):
cspc_par[i,:,:] = __calculateMoments(norm_cspectra)
#------------------- Get Lags ----------------------------------
class SALags(Operation):
'''
Function GetMoments()
Input:
self.dataOut.data_pre
self.dataOut.abscissaList
self.dataOut.noise
self.dataOut.normFactor
self.dataOut.data_snr
self.dataOut.groupList
self.dataOut.nChannels
Affected:
self.dataOut.data_param
'''
def run(self, dataOut):
data_acf = dataOut.data_pre[0]
data_ccf = dataOut.data_pre[1]
normFactor_acf = dataOut.normFactor[0]
normFactor_ccf = dataOut.normFactor[1]
pairs_acf = dataOut.groupList[0]
pairs_ccf = dataOut.groupList[1]
nHeights = dataOut.nHeights
absc = dataOut.abscissaList
noise = dataOut.noise
SNR = dataOut.data_snr
nChannels = dataOut.nChannels
# pairsList = dataOut.groupList
# pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairsList, nChannels)
for l in range(len(pairs_acf)):
data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]
for l in range(len(pairs_ccf)):
data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]
dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
return
# def __getPairsAutoCorr(self, pairsList, nChannels):
#
# pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
#
# for l in range(len(pairsList)):
# firstChannel = pairsList[l][0]
# secondChannel = pairsList[l][1]
#
# #Getting autocorrelation pairs
# if firstChannel == secondChannel:
# pairsAutoCorr[firstChannel] = int(l)
#
# pairsAutoCorr = pairsAutoCorr.astype(int)
#
# pairsCrossCorr = range(len(pairsList))
# pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
#
# return pairsAutoCorr, pairsCrossCorr
def __calculateTaus(self, data_acf, data_ccf, lagRange):
lag0 = data_acf.shape[1]//2 # integer index of the zero-lag position
#Autocorrelation function
mean_acf = stats.nanmean(data_acf, axis = 0)
#Getting the TauCross index
ind_ccf = data_ccf.argmax(axis = 1)
#Getting the TauAuto index
ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
ccf_lag0 = data_ccf[:,lag0,:]
for i in range(ccf_lag0.shape[0]):
ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)
#Getting TauCross and TauAuto
tau_ccf = lagRange[ind_ccf]
tau_acf = lagRange[ind_acf]
Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])
tau_ccf[Nan1,Nan2] = numpy.nan
tau_acf[Nan1,Nan2] = numpy.nan
tau = numpy.vstack((tau_ccf,tau_acf))
return tau
def __calculateLag1Phase(self, data, lagTRange):
data1 = stats.nanmean(data, axis = 0)
lag1 = numpy.where(lagTRange == 0)[0][0] + 1
phase = numpy.angle(data1[lag1,:])
return phase
class SpectralFitting(Operation):
'''
Function GetMoments()
Input:
Output:
Variables modified:
'''
def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None):
if path != None:
sys.path.append(path)
self.dataOut.library = importlib.import_module(file)
#To be inserted as a parameter
groupArray = numpy.array(groupList)
# groupArray = numpy.array([[0,1],[2,3]])
self.dataOut.groupList = groupArray
nGroups = groupArray.shape[0]
nChannels = self.dataIn.nChannels
nHeights=self.dataIn.heightList.size
#Parameters Array
self.dataOut.data_param = None
#Set constants
constants = self.dataOut.library.setConstants(self.dataIn)
self.dataOut.constants = constants
M = self.dataIn.normFactor
N = self.dataIn.nFFTPoints
ippSeconds = self.dataIn.ippSeconds
K = self.dataIn.nIncohInt
pairsArray = numpy.array(self.dataIn.pairsList)
#List of possible combinations
listComb = list(itertools.combinations(numpy.arange(groupArray.shape[1]),2)) # materialized so it can be iterated more than once below
indCross = numpy.zeros(len(listComb), dtype = 'int')
if getSNR:
listChannels = groupArray.reshape((groupArray.size))
listChannels.sort()
noise = self.dataIn.getNoise()
self.dataOut.data_snr = self.__getSNR(self.dataIn.data_spc[listChannels,:,:], noise[listChannels])
for i in range(nGroups):
coord = groupArray[i,:]
#Input data array
data = self.dataIn.data_spc[coord,:,:]/(M*N)
data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
#Cross Spectra data array for Covariance Matrixes
ind = 0
for pairs in listComb:
pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]])
indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
ind += 1
dataCross = self.dataIn.data_cspc[indCross,:,:]/(M*N)
dataCross = dataCross**2/K
for h in range(nHeights):
#Input
d = data[:,h]
#Covariance Matrix
D = numpy.diag(d**2/K)
ind = 0
for pairs in listComb:
#Coordinates in Covariance Matrix
x = pairs[0]
y = pairs[1]
#Channel Index
S12 = dataCross[ind,:,h]
D12 = numpy.diag(S12)
#Completing Covariance Matrix with Cross Spectras
D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
ind += 1
Dinv=numpy.linalg.inv(D)
L=numpy.linalg.cholesky(Dinv)
LT=L.T
dp = numpy.dot(LT,d)
#Initial values
data_spc = self.dataIn.data_spc[coord,:,h]
if (h>0)and(error1[3]<5):
p0 = self.dataOut.data_param[i,:,h-1]
else:
p0 = numpy.array(self.dataOut.library.initialValuesFunction(data_spc, constants, i))
try:
#Least Squares
minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
# minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
#Chi square error
error0 = numpy.sum(infodict['fvec']**2)/(2*N)
#Error with Jacobian
error1 = self.dataOut.library.errorFunction(minp,constants,LT)
except:
minp = p0*numpy.nan
error0 = numpy.nan
error1 = p0*numpy.nan
#Save
if self.dataOut.data_param is None:
self.dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
self.dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
self.dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
self.dataOut.data_param[i,:,h] = minp
return
def __residFunction(self, p, dp, LT, constants):
fm = self.dataOut.library.modelFunction(p, constants)
fmp=numpy.dot(LT,fm)
return dp-fmp
def __getSNR(self, z, noise):
avg = numpy.average(z, axis=1)
SNR = (avg.T-noise)/noise
SNR = SNR.T
return SNR
def __chisq(p,chindex,hindex):
#similar to Resid but calculates CHI**2
[LT,d,fm]=setupLTdfm(p,chindex,hindex)
dp=numpy.dot(LT,d)
fmp=numpy.dot(LT,fm)
chisq=numpy.dot((dp-fmp).T,(dp-fmp))
return chisq
class WindProfiler(Operation):
__isConfig = False
__initime = None
__lastdatatime = None
__integrationtime = None
__buffer = None
__dataReady = False
__firstdata = None
n = None
def __init__(self):
Operation.__init__(self)
def __calculateCosDir(self, elev, azim):
zen = (90 - elev)*numpy.pi/180
azim = azim*numpy.pi/180
cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
signX = numpy.sign(numpy.cos(azim))
signY = numpy.sign(numpy.sin(azim))
cosDirX = numpy.copysign(cosDirX, signX)
cosDirY = numpy.copysign(cosDirY, signY)
return cosDirX, cosDirY
def __calculateAngles(self, theta_x, theta_y, azimuth):
dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
zenith_arr = numpy.arccos(dir_cosw)
azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
#
if horOnly:
A = numpy.c_[dir_cosu,dir_cosv]
else:
A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
A = numpy.asmatrix(A)
A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
return A1
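# Added illustration (assumed beam geometry, not called by the original code): for a
# three-beam DBS configuration __calculateMatA returns the least-squares pseudo-inverse
# inv(A.T * A) * A.T, so multiplying it back by A should give (close to) the identity.
def _matA_example(self):
    dir_cosu = numpy.array([0.00, 0.17, 0.00])             # assumed direction cosines
    dir_cosv = numpy.array([0.17, 0.00, 0.00])
    dir_cosw = numpy.sqrt(1. - dir_cosu**2 - dir_cosv**2)
    A1 = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horOnly=False)
    return A1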
def __correctValues(self, heiRang, phi, velRadial, SNR):
listPhi = phi.tolist()
maxid = listPhi.index(max(listPhi))
minid = listPhi.index(min(listPhi))
rango = list(range(len(phi)))
# rango = numpy.delete(rango,maxid)
heiRang1 = heiRang*math.cos(phi[maxid])
heiRangAux = heiRang*math.cos(phi[minid])
indOut = (heiRang1 < heiRangAux[0]).nonzero()
heiRang1 = numpy.delete(heiRang1,indOut)
velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
SNR1 = numpy.zeros([len(phi),len(heiRang1)])
for i in rango:
x = heiRang*math.cos(phi[i])
y1 = velRadial[i,:]
f1 = interpolate.interp1d(x,y1,kind = 'cubic')
x1 = heiRang1
y11 = f1(x1)
y2 = SNR[i,:]
f2 = interpolate.interp1d(x,y2,kind = 'cubic')
y21 = f2(x1)
velRadial1[i,:] = y11
SNR1[i,:] = y21
return heiRang1, velRadial1, SNR1
def __calculateVelUVW(self, A, velRadial):
#Matrix operation
# velUVW = numpy.zeros((velRadial.shape[1],3))
# for ind in range(velRadial.shape[1]):
# velUVW[ind,:] = numpy.dot(A,velRadial[:,ind])
# velUVW = velUVW.transpose()
velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
velUVW[:,:] = numpy.dot(A,velRadial)
return velUVW
# def techniqueDBS(self, velRadial0, dirCosx, disrCosy, azimuth, correct, horizontalOnly, heiRang, SNR0):
def techniqueDBS(self, kwargs):
"""
Function that implements Doppler Beam Swinging (DBS) technique.
Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
Direction correction (if necessary), Ranges and SNR
Output: Winds estimation (Zonal, Meridional and Vertical)
Parameters affected: Winds, height range, SNR
"""
velRadial0 = kwargs['velRadial']
heiRang = kwargs['heightList']
SNR0 = kwargs['SNR']
if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
theta_x = numpy.array(kwargs['dirCosx'])
theta_y = numpy.array(kwargs['dirCosy'])
else:
elev = numpy.array(kwargs['elevation'])
azim = numpy.array(kwargs['azimuth'])
theta_x, theta_y = self.__calculateCosDir(elev, azim)
azimuth = kwargs['correctAzimuth']
if 'horizontalOnly' in kwargs:
horizontalOnly = kwargs['horizontalOnly']
else: horizontalOnly = False
if 'correctFactor' in kwargs:
correctFactor = kwargs['correctFactor']
else: correctFactor = 1
if 'channelList' in kwargs:
channelList = kwargs['channelList']
if len(channelList) == 2:
horizontalOnly = True
arrayChannel = numpy.array(channelList)
param = param[arrayChannel,:,:]
theta_x = theta_x[arrayChannel]
theta_y = theta_y[arrayChannel]
azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
#Computation of the velocity components with DBS
winds = self.__calculateVelUVW(A,velRadial1)
return winds, heiRang1, SNR1
def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
nPairs = len(pairs_ccf)
posx = numpy.asarray(posx)
posy = numpy.asarray(posy)
#Inverse rotation to align with the azimuth
if azimuth is not None:
azimuth = azimuth*math.pi/180
posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
else:
posx1 = posx
posy1 = posy
#Distance calculation
distx = numpy.zeros(nPairs)
disty = numpy.zeros(nPairs)
dist = numpy.zeros(nPairs)
ang = numpy.zeros(nPairs)
for i in range(nPairs):
distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
ang[i] = numpy.arctan2(disty[i],distx[i])
return distx, disty, dist, ang
#Matrix calculation
# nPairs = len(pairs)
# ang1 = numpy.zeros((nPairs, 2, 1))
# dist1 = numpy.zeros((nPairs, 2, 1))
#
# for j in range(nPairs):
# dist1[j,0,0] = dist[pairs[j][0]]
# dist1[j,1,0] = dist[pairs[j][1]]
# ang1[j,0,0] = ang[pairs[j][0]]
# ang1[j,1,0] = ang[pairs[j][1]]
#
# return distx,disty, dist1,ang1
def __calculateVelVer(self, phase, lagTRange, _lambda):
Ts = lagTRange[1] - lagTRange[0]
velW = -_lambda*phase/(4*math.pi*Ts)
return velW
def __calculateVelHorDir(self, dist, tau1, tau2, ang):
nPairs = tau1.shape[0]
nHeights = tau1.shape[1]
vel = numpy.zeros((nPairs,3,nHeights))
dist1 = numpy.reshape(dist, (dist.size,1))
angCos = numpy.cos(ang)
angSin = numpy.sin(ang)
vel0 = dist1*tau1/(2*tau2**2)
vel[:,0,:] = (vel0*angCos).sum(axis = 1)
vel[:,1,:] = (vel0*angSin).sum(axis = 1)
ind = numpy.where(numpy.isinf(vel))
vel[ind] = numpy.nan
return vel
# def __getPairsAutoCorr(self, pairsList, nChannels):
#
# pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
#
# for l in range(len(pairsList)):
# firstChannel = pairsList[l][0]
# secondChannel = pairsList[l][1]
#
# #Getting autocorrelation pairs
# if firstChannel == secondChannel:
# pairsAutoCorr[firstChannel] = int(l)
#
# pairsAutoCorr = pairsAutoCorr.astype(int)
#
# pairsCrossCorr = range(len(pairsList))
# pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
#
# return pairsAutoCorr, pairsCrossCorr
# def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, lagTRange, correctFactor):
def techniqueSA(self, kwargs):
"""
Function that implements Spaced Antenna (SA) technique.
Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
Direction correction (if necessary), Ranges and SNR
Output: Winds estimation (Zonal, Meridional and Vertical)
Parameters affected: Winds
"""
position_x = kwargs['positionX']
position_y = kwargs['positionY']
azimuth = kwargs['azimuth']
if 'correctFactor' in kwargs:
correctFactor = kwargs['correctFactor']
else:
correctFactor = 1
groupList = kwargs['groupList']
pairs_ccf = groupList[1]
tau = kwargs['tau']
_lambda = kwargs['_lambda']
#Cross Correlation pairs obtained
# pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairssList, nChannels)
# pairsArray = numpy.array(pairsList)[pairsCrossCorr]
# pairsSelArray = numpy.array(pairsSelected)
# pairs = []
#
# #Wind estimation pairs obtained
# for i in range(pairsSelArray.shape[0]/2):
# ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0]
# ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0]
# pairs.append((ind1,ind2))
indtau = tau.shape[0]//2 # integer split index between tau1 and tau2
tau1 = tau[:indtau,:]
tau2 = tau[indtau:-1,:]
# tau1 = tau1[pairs,:]
# tau2 = tau2[pairs,:]
phase1 = tau[-1,:]
#---------------------------------------------------------------------
#Direct method
distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
winds = stats.nanmean(winds, axis=0)
#---------------------------------------------------------------------
#General method
# distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth)
# #Calculation of the correlation function coefficients
# F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n)
# #Velocity calculation
# winds = self.calculateVelUV(F,G,A,B,H)
#---------------------------------------------------------------------
winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
winds = correctFactor*winds
return winds
def __checkTime(self, currentTime, paramInterval, outputInterval):
dataTime = currentTime + paramInterval
deltaTime = dataTime - self.__initime
if deltaTime >= outputInterval or deltaTime < 0:
self.__dataReady = True
return
def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
'''
Function that implements winds estimation technique with detected meteors.
Input: Detected meteors, Minimum meteor quantity to wind estimation
Output: Winds estimation (Zonal and Meridional)
Parameters affected: Winds
'''
#Settings
nInt = (heightMax - heightMin)/2
nInt = int(nInt)
winds = numpy.zeros((2,nInt))*numpy.nan
#Filter errors
error = numpy.where(arrayMeteor[:,-1] == 0)[0]
finalMeteor = arrayMeteor[error,:]
#Meteor Histogram
finalHeights = finalMeteor[:,2]
hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
nMeteorsPerI = hist[0]
heightPerI = hist[1]
#Sort of meteors
indSort = finalHeights.argsort()
finalMeteor2 = finalMeteor[indSort,:]
# Calculating winds
ind1 = 0
ind2 = 0
for i in range(nInt):
nMet = nMeteorsPerI[i]
ind1 = ind2
ind2 = ind1 + nMet
meteorAux = finalMeteor2[ind1:ind2,:]
if meteorAux.shape[0] >= meteorThresh:
vel = meteorAux[:, 6]
zen = meteorAux[:, 4]*numpy.pi/180
azim = meteorAux[:, 3]*numpy.pi/180
n = numpy.cos(zen)
# m = (1 - n**2)/(1 - numpy.tan(azim)**2)
# l = m*numpy.tan(azim)
l = numpy.sin(zen)*numpy.sin(azim)
m = numpy.sin(zen)*numpy.cos(azim)
A = numpy.vstack((l, m)).transpose()
A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
windsAux = numpy.dot(A1, vel)
winds[0,i] = windsAux[0]
winds[1,i] = windsAux[1]
return winds, heightPerI[:-1]
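# Added illustration: the per-bin fit above solves an overdetermined system
# vel = A * [u, v] through the normal equations; numpy.linalg.lstsq gives the same
# least-squares answer and is used here on assumed, noise-free synthetic meteors.
def _meteorWindFitExample(self):
    zen = numpy.radians(numpy.array([30., 35., 40., 32., 38.]))       # assumed zenith angles
    azim = numpy.radians(numpy.array([10., 100., 200., 280., 330.]))  # assumed azimuths
    u_true, v_true = 12.0, -5.0                                       # assumed true winds
    l = numpy.sin(zen)*numpy.sin(azim)
    m = numpy.sin(zen)*numpy.cos(azim)
    A = numpy.vstack((l, m)).transpose()
    vel = A.dot(numpy.array([u_true, v_true]))                        # synthetic radial velocities
    winds, _, _, _ = numpy.linalg.lstsq(A, vel, rcond=None)
    return winds                                                      # approximately [12.0, -5.0]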
def techniqueNSM_SA(self, **kwargs):
metArray = kwargs['metArray']
heightList = kwargs['heightList']
timeList = kwargs['timeList']
rx_location = kwargs['rx_location']
groupList = kwargs['groupList']
azimuth = kwargs['azimuth']
dfactor = kwargs['dfactor']
k = kwargs['k']
azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
d = dist*dfactor
#Phase calculation
metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
velEst = numpy.zeros((heightList.size,2))*numpy.nan
azimuth1 = azimuth1*numpy.pi/180
for i in range(heightList.size):
h = heightList[i]
indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
metHeight = metArray1[indH,:]
if metHeight.shape[0] >= 2:
velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
iazim = metHeight[:,1].astype(int)
azimAux = numpy.asmatrix(azimuth1[iazim])
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def velocity(sigma, xs, ys, X, Y):
"""
Generalizing the one source case:
xs, ys --> (1,)
X, Y --> (nx, ny)
sigma --> (1,)
To the several sources one:
xs, ys --> (ns, 1, 1)
X, Y --> (ns, nx, ny)
sigma --> (ns, 1, 1)
"""
sigma = np.atleast_1d(sigma)
xs = np.atleast_1d(xs)
ys = np.atleast_1d(ys)
# Handling one or n sources' array dimensions
nx = np.size(xs) # Number of sources
ny = np.size(ys) # Number of sources
assert (nx == ny), "xs and ys must have the same length"
xs = xs.reshape((nx, 1, 1))
ys = ys.reshape((ny, 1, 1))
assert (xs.shape == (nx, 1, 1))
# Creates 3D arrays by repeating the 2D grid, so we
# get the velocity field for each source.
# If broadcasting works as expected, this
# may be unnecessary.
ns = np.size(sigma) # Number of sources
assert (nx == ns), "sigma and xs must have the same length"
XX = np.repeat(X[None, ...], ns, axis=0)
YY = np.repeat(Y[None, ...], ns, axis=0)
sigma = sigma.reshape((ns, 1, 1))
# Using uu given that it may have several
# velocity fields which will be superposed
# later
uu = (sigma / (2 * np.pi)) * ((XX - xs) /
((XX - xs)**2 + (YY - ys)**2))
vv = (sigma / (2 * np.pi)) * ((YY - ys) /
((XX - xs)**2 + (YY - ys)**2))
assert (uu.shape == XX.shape)
assert (vv.shape == XX.shape)
# Superposing the velocity solutions
# for each source
u = np.sum(uu, axis=0, dtype=float)
v = np.sum(vv, axis=0, dtype=float)
return u, v
def stream_function(sigma, xs, ys, X, Y):
"""
Generalizing the one source case:
xs, ys --> (1,)
X, Y --> (nx, ny)
sigma --> (1,)
To the several sources one:
xs, ys --> (ns, 1, 1)
X, Y --> (ns, nx, ny)
sigma --> (ns, 1, 1)
"""
sigma = np.atleast_1d(sigma)
xs = np.atleast_1d(xs)
ys = np.atleast_1d(ys)
# Handling one or n sources' array dimensions
nx = np.size(xs) # Number of sources
ny = np.size(ys) # Number of sources
assert (nx == ny), "xs and ys must have the same length"
xs = xs.reshape((nx, 1, 1))
ys = ys.reshape((ny, 1, 1))
# Creates 3D arrays by repeating the 2D grid, so we
# get the stream function for each source.
# If broadcasting works as expected, this
# may be unnecessary.
ns = np.size(sigma) # Number of sources
assert (nx == ns), "sigma and xs must have the same length"
XX = np.repeat(X[None, ...], ns, axis=0)
YY = np.repeat(Y[None, ...], ns, axis=0)
sigma = sigma.reshape((ns, 1, 1))
psi = (sigma / (2 * np.pi)) * np.arctan2((YY - ys), (XX - xs))
assert (psi.shape == XX.shape)
# Superposing the stream function solutions
# for each source
psi = np.sum(psi, axis=0, dtype=float)
return psi
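# Example usage (an added sketch; the grid extents and source strengths below are
# assumptions): superpose a source and a sink on a Cartesian grid and evaluate both
# the velocity field and the stream function defined above.
def example_two_sources():
    x_start, x_end, y_start, y_end = -2.0, 2.0, -1.0, 1.0
    X, Y = np.meshgrid(np.linspace(x_start, x_end, 100),
                       np.linspace(y_start, y_end, 50))
    sigma = np.array([5.0, -5.0])      # a source and a sink of equal strength
    xs = np.array([-0.5, 0.5])
    ys = np.array([0.0, 0.0])
    u, v = velocity(sigma, xs, ys, X, Y)
    psi = stream_function(sigma, xs, ys, X, Y)
    return X, Y, u, v, psi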
def plot_velocity(u, v, psi, x_start, x_end, y_start, y_end):
size = 10
plt.figure(figsize=(size, (y_end - y_start) / (x_end - x_start) * size))
plt.grid(True)
plt.xlabel('x', fontsize=16)
plt.ylabel('y', fontsize=16)
plt.xlim(x_start, x_end)
plt.ylim(y_start, y_end)
plt.streamplot(X, Y, u, v, density=2, linewidth=1,
arrowsize=1, arrowstyle='->')
plt.contour(X, Y, psi,
colors='#CD2305', linewidths=2, linestyles='solid')
plt.scatter(xs, ys, color='#CD2305', s=10, marker='o')
def plot_cp(cp, x_start, x_end, y_start, y_end):
size = 10
plt.figure(figsize=(1.1 * size,
(y_end - y_start) / (x_end - x_start) * size))
plt.xlabel('x', fontsize=16)
plt.ylabel('y', fontsize=16)
plt.xlim(x_start, x_end)
plt.ylim(y_start, y_end)
levels = np.linspace(np.min(cp), np.max(cp))
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import poisson, norm, bernoulli, expon, uniform, beta, gamma, multinomial
from scipy.special import digamma
import random
from scipy.special import gamma as gamma_function
from scipy.special import gammaln
from scipy.special import factorial
from scipy.special import beta as beta_function
from scipy.stats import dirichlet
def beta_function_k(alphas):
return np.prod([gamma_function(alpha1) for alpha1 in alphas])/gamma_function(np.sum(alphas))
def log_beta_function_k(alphas):
return np.sum([gammaln(alpha1) for alpha1 in alphas])-gammaln(np.sum(alphas))
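# Added sanity check (illustrative only): the product-based and gammaln-based
# implementations above should agree for moderate alpha values.
def _check_log_beta(alphas=np.array([0.5, 1.5, 2.0])):
    return np.allclose(np.log(beta_function_k(alphas)), log_beta_function_k(alphas))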
# test for pies
def logprob_pie(N,pie,alpha,beta,data):
K=len(pie)
dj=alpha.shape[1]
db=beta.shape[1]
logprob=0.0
for n in range(N):
jn=np.argmax(data[n][0])
bn=np.argmax(data[n][1])
logprob+=np.log(pie[0]*alpha[0,jn]*beta[0,bn]+pie[1]*alpha[1,jn]*beta[1,bn])#np.log(alpha[0,jn]*beta[0,bn])-np.log(pie[0]*alpha[0,jn]*beta[0,bn]+pie[1]*alpha[1,jn]*beta[1,bn])
return logprob
def test_for_pies(N,pie_vals,alpha,beta,data):
logprobs=np.zeros(len(pie_vals))
for nval, pie_val in enumerate(pie_vals):
logprobs[nval]=logprob_pie(N,pie_val,alpha,beta,data)
return logprobs
# parameter estimations
# benchmark
def benchmark(data,alphas_MC,betas_MC,f1_MC):
N=len(data)
K=alphas_MC.shape[0]
dj=alphas_MC.shape[1]
db=betas_MC.shape[1]
alphas_tuned=np.zeros(alphas_MC.shape)
betas_tuned=np.zeros(betas_MC.shape)
for j in range(dj):
alphas_tuned[:,j]=(np.sum(data[:,0]==j+min(data[:,0]))/(N*(1-f1_MC)*alphas_MC[0,j]+N*f1_MC*alphas_MC[1,j]))*alphas_MC[:,j]
for b in range(db):
betas_tuned[:,b]=(np.sum(data[:,1]==b+min(data[:,1]))/(N*(1-f1_MC)*betas_MC[0,b]+N*f1_MC*betas_MC[1,b]))*betas_MC[:,b]
return alphas_tuned, betas_tuned
# EM
def do_E_step_EM(pies,alphas,betas,data):
N=len(data)
K=len(pies)
gammas=np.zeros((N,K))
for n in range(N):
gammas_aux=np.zeros(K)
for k in range(K):
gammas_aux[k]=pies[k]*multinomial(n=1,p=alphas[k]).pmf(data[n][0])*multinomial(n=1,p=betas[k]).pmf(data[n][1])
gammas[n]=gammas_aux/np.sum(gammas_aux)
return np.asarray(gammas)
def do_M_step_EM(gammas,data):
N=gammas.shape[0]
K=gammas.shape[1]
pies=np.zeros(K)
Nk=np.sum(gammas,axis=0)
pies=Nk/N
dj=len(data[0][0])
db=len(data[0][1])
Nkj=np.zeros((K,dj))
Nkb=np.zeros((K,db))
for n in range(N):
Nkj[:,np.argmax(data[n][0])]+=gammas[n,:]
Nkb[:,np.argmax(data[n][1])]+=gammas[n,:]
alphas=np.zeros((K,dj))
betas=np.zeros((K,db))
for k in range(K):
alphas[k]=Nkj[k,:]/Nk[k]
betas[k]=Nkb[k,:]/Nk[k]
return pies, alphas, betas
def logprob_EM(pies,alphas,betas,data):
N=len(data)
K=len(pies)
logprob=0.0
for n in range(N):
aux=0.0
for k in range(K):
aux+=pies[k]*multinomial(n=1,p=alphas[k]).pmf(data[n][0])*multinomial(n=1,p=betas[k]).pmf(data[n][1])
logprob+=np.log(aux)/N # normalize per data point
return logprob
def do_EM_algorithm(N,K,T, thresh,pie_0,alpha_0,beta_0, data):
pies=np.zeros((T+1,K))
dj=alpha_0.shape[1]
db=beta_0.shape[1]
alphas=np.zeros((T+1,K,dj))
betas=np.zeros((T+1,K,db))
gammas=np.zeros((T,N,K))
probs=np.zeros(T+1)
pies[0]=pie_0
alphas[0]=alpha_0
betas[0]=beta_0
probs[0]=logprob_EM(pies[0],alphas[0],betas[0],data[:N])
for t in range(T):
#print("Starting step ", t+1)
#print("Do E Step")
gammas[t]=do_E_step_EM(pies[t],alphas[t],betas[t],data[:N])
#print("Do M Step")
pies_aux, alphas_aux, betas_aux = do_M_step_EM(gammas[t],data[:N])
#print("Getting new Likelihood")
logprob_aux=logprob_EM(pies_aux,alphas_aux,betas_aux,data[:N])
#print("Checking convergence")
if(logprob_aux-probs[t]>-thresh):
probs[t+1]=logprob_aux
pies[t+1]=pies_aux
alphas[t+1]=alphas_aux
betas[t+1]=betas_aux
else:
break
return pies, alphas, betas, gammas, probs
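# Example usage (added sketch): run do_EM_algorithm on synthetic one-hot data drawn
# from an assumed two-component multinomial mixture; every size, probability and the
# random seed below are illustrative assumptions, not values from the original study.
def _run_em_on_synthetic_data(N=200, T=50, thresh=1e-6, seed=0):
    rng = np.random.default_rng(seed)
    K, dj, db = 2, 3, 4
    pie_true = np.array([0.4, 0.6])
    alpha_true = np.array([[0.7, 0.2, 0.1], [0.1, 0.3, 0.6]])
    beta_true = np.array([[0.4, 0.3, 0.2, 0.1], [0.1, 0.2, 0.3, 0.4]])
    data = []
    for _ in range(N):
        k = rng.choice(K, p=pie_true)                      # latent component
        data.append((rng.multinomial(1, alpha_true[k]),    # one-hot draw of length dj
                     rng.multinomial(1, beta_true[k])))    # one-hot draw of length db
    pie_0 = np.array([0.5, 0.5])
    alpha_0 = rng.dirichlet(np.ones(dj), size=K)           # random start to break symmetry
    beta_0 = rng.dirichlet(np.ones(db), size=K)
    return do_EM_algorithm(N, K, T, thresh, pie_0, alpha_0, beta_0, data)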
# EM + priors
def do_E_step_EM_priors(pies,alphas,betas,data):
N=len(data)
K=len(pies)
gammas=np.zeros((N,K))
for n in range(N):
gammas_aux=np.zeros(K)
for k in range(K):
gammas_aux[k]=pies[k]*multinomial(n=1,p=alphas[k]).pmf(data[n][0])*multinomial(n=1,p=betas[k]).pmf(data[n][1])
gammas[n]=gammas_aux/np.sum(gammas_aux)
return np.asarray(gammas)
def do_M_step_EM_priors(gammas,eta_pie,eta_alpha,eta_beta,data):
N=gammas.shape[0]
K=gammas.shape[1]
pies=np.zeros(K)
Nk=np.sum(gammas,axis=0)
eta_pie_0=np.sum(eta_pie)
eta_alpha_0=np.sum(eta_alpha,axis=1)
eta_beta_0=np.sum(eta_beta,axis=1)
pies=np.asarray(list(map(lambda k: (Nk[k]+eta_pie[k]-1.0)/(N+eta_pie_0-K), range(K))))
dj=len(data[0][0])
db=len(data[0][1])
Nkj=np.zeros((K,dj))
Nkb=np.zeros((K,db))
for n in range(N):
Nkj[:,np.argmax(data[n][0])]+=gammas[n,:]
Nkb[:,np.argmax(data[n][1])]+=gammas[n,:]
alphas=np.zeros((K,dj))
betas=np.zeros((K,db))
for k in range(K):
alphas[k]=np.asarray(list(map(lambda j: (Nkj[k,j]+eta_alpha[k,j]-1.0)/(Nk[k]+eta_alpha_0[k]-dj), range(dj))))
betas[k]=np.asarray(list(map(lambda b: (Nkb[k,b]+eta_beta[k,b]-1.0)/(Nk[k]+eta_beta_0[k]-db), range(db))))
return pies, alphas, betas
def logprob_EM_priors(pies,alphas,betas,data):
N=len(data)
K=len(pies)
logprob=0.0
for n in range(N):
aux=0.0
for k in range(K):
aux+=pies[k]*multinomial(n=1,p=alphas[k]).pmf(data[n][0])*multinomial(n=1,p=betas[k]).pmf(data[n][1])
logprob+=np.log(aux)/N # normalize per data point
return logprob
def do_EM_priors_algorithm(N,K,T, thresh,pie_0,alpha_0,beta_0, eta_pie, eta_alpha, eta_beta, data):
pies=np.zeros((T+1,K))
dj=alpha_0.shape[1]
db=beta_0.shape[1]
alphas=np.zeros((T+1,K,dj))
betas=np.zeros((T+1,K,db))
gammas=np.zeros((T,N,K))
probs=np.zeros(T+1)
pies[0]=pie_0
alphas[0]=alpha_0
betas[0]=beta_0
probs[0]=logprob_EM_priors(pies[0],alphas[0],betas[0],data[:N])
for t in range(T):
#print("Starting step ", t+1)
#print("Do E Step")
gammas[t]=do_E_step_EM_priors(pies[t],alphas[t],betas[t],data[:N])
#print("Do M Step")
pies_aux, alphas_aux, betas_aux = do_M_step_EM_priors(gammas[t],eta_pie, eta_alpha, eta_beta, data[:N])
#print("Getting new Likelihood")
logprob_aux=logprob_EM_priors(pies_aux,alphas_aux,betas_aux,data[:N])
#print("Checking convergence")
if(logprob_aux-probs[t]>-thresh):
probs[t+1]=logprob_aux
pies[t+1]=pies_aux
alphas[t+1]=alphas_aux
betas[t+1]=betas_aux
else:
break
return pies, alphas, betas, gammas, probs
# VB
def do_E_step_VB(gamma_pie,gamma_alpha,gamma_beta,data):
N=len(data)
K=len(gamma_pie)
rho=np.zeros((N,K))
r=np.zeros((N,K))
for n in range(N):
jn=np.argmax(data[n][0])
bn=np.argmax(data[n][1])
for k in range(K):
rho[n,k]=np.exp(digamma(gamma_pie[k])-digamma(np.sum(gamma_pie))+digamma(gamma_alpha[k,jn])-digamma(np.sum(gamma_alpha[k]))+digamma(gamma_beta[k,bn])-digamma(np.sum(gamma_beta[k])))
r[n,:]=rho[n,:]/(np.sum(rho[n]))
return r
def do_M_step_VB(eta_pie,eta_alpha,eta_beta,r,data):
N=len(data)
K=len(eta_pie)
dj=eta_alpha.shape[1]
db=eta_beta.shape[1]
Nkj=np.zeros((K,dj))
Nkb=np.zeros((K,db))
Nk=np.sum(r,axis=0)
for n in range(N):
jn=np.argmax(data[n][0])
bn=np.argmax(data[n][1])
Nkj[:,jn]+=r[n,:]
Nkb[:,bn]+=r[n,:]
if(np.allclose(np.sum(Nkj,axis=1),Nk) and np.allclose(np.sum(Nkb,axis=1),Nk)):
return eta_pie+Nk, eta_alpha + Nkj, eta_beta + Nkb
else:
return "Error"
def ELBO(eta_pie,eta_alpha,eta_beta,gamma_pie, gamma_alpha, gamma_beta, r,data):
score = 0
N=len(data)
K=len(eta_pie)
dj=eta_alpha.shape[1]
db=eta_beta.shape[1]
Nkj=np.zeros((K,dj))
Nkb=np.zeros((K,db))
Nk=np.sum(r,axis=0)
for n in range(N):
jn=np.argmax(data[n][0])
bn=np.argmax(data[n][1])
Nkj[:,jn]+=r[n,:]
Nkb[:,bn]+=r[n,:]
#E[log(p(X|Z,alpha,beta))]
for k in range(K):
#tmp1=Nk[k]*(digamma(gamma_pie[k])-digamma(np.sum(gamma_pie)))
tmp1=0.0
tmp2=0.0
for j in range(dj):
tmp2+=Nkj[k,j]*(digamma(gamma_alpha[k,j])-digamma(np.sum(gamma_alpha[k])))
tmp3=0.0
for b in range(db):
tmp3+=Nkb[k,b]*(digamma(gamma_beta[k,b])-digamma(np.sum(gamma_beta[k])))
score+=tmp1+tmp2+tmp3
#E[log(p(z|eta)-log(q(Z|gamma))]
tmp1=0.0
for n in range(N):
for k in range(K):
tmp1+=r[n,k]*(digamma(gamma_pie[k])-digamma(np.sum(gamma_pie))-np.log(r[n,k]))
score+=tmp1
#compensate
#score=score/N
#E[log(p(pi|eta))-log(q(pi|gamma))]
#tmp1=0.0
#tmp1=np.log(beta_function_k(gamma_pie))-np.log(beta_function_k(eta_pie))
tmp1=log_beta_function_k(gamma_pie)-log_beta_function_k(eta_pie)
for k in range(K):
tmp1+=(eta_pie[k]-gamma_pie[k])*(digamma(gamma_pie[k])-digamma(np.sum(gamma_pie)))
score+=tmp1
#E[log(p(alpha|eta))-log(q(alpha|gamma))]
tmp1=0.0
for k in range(K):
#tmp2=0.0
#tmp2=np.log(beta_function_k(gamma_alpha[k]))-np.log(beta_function_k(eta_alpha[k]))
tmp2=log_beta_function_k(gamma_alpha[k])-log_beta_function_k(eta_alpha[k])
for j in range(dj):
tmp2+=(eta_alpha[k,j]-gamma_alpha[k,j])*(digamma(gamma_alpha[k,j])-digamma(np.sum(gamma_alpha[k])))
tmp1+=tmp2
score+=tmp1
#E[log(p(beta|eta))-log(q(beta|gamma))]
tmp1=0.0
for k in range(K):
#tmp2=0.0
#tmp2=np.log(beta_function_k(gamma_beta[k]))-np.log(beta_function_k(eta_beta[k]))
tmp2=log_beta_function_k(gamma_beta[k])-log_beta_function_k(eta_beta[k])
for b in range(db):
tmp2+=(eta_beta[k,b]-gamma_beta[k,b])*(digamma(gamma_beta[k,b])-digamma(np.sum(gamma_beta[k])))
tmp1+=tmp2
score+=tmp1
return score
def do_VB_algorithm(N,K,T, thresh,gamma_pie_0,gamma_alpha_0,gamma_beta_0, eta_pie, eta_alpha, eta_beta, X):
pies=np.zeros((T+1,K))
dj=gamma_alpha_0.shape[1]
db=gamma_beta_0.shape[1]
## posterior definition
gamma_pie=np.zeros((T+1,K))
gamma_alpha=np.zeros((T+1,K,dj))
gamma_beta=np.zeros((T+1,K,db))
rmatrix=np.zeros((T,N,K))
probs=np.zeros(T+1)
## initialize gammas
gamma_pie[0]=gamma_pie_0
gamma_alpha[0]=gamma_alpha_0
gamma_beta[0]=gamma_beta_0
for t in range(T):
#print("Starting step ", t+1)
#print("Do E Step")
rmatrix[t]=do_E_step_VB(gamma_pie[t],gamma_alpha[t],gamma_beta[t],X[:N])
#print("Calculatin logprob from E step")
probs[t]=ELBO(eta_pie,eta_alpha,eta_beta,gamma_pie[t],gamma_alpha[t],gamma_beta[t],rmatrix[t],X[:N])
#print("Do M Step")
gamma_pie_aux, gamma_alpha_aux, gamma_beta_aux = do_M_step_VB(eta_pie,eta_alpha,eta_beta,rmatrix[t],X[:N])
#print("Getting new Likelihood")
logprob_aux=ELBO(eta_pie,eta_alpha,eta_beta,gamma_pie_aux,gamma_alpha_aux,gamma_beta_aux,rmatrix[t],X[:N])
#print("Checking convergence")
#if(1.0>0.0):
#if(np.allclose(gamma_pie[t],gamma_pie_aux)==False or np.allclose(gamma_alpha[t],gamma_alpha_aux)==False or np.allclose(gamma_beta[t],gamma_beta_aux)==False or logprob_aux >= probs[t] ):
if(abs(logprob_aux) <= (1.0-thresh)*abs(probs[t]) ):
probs[t+1]=logprob_aux
gamma_pie[t+1]=gamma_pie_aux
gamma_alpha[t+1]=gamma_alpha_aux
gamma_beta[t+1]=gamma_beta_aux
else:
break
return gamma_pie, gamma_alpha, gamma_beta, rmatrix, probs
# Gibbs sampler
def one_Gibbs_step(Zini,data,eta_pie,eta_alpha,eta_beta):
N=Zini.shape[0]
K=Zini.shape[1]
dj=len(data[0][0])
db=len(data[0][1])
Nk=np.sum(Zini,axis=0)
Nkj=np.zeros((K,dj))
Nkb=np.zeros((K,db))
for n in range(N):
jn=np.argmax(data[n][0])
bn=np.argmax(data[n][1])
Nkj[:,jn]+=Zini[n]
Nkb[:,bn]+=Zini[n]
pie=dirichlet.rvs(alpha=eta_pie+Nk,size=1)[0]
alpha0=dirichlet.rvs(alpha=eta_alpha[0]+Nkj[0],size=1)[0]
alpha1=dirichlet.rvs(alpha=eta_alpha[1]+Nkj[1],size=1)[0]
beta0=dirichlet.rvs(alpha=eta_beta[0]+Nkb[0],size=1)[0]
beta1=dirichlet.rvs(alpha=eta_beta[1]+Nkb[1],size=1)[0]
alpha=np.hstack([alpha0.reshape(-1,1),alpha1.reshape(-1,1)]).T
beta=np.hstack([beta0.reshape(-1,1),beta1.reshape(-1,1)]).T
Zfin = np.zeros((N,K))
import numpy as np
from numpy.testing import assert_
from ._bvcs import bvcs as _bvcs
from ._bvlag import bvlag as _bvlag
from ._bvtcg import bvtcg as _bvtcg
from ._cpqp import cpqp as _cpqp
from ._lctcg import lctcg as _lctcg
from ._nnls import nnls as _nnls
def bvcs(xpt, kopt, gq, curv, xl, xu, delta, *args, **kwargs):
"""
Evaluate Cauchy step on the absolute value of a Lagrange polynomial, subject
to bound constraints on its coordinates and its length.
Parameters
----------
xpt : numpy.ndarray, shape (npt, n)
Set of points. Each row of `xpt` stores the coordinates of a point.
kopt : int
Index of the point from which the Cauchy step is evaluated.
gq : array_like, shape (n,)
Gradient of the Lagrange polynomial of the points in `xpt` (not
necessarily the `kopt`-th one) at ``xpt[kopt, :]``.
curv : callable
Function providing the curvature of the Lagrange polynomial.
``curv(x, *args) -> float``
where ``x`` is an array with shape (n,) and ``args`` is the tuple of
fixed parameters needed to specify the function.
xl : array_like, shape (n,)
Lower-bound constraints on the decision variables. Use ``-numpy.inf`` to
disable the bounds on some variables.
xu : array_like, shape (n,)
Upper-bound constraints on the decision variables. Use ``numpy.inf`` to
disable the bounds on some variables.
delta : float
Upper bound on the length of the Cauchy step.
*args : tuple, optional
Parameters to forward to the curvature function.
Returns
-------
step : numpy.ndarray, shape (n,)
Cauchy step.
cauchy : float
Square of the Lagrange polynomial evaluation at the Cauchy point.
Other Parameters
----------------
debug : bool, optional
Whether to make debugging tests during the execution, which is
not recommended in production (the default is False).
Raises
------
AssertionError
The vector ``xpt[kopt, :]`` is not feasible (only in debug mode).
See Also
--------
bvlag : Bounded variable absolute Lagrange polynomial maximization
Notes
-----
The method is adapted from the ALTMOV algorithm [1]_, and the vector
``xpt[kopt, :]`` must be feasible.
References
----------
.. [1] <NAME>. The BOBYQA algorithm for bound constrained
optimization without derivatives. Tech. rep. DAMTP 2009/NA06. Cambridge,
UK: Department of Applied Mathematics and Theoretical Physics, University
of Cambridge, 2009.
"""
xpt = np.atleast_2d(xpt)
if xpt.dtype.kind in np.typecodes['AllInteger']:
xpt = np.asarray(xpt, dtype=float)
xpt = np.asfortranarray(xpt)
gq = np.atleast_1d(gq).astype(float)
xl = np.atleast_1d(xl).astype(float)
xu = np.atleast_1d(xu).astype(float)
# Check the sizes of the inputs.
assert_(xpt.ndim == 2)
assert_(gq.ndim == 1)
assert_(xl.ndim == 1)
assert_(xu.ndim == 1)
assert_(gq.size == xpt.shape[1])
assert_(xl.size == xpt.shape[1])
assert_(xu.size == xpt.shape[1])
def curv_safe(x):
cx = np.float64(curv(x, *args))
return cx
debug = kwargs.get('debug', False)
step, cauchy = _bvcs(xpt, kopt, gq, curv_safe, xl, xu, delta, debug) # noqa
return np.array(step, dtype=float), cauchy
def bvlag(xpt, kopt, klag, gq, xl, xu, delta, alpha, **kwargs):
"""
Estimate a point that maximizes a lower bound on the denominator of the
updating formula, subject to bound constraints on its coordinates and its
length.
Parameters
----------
xpt : numpy.ndarray, shape (npt, n)
Set of points. Each row of `xpt` stores the coordinates of a point.
kopt : int
Index of a point in `xpt`. The estimated point will lie on a line
joining ``xpt[kopt, :]`` to another point in `xpt`.
klag : int
Index of the point in `xpt`.
gq : array_like, shape (n,)
Gradient of the `klag`-th Lagrange polynomial at ``xpt[kopt, :]``.
xl : array_like, shape (n,)
Lower-bound constraints on the decision variables. Use ``-numpy.inf`` to
disable the bounds on some variables.
xu : array_like, shape (n,)
Upper-bound constraints on the decision variables. Use ``numpy.inf`` to
disable the bounds on some variables.
delta : float
Upper bound on the length of the step.
alpha : float
Real parameter.
Returns
-------
step : numpy.ndarray, shape (n,)
Step from ``xpt[kopt, :]`` towards the estimated point.
Other Parameters
----------------
debug : bool, optional
Whether to make debugging tests during the execution, which is
not recommended in production (the default is False).
Raises
------
AssertionError
The vector ``xpt[kopt, :]`` is not feasible (only in debug mode).
See Also
--------
bvcs : Bounded variable Cauchy step
Notes
-----
The denominator of the updating formula is given in Equation (3.9) of [2]_,
and the parameter `alpha` is the referred in Equation (4.12) of [1]_.
References
----------
.. [1] <NAME>. "The NEWUOA software for unconstrained optimization
without derivatives." In: Large-Scale Nonlinear Optimization. Ed. by <NAME> and <NAME>. New York, NY, US: Springer, 2006, pp. 255-–297.
.. [2] <NAME>. The BOBYQA algorithm for bound constrained
optimization without derivatives. Tech. rep. DAMTP 2009/NA06. Cambridge,
UK: Department of Applied Mathematics and Theoretical Physics, University
of Cambridge, 2009.
"""
xpt = np.atleast_2d(xpt).astype(float)
xpt = np.asfortranarray(xpt)
gq = np.atleast_1d(gq)
if gq.dtype.kind in np.typecodes['AllInteger']:
gq = np.asarray(gq, dtype=float)
xl = np.atleast_1d(xl).astype(float)
xu = np.atleast_1d(xu).astype(float)
# Check the sizes of the inputs.
assert_(xpt.ndim == 2)
assert_(gq.ndim == 1)
assert_(xl.ndim == 1)
assert_(xu.ndim == 1)
assert_(gq.size == xpt.shape[1])
assert_(xl.size == xpt.shape[1])
assert_(xu.size == xpt.shape[1])
debug = kwargs.get('debug', False)
step = _bvlag(xpt, kopt, klag, gq, xl, xu, delta, alpha, debug) # noqa
return np.array(step, dtype=float)
def bvtcg(xopt, gq, hessp, xl, xu, delta, *args, **kwargs):
"""
Minimize approximately a quadratic function subject to bound and
trust-region constraints using a truncated conjugate gradient.
Parameters
----------
xopt : numpy.ndarray, shape (n,)
Point around which the Taylor expansions of the quadratic function is
defined.
gq : array_like, shape (n,)
Gradient of the quadratic function at `xopt`.
hessp : callable
Function providing the product of the Hessian matrix of the quadratic
function with any vector.
``hessp(x, *args) -> array_like, shape(n,)``
where ``x`` is an array with shape (n,) and `args` is a tuple of
parameters to forward to the objective function. It is assumed that the
Hessian matrix implicitly defined by `hessp` is symmetric, but not
necessarily positive semidefinite.
xl : array_like, shape (n,)
Lower-bound constraints on the decision variables. Use ``-numpy.inf`` to
disable the bounds on some variables.
xu : array_like, shape (n,)
Upper-bound constraints on the decision variables. Use ``numpy.inf`` to
disable the bounds on some variables.
delta : float
Upper bound on the length of the step from `xopt`.
*args : tuple, optional
Parameters to forward to the Hessian product function.
Returns
-------
step : numpy.ndarray, shape (n,)
Step from `xopt` towards the estimated point.
Other Parameters
----------------
debug : bool, optional
Whether to make debugging tests during the execution, which is
not recommended in production (the default is False).
Raises
------
ValueError
The vector `xopt` is not feasible (only in debug mode).
See Also
--------
cpqp : Convex piecewise quadratic programming
lctcg : Linear constrained truncated conjugate gradient
nnls : Nonnegative least squares
Notes
-----
The method is adapted from the TRSBOX algorithm [1]_.
References
----------
.. [1] <NAME>. The BOBYQA algorithm for bound constrained
optimization without derivatives. Tech. rep. DAMTP 2009/NA06. Cambridge,
UK: Department of Applied Mathematics and Theoretical Physics, University
of Cambridge, 2009.
"""
xopt = np.atleast_1d(xopt)
if xopt.dtype.kind in np.typecodes['AllInteger']:
xopt = np.asarray(xopt, dtype=float)
gq = np.atleast_1d(gq).astype(float)
xl = np.atleast_1d(xl).astype(float)
xu = np.atleast_1d(xu).astype(float)
# Check the sizes of the inputs.
assert_(xopt.ndim == 1)
assert_(gq.ndim == 1)
assert_(xl.ndim == 1)
assert_(xu.ndim == 1)
assert_(gq.size == xopt.size)
assert_(xl.size == xopt.size)
assert_(xu.size == xopt.size)
def hessp_safe(x):
hx = np.atleast_1d(hessp(x, *args))
if hx.dtype.kind in np.typecodes['AllInteger']:
hx = np.asarray(hx, dtype=np.float64)
return hx
debug = kwargs.get('debug', False)
step = _bvtcg(xopt, gq, hessp_safe, xl, xu, delta, debug) # noqa
return np.array(step, dtype=float)
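# Example usage (added sketch): minimize a simple quadratic model around `xopt` inside
# a box and a trust region with bvtcg. This assumes the compiled `_bvtcg` backend of
# the package has been built; all numbers below are illustrative only.
def _bvtcg_example():
    xopt = np.zeros(3)
    gq = np.array([1.0, -2.0, 0.5])     # gradient of the model at xopt
    def hessp(x):
        return x                        # identity Hessian, i.e. hessp(x) = H @ x with H = I
    xl = np.full(3, -1.0)
    xu = np.full(3, 1.0)
    delta = 0.8
    return bvtcg(xopt, gq, hessp, xl, xu, delta)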
def cpqp(xopt, Aub, bub, Aeq, beq, xl, xu, delta, **kwargs):
r"""
Minimize approximately a convex piecewise quadratic function subject to
bound and trust-region constraints using a truncated conjugate gradient.
The method minimizes the function
.. math::
\frac{1}{2} ( \| [ \mathtt{Aub} \times x - \mathtt{bub} ]_+\|_2^2 +
\| \mathtt{Aeq} \times x - \mathtt{beq} \|_2^2 ),
where :math:`[ \cdot ]_+` denotes the componentwise positive part operator.
Parameters
----------
xopt : numpy.ndarray, shape (n,)
Center of the trust-region constraint.
Aub : array_like, shape (mlub, n)
Matrix `Aub` as shown above.
bub : array_like, shape (mlub,)
Vector `bub` as shown above.
Aeq : array_like, shape (mleq, n)
Matrix `Aeq` as shown above.
beq : array_like, shape (meq,)
Vector `beq` as shown above.
xl : array_like, shape (n,)
Lower-bound constraints on the decision variables. Use ``-numpy.inf`` to
disable the bounds on some variables.
xu : array_like, shape (n,)
Upper-bound constraints on the decision variables. Use ``numpy.inf`` to
disable the bounds on some variables.
delta : float
Upper bound on the length of the step from `xopt`.
Returns
-------
step : numpy.ndarray, shape (n,)
Step from `xopt` towards the estimated point.
Other Parameters
----------------
debug : bool, optional
        Whether to perform debugging checks during the execution, which is
not recommended in production (the default is False).
Raises
------
AssertionError
The vector `xopt` is not feasible (only in debug mode).
See Also
--------
bvtcg : Bounded variable truncated conjugate gradient
lctcg : Linear constrained truncated conjugate gradient
nnls : Nonnegative least squares
Notes
-----
The method is adapted from the TRSTEP algorithm [1]_. To cope with the
convex piecewise quadratic objective function, the method minimizes
.. math::
\frac{1}{2} ( \| \mathtt{Aeq} \times x - \mathtt{beq} \|_2^2 +
\| y \|_2^2 )
subject to the original constraints, where the slack variable :math:`y` is
lower bounded by zero and :math:`\mathtt{Aub} \times x - \mathtt{bub}`.
References
----------
.. [1] <NAME>. "On fast trust region methods for quadratic models
with linear constraints." In: Math. Program. Comput. 7 (2015), pp.
       237–267.
"""
xopt = np.atleast_1d(xopt)
if xopt.dtype.kind in np.typecodes['AllInteger']:
xopt = np.asarray(xopt, dtype=float)
Aub = np.atleast_2d(Aub).astype(float)
Aub = np.asfortranarray(Aub)
bub = np.atleast_1d(bub).astype(float)
Aeq = np.atleast_2d(Aeq).astype(float)
    Aeq = np.asfortranarray(Aeq)
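# Illustrative sketch (assumed helper, not part of the original module):
# evaluates the convex piecewise quadratic objective stated in the cpqp
# docstring, 0.5 * (||[Aub @ x - bub]_+||^2 + ||Aeq @ x - beq||^2).
def _example_cpqp_objective(x, Aub, bub, Aeq, beq):
    resid_ub = np.maximum(0.0, np.dot(Aub, x) - bub)  # componentwise positive part
    resid_eq = np.dot(Aeq, x) - beq
    return 0.5 * (np.inner(resid_ub, resid_ub) + np.inner(resid_eq, resid_eq))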
"""
Functions that aid testing in various ways. A typical use would be::
lowcore = create_named_configuration('LOWBD2-CORE')
times = numpy.linspace(-3, +3, 13) * (numpy.pi / 12.0)
frequency = numpy.array([1e8])
channel_bandwidth = numpy.array([1e7])
# Define the component and give it some polarisation and spectral behaviour
f = numpy.array([100.0])
flux = numpy.array([f])
phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
compabsdirection = SkyCoord(ra=17.0 * u.deg, dec=-36.5 * u.deg, frame='icrs', equinox='J2000')
comp = create_skycomponent(flux=flux, frequency=frequency, direction=compabsdirection,
polarisation_frame=PolarisationFrame('stokesI'))
    image_graph = create_test_image(frequency=frequency, phasecentre=phasecentre,
                                    cellsize=0.001,
                                    polarisation_frame=PolarisationFrame('stokesI'))
vis = create_visibility(lowcore, times=times, frequency=frequency,
channel_bandwidth=channel_bandwidth,
phasecentre=phasecentre, weight=1,
polarisation_frame=PolarisationFrame('stokesI'),
integration_time=1.0)
"""
import numpy
import csv
from typing import List
import astropy.units as u
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.io import fits
from astropy.wcs import WCS
from scipy import interpolate
from arl.calibration.operations import create_gaintable_from_blockvisibility, apply_gaintable
from arl.data.data_models import Configuration, Image, GainTable, Skycomponent, BlockVisibility
from arl.data.parameters import arl_path
from arl.data.polarisation import PolarisationFrame
from arl.image.operations import import_image_from_fits, create_image_from_array, \
reproject_image, create_empty_image_like
from arl.util.coordinate_support import xyz_at_latitude
from arl.visibility.base import create_blockvisibility
from arl.visibility.coalesce import convert_visibility_to_blockvisibility
from arl.imaging import predict_timeslice, predict_skycomponent_blockvisibility
from arl.data.parameters import get_parameter
import logging
log = logging.getLogger(__name__)
def create_configuration_from_file(antfile: str, name: str = None, location: EarthLocation = None,
mount: str = 'altaz',
names: str = "%d", frame: str = 'local',
diameter=35.0,
meta: dict = None,
rmax=None, **kwargs) -> Configuration:
""" Define from a file
:param names:
:param antfile: Antenna file name
:param name: Name of array e.g. 'LOWBD2'
:param location:
:param mount: mount type: 'altaz', 'xy'
:param frame: 'local' | 'global'
:param diameter: Effective diameter of station or antenna
:param meta: Any meta info
:return: Configuration
"""
    antxyz = numpy.genfromtxt(antfile, delimiter=",")
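# Illustrative sketch (assumed helper, not part of ARL): reading a
# comma-separated antenna layout with numpy.genfromtxt, as done above.
# Treating the columns as x, y, z coordinates is an assumption of this example.
def _example_read_antenna_xyz(antfile: str):
    antxyz = numpy.genfromtxt(antfile, delimiter=",")
    assert antxyz.shape[1] == 3, "each row should hold x, y, z"
    return antxyz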
"""
Tests BLAS functions. Since it supports C as well as Fortran
matrix, that leads to various combinations of matrices to test.
"""
from __future__ import print_function
from itertools import product
from functools import partial
from unittest import TestCase, skipIf
import numpy as np
from numpy.testing import (run_module_suite, assert_allclose, assert_,
assert_raises)
from pkg_resources import parse_version
import gulinalg
M = 75
N = 50
K = 100
n_batch = 8
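# Illustrative helper (not used by the tests): the batched tests below build
# reference results either by stacking per-item np.dot calls or with np.matmul
# over an added trailing axis; this sketch shows that the two are equivalent.
def _reference_batched_matvec(a, b):
    ref_loop = np.stack([np.dot(a[i], b[i]) for i in range(len(a))])
    ref_matmul = np.matmul(a, b[:, :, np.newaxis])[..., 0]
    assert_allclose(ref_loop, ref_matmul)
    return ref_matmul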
class TestInner1d(TestCase):
def test_real(self):
a = np.random.randn(N)
b = np.random.randn(N)
res = gulinalg.inner1d(a, b)
ref = np.sum(a * b)
assert_allclose(res, ref)
def test_complex(self):
a = np.random.randn(N) + 1j * np.random.randn(N)
b = np.random.randn(N) + 1j * np.random.randn(N)
res = gulinalg.inner1d(a, b)
ref = np.sum(a * b)
assert_allclose(res, ref)
def test_real_vector(self):
a = np.random.randn(n_batch, N)
b = np.random.randn(n_batch, N)
for workers in [1, -1]:
res = gulinalg.inner1d(a, b, workers=workers)
ref = np.sum(a * b, axis=-1)
assert_allclose(res, ref)
def test_complex_vector(self):
a = np.random.randn(n_batch, N) + 1j * np.random.randn(n_batch, N)
b = np.random.randn(n_batch, N) + 1j * np.random.randn(n_batch, N)
for workers in [1, -1]:
res = gulinalg.inner1d(a, b, workers=workers)
ref = np.sum(a * b, axis=-1)
assert_allclose(res, ref)
class TestDotc1d(TestCase):
def test_complex(self):
a = np.random.randn(N) + 1j * np.random.randn(N)
b = np.random.randn(N) + 1j * np.random.randn(N)
res = gulinalg.dotc1d(a, b)
ref = np.sum(np.conj(a) * b)
assert_allclose(res, ref)
def test_complex_vector(self):
a = np.random.randn(n_batch, N) + 1j * np.random.randn(n_batch, N)
b = np.random.randn(n_batch, N) + 1j * np.random.randn(n_batch, N)
for workers in [1, -1]:
res = gulinalg.dotc1d(a, b, workers=workers)
ref = np.sum(np.conj(a) * b, axis=-1)
assert_allclose(res, ref)
class TestInnerwt(TestCase):
def test_real(self):
a = np.random.randn(N)
b = np.random.randn(N)
c = np.random.randn(N)
res = gulinalg.innerwt(a, b, c)
ref = np.sum(a * b * c)
assert_allclose(res, ref)
def test_complex(self):
a = np.random.randn(N) + 1j * np.random.randn(N)
b = np.random.randn(N) + 1j * np.random.randn(N)
c = np.random.randn(N) + 1j * np.random.randn(N)
res = gulinalg.innerwt(a, b, c)
ref = np.sum(a * b * c)
assert_allclose(res, ref)
def test_real_vector(self):
a = np.random.randn(n_batch, N)
b = np.random.randn(n_batch, N)
c = np.random.randn(n_batch, N)
for workers in [1, -1]:
res = gulinalg.innerwt(a, b, c, workers=workers)
ref = np.sum(a * b * c, axis=-1)
assert_allclose(res, ref)
def test_complex_vector(self):
a = np.random.randn(n_batch, N) + 1j * np.random.randn(n_batch, N)
b = np.random.randn(n_batch, N) + 1j * np.random.randn(n_batch, N)
c = np.random.randn(n_batch, N) + 1j * np.random.randn(n_batch, N)
for workers in [1, -1]:
res = gulinalg.innerwt(a, b, c, workers=workers)
ref = np.sum(a * b * c, axis=-1)
assert_allclose(res, ref)
class TestMatvecMultiplyNoCopy(TestCase):
"""
Tests the cases that code can handle without copy-rearranging of any of
the input/output arguments.
"""
def test_matvec_multiply_c(self):
"""Multiply C layout matrix with vector"""
a = np.ascontiguousarray(np.random.randn(M, N))
b = np.random.randn(N)
res = gulinalg.matvec_multiply(a, b)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_matvec_multiply_f(self):
"""Multiply FORTRAN layout matrix with vector"""
a = np.asfortranarray(np.random.randn(M, N))
b = np.random.randn(N)
res = gulinalg.matvec_multiply(a, b)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_matvec_multiply_cv_c(self):
"""Test for explicit C array output for C layout input matrix"""
a = np.ascontiguousarray(np.random.randn(M, N))
b = np.ascontiguousarray(np.random.randn(N))
res = np.zeros(M, order='C')
gulinalg.matvec_multiply(a, b, out=res)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_matvec_multiply_fv_c(self):
"""Test for explicit C array output for FORTRAN layout input matrix"""
a = np.asfortranarray(np.random.randn(M, N))
b = np.ascontiguousarray(np.random.randn(N))
res = np.zeros(M, order='C')
gulinalg.matvec_multiply(a, b, out=res)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_matvec_multiply_cv_f(self):
"""Test for explicit FORTRAN array output for C layout input matrix"""
a = np.ascontiguousarray(np.random.randn(M, N))
b = np.ascontiguousarray(np.random.randn(N))
res = np.zeros(M, order='F')
gulinalg.matvec_multiply(a, b, out=res)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_matvec_multiply_fv_f(self):
"""Test for explicit FORTRAN array output for F layout input matrix"""
a = np.asfortranarray(np.random.randn(M, N))
b = np.ascontiguousarray(np.random.randn(N))
res = np.zeros(M, order='F')
gulinalg.matvec_multiply(a, b, out=res)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_matvec_multiply_for_complex_numbers(self):
"""Test for complex numbers input."""
a = np.array([[1 + 2j, 3 + 4j], [5 + 6j, 7 + -8j]])
b = np.array([1 - 2j, 4 + 5j])
res = gulinalg.matvec_multiply(a, b)
ref = np.dot(a, b)
assert_allclose(res, ref)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_matvec_size_zero_matrix(self):
"""Test matrix of size zero"""
a = np.random.randn(0, 2)
b = np.random.randn(2)
res = gulinalg.matvec_multiply(a, b)
ref = np.dot(a, b)
assert_allclose(res, ref)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_matvec_size_zero_vector(self):
"""Test vector of size zero"""
a = np.random.randn(2, 0)
b = np.random.randn(0)
res = gulinalg.matvec_multiply(a, b)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_matvec_size_one_vector(self):
"""Test vector of size one"""
a = np.random.randn(1, 1)
b = np.random.randn(1)
res = gulinalg.matvec_multiply(a, b)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_matvec_multiply_batch_b(self):
"""Multiply C layout matrix with stack of vectors"""
a = np.ascontiguousarray(np.random.randn(M, N))
b = np.random.randn(n_batch, N)
for workers in [1, -1]:
            res = gulinalg.matvec_multiply(a, b, workers=workers)
ref = np.matmul(a, b[:, :, np.newaxis])[..., 0]
assert_allclose(res, ref)
def test_matvec_multiply_batch_a(self):
"""Multiply C layout stack of matrices with a vector"""
a = np.ascontiguousarray(np.random.randn(n_batch, M, N))
b = np.random.randn(N)
for workers in [1, -1]:
            res = gulinalg.matvec_multiply(a, b, workers=workers)
ref = np.matmul(a, b[:, np.newaxis])[..., 0]
assert_allclose(res, ref)
def test_matvec_multiply_batch_both(self):
"""Multiply C layout stack of matrices and vectors"""
a = np.ascontiguousarray(np.random.randn(n_batch, M, N))
b = np.random.randn(n_batch, N)
for workers in [1, -1]:
            res = gulinalg.matvec_multiply(a, b, workers=workers)
ref = np.matmul(a, b[:, :, np.newaxis])[..., 0]
assert_allclose(res, ref)
def test_matvec_multiply_batch_both_out(self):
"""Multiply C layout stack of matrices and vectors"""
a = np.ascontiguousarray(np.random.randn(n_batch, M, N))
b = np.random.randn(n_batch, N)
res = np.zeros((n_batch, M), dtype=a.dtype)
for workers in [1, -1]:
            gulinalg.matvec_multiply(a, b, out=res, workers=workers)
ref = np.matmul(a, b[:, :, np.newaxis])[..., 0]
assert_allclose(res, ref)
class TestMatvecMultiplyWithCopy(TestCase):
"""
Test the cases where there is at least one operand/output that requires
copy/rearranging.
"""
def test_input_non_contiguous_1(self):
"""First input not contiguous"""
a = np.ascontiguousarray(np.random.randn(M, N, 2))[:, :, 0]
b = np.ascontiguousarray(np.random.randn(N))
res = np.zeros(M, order='C')
assert not a.flags.c_contiguous and not a.flags.f_contiguous
gulinalg.matvec_multiply(a, b, out=res)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_input_non_contiguous_2(self):
"""Second input not contiguous"""
a = np.ascontiguousarray(np.random.randn(M, N))
b = np.ascontiguousarray(np.random.randn(N, 2))[:, 0]
res = np.zeros(M, order='C')
assert not b.flags.c_contiguous and not b.flags.f_contiguous
gulinalg.matvec_multiply(a, b, out=res)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_input_non_contiguous_3(self):
"""Neither input contiguous"""
a = np.ascontiguousarray(np.random.randn(M, N, 2))[:, :, 0]
b = np.ascontiguousarray(np.random.randn(N, 2))[:, 0]
res = np.zeros(M, order='C')
assert not a.flags.c_contiguous and not a.flags.f_contiguous
assert not b.flags.c_contiguous and not b.flags.f_contiguous
gulinalg.matvec_multiply(a, b, out=res)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_output_non_contiguous(self):
"""Output not contiguous"""
a = np.ascontiguousarray(np.random.randn(M, N))
b = np.ascontiguousarray(np.random.randn(N))
res = np.zeros((M, 2), order='C')[:, 0]
assert not res.flags.c_contiguous and not res.flags.f_contiguous
gulinalg.matvec_multiply(a, b, out=res)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_all_non_contiguous(self):
"""Neither input nor output contiguous"""
a = np.ascontiguousarray(np.random.randn(M, N, 2))[:, :, 0]
b = np.ascontiguousarray(np.random.randn(N, 2))[:, 0]
res = np.zeros((M, 2), order='C')[:, 0]
assert not a.flags.c_contiguous and not a.flags.f_contiguous
assert not b.flags.c_contiguous and not b.flags.f_contiguous
assert not res.flags.c_contiguous and not res.flags.f_contiguous
gulinalg.matvec_multiply(a, b, out=res)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_stride_tricks(self):
"""Test that matrices that are contiguous but have their dimension
overlapped *copy*, as BLAS does not support them"""
a = np.ascontiguousarray(np.random.randn(M + N))
a = np.lib.stride_tricks.as_strided(a,
shape=(M, N),
strides=(a.itemsize, a.itemsize))
b = np.ascontiguousarray(np.random.randn(N))
res = gulinalg.matvec_multiply(a, b)
ref = np.dot(a, b)
assert_allclose(res, ref)
def test_matvec_multiply_batch_both_out_non_contiguous(self):
"""Multiply C layout stack of matrices and vectors"""
a = np.random.randn(n_batch, M, N, 2)[..., 0]
b = np.random.randn(n_batch, N, 2)[..., 0]
res = np.zeros((n_batch, M, 2), dtype=a.dtype)[..., 0]
for workers in [1, -1]:
            gulinalg.matvec_multiply(a, b, out=res, workers=workers)
ref = np.matmul(a, b[:, :, np.newaxis])[..., 0]
assert_allclose(res, ref)
class TestMatvecMultiplyVector(TestCase):
"""Tests showing that the gufunc stuff works"""
def test_vector(self):
"""test vectorized matrix multiply"""
a = np.ascontiguousarray(np.random.randn(10, M, N))
b = np.ascontiguousarray(np.random.randn(10, N))
res = gulinalg.matvec_multiply(a, b)
assert res.shape == (10, M)
ref = np.stack([np.dot(a[i], b[i]) for i in range(len(a))])
assert_allclose(res, ref)
def test_broadcast(self):
"""test broadcast matrix multiply"""
a = np.ascontiguousarray(np.random.randn(M, N))
b = np.ascontiguousarray(np.random.randn(10, N))
res = gulinalg.matvec_multiply(a, b)
assert res.shape == (10, M)
ref = np.stack([np.dot(a, b[i]) for i in range(len(b))])
assert_allclose(res, ref)
def test_nan_handling(self):
"""NaN in one output shouldn't contaminate remaining outputs"""
a = np.eye(2)
b = np.array([[1.0, 2.0], [np.nan, 1.0]])
ref = np.array([[1., 2.], [np.nan, np.nan]])
res = gulinalg.matvec_multiply(a, b)
assert_allclose(res, ref)
def test_infinity_handling(self):
"""Infinity in one output shouldn't contaminate remaining outputs"""
a = np.eye(2)
b = np.array([[1.0, 2.0], [np.inf, 1.0]])
ref = np.array([[1., 2.], [np.inf, np.nan]])
res = gulinalg.matvec_multiply(a, b)
assert_allclose(res, ref)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_size_zero_vector(self):
"""Test broadcasting for vector of size zero"""
a = np.ascontiguousarray(np.random.randn(10, 2, 0))
b = np.ascontiguousarray(np.random.randn(10, 0))
res = gulinalg.matvec_multiply(a, b)
assert res.shape == (10, 2)
ref = np.stack([np.dot(a[i], b[i]) for i in range(len(a))])
assert_allclose(res, ref)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_size_zero_matrix(self):
"""Test broadcasting for matrix of size zero"""
a = np.ascontiguousarray(np.random.randn(10, 0, 2))
b = np.ascontiguousarray(np.random.randn(10, 2))
res = gulinalg.matvec_multiply(a, b)
assert res.shape == (10, 0)
ref = np.stack([np.dot(a[i], b[i]) for i in range(len(a))])
assert_allclose(res, ref)
def test_size_one_vector(self):
"""Test broadcasting for vector of size one"""
a = np.ascontiguousarray(np.random.randn(10, 1, 1))
b = np.ascontiguousarray(np.random.randn(10, 1))
res = gulinalg.matvec_multiply(a, b)
assert res.shape == (10, 1)
ref = np.stack([np.dot(a[i], b[i]) for i in range(len(a))])
assert_allclose(res, ref)
class TestUpdateRank1Copy(TestCase):
"""
Tests the cases that code can handle without copy-rearranging of any of
the input/output arguments.
"""
def test_update_rank1_c(self):
"""Rank update on C layout matrix"""
a = np.random.randn(M)
b = np.random.randn(N)
c = np.ascontiguousarray(np.random.randn(M, N))
res = gulinalg.update_rank1(a, b, c)
ref = np.dot(a.reshape(M, 1), b.reshape(1, N)) + c
assert_allclose(res, ref)
def test_update_rank1_f(self):
"""Rank update on F layout matrix"""
a = np.random.randn(M)
b = np.random.randn(N)
c = np.asfortranarray(np.random.randn(M, N))
res = gulinalg.update_rank1(a, b, c)
ref = np.dot(a.reshape(M, 1), b.reshape(1, N)) + c
assert_allclose(res, ref)
def test_update_rank1_for_complex_numbers(self):
"""Test for complex numbers"""
a = np.array([1 + 3j, 3 - 4j])
b = np.array([1 - 2j, 4 + 5j])
c = np.array([[1 + 2j, 3 + 4j], [5 + 6j, 7 + -8j]])
res = gulinalg.update_rank1(a, b, c)
ref = np.dot(a.reshape(2, 1), b.conj().reshape(1, 2)) + c
assert_allclose(res, ref)
def test_update_rank1_for_complex_numbers_no_conjugate_transpose(self):
"""Test for complex numbers but no conjuage transpose"""
a = np.array([1 + 3j, 3 - 4j])
b = np.array([1 - 2j, 4 + 5j])
c = np.array([[1 + 2j, 3 + 4j], [5 + 6j, 7 + -8j]])
res = gulinalg.update_rank1(a, b, c, conjugate=False)
ref = np.dot(a.reshape(2, 1), b.reshape(1, 2)) + c
assert_allclose(res, ref)
def test_update_rank1_c_c(self):
"""Rank1 update on C layout matrix, explicit C array output"""
a = np.array([2, 3, 4])
b = np.array([1, 3, 4, 5])
c = np.arange(1, 13).reshape(3, 4)
res = np.zeros((3, 4), order='C')
gulinalg.update_rank1(a, b, c, out=res)
ref = np.dot(a.reshape(3, 1), b.reshape(1, 4)) + c
assert_allclose(res, ref)
def test_update_rank1_f_c(self):
"""Rank1 update on F layout matrix, explicit C array output"""
a = np.array([2, 3, 4])
b = np.array([1, 3, 4, 5])
c = np.asfortranarray(np.arange(1, 13).reshape(3, 4))
res = np.zeros((3, 4), order='C')
gulinalg.update_rank1(a, b, c, out=res)
ref = np.dot(a.reshape(3, 1), b.reshape(1, 4)) + c
assert_allclose(res, ref)
def test_update_rank1_c_f(self):
"""Rank1 update on C layout matrix, explicit F array output"""
a = np.array([2, 3, 4])
b = np.array([1, 3, 4, 5])
c = np.arange(1, 13).reshape(3, 4)
res = np.zeros((3, 4), order='F')
gulinalg.update_rank1(a, b, c, out=res)
ref = np.dot(a.reshape(3, 1), b.reshape(1, 4)) + c
assert_allclose(res, ref)
def test_update_rank1_f_f(self):
"""Rank1 update on F layout matrix, explicit F array output"""
a = np.array([2, 3, 4])
b = np.array([1, 3, 4, 5])
c = np.asfortranarray(np.arange(1, 13).reshape(3, 4))
res = np.zeros((3, 4), order='F')
gulinalg.update_rank1(a, b, c, out=res)
ref = np.dot(a.reshape(3, 1), b.reshape(1, 4)) + c
assert_allclose(res, ref)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_size_zero_vector(self):
"""Test vector input of size zero"""
a = np.zeros(1)
b = np.zeros(0)
c = np.ascontiguousarray(np.random.randn(1, 0))
res = gulinalg.update_rank1(a, b, c)
ref = np.dot(np.zeros((1, 0)), np.zeros((0, 0))) + c
assert_allclose(res, ref)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_size_zero_matrix(self):
"""Test matrix input of size zero"""
a = np.zeros(0)
b = np.zeros(2)
c = np.full((0, 2), np.nan)
res = gulinalg.update_rank1(a, b, c)
ref = np.dot(np.zeros((0, 0)), np.zeros((0, 2))) + c
assert_allclose(res, ref)
def test_size_one_vector(self):
"""Test vector inputs of size one"""
a = np.random.randn(1)
b = np.random.randn(1)
c = np.ascontiguousarray(np.random.randn(1, 1))
res = gulinalg.update_rank1(a, b, c)
ref = np.dot(a.reshape(1, 1), b.reshape(1, 1)) + c
assert_allclose(res, ref)
class TestUpdateRank1WithCopy(TestCase):
"""
Test the cases where there is at least one operand/output that requires
copy/rearranging.
"""
def test_input_non_contiguous_vectors(self):
"""Not contiguous vector inputs"""
a = np.ascontiguousarray(np.random.randn(M, N, 2))[:, 0, 0]
b = np.ascontiguousarray(np.random.randn(M, N, 2))[0, :, 0]
c = np.ascontiguousarray(np.random.randn(M, N))
assert not a.flags.c_contiguous and not a.flags.f_contiguous
assert not b.flags.c_contiguous and not b.flags.f_contiguous
for workers in [1, -1]:
res = gulinalg.update_rank1(a, b, c, workers=workers)
ref = np.dot(a.reshape(M, 1), b.reshape(1, N)) + c
assert_allclose(res, ref)
def test_input_non_contiguous_matrix(self):
"""Non contiguous matrix input"""
a = np.random.randn(M)
b = np.random.randn(N)
c = np.ascontiguousarray(np.random.randn(M, N, 2))[:, :, 0]
assert not c.flags.c_contiguous and not c.flags.f_contiguous
res = gulinalg.update_rank1(a, b, c)
ref = np.dot(a.reshape(M, 1), b.reshape(1, N)) + c
assert_allclose(res, ref)
def test_output_non_contiguous(self):
"""Output not contiguous"""
a = np.random.randn(M)
b = np.random.randn(N)
c = np.ascontiguousarray(np.random.randn(M, N))
res = np.zeros((M, N, 2), order='C')[:, :, 0]
gulinalg.update_rank1(a, b, c, out=res)
ref = np.dot(a.reshape(M, 1), b.reshape(1, N)) + c
assert_allclose(res, ref)
def test_stride_tricks(self):
"""test that matrices that are contiguous but have their dimension
overlapped *copy*, as BLAS does not support them"""
a = np.random.randn(M)
b = np.random.randn(N)
c = np.ascontiguousarray(np.random.randn(M + N))
        c = np.lib.stride_tricks.as_strided(c,
                                            shape=(M, N),
                                            strides=(c.itemsize, c.itemsize))
res = gulinalg.update_rank1(a, b, c)
ref = np.dot(a.reshape(M, 1), b.reshape(1, N)) + c
assert_allclose(res, ref)
class TestUpdateRank1Vector(TestCase):
"""Tests showing that the gufunc stuff works"""
def test_vector(self):
"""test vectorized rank1 update"""
a = np.ascontiguousarray(np.random.randn(10, M))
b = np.ascontiguousarray(np.random.randn(10, N))
c = np.ascontiguousarray(np.random.randn(10, M, N))
for workers in [1, -1]:
res = gulinalg.update_rank1(a, b, c, workers=workers)
assert res.shape == (10, M, N)
ref = np.stack([np.dot(a[i].reshape(M, 1), b[i].reshape(1, N)) + c[i]
for i in range(len(c))])
assert_allclose(res, ref)
def test_broadcast(self):
"""test broadcast rank1 update"""
a = np.ascontiguousarray(np.random.randn(10, M))
b = np.ascontiguousarray(np.random.randn(10, N))
c = np.ascontiguousarray(np.random.randn(M, N))
for workers in [1, -1]:
res = gulinalg.update_rank1(a, b, c, workers=workers)
assert res.shape == (10, M, N)
ref = np.stack([np.dot(a[i].reshape(M, 1), b[i].reshape(1, N)) + c
for i in range(len(b))])
assert_allclose(res, ref)
def test_nan_handling(self):
"""NaN in one output shouldn't contaminate remaining outputs"""
a = np.array([[1, 2], [1, np.nan]])
b = np.array([3, 4])
c = np.array([[1, 2], [3, 4]])
ref = np.array([[[4, 6], [9, 12]],
[[4, 6], [np.nan, np.nan]]])
res = gulinalg.update_rank1(a, b, c)
assert_allclose(res, ref)
def test_infinity_handling(self):
"""Infinity in one output shouldn't contaminate remaining outputs"""
a = np.array([[1, 2], [1, np.inf]])
b = np.array([3, 4])
c = np.array([[1, 2], [3, 4]])
ref = np.array([[[4, 6], [9, 12]],
[[4, 6], [np.inf, np.inf]]])
res = gulinalg.update_rank1(a, b, c)
assert_allclose(res, ref)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_size_zero_vector(self):
"""Test broadcasting for matrix input of size zero"""
a = np.ascontiguousarray(np.random.randn(10, 1))
b = np.ascontiguousarray(np.random.randn(10, 0))
c = np.ascontiguousarray(np.random.randn(10, 1, 0))
res = gulinalg.update_rank1(a, b, c)
assert res.shape == (10, 1, 0)
ref = np.stack([np.dot(np.zeros((1, 0)), np.zeros((0, 0))) + c[i]
for i in range(len(c))])
assert_allclose(res, ref)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_size_zero_matrix(self):
"""Test broadcasting for matrix input of size zero"""
a = np.ascontiguousarray(np.random.randn(10, 0))
b = np.ascontiguousarray(np.random.randn(10, 2))
c = np.ascontiguousarray(np.random.randn(10, 0, 2))
res = gulinalg.update_rank1(a, b, c)
assert res.shape == (10, 0, 2)
ref = np.stack([np.dot(np.zeros((0, 0)), np.zeros((0, 2))) + c[i]
for i in range(len(c))])
assert_allclose(res, ref)
def test_size_one_vector(self):
"""Test broadcasting for vector inputs of size one"""
a = np.ascontiguousarray(np.random.randn(10, 1))
b = np.ascontiguousarray(np.random.randn(10, 1))
c = np.ascontiguousarray(np.random.randn(10, 1, 1))
res = gulinalg.update_rank1(a, b, c)
assert res.shape == (10, 1, 1)
ref = np.stack([np.dot(a[i].reshape(1, 1), b[i].reshape(1, 1)) + c[i]
for i in range(len(c))])
assert_allclose(res, ref)
class TestSyrk(TestCase):
def test_syrk_zeros_c(self):
gufunc = partial(gulinalg.update_rankk, sym_out=False)
for a_trans in [True, False]:
for dtype in [np.float32, np.float64]:
a = np.array([[1., 0.],
[0., -2.],
[2., 3.]], dtype=dtype)
if a_trans:
a = a.T
c = np.zeros((a.shape[0],)*2, dtype=dtype)
expected = np.dot(a, a.T) + c
# test upper triangular case
r = gufunc(a, c, transpose_type='N', UPLO='U')
assert_allclose(np.triu(expected), r)
assert_(r.dtype == a.dtype)
# test lower triangular case
r = gufunc(a, c, transpose_type='N', UPLO='L')
assert_allclose(np.tril(expected), r)
assert_(r.dtype == a.dtype)
# test upper triangular case with transpose_type='T'
r = gufunc(a.T, c, transpose_type='T', UPLO='U')
assert_allclose(np.triu(expected), r)
assert_(r.dtype == a.dtype)
# test lower triangular case with transpose_type='T'
r = gufunc(a.T, c, transpose_type='T', UPLO='L')
assert_allclose(np.tril(expected), r)
assert_(r.dtype == a.dtype)
def test_syrk_ones_c(self):
gufunc = partial(gulinalg.update_rankk, sym_out=False)
for a_trans in [True, False]:
for dtype in [np.float32, np.float64]:
a = np.array([[1., 0.],
[0., -2.],
[2., 3.]], dtype=dtype)
if a_trans:
a = a.T
c = np.ones((a.shape[0],)*2, dtype=dtype)
tmp = np.dot(a, a.T) + c
mask_upper = np.triu(c) > 0
mask_lower = np.tril(c) > 0
expected_lower = tmp.copy()
expected_lower[~mask_lower] = c[~mask_lower]
expected_upper = tmp.copy()
expected_upper[~mask_upper] = c[~mask_upper]
# test upper triangular case
r = gufunc(a, c, transpose_type='N', UPLO='U')
assert_allclose(expected_upper, r)
assert_(r.dtype == a.dtype)
# test lower triangular case
r = gufunc(a, c, transpose_type='N', UPLO='L')
assert_allclose(expected_lower, r)
assert_(r.dtype == a.dtype)
# test upper triangular case, with transpose_type='T'
r = gufunc(a.T, c, transpose_type='T', UPLO='U')
assert_allclose(expected_upper, r)
assert_(r.dtype == a.dtype)
# test lower triangular case, with transpose_type='T'
r = gufunc(a.T, c, transpose_type='T', UPLO='L')
assert_allclose(expected_lower, r)
assert_(r.dtype == a.dtype)
def test_syrk_broadcasted(self):
for sym_out, workers in product([False, True], [1, -1]):
gufunc = partial(gulinalg.update_rankk, sym_out=sym_out,
workers=workers)
for a_trans in [True, False]:
for dtype in [np.float32, np.float64]:
a = np.array([[1., 0.],
[0., -2.],
[2., 3.]], dtype=dtype)
if a_trans:
a = a.T
c = np.zeros((a.shape[0],)*2, dtype=dtype)
expected = np.dot(a, a.swapaxes(-1, -2)) + c
a = np.stack((a, ) * n_batch, axis=0) # stack
# test upper triangular case
r = gufunc(a, c, transpose_type='N', UPLO='U')
for i in range(n_batch):
if sym_out:
assert_allclose(expected, r[i])
else:
assert_allclose(np.triu(expected), r[i])
assert_(r.dtype == a.dtype)
# test upper triangular case with extra c dimensions
c_4d = c[np.newaxis, np.newaxis, ...]
r = gufunc(a, c_4d, transpose_type='N', UPLO='U')
for i in range(n_batch):
if sym_out:
assert_allclose(expected, r[0][i])
else:
assert_allclose(np.triu(expected), r[0][i])
assert_(r.dtype == a.dtype)
# test lower triangular case
r = gufunc(a, c, transpose_type='N', UPLO='L')
for i in range(n_batch):
if sym_out:
assert_allclose(expected, r[i])
else:
assert_allclose(np.tril(expected), r[i])
assert_(r.dtype == a.dtype)
# test upper triangular case with transpose_type='T'
r = gufunc(a.swapaxes(-1, -2), c, transpose_type='T',
UPLO='U')
for i in range(n_batch):
if sym_out:
assert_allclose(expected, r[i])
else:
assert_allclose(np.triu(expected), r[i])
assert_(r.dtype == a.dtype)
# test lower triangular case with transpose_type='T'
r = gufunc(a.swapaxes(-1, -2), c, transpose_type='T',
UPLO='L')
for i in range(n_batch):
if sym_out:
assert_allclose(expected, r[i])
else:
assert_allclose(np.tril(expected), r[i])
assert_(r.dtype == a.dtype)
def test_syrk_no_c_broadcasted(self):
for sym_out, workers in product([False, True], [1, -1]):
gufunc = partial(gulinalg.update_rankk, sym_out=sym_out,
workers=workers)
for a_trans in [True, False]:
for dtype in [np.float32, np.float64]:
a = np.array([[1., 0.],
[0., -2.],
[2., 3.]], dtype=dtype)
if a_trans:
a = a.T
c = np.zeros((a.shape[0],)*2, dtype=dtype)
expected = np.dot(a, a.swapaxes(-1, -2)) + c
a = np.stack((a, ) * n_batch, axis=0) # stack
# test upper triangular case
r = gufunc(a, transpose_type='N', UPLO='U')
for i in range(n_batch):
if sym_out:
assert_allclose(expected, r[i])
else:
assert_allclose(np.triu(expected), r[i])
assert_(r.dtype == a.dtype)
# test lower triangular case
r = gufunc(a, transpose_type='N', UPLO='L')
for i in range(n_batch):
if sym_out:
assert_allclose(expected, r[i])
else:
assert_allclose(np.tril(expected), r[i])
assert_(r.dtype == a.dtype)
# test upper triangular case with transpose_type='T'
r = gufunc(a.swapaxes(-1, -2), transpose_type='T',
UPLO='U')
for i in range(n_batch):
if sym_out:
assert_allclose(expected, r[i])
else:
assert_allclose(np.triu(expected), r[i])
assert_(r.dtype == a.dtype)
def test_syrk_wrong_shape(self):
for a_trans in [True, False]:
for dtype in [np.float32, np.float64]:
a = np.array([[1., 0.],
[0., -2.],
[2., 3.]], dtype=dtype)
if a_trans:
a = a.T
# use wrong axis size for c when transpose_type='N'
c = np.zeros((a.shape[-1],)*2, dtype=dtype)
with assert_raises(ValueError):
gulinalg.update_rankk(a, c, transpose_type='N')
# use wrong axis size for c when transpose_type='T'
c = np.zeros((a.shape[0],)*2, dtype=dtype)
with assert_raises(ValueError):
gulinalg.update_rankk(a, c, transpose_type='T')
def test_syrk_wrong_dtype(self):
for dtype in [np.complex64, np.complex128]:
a = np.ones((3, 3), dtype=dtype)
c = np.zeros_like(a)
with assert_raises(NotImplementedError):
gulinalg.update_rankk(a, c)
def test_syrk_invalid_uplo(self):
a = np.ones((3, 3), dtype=np.float64)
c = np.zeros_like(a)
with assert_raises(ValueError):
gulinalg.update_rankk(a, c, UPLO='X')
def test_syrk_invalid_transpose_type(self):
a = np.ones((3, 3), dtype=np.float64)
c = np.zeros_like(a)
with assert_raises(ValueError):
gulinalg.update_rankk(a, c, transpose_type='X')
def test_syrk_invalid_workers(self):
a = np.ones((3, 3), dtype=np.float64)
with assert_raises(ValueError):
gulinalg.update_rankk(a, workers=0)
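# Illustrative helper (not used by the tests): a plain NumPy reference for the
# SYRK-style update exercised above. It computes a @ a.T + c but, as with
# sym_out=False, keeps only the requested triangle and leaves the other
# triangle of c untouched.
def _reference_syrk(a, c, uplo='U'):
    full = np.dot(a, a.T) + c
    mask = np.ones_like(c, dtype=bool)
    mask = np.triu(mask) if uplo == 'U' else np.tril(mask)
    return np.where(mask, full, c)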
class TestQuadraticForm(TestCase):
"""Tests for Quadratic Form u * Q * v"""
def test_quadratic_c(self):
"""test vectorized matrix multiply"""
u = np.random.randn(M)
q = np.random.randn(M, N)
v = np.random.randn(N)
res = gulinalg.quadratic_form(u, q, v)
ref = np.dot(u[np.newaxis, :], np.dot(q, v[:, np.newaxis]))
assert_allclose(res, ref)
def test_quadratic_broadcast_u(self):
"""test vectorized matrix multiply"""
u = np.random.randn(n_batch, M)
q = np.random.randn(M, N)
v = np.random.randn(N)
for workers in [1, -1]:
res = gulinalg.quadratic_form(u, q, v, workers=workers)
ref = [np.squeeze(np.dot(u[i:i+1, :], np.dot(q, v[:, np.newaxis])))
for i in range(n_batch)]
assert_allclose(res, ref)
def test_quadratic_broadcast_uv(self):
"""test vectorized matrix multiply"""
u = np.random.randn(n_batch, M)
        q = np.random.randn(M, N)
#
# Particle swarm optimisation (PSO).
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
# Some code in this file was adapted from Myokit (see http://myokit.org)
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import logging
import numpy as np
import pints
class PSO(pints.PopulationBasedOptimiser):
"""
Finds the best parameters using the PSO method described in [1].
Particle Swarm Optimisation (PSO) is a global search method (so refinement
with a local optimiser is advised!) that works well for problems in high
dimensions and with many local minima. Because it treats each parameter
independently, it does not require preconditioning of the search space.
Detailed description:
In a particle swarm optimization, the parameter space is explored by ``n``
independent particles. The particles perform a pseudo-random walk through
the parameter space, guided by their own personal best score and the global
optimum found so far.
The method starts by creating a swarm of ``n`` particles and assigning each
an initial position and initial velocity (see the explanation of the
arguments ``hints`` and ``v`` for details). Each particle's score is
calculated and set as the particle's current best local score ``pl``. The
best score of all the particles is set as the best global score ``pg``.
Next, an iterative procedure is run that updates each particle's velocity
``v`` and position ``x`` using::
v[k] = v[k-1] + al * (pl - x[k-1]) + ag * (pg - x[k-1])
        x[k] = x[k-1] + v[k]
    Here, ``x[k]`` is the particle's current position and ``v[k]`` its current
velocity. The values ``al`` and ``ag`` are scalars randomly sampled from a
uniform distribution, with values bound by ``r * 4.1`` and
``(1 - r) * 4.1``. Thus a swarm with ``r = 1`` will only use local
information, while a swarm with ``r = 0`` will only use global information.
The de facto standard is ``r = 0.5``. The random sampling is done each time
``al`` and ``ag`` are used: at each time step every particle performs ``m``
samplings, where ``m`` is the dimensionality of the search space.
Pseudo-code algorithm::
almax = r * 4.1
agmax = 4.1 - almax
while stopping criterion not met:
for i in [1, 2, .., n]:
if f(x[i]) < f(p[i]):
p[i] = x[i]
pg = min(p[1], p[2], .., p[n])
for j in [1, 2, .., m]:
al = uniform(0, almax)
ag = uniform(0, agmax)
v[i,j] += al * (p[i,j] - x[i,j]) + ag * (pg[i,j] - x[i,j])
x[i,j] += v[i,j]
*Extends:* :class:`PopulationBasedOptimiser`
References:
[1] Kennedy, Eberhart (1995) Particle Swarm Optimization.
IEEE International Conference on Neural Networks
"""
def __init__(self, x0, sigma0=None, boundaries=None):
super(PSO, self).__init__(x0, sigma0, boundaries)
# Set initial state
self._running = False
self._ready_for_tell = False
# Set default settings
self.set_local_global_balance()
# Python logger
self._logger = logging.getLogger(__name__)
def ask(self):
""" See :meth:`Optimiser.ask()`. """
# Initialise on first call
if not self._running:
self._initialise()
# Ready for tell now
self._ready_for_tell = True
# Return points
return self._user_xs
def fbest(self):
""" See :meth:`Optimiser.fbest()`. """
if self._running:
return self._fg
return float('inf')
def _initialise(self):
"""
Initialises the optimiser for the first iteration.
"""
assert(not self._running)
# Initialize swarm
self._xs = [] # Particle coordinate vectors
self._vs = [] # Particle velocity vectors
self._fl = [] # Best local score
self._pl = [] # Best local position
# Set initial positions
self._xs.append(np.array(self._x0, copy=True))
if self._boundaries is not None:
# Attempt to sample n - 1 points from the boundaries
try:
self._xs.extend(
self._boundaries.sample(self._population_size - 1))
except NotImplementedError:
# Not all boundaries implement sampling
pass
        # If we couldn't sample from the boundaries, use gaussian sampling
        # around x0.
        if len(self._xs) < self._population_size:
            for i in range(1, self._population_size):
                self._xs.append(np.random.normal(self._x0, self._sigma0))
self._xs = np.array(self._xs, copy=True)
# Set initial velocities
for i in range(self._population_size):
self._vs.append(1e-1 * self._sigma0 *
np.random.uniform(0, 1, self._n_parameters))
# Set initial scores and local best
for i in range(self._population_size):
self._fl.append(float('inf'))
self._pl.append(self._xs[i])
# Set global best position and score
self._fg = float('inf')
self._pg = self._xs[0]
# Create boundary transform, or use manual boundary checking
self._manual_boundaries = False
self._boundary_transform = None
if isinstance(self._boundaries, pints.RectangularBoundaries):
self._boundary_transform = pints.TriangleWaveTransform(
self._boundaries)
elif self._boundaries is not None:
self._manual_boundaries = True
# Create safe xs to pass to user
if self._boundary_transform is not None:
# Rectangular boundaries? Then apply transform to xs
self._xs = self._boundary_transform(self._xs)
if self._manual_boundaries:
# Manual boundaries? Then filter out out-of-bounds points from xs
self._user_ids = np.nonzero(
[self._boundaries.check(x) for x in self._xs])
self._user_xs = self._xs[self._user_ids]
if len(self._user_xs) == 0: # pragma: no cover
self._logger.warning(
'All initial PSO particles are outside the boundaries.')
else:
self._user_xs = np.array(self._xs, copy=True)
# Set user points as read-only
self._user_xs.setflags(write=False)
# Set local/global exploration balance
self.set_local_global_balance()
# Update optimiser state
self._running = True
def _log_init(self, logger):
""" See :meth:`Loggable._log_init()`. """
# Show best position of each particle
for i in range(self._population_size):
logger.add_float('f' + str(i), file_only=True)
def _log_write(self, logger):
""" See :meth:`Loggable._log_write()`. """
# Show best position of each particle
for f in self._fl:
logger.log(f)
def name(self):
""" See :meth:`Optimiser.name()`. """
return 'Particle Swarm Optimisation (PSO)'
def running(self):
""" See :meth:`Optimiser.running()`. """
return self._running
def set_local_global_balance(self, r=0.5):
"""
Set the balance between local and global exploration for each particle,
using a parameter `r` such that `r = 1` is a fully local search and
`r = 0` is a fully global search.
"""
if self._running:
raise Exception('Cannot change settings during run.')
# Check r
r = float(r)
if r < 0 or r > 1:
raise ValueError('Parameter r must be in the range 0-1.')
# Set almax and agmax based on r
_amax = 4.1
self._almax = r * _amax
self._agmax = _amax - self._almax
def n_hyper_parameters(self):
""" See :meth:`TunableMethod.n_hyper_parameters()`. """
return 2
def set_hyper_parameters(self, x):
"""
The hyper-parameter vector is ``[population_size,
local_global_balance]``.
See :meth:`TunableMethod.set_hyper_parameters()`.
"""
self.set_population_size(x[0])
self.set_local_global_balance(x[1])
def _suggested_population_size(self):
""" See :meth:`Optimiser._suggested_population_size(). """
return 4 + int(3 * np.log(self._n_parameters))
def tell(self, fx):
""" See :meth:`Optimiser.tell()`. """
if not self._ready_for_tell:
raise Exception('ask() not called before tell()')
self._ready_for_tell = False
# Manual boundaries? Then reconstruct full fx vector
if self._manual_boundaries and len(fx) < self._population_size:
user_fx = fx
fx = np.ones((self._population_size, )) * float('inf')
fx[self._user_ids] = user_fx
# Update particles
for i in range(self._population_size):
# Update best local position and score
if fx[i] < self._fl[i]:
self._fl[i] = fx[i]
self._pl[i] = np.array(self._xs[i], copy=True)
# Calculate "velocity"
al = np.random.uniform(0, self._almax, self._n_parameters)
ag = np.random.uniform(0, self._agmax, self._n_parameters)
self._vs[i] += (
al * (self._pl[i] - self._xs[i]) +
ag * (self._pg - self._xs[i]))
# Reduce speed if going too fast, as indicated by going out of
# bounds.
# This is not in the original algorithm but seems to work well
if self._boundaries is not None:
if not self._boundaries.check(self._xs[i] + self._vs[i]):
self._vs[i] *= 0.5
# Update position
self._xs[i] += self._vs[i]
# Create safe xs to pass to user
if self._boundary_transform is not None:
# Rectangular boundaries? Then apply transform to xs
self._user_xs = self._xs = self._boundary_transform(self._xs)
elif self._manual_boundaries:
# Manual boundaries? Then filter out out-of-bounds points from xs
self._user_ids = np.nonzero(
[self._boundaries.check(x) for x in self._xs])
self._user_xs = self._xs[self._user_ids]
if len(self._user_xs) == 0: # pragma: no cover
self._logger.warning(
'All PSO particles are outside the boundaries.')
else:
self._user_xs = np.array(self._xs, copy=True)
# Update global best score
i = np.argmin(self._fl)
if self._fl[i] < self._fg:
self._fg = self._fl[i]
            self._pg = np.array(self._pl[i], copy=True)
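# Illustrative sketch (not part of PINTS): one particle's velocity and position
# update from the class docstring, written in plain NumPy. The function name
# and arguments are hypothetical.
def _example_pso_update(x, v, p_local, p_global, r=0.5):
    amax = 4.1
    al = np.random.uniform(0, r * amax, size=x.shape)
    ag = np.random.uniform(0, (1 - r) * amax, size=x.shape)
    v = v + al * (p_local - x) + ag * (p_global - x)
    return x + v, v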
import logging
import numpy as np
from collections import OrderedDict
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.tensor.nnet import conv2d, ConvOp
from theano.gpuarray.blas import GpuCorrMM
from theano.gpuarray.basic_ops import gpu_contiguous
from blocks.bricks.cost import SquaredError
from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate
from blocks.graph import add_annotation, Annotation
from blocks.roles import add_role, PARAMETER, WEIGHT, BIAS
from utils import shared_param, AttributeDict
from nn import maxpool_2d, global_meanpool_2d, BNPARAM, softmax_n
logger = logging.getLogger('main.model')
floatX = theano.config.floatX
class LadderAE():
def __init__(self, p):
self.p = p
self.init_weights_transpose = False
self.default_lr = p.lr
self.shareds = OrderedDict()
self.rstream = RandomStreams(seed=p.seed)
self.rng = np.random.RandomState(seed=p.seed)
n_layers = len(p.encoder_layers)
assert n_layers > 1, "Need to define encoder layers"
assert n_layers == len(p.denoising_cost_x), (
"Number of denoising costs does not match with %d layers: %s" %
(n_layers, str(p.denoising_cost_x)))
def one_to_all(x):
""" (5.,) -> 5 -> (5., 5., 5.)
('relu',) -> 'relu' -> ('relu', 'relu', 'relu')
"""
if type(x) is tuple and len(x) == 1:
x = x[0]
if type(x) is float:
x = (np.float32(x),) * n_layers
if type(x) is str:
x = (x,) * n_layers
return x
p.decoder_spec = one_to_all(p.decoder_spec)
p.f_local_noise_std = one_to_all(p.f_local_noise_std)
acts = one_to_all(p.get('act', 'relu'))
assert n_layers == len(p.decoder_spec), "f and g need to match"
assert (n_layers == len(acts)), (
"Not enough activations given. Requires %d. Got: %s" %
(n_layers, str(acts)))
acts = acts[:-1] + ('softmax',)
def parse_layer(spec):
""" 'fc:5' -> ('fc', 5)
'5' -> ('fc', 5)
5 -> ('fc', 5)
'convv:3:2:2' -> ('convv', [3,2,2])
"""
if type(spec) is not str:
return "fc", spec
spec = spec.split(':')
l_type = spec.pop(0) if len(spec) >= 2 else "fc"
spec = map(int, spec)
spec = spec[0] if len(spec) == 1 else spec
return l_type, spec
enc = map(parse_layer, p.encoder_layers)
self.layers = list(enumerate(zip(enc, p.decoder_spec, acts)))
def weight(self, init, name, cast_float32=True, for_conv=False):
weight = self.shared(init, name, cast_float32, role=WEIGHT)
if for_conv:
return weight.dimshuffle('x', 0, 'x', 'x')
return weight
def bias(self, init, name, cast_float32=True, for_conv=False):
b = self.shared(init, name, cast_float32, role=BIAS)
if for_conv:
return b.dimshuffle('x', 0, 'x', 'x')
return b
def shared(self, init, name, cast_float32=True, role=PARAMETER, **kwargs):
p = self.shareds.get(name)
if p is None:
p = shared_param(init, name, cast_float32, role, **kwargs)
self.shareds[name] = p
return p
def counter(self):
name = 'counter'
p = self.shareds.get(name)
update = []
if p is None:
p_max_val = np.float32(10)
p = self.shared(np.float32(1), name, role=BNPARAM)
p_max = self.shared(p_max_val, name + '_max', role=BNPARAM)
update = [(p, T.clip(p + np.float32(1), np.float32(0), p_max)),
(p_max, p_max_val)]
return (p, update)
def noise_like(self, x):
noise = self.rstream.normal(size=x.shape, avg=0.0, std=1.0)
return T.cast(noise, dtype=floatX)
def rand_init(self, in_dim, out_dim):
""" Random initialization for fully connected layers """
W = self.rng.randn(in_dim, out_dim) / np.sqrt(in_dim)
return W
def rand_init_conv(self, dim):
""" Random initialization for convolution filters """
fan_in = np.prod(dtype=floatX, a=dim[1:])
bound = np.sqrt(3. / max(1.0, (fan_in)))
W = np.asarray(
self.rng.uniform(low=-bound, high=bound, size=dim), dtype=floatX)
return W
def new_activation_dict(self):
return AttributeDict({'z': {}, 'h': {}, 's': {}, 'm': {}})
def annotate_update(self, update, tag_to):
a = Annotation()
for (var, up) in update:
a.updates[var] = up
add_annotation(tag_to, a)
def apply(self, input_labeled, target_labeled, input_unlabeled):
self.layer_counter = 0
input_dim = self.p.encoder_layers[0]
# Store the dimension tuples in the same order as layers.
layers = self.layers
self.layer_dims = {0: input_dim}
self.lr = self.default_lr
self.costs = costs = AttributeDict()
self.costs.denois = AttributeDict()
self.act = AttributeDict()
self.error = AttributeDict()
top = len(layers) - 1
if input_labeled is None:
N = 0
else:
N = input_labeled.shape[0]
self.join = lambda l, u: T.concatenate([l, u], axis=0) if l else u
self.labeled = lambda x: x[:N] if x is not None else x
self.unlabeled = lambda x: x[N:] if x is not None else x
self.split_lu = lambda x: (self.labeled(x), self.unlabeled(x))
input_concat = self.join(input_labeled, input_unlabeled)
def encoder(input_, path_name, input_noise_std=0, noise_std=[]):
h = input_
logger.info(' 0: noise %g' % input_noise_std)
if input_noise_std > 0.:
h = h + self.noise_like(h) * input_noise_std
d = AttributeDict()
d.unlabeled = self.new_activation_dict()
d.labeled = self.new_activation_dict()
d.labeled.z[0] = self.labeled(h)
d.unlabeled.z[0] = self.unlabeled(h)
prev_dim = input_dim
for i, (spec, _, act_f) in layers[1:]:
d.labeled.h[i - 1], d.unlabeled.h[i - 1] = self.split_lu(h)
noise = noise_std[i] if i < len(noise_std) else 0.
curr_dim, z, m, s, h = self.f(h, prev_dim, spec, i, act_f,
path_name=path_name,
noise_std=noise)
assert self.layer_dims.get(i) in (None, curr_dim)
self.layer_dims[i] = curr_dim
d.labeled.z[i], d.unlabeled.z[i] = self.split_lu(z)
d.unlabeled.s[i] = s
d.unlabeled.m[i] = m
prev_dim = curr_dim
d.labeled.h[i], d.unlabeled.h[i] = self.split_lu(h)
return d
# Clean, supervised
logger.info('Encoder: clean, labeled')
clean = self.act.clean = encoder(input_concat, 'clean')
# Corrupted, supervised
logger.info('Encoder: corr, labeled')
corr = self.act.corr = encoder(input_concat, 'corr',
input_noise_std=self.p.super_noise_std,
noise_std=self.p.f_local_noise_std)
est = self.act.est = self.new_activation_dict()
# Decoder path in opposite order
logger.info('Decoder: z_corr -> z_est')
for i, ((_, spec), l_type, act_f) in layers[::-1]:
z_corr = corr.unlabeled.z[i]
z_clean = clean.unlabeled.z[i]
z_clean_s = clean.unlabeled.s.get(i)
z_clean_m = clean.unlabeled.m.get(i)
fspec = layers[i+1][1][0] if len(layers) > i+1 else (None, None)
if i == top:
ver = corr.unlabeled.h[i]
ver_dim = self.layer_dims[i]
top_g = True
else:
ver = est.z.get(i + 1)
ver_dim = self.layer_dims.get(i + 1)
top_g = False
z_est = self.g(z_lat=z_corr,
z_ver=ver,
in_dims=ver_dim,
out_dims=self.layer_dims[i],
l_type=l_type,
num=i,
fspec=fspec,
top_g=top_g)
if z_est is not None:
# Denoising cost
if z_clean_s and self.p.zestbn == 'bugfix':
z_est_norm = (z_est - z_clean_m) / T.sqrt(z_clean_s + np.float32(1e-10))
elif z_clean_s is None or self.p.zestbn == 'no':
z_est_norm = z_est
else:
assert False, 'Not supported path'
se = SquaredError('denois' + str(i))
costs.denois[i] = se.apply(z_est_norm.flatten(2),
z_clean.flatten(2)) \
/ np.prod(self.layer_dims[i], dtype=floatX)
costs.denois[i].name = 'denois' + str(i)
denois_print = 'denois %.2f' % self.p.denoising_cost_x[i]
else:
denois_print = ''
# Store references for later use
est.h[i] = self.apply_act(z_est, act_f)
est.z[i] = z_est
est.s[i] = None
est.m[i] = None
logger.info(' g%d: %10s, %s, dim %s -> %s' % (
i, l_type,
denois_print,
self.layer_dims.get(i+1),
self.layer_dims.get(i)
))
# Costs
y = target_labeled.flatten()
costs.class_clean = CategoricalCrossEntropy().apply(y, clean.labeled.h[top])
costs.class_clean.name = 'cost_class_clean'
costs.class_corr = CategoricalCrossEntropy().apply(y, corr.labeled.h[top])
costs.class_corr.name = 'cost_class_corr'
# This will be used for training
costs.total = costs.class_corr * 1.0
for i in range(top + 1):
if costs.denois.get(i) and self.p.denoising_cost_x[i] > 0:
costs.total += costs.denois[i] * self.p.denoising_cost_x[i]
costs.total.name = 'cost_total'
# Classification error
mr = MisclassificationRate()
self.error.clean = mr.apply(y, clean.labeled.h[top]) * np.float32(100.)
self.error.clean.name = 'error_rate_clean'
def apply_act(self, input, act_name):
if input is None:
return input
act = {
'relu': lambda x: T.maximum(0, x),
'leakyrelu': lambda x: T.switch(x > 0., x, 0.1 * x),
'linear': lambda x: x,
'softplus': lambda x: T.log(1. + T.exp(x)),
'sigmoid': lambda x: T.nnet.sigmoid(x),
'softmax': lambda x: softmax_n(x),
}.get(act_name)
assert act, 'unknown act %s' % act_name
if act_name == 'softmax':
input = input.flatten(2)
return act(input)
def annotate_bn(self, var, id, var_type, mb_size, size, norm_ax):
var_shape = np.array((1,) + size)
out_dim = np.prod(var_shape) / np.prod(var_shape[list(norm_ax)])
# Flatten the var - shared variable updating is not trivial otherwise,
# as theano seems to believe a row vector is a matrix and will complain
# about the updates
orig_shape = var.shape
var = var.flatten()
# Here we add the name and role, the variables will later be identified
# by these values
var.name = id + '_%s_clean' % var_type
add_role(var, BNPARAM)
shared_var = self.shared(np.zeros(out_dim),
name='shared_%s' % var.name, role=None)
# Update running average estimates. When the counter is reset to 1, it
# will clear its memory
cntr, c_up = self.counter()
one = np.float32(1)
run_avg = lambda new, old: one / cntr * new + (one - one / cntr) * old
if var_type == 'mean':
new_value = run_avg(var, shared_var)
elif var_type == 'var':
mb_size = T.cast(mb_size, 'float32')
new_value = run_avg(mb_size / (mb_size - one) * var, shared_var)
else:
            raise NotImplementedError('Unknown batch norm var %s' % var_type)
# Add the counter update to the annotated update if it is the first
# instance of a counter
self.annotate_update([(shared_var, new_value)] + c_up, var)
return var.reshape(orig_shape)
def f(self, h, in_dim, spec, num, act_f, path_name, noise_std=0):
# Generates identifiers used for referencing shared variables.
# E.g. clean and corrupted encoders will end up using the same
# variable name and hence sharing parameters
gen_id = lambda s: '_'.join(['f', str(num), s])
layer_type, _ = spec
# Pooling
if layer_type in ['maxpool', 'globalmeanpool']:
z, output_size = self.f_pool(h, spec, in_dim)
norm_ax = (0, -2, -1)
# after pooling, no activation func for now unless its softmax
act_f = "linear" if act_f != "softmax" else act_f
# Convolution
elif layer_type in ['convv', 'convf']:
z, output_size = self.f_conv(h, spec, in_dim, gen_id('W'))
norm_ax = (0, -2, -1)
# Fully connected
elif layer_type == "fc":
h = h.flatten(2) if h.ndim > 2 else h
_, dim = spec
W = self.weight(self.rand_init(np.prod(in_dim), dim), gen_id('W'))
z, output_size = T.dot(h, W), (dim,)
norm_ax = (0,)
else:
raise ValueError("Unknown layer spec: %s" % layer_type)
m = s = None
is_normalizing = True
if is_normalizing:
keep_dims = True
z_l = self.labeled(z)
z_u = self.unlabeled(z)
m = z_u.mean(norm_ax, keepdims=keep_dims)
s = z_u.var(norm_ax, keepdims=keep_dims)
m_l = z_l.mean(norm_ax, keepdims=keep_dims)
s_l = z_l.var(norm_ax, keepdims=keep_dims)
if path_name == 'clean':
# Batch normalization estimates the mean and variance of
# validation and test sets based on the training set
# statistics. The following annotates the computation of
# running average to the graph.
m_l = self.annotate_bn(m_l, gen_id('bn'), 'mean', z_l.shape[0],
output_size, norm_ax)
s_l = self.annotate_bn(s_l, gen_id('bn'), 'var', z_l.shape[0],
output_size, norm_ax)
z = self.join(
(z_l - m_l) / T.sqrt(s_l + np.float32(1e-10)),
(z_u - m) / T.sqrt(s + np.float32(1e-10)))
if noise_std > 0:
z += self.noise_like(z) * noise_std
# z for lateral connection
z_lat = z
b_init, c_init = 0.0, 1.0
b_c_size = output_size[0]
# Add bias
if act_f != 'linear':
z += self.bias(b_init * np.ones(b_c_size), gen_id('b'),
for_conv=len(output_size) > 1)
if is_normalizing:
# Add free parameter (gamma in original Batch Normalization paper)
            # if needed by the activation. For instance ReLU doesn't need one
# and we only add it to softmax if hyperparameter top_c is set.
if (act_f not in ['relu', 'leakyrelu', 'linear', 'softmax'] or
(act_f == 'softmax' and self.p.top_c is True)):
c = self.weight(c_init * np.ones(b_c_size), gen_id('c'),
for_conv=len(output_size) > 1)
z *= c
h = self.apply_act(z, act_f)
logger.info(' f%d: %s, %s,%s noise %.2f, params %s, dim %s -> %s' % (
num, layer_type, act_f, ' BN,' if is_normalizing else '',
noise_std, spec[1], in_dim, output_size))
return output_size, z_lat, m, s, h
def f_pool(self, x, spec, in_dim):
layer_type, dims = spec
num_filters = in_dim[0]
if "globalmeanpool" == layer_type:
y, output_size = global_meanpool_2d(x, num_filters)
# scale the variance to match normal conv layers with xavier init
y = y * np.float32(in_dim[-1]) * np.float32(np.sqrt(3))
else:
assert dims[0] != 1 or dims[1] != 1
y, output_size = maxpool_2d(x, in_dim,
poolsize=(dims[1], dims[1]),
poolstride=(dims[0], dims[0]))
return y, output_size
def f_conv(self, x, spec, in_dim, weight_name):
layer_type, dims = spec
num_filters = dims[0]
filter_size = (dims[1], dims[1])
stride = (dims[2], dims[2])
bm = 'full' if 'convf' in layer_type else 'valid'
num_channels = in_dim[0]
W = self.weight(self.rand_init_conv(
(num_filters, num_channels) + filter_size), weight_name)
if stride != (1, 1):
f = GpuCorrMM(subsample=stride, border_mode=bm, pad=(0, 0))
y = f(gpu_contiguous(x), gpu_contiguous(W))
else:
assert self.p.batch_size == self.p.valid_batch_size
y = conv2d(x, W, image_shape=(2*self.p.batch_size, ) + in_dim,
filter_shape=((num_filters, num_channels) +
filter_size), border_mode=bm)
output_size = ((num_filters,) +
ConvOp.getOutputShape(in_dim[1:], filter_size,
stride, bm))
return y, output_size
def g(self, z_lat, z_ver, in_dims, out_dims, l_type, num, fspec, top_g):
f_layer_type, dims = fspec
is_conv = f_layer_type is not None and ('conv' in f_layer_type or
'pool' in f_layer_type)
gen_id = lambda s: '_'.join(['g', str(num), s])
in_dim = np.prod(dtype=floatX, a=in_dims)
out_dim = np.prod(dtype=floatX, a=out_dims)
num_filters = out_dims[0] if is_conv else out_dim
if l_type[-1] in ['0']:
g_type, u_type = l_type[:-1], l_type[-1]
else:
g_type, u_type = l_type, None
# Mapping from layer above: u
if u_type in ['0'] or z_ver is None:
if z_ver is None and u_type not in ['0']:
logger.warn('Decoder %d:%s without vertical input' %
(num, g_type))
u = None
else:
if top_g:
u = z_ver
elif is_conv:
u = self.g_deconv(z_ver, in_dims, out_dims, gen_id('W'), fspec)
else:
W = self.weight(self.rand_init(in_dim, out_dim), gen_id('W'))
u = T.dot(z_ver, W)
# Batch-normalize u
if u is not None:
norm_ax = (0,) if u.ndim <= 2 else (0, -2, -1)
keep_dims = True
u -= u.mean(norm_ax, keepdims=keep_dims)
u /= T.sqrt(u.var(norm_ax, keepdims=keep_dims) +
np.float32(1e-10))
# Define the g function
if not is_conv:
z_lat = z_lat.flatten(2)
bi = lambda inits, name: self.bias(inits *
|
np.ones(num_filters)
|
numpy.ones
|
import ctypes
from .lib import SMLM
import numpy as np
import numpy.ctypeslib as ctl
class PostProcessMethods:
def __init__(self, ctx):
self.lib = ctx.smlm.lib
#CDLL_EXPORT void LinkLocalizations(int numspots, int* frames, Vector2f* xyI, float maxDist, int frameskip, int *linkedSpots)
self._LinkLocalizations = self.lib.LinkLocalizations
self._LinkLocalizations.argtypes = [
ctypes.c_int32, # numspots
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # framenum
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # xyI
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # crlbXYI
ctypes.c_float, # maxdist (in crlbs)
ctypes.c_float, # max intensity distance (in crlb's)
ctypes.c_int32, # frameskip
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # linkedspots
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # startframes
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # framecounts
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # linkedXYI
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # linkedCRLBXYI
]
self._LinkLocalizations.restype = ctypes.c_int32
#(const Vector2f* xy, const int* spotFramenum, int numspots,
#float sigma, int maxiterations, Vector2f* driftXY, float gradientStep, float maxdrift, float* scores, int flags)
self.ProgressCallback = ctypes.CFUNCTYPE(
ctypes.c_int32, # continue
ctypes.c_int32, # iteration
ctypes.c_char_p
)
self._MinEntropyDriftEstimate = self.lib.MinEntropyDriftEstimate
self._MinEntropyDriftEstimate.argtypes = [
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # xy: float[numspots, dims]
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # crlb: float[numspots, dims] or float[dims]
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # framenum
ctypes.c_int32, # numspots
ctypes.c_int32, #maxit
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # drift XY
ctypes.c_int32, # framesperbin
ctypes.c_float, # gradientstep
ctypes.c_float, # maxdrift
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # scores
ctypes.c_int32, # flags
ctypes.c_int32, # maxneighbors
self.ProgressCallback] # progress callback
self._MinEntropyDriftEstimate.restype = ctypes.c_int32
#void ComputeContinuousFRC(const float* data, int dims, int numspots, const float* rho, int nrho, float* frc, float maxDistance, bool useCuda)
self._ComputeContinuousFRC = self.lib.ComputeContinuousFRC
self._ComputeContinuousFRC.argtypes = [
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # data (xy or xyz)
ctypes.c_int32, # number of dimensions
ctypes.c_int32, # numspots
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # rho
ctypes.c_int32, # nrho
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # output frc
ctypes.c_float, # maxdistance
ctypes.c_bool, # usecuda
ctypes.c_float, # cutoffDist
ctypes.c_float # cutoffSigma
]
#(int startA, int numspotsA, const int* counts, const int* indicesB, int numIndicesB);
self.FindNeighborCallback = ctypes.CFUNCTYPE(
ctypes.c_int32, # continue
ctypes.c_int32, # startA
ctypes.c_int32, # numspotsA
ctypes.POINTER(ctypes.c_int32),
ctypes.POINTER(ctypes.c_int32),
ctypes.c_int32 # numindices
)
#(int startA, int numspotsA, const int* counts, const int* indicesB, int numIndicesB);
self._ClusterLocsCallback = ctypes.CFUNCTYPE(
ctypes.c_int32, # continue
ctypes.POINTER(ctypes.c_int32),
ctypes.POINTER(ctypes.c_float),
)
# int FindNeighbors(int numspotsA, const float* coordsA, int numspotsB, const float* coordsB, int dims, float maxDistance, int minBatchSize,
#FindNeighborCallback cb)
self._FindNeighbors = self.lib.FindNeighbors
self._FindNeighbors.argtypes = [
ctypes.c_int32, # numspotsA
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # coordsA
ctypes.c_int32, # numspotsB
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # coordsB
ctypes.c_int32, # dims
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # maxDistance[dims]
ctypes.c_int32, # minBatchSize
self.FindNeighborCallback
]
self._FindNeighbors.restype = ctypes.c_int32
#int ClusterLocs(int dims, float* pos, int* mappingToNew, const float* distance, int numspots, ClusterLocsCallback callback)
self._ClusterLocs = self.lib.ClusterLocs
self._ClusterLocs.argtypes = [
ctypes.c_int32,
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # pos
ctl.ndpointer(np.int32, flags="aligned, c_contiguous"), # mapping
ctl.ndpointer(np.float32, flags="aligned, c_contiguous"), # distance
ctypes.c_int32,
self._ClusterLocsCallback
]
self._ClusterLocs.restype= ctypes.c_int32
def ClusterLocs(self, points, crlb, dist, cb=None):
"""
points: [numspots, numdims]
crlb: [numspots, numdims]
"""
numdims = points.shape[1]
newpts = np.ascontiguousarray(points*1,dtype=np.float32)
newcrlb = np.ascontiguousarray(crlb*1,dtype=np.float32)
mapping = np.zeros(len(points), dtype=np.int32)
if np.isscalar(dist):
dist = (np.ones(numdims)*dist).astype(np.float32)
else:
dist = np.ascontiguousarray(dist,dtype=np.float32)
def callback_(mappingPtr, centerPtr):
mapping = ctl.as_array(mappingPtr, (len(points),))
nclust=np.max(mapping)+1
centers = ctl.as_array(centerPtr, (nclust,numdims))
if cb is not None:
r = cb(mapping,centers)
if r is None:
return 1
return r
return 1
newcount = self._ClusterLocs(numdims, newpts, mapping, dist, len(points), self._ClusterLocsCallback(callback_))
if newcount >= 0:
return newpts[:newcount], newcrlb[:newcount], mapping
raise ValueError('Something went wrong in ClusterLocs')
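# Usage sketch (not part of the original source; assumes `ctx` is a context
# object whose smlm.lib attribute is the loaded native SMLM library, and the
# data below are hypothetical):
#   pp = PostProcessMethods(ctx)
#   pts = np.random.uniform(0, 50, size=(1000, 2)).astype(np.float32)
#   crlb = np.full((1000, 2), 0.02, dtype=np.float32)
#   clustered_pts, clustered_crlb, mapping = pp.ClusterLocs(pts, crlb, dist=0.05)
#   # mapping[i] is the cluster index assigned to input localization i.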
def FindNeighborsIterative(self, pointsA, pointsB, maxDistance, minBatchSize, callback):
pointsA = np.ascontiguousarray(pointsA,dtype=np.float32)
pointsB = np.ascontiguousarray(pointsB,dtype=np.float32)
assert len(pointsA.shape)==2
assert len(pointsB.shape)==2
dims = pointsA.shape[1]
assert(pointsB.shape[1] == dims)
def callback_(startA, numspotsA, countsPtr, indicesPtr, numIndices):
#print(f"num indices: {numIndices}. numspotsA: {numspotsA}")
counts = ctl.as_array(countsPtr, (numspotsA,))
if numIndices == 0:
indices = np.zeros(0,dtype=np.int32)
else:
indices = ctl.as_array(indicesPtr, (numIndices,))
r = callback(startA, counts, indices)
if r is None:
return 1
return r
if np.isscalar(maxDistance):
maxDistance = np.ones(dims,dtype=np.float32)*maxDistance
else:
maxDistance = np.ascontiguousarray(maxDistance, dtype=np.float32)
assert(len(maxDistance) == dims)
cb = self.FindNeighborCallback(callback_)
self._FindNeighbors(len(pointsA), pointsA, len(pointsB), pointsB, pointsA.shape[1], maxDistance, minBatchSize, cb)
def FindNeighbors(self, pointsA, pointsB, maxDistance):
counts = []
indices = []
def callback(startA, counts_, indices_):
counts.append(counts_.copy())
indices.append(indices_.copy())
self.FindNeighborsIterative(pointsA, pointsB, maxDistance, minBatchSize=10000, callback=callback)
if len(counts) == 0:
return [],[]
return np.concatenate(counts), np.concatenate(indices)
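# Usage sketch (hypothetical data; `pp` as constructed in the sketch above):
# counts[i] is the number of neighbours of pointsA[i] within maxDistance, and
# `indices` holds the matching indices into pointsB, concatenated in order.
#   ptsA = np.random.uniform(0, 10, size=(500, 2)).astype(np.float32)
#   ptsB = np.random.uniform(0, 10, size=(800, 2)).astype(np.float32)
#   counts, indices = pp.FindNeighbors(ptsA, ptsB, maxDistance=0.5)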
def LinkLocalizations(self, xyI, crlbXYI, framenum, maxdist, maxIntensityDist, frameskip):
"""
linked: int [numspots], all spots that are linked will have the same index in linked array.
"""
xyI = np.ascontiguousarray(xyI,dtype=np.float32)
crlbXYI = np.ascontiguousarray(crlbXYI,dtype=np.float32)
framenum = np.ascontiguousarray(framenum, dtype=np.int32)
linked = np.zeros(len(xyI),dtype=np.int32)
framecounts = np.zeros(len(xyI),dtype=np.int32)
startframes = np.zeros(len(xyI),dtype=np.int32)
resultXYI = np.zeros(xyI.shape,dtype=np.float32)
resultCRLBXYI = np.zeros(crlbXYI.shape,dtype=np.float32)
assert crlbXYI.shape[1] == 3
assert xyI.shape[1] == 3
assert len(xyI) == len(crlbXYI)
nlinked = self._LinkLocalizations(len(xyI), framenum, xyI, crlbXYI, maxdist, maxIntensityDist,
frameskip, linked, startframes, framecounts, resultXYI, resultCRLBXYI)
startframes = startframes[:nlinked]
framecounts = framecounts[:nlinked]
resultXYI = resultXYI[:nlinked]
resultCRLBXYI = resultCRLBXYI[:nlinked]
return linked, framecounts,startframes, resultXYI, resultCRLBXYI
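# Usage sketch (hypothetical data; `pp` as constructed earlier; xyI and crlbXYI
# must both have shape [numspots, 3] holding x, y and intensity):
#   linked, framecounts, startframes, xyI_linked, crlb_linked = \
#       pp.LinkLocalizations(xyI, crlbXYI, framenum, maxdist=3.0,
#                            maxIntensityDist=5.0, frameskip=1)
#   # linked[i] gives the linked-spot index of the i-th input localization;
#   # the remaining arrays are truncated to the number of linked spots.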
def MinEntropyDriftEstimate(self, positions, framenum, drift, crlb, iterations,
stepsize, maxdrift, framesPerBin=1, cuda=False, progcb=None,flags=0,
maxneighbors=10000):
positions = np.ascontiguousarray(positions,dtype=np.float32)
framenum = np.ascontiguousarray(framenum,dtype=np.int32)
drift = np.ascontiguousarray(drift,dtype=np.float32)
nframes = np.max(framenum)+1
assert len(drift)>=nframes and drift.shape[1]==positions.shape[1]
if len(drift)>nframes:
drift = drift[:nframes]
drift =
|
np.ascontiguousarray(drift,dtype=np.float32)
|
numpy.ascontiguousarray
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Build stitched Spitzer/IRS spectral cube
Inter-calibrate Spitzer/IRS spectra with Spitzer/IRAC4 (SL) & MIPS1 (LL)
"""
# import logging, sys
# logging.basicConfig(stream=sys.stderr, level=logging.ERROR)
# print(logging.getLogger())
# logging.disable(sys.maxsize) # disable IDL print
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
from tqdm import tqdm, trange
import os
import numpy as np
from scipy.optimize import curve_fit
from scipy.integrate import trapz
from matplotlib.ticker import ScalarFormatter, NullFormatter
## rapyuta
from rapyuta.inout import fclean, fitsext, read_fits, write_fits
from rapyuta.imaging import ( iconvolve, iswarp, iuncert, concatenate,
imontage, improve, Jy_per_pix_to_MJy_per_sr )
from rapyuta.astrom import fixwcs
from rapyuta.arrays import closest
from rapyuta.calib import intercalib
from rapyuta.maths import f_lin, f_lin0, f_lin1
from rapyuta.plots import pplot
##----------------------------------------------------------
## Preliminaries
##----------------------------------------------------------
## Local
from buildinfo import ( src, Nmc, verbose, coadd_tool, coadd_fp,
chnl, fits_irs, out_irs, out_irc,
path_idl, path_ker, path_conv, path_phot, path_cal,
fits_ker, csv_ker, path_tmp, path_fig
)
## Banner
print('\n============================================================\n')
print(' MIRAGE - Spitzer/IRS cube builder - '+src)
print('\n============================================================\n')
Nch = len(chnl)
## Do not stitch SL3 and LL3
if ('SL3' in chnl) and ('LL3' in chnl):
Nch_s = Nch - 2
else:
Nch_s = Nch
# Nmc = 2
if ('SH' in chnl) or ('LH' in chnl):
phot = ['MIPS1']
else:
phot = ['IRAC4', 'MIPS1']
Nphot = len(phot)
##----------------------------------------------------------
## Coadd observations
##----------------------------------------------------------
irs_coadd = input("Coadd IRS observations (y/n)? ")
if irs_coadd=='y':
## Coadd with adding MC unc
##--------------------------
if coadd_tool=='swarp':
## <iswarp> coadding
##===================
swp = iswarp(refheader=coadd_fp,
tmpdir=path_tmp, verbose=verbose)
for i in trange(Nch, #leave=False,
desc='<iswarp> IRS Coadding ({} chnl)'.format(Nch)):
for j in trange(Nmc+1, leave=False,
desc='<iswarp> IRS Coadding [MC]'):
if j==0:
swp.combine(fits_irs[i], combtype='wgt_avg',
keepedge=True, cropedge=False,
tmpdir=path_tmp+'MC_no/',
filOUT=path_tmp+src+'_'+chnl[i])
else:
swp.combine(fits_irs[i], combtype='wgt_avg',
keepedge=True, cropedge=False, uncpdf='norm',
tmpdir=path_tmp+'MC_'+str(j)+'/',
filOUT=path_tmp+src+'_'+chnl[i]+'_'+str(j))
elif coadd_tool=='reproject':
## <imontage> coadding
##=====================
mtg = imontage('exact', tmpdir=path_tmp, verbose=verbose)
for i in trange(Nch, #leave=False,
desc='<imontage> IRS Coadding ({} chnl)'.format(Nch)):
mtg.coadd(fits_irs[i], refheader=coadd_fp,
dist='norm', Nmc=Nmc,
filOUT=path_tmp+src+'_'+chnl[i])
## PSF Convolution
##-----------------
for i in trange(Nch,
desc='<iconvolve> IRS Smoothing ({} chnl)'.format(Nch)):
for j in trange(Nmc+1, leave=False,
desc='<iconvolve> IRS Smoothing [MC]'):
if j==0:
conv = iconvolve(path_tmp+src+'_'+chnl[i],
kfile=fits_ker, klist=csv_ker, convdir=path_conv,
filOUT=path_tmp+src+'_'+chnl[i]+'_conv')
else:
conv = iconvolve(path_tmp+src+'_'+chnl[i]+'_'+str(j),
kfile=fits_ker, klist=csv_ker, convdir=path_conv,
filOUT=path_tmp+src+'_'+chnl[i]+'_'+str(j)+'_conv')
conv.do_conv(idldir=path_idl)
## Cal unc (chnl)
##----------------
# for i in trange(Nch,
# desc='IRS Cal unc ({} chnl)'.format(Nch)):
# mcimage = []
# for j in trange(Nmc+1, leave=False,
# desc='IRS Reading [MC]'):
# if j==0:
# hd = read_fits(path_tmp+src+'_'+chnl[i]+'_conv')
# header = hd.header
# wvl = hd.wave
# else:
# hd = read_fits(path_tmp+src+'_'+chnl[i]+'_'+str(j)+'_conv')
# mcimage.append(hd.data)
# if Nmc>1:
# mcimage = np.array(mcimage)
# unc = np.nanstd(mcimage, axis=0)
# write_fits(path_tmp+src+'_'+chnl[i]+'_conv_unc', header, unc, wvl)
##----------------------------------------------------------
## Stitch IRS spectra
##----------------------------------------------------------
concat_irs = input("Stitch IRS spectra (y/n)? ")
lores_match = input(" - Match SL2-SL1 and LL2-LL1 (y/n)? ")
hires_match = input(" - Match SH-LH (y/n)? ")
keep_frag = input(" - Keep fragmentary spectra (y/n)? ")
if keep_frag=='y':
keepfrag = True
cropedge = False
else:
keepfrag = False
crop_edge_frag = input(" - Crop edge if not keeping frag spectra (y/n)? ")
if crop_edge_frag=='y':
cropedge = True
else:
cropedge = False
## Match SL2-SL1 and LL2-LL1
##---------------------------
if lores_match=='y':
## SL3
data_sl3 = read_fits(path_tmp+src+'_SL3_conv').data
data_sl2 = read_fits(path_tmp+src+'_SL2_conv').data
data_sl1 = read_fits(path_tmp+src+'_SL1_conv').data
wvl_sl3 = read_fits(path_tmp+src+'_SL3_conv').wave
wvl_sl2 = read_fits(path_tmp+src+'_SL2_conv').wave
wvl_sl1 = read_fits(path_tmp+src+'_SL1_conv').wave
iwmax_sl2 = closest(wvl_sl3, wvl_sl2[-1], side='left') + 1 # SL3 index
iwmin_sl1 = closest(wvl_sl3, wvl_sl1[0], side='right') # SL3 index
left_sl3 = trapz(data_sl3[:iwmax_sl2], wvl_sl3[:iwmax_sl2],
dx=wvl_sl3[1]-wvl_sl3[0], axis=0)
right_sl3 = trapz(data_sl3[iwmin_sl1:], wvl_sl3[iwmin_sl1:],
dx=wvl_sl3[1]-wvl_sl3[0], axis=0)
## LL3
data_ll3 = read_fits(path_tmp+src+'_LL3_conv').data
data_ll2 = read_fits(path_tmp+src+'_LL2_conv').data
data_ll1 = read_fits(path_tmp+src+'_LL1_conv').data
wvl_ll3 = read_fits(path_tmp+src+'_LL3_conv').wave
wvl_ll2 = read_fits(path_tmp+src+'_LL2_conv').wave
wvl_ll1 = read_fits(path_tmp+src+'_LL1_conv').wave
iwmax_ll2 = closest(wvl_ll3, wvl_ll2[-1], side='left') + 1 # LL3 index
iwmin_ll1 = closest(wvl_ll3, wvl_ll1[0], side='right') # LL3 index
left_ll3 = trapz(data_ll3[:iwmax_ll2], wvl_ll3[:iwmax_ll2],
dx=wvl_ll3[1]-wvl_ll3[0], axis=0)
right_ll3 = trapz(data_ll3[iwmin_ll1:], wvl_ll3[iwmin_ll1:],
dx=wvl_ll3[1]-wvl_ll3[0], axis=0)
iwmin_sl3 = closest(wvl_sl2, wvl_sl3[0], side='right') # SL2 index
iwmax_sl3 = closest(wvl_sl1, wvl_sl3[-1], side='left') + 1 # SL1 index
iwmin_ll3 = closest(wvl_ll2, wvl_ll3[0], side='right') # LL2 index
iwmax_ll3 = closest(wvl_ll1, wvl_ll3[-1], side='left') + 1 # LL1 index
right_sl2 = trapz(data_sl2[iwmin_sl3:], wvl_sl2[iwmin_sl3:],
dx=wvl_sl2[1]-wvl_sl2[0], axis=0)
left_sl1 = trapz(data_sl1[:iwmax_sl3], wvl_sl1[:iwmax_sl3],
dx=wvl_sl1[1]-wvl_sl1[0], axis=0)
right_ll2 = trapz(data_ll2[iwmin_ll3:], wvl_ll2[iwmin_ll3:],
dx=wvl_ll2[1]-wvl_ll2[0], axis=0)
left_ll1 = trapz(data_ll1[:iwmax_ll3], wvl_ll1[:iwmax_ll3],
dx=wvl_ll1[1]-wvl_ll1[0], axis=0)
Ny, Nx = data_sl2.shape[1:]
scale = np.zeros((4,Ny,Nx))
scale[0] = left_sl3 / right_sl2
scale[1] = right_sl3 / left_sl1
scale[2] = left_ll3 / right_ll2
scale[3] = right_ll3 / left_ll1
## Display scaling factor map
mask_sca = ~
|
np.isnan(scale[0])
|
numpy.isnan
|
"""
Defines the Circuit class
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import collections as _collections
import itertools as _itertools
import warnings as _warnings
import numpy as _np
from pygsti.baseobjs.label import Label as _Label, CircuitLabel as _CircuitLabel
from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat
from pygsti.tools import internalgates as _itgs
from pygsti.tools import slicetools as _slct
#Internally:
# when static: a tuple of Label objects labelling each top-level circuit layer
# when editable: a list of lists, one per top-level layer, holding just
# the non-LabelTupTup (non-compound) labels.
#Externally, we'd like to do things like:
# c = Circuit( LabelList )
# c.append_line("Q0")
# c.append_layer(layer_label)
# c[2]['Q0'] = 'Gx' # puts Gx:Q0 into circuit (at 3rd layer)
# c[2,'Q0'] = 'Gx'
# c[2,('Q0','Q1')] = Label('Gcnot') # puts Gcnot:Q0:Q1 into circuit
# c[2,('Q1','Q0')] = 'Gcnot' # puts Gcnot:Q1:Q0 into circuit
# c[2] = (Label('Gx','Q0'), Label('Gy','Q1')) # assigns a circuit layer
# c[2,:] = (Label('Gx','Q0'), Label('Gy','Q1')) # assigns a circuit layer
# del c[2]
# c.insert(2, (Label('Gx','Q0'), Label('Gy','Q1')) ) # inserts a layer
# c[:,'Q0'] = ('Gx','Gy','','Gx') # assigns the Q0 line
# c[1:3,'Q0'] = ('Gx','Gy') # assigns to a part of the Q0 line
def _np_to_quil_def_str(name, input_array):
"""
Write a DEFGATE block for RQC quil for an arbitrary one- or two-qubit unitary gate.
(quil/pyquil currently does not offer support for arbitrary n-qubit gates for
n>2.)
Parameters
----------
name : str
The name of the gate (e.g., 'Gc0' for the 0th Clifford gate)
input_array : array_like
The representation of the gate as a unitary map.
E.g., for name = 'Gc0', input_array = np.array([[1, 0], [0, 1]])
Returns
-------
output : str
A block of quil code (as a string) that should be included before circuit
declaration in any quil circuit that uses the specified gate.
"""
output = 'DEFGATE {}:\n'.format(name)
for line in input_array:
output += ' '
output += ', '.join(map(_num_to_rqc_str, line))
output += '\n'
return output
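# Illustrative sketch (not from the original source): for a 2x2 identity the
# call below returns a DEFGATE block roughly of the form
#   DEFGATE Gid:
#       1.0, 0.0
#       0.0, 1.0
# with the exact number formatting determined by _num_to_rqc_str; the gate
# name 'Gid' is hypothetical.
#   defgate_block = _np_to_quil_def_str('Gid', _np.eye(2))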
def _num_to_rqc_str(num):
"""Convert float to string to be included in RQC quil DEFGATE block
(as written by _np_to_quil_def_str)."""
num = _np.complex(
|
_np.real_if_close(num)
|
numpy.real_if_close
|
import collections
import random
import numpy as np
import configparser
import json
import datasets
#
class MyConfigParser(configparser.ConfigParser):
def optionxform(self, optionstr):
return optionstr
class MyDict(dict):
__setattr__ = dict.__setitem__
__getattr__ = dict.__getitem__
class RemovedConfig(object):
def __init__(self, config_path):
config = MyConfigParser()
config.read(config_path, encoding="utf-8")
self.configs = self.dictobj2obj(config.__dict__["_sections"])
def dictobj2obj(self, dictobj):
if not isinstance(dictobj, dict):
return dictobj
d = MyDict()
for k,v in dictobj.items():
d[k] = self.dictobj2obj(v)
return d
def get_configs(self):
return self.configs
class Config(object):
def __init__(self, config_path):
configs = json.load(open(config_path, "r", encoding="utf-8"))
self.configs = self.dictobj2obj(configs)
def dictobj2obj(self, dictobj):
if not isinstance(dictobj, dict):
return dictobj
d = MyDict()
for k,v in dictobj.items():
d[k] = self.dictobj2obj(v)
return d
def get_configs(self):
return self.configs
class Vocabulary(object):
def __init__(self, data_path, max_len=200, min_len=5, word_drop=5, encoding='utf8'):
if type(data_path) == str:
data_path = [data_path]
self._data_path = data_path
self._max_len = max_len
self._min_len = min_len
self._word_drop = word_drop
self._encoding = encoding
self.token_num = 0
self.vocab_size_raw = 0
self.vocab_size = 0
self.w2i = {}
self.i2w = {}
self.start_words = []
self._build_vocabulary()
def _build_vocabulary(self):
self.w2i['_PAD'] = 0
self.w2i['_UNK'] = 1
self.w2i['_BOS'] = 2
self.w2i['_EOS'] = 3
self.i2w[0] = '_PAD'
self.i2w[1] = '_UNK'
self.i2w[2] = '_BOS'
self.i2w[3] = '_EOS'
words_all = []
start_words = []
for data_path in self._data_path:
with open(data_path, 'r', encoding=self._encoding) as f:
sentences = f.readlines()
for sentence in sentences:
# _ = list(filter(lambda x: x not in [None, ''], sentence.split()))
_ = sentence.split()
if (len(_) >= self._min_len) and (len(_) <= self._max_len):
words_all.extend(_)
start_words.append(_[0])
self.token_num = len(words_all)
word_distribution = sorted(collections.Counter(words_all).items(), key=lambda x: x[1], reverse=True)
self.vocab_size_raw = len(word_distribution)
for (word, value) in word_distribution:
if value > self._word_drop:
self.w2i[word] = len(self.w2i)
self.i2w[len(self.i2w)] = word
self.vocab_size = len(self.i2w)
start_word_distribution = sorted(collections.Counter(start_words).items(), key=lambda x: x[1], reverse=True)
self.start_words = [_[0] for _ in start_word_distribution]
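# Usage sketch (hypothetical file path): builds word<->id maps from whitespace-
# tokenized sentences, keeping only words that occur more than `word_drop` times.
#   vocab = Vocabulary('train.txt', max_len=200, min_len=5, word_drop=5)
#   unk = vocab.w2i['_UNK']
#   ids = [vocab.w2i.get(w, unk) for w in 'an example sentence'.split()]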
class UNK_Vocabulary(object):
def __init__(self, data_path, max_len=200, min_len=5, word_drop=5, encoding='utf8'):
if type(data_path) == str:
data_path = [data_path]
self._data_path = data_path
self._max_len = max_len
self._min_len = min_len
self._word_drop = word_drop
self._encoding = encoding
self.token_num = 0
self.vocab_size_raw = 0
self.vocab_size = 0
self.w2i = {}
self.i2w = {}
self.start_words = []
self._build_vocabulary()
def _build_vocabulary(self):
# self.w2i['_PAD'] = 0
# self.w2i['_UNK'] = 1
# self.w2i['_BOS'] = 2
# self.w2i['_EOS'] = 3
# self.i2w[0] = '_PAD'
# self.i2w[1] = '_UNK'
# self.i2w[2] = '_BOS'
# self.i2w[3] = '_EOS'
words_all = []
start_words = []
for data_path in self._data_path:
with open(data_path, 'r', encoding=self._encoding) as f:
sentences = f.readlines()
for sentence in sentences:
# _ = list(filter(lambda x: x not in [None, ''], sentence.split()))
_ = sentence.split()
if (len(_) >= self._min_len) and (len(_) <= self._max_len):
words_all.extend(_)
start_words.append(_[0])
self.token_num = len(words_all)
word_distribution = sorted(collections.Counter(words_all).items(), key=lambda x: x[1], reverse=True)
self.vocab_size_raw = len(word_distribution)
for (word, value) in word_distribution:
if value <= self._word_drop:
self.w2i[word] = len(self.w2i)
self.i2w[len(self.i2w)] = word
self.vocab_size = len(self.i2w)
self.unk_distribution = np.zeros(self.vocab_size)
for (w, c) in word_distribution:
if c <= self._word_drop:
self.unk_distribution[self.w2i[w]] = c
self.unk_distribution = self.unk_distribution/np.sum(self.unk_distribution)
start_word_distribution = sorted(collections.Counter(start_words).items(), key=lambda x: x[1], reverse=True)
self.start_unk_distribution = []
for (w,c) in start_word_distribution:
if c <= self._word_drop:
self.start_unk_distribution.append(c)
self.start_unk_distribution = np.array(self.start_unk_distribution)
self.start_unk_distribution = self.start_unk_distribution/np.sum(self.start_unk_distribution)
self.start_words = [_[0] for _ in start_word_distribution]
def sample(self):
cand_ = [i for i in range(self.vocab_size)]
id = np.random.choice(cand_,1, p=self.unk_distribution)[0]
return id
def start_sample(self):
cand_ = [i for i in range(len(self.start_unk_distribution))]
id = np.random.choice(cand_,1, p=self.start_unk_distribution)[0]
return id
class Corpus(object):
def __init__(self, data_path, vocabulary, max_len=200, min_len=5):
if type(data_path) == str:
data_path = [data_path]
self._data_path = data_path
self._vocabulary = vocabulary
self._max_len = max_len
self._min_len = min_len
self.corpus = []
self.corpus_length = []
self.labels = []
self.sentence_num = 0
self.max_sentence_length = 0
self.min_sentence_length = 0
self._build_corpus()
def _build_corpus(self):
def _transfer(word):
try:
return self._vocabulary.w2i[word]
except:
return self._vocabulary.w2i['_UNK']
label = -1
for data_path in self._data_path:
label += 1
with open(data_path, 'r', encoding='utf8') as f:
sentences = f.readlines()
# sentences = list(filter(lambda x: x not in [None, ''], sentences))
for sentence in sentences:
# sentence = list(filter(lambda x: x not in [None, ''], sentence.split()))
sentence = sentence.split()
if (len(sentence) >= self._min_len) and (len(sentence) <= self._max_len):
sentence = ['_BOS'] + sentence + ['_EOS']
self.corpus.append(list(map(_transfer, sentence)))
self.labels.append(label)
self.corpus_length = [len(i) for i in self.corpus]
self.max_sentence_length = max(self.corpus_length)
self.min_sentence_length = min(self.corpus_length)
self.sentence_num = len(self.corpus)
def split_corpus(data_path, train_path, test_path, max_len=200, min_len=5, ratio=0.8, seed=0, encoding='utf8',is_inverse=False, inverse_mode=0):
with open(data_path, 'r', encoding=encoding) as f:
sentences = f.readlines()
sentences = [_ for _ in filter(lambda x: x not in [None, ''], sentences)
if len(_.split()) <= max_len and len(_.split()) >= min_len]
np.random.seed(seed)
np.random.shuffle(sentences)
train = sentences[:int(len(sentences) * ratio)]
test = sentences[int(len(sentences) * ratio):]
if is_inverse:
if inverse_mode == 0:
with open(train_path, 'w', encoding='utf8') as f:
for sentence in train:
f.write(" ".join(sentence.split()[::-1]) + "\n")
with open(test_path, 'w', encoding='utf8') as f:
for sentence in test:
f.write(" ".join(sentence.split()[::-1]) + "\n")
if inverse_mode == 1:
new_sentences = []
for sentence in sentences:
words = sentence.split()
for i in range(len(words)):
new_sentences.append(" ".join(words[:i+1][::-1]) + "\n")
np.random.shuffle(new_sentences)
new_sentences = new_sentences[:2000000] # down sampling
train = new_sentences[:int(len(new_sentences) * ratio)]
test = new_sentences[int(len(new_sentences) * ratio):]
with open(train_path, 'w', encoding='utf8') as f:
for sentence in train:
f.write(sentence)
with open(test_path, 'w', encoding='utf8') as f:
for sentence in test:
f.write(sentence)
else:
with open(train_path, 'w', encoding='utf8') as f:
for sentence in train:
f.write(sentence)
with open(test_path, 'w', encoding='utf8') as f:
for sentence in test:
f.write(sentence)
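# Usage sketch (hypothetical file names): shuffles the corpus with a fixed seed
# and writes an 80/20 train/test split; with is_inverse=True the sentences are
# written word-reversed (inverse_mode=0) or as reversed prefixes (inverse_mode=1).
#   split_corpus('corpus.txt', 'train.txt', 'test.txt', ratio=0.8, seed=0)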
class Generator(object):
def __init__(self, data):
self._data = data
def build_generator(self, batch_size, sequence_len, shuffle=True):
if shuffle:
np.random.shuffle(self._data)
data_ = []
for _ in self._data:
data_.extend(_)
batch_num = len(data_) // (batch_size * sequence_len)
data = data_[:batch_size * batch_num * sequence_len]
data =
|
np.array(data)
|
numpy.array
|
"""主にnumpy配列(rows×cols×channels(RGB))の画像処理関連。
uint8のRGBで0~255として扱うのを前提とする。
あとグレースケールの場合もrows×cols×1の配列で扱う。
"""
import atexit
import functools
import io
import pathlib
import random
import shutil
import tempfile
import typing
import warnings
import cv2
import numba
import numpy as np
import PIL.Image
import pytoolkit as tk
_load_cache = None
_diskcache_load_failed = False
def _clear_cache(dc):
"""キャッシュのクリア。"""
cache_dir = dc.directory
dc.close()
shutil.rmtree(cache_dir, ignore_errors=True)
def _float_to_uint8(func):
"""floatからnp.uint8への変換。"""
@functools.wraps(func)
def float_to_uint8_func(*args, **kwargs):
return np.clip(func(*args, **kwargs), 0, 255).astype(np.uint8)
return float_to_uint8_func
def load_with_cache(
path_or_array: typing.Union[np.ndarray, io.IOBase, str, pathlib.Path],
grayscale=False,
use_cache=True,
max_size=None,
use_temp_dir=None,
) -> np.ndarray:
"""画像の読み込み。
Args:
path_or_array: 画像ファイルへのパス or npy/npzファイルへのパス or ndarray
grascale: Trueならグレースケールで読み込み、FalseならRGB
use_cache: 読み込み結果をdiskcacheライブラリでキャッシュするならTrue
max_size: このサイズを超えるなら縮小する。int or tuple。tupleは(height, width)
use_temp_dir: キャッシュを保存する場所。Noneだったらtempfile.gettempdir()の場所を使う
Returns:
読み込み結果のndarray。
"""
max_size = tk.utils.normalize_tuple(max_size, 2) if max_size is not None else None
def _load():
img = load(path_or_array, grayscale=grayscale)
if max_size is not None and (
img.shape[0] > max_size[0] or img.shape[1] > max_size[1]
):
r0 = max_size[0] / img.shape[0]
r1 = max_size[1] / img.shape[1]
r = min(r0, r1)
img = resize(
img, int(round(img.shape[1] * r)), int(round(img.shape[0] * r))
)
return img
if use_cache and isinstance(path_or_array, (str, pathlib.Path)):
global _load_cache
global _diskcache_load_failed
if _load_cache is None and not _diskcache_load_failed:
# Could this end up being called more than once?
# Also a bit worrying with multiple processes.
temp_dir = tempfile.mkdtemp(suffix="pytoolkit",dir=use_temp_dir) if use_temp_dir is None else use_temp_dir
try:
import diskcache
_load_cache = diskcache.Cache(temp_dir)
atexit.register(_clear_cache, _load_cache)
except BaseException:
pathlib.Path(temp_dir).rmdir()
_diskcache_load_failed = True
tk.log.get(__name__).warning("diskcache load failed.", exc_info=True)
if _load_cache is not None:
key = f"{path_or_array}::{max_size}"
img = _load_cache.get(key)
if img is None:
img = _load()
_load_cache.set(key, img)
return img
return _load()
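# Usage sketch (hypothetical path): loads an RGB image as a uint8 ndarray of
# shape (rows, cols, 3), shrinking it to fit within 512x512 and caching the
# decoded result via diskcache when that library is available.
#   img = load_with_cache('sample.jpg', max_size=512)
#   assert img.dtype == np.uint8 and img.ndim == 3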
def load(
path_or_array: typing.Union[np.ndarray, io.IOBase, str, pathlib.Path],
grayscale=False,
) -> np.ndarray:
"""画像の読み込みの実装。"""
assert path_or_array is not None
if isinstance(path_or_array, np.ndarray):
# If it is already an ndarray, treat it as the image as-is
img = np.copy(path_or_array)  # copy just in case
assert img.dtype == np.uint8, f"ndarray dtype error: {img.dtype}"
else:
suffix = (
pathlib.Path(path_or_array).suffix.lower()
if isinstance(path_or_array, (str, pathlib.Path))
else None
)
if suffix in (".npy", ".npz"):
# For .npy/.npz files, load the array and treat it directly as the image
img = np.load(str(path_or_array))
if isinstance(img, np.lib.npyio.NpzFile):
if len(img.files) != 1:
raise ValueError(
f'Image load failed: "{path_or_array}"" has multiple keys. ({img.files})'
)
img = img[img.files[0]]
assert img.dtype == np.uint8, f"{suffix} dtype error: {img.dtype}"
else:
# Load with PIL
try:
with PIL.Image.open(path_or_array) as pil_img:
target_mode = "L" if grayscale else "RGB"
if pil_img.mode != target_mode:
pil_img = pil_img.convert(target_mode)
img = np.asarray(pil_img, dtype=np.uint8)
except BaseException as e:
raise ValueError(f"Image load failed: {path_or_array}") from e
if img is None:
raise ValueError(f"Image load failed: {path_or_array}")
if len(img.shape) == 2:
img = np.expand_dims(img, axis=-1)
if len(img.shape) != 3:
raise ValueError(f"Image load failed: shape={path_or_array}")
return img
def save(path: typing.Union[str, pathlib.Path], img: np.ndarray, quality: int = 75):
"""画像の保存。
やや余計なお世話だけど0~255にクリッピング(飽和)してから保存する。
Args:
path: 保存先ファイルパス。途中のディレクトリが無ければ自動的に作成。
img: 画像のndarray。shape=(height, width, 3)のRGB画像。dtypeはnp.uint8
quality: 独自追加 画像の保存クオリティ jpgのみ使用
"""
assert len(img.shape) == 3
if img.dtype != np.uint8:
warnings.warn(f"Invalid dtype: {img.dtype} (shape={img.shape})")
path = pathlib.Path(path)
path.parent.mkdir(parents=True, exist_ok=True)
img =
|
np.clip(img, 0, 255)
|
numpy.clip
|
"""
Notes: Currently uses an NFW fit as the caustic surface*
CausticMass.py contains 3 classes/objects each with a list of attributes and functions
Caustic:
functions: zdistance(), findangle(), set_sample(), shiftgapper(), gaussian_kernel()
attributes: self.clus_ra, self.clus_dec, self.clus_z, self.r200, self.r, self.v, self.data, self.data_set,
self.ang_d, self.angle, self.x_scale, self.y_scale, self.x_range, self.y_range, self.ksize_x,
self.ksize_y, self.img, self.img_grad, self.img_inf
CausticSurface:
functions: findvdisp(), findvesc(), findphi(), findAofr(), restrict_gradient2(), identifyslot(), NFWfit()
attributes: self.levels, self.r200, self.halo_scale_radius, self.halo_scale_radius_e, self.gal_vdisp,
self.vvar, self.vesc, self.skr, self.level_elem, self.level_final, self.Ar_finalD,
self.halo_scale_density, self.halo_scale_density_e, self.vesc_fit
MassCalc:
functions:
attributes: self.crit, self.g_b, self.conc, self.f_beta, self.massprofile, self.avg_density, self.r200_est,
self.M200
Github: https://github.com/giffordw/CausticMass
"""
import matplotlib
matplotlib.use('Agg')
import numpy as np
import cosmolopy.distance as cd
from cosmolopy import magnitudes, fidcosmo
from matplotlib.pyplot import *
import astStats
import scipy.ndimage as ndi
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from skimage import measure
import pdb
import warnings
warnings.filterwarnings('ignore')
c = 300000.0
h0= 73
class Caustic:
"""
Required input: Galaxy RA,DEC,Z which must be first 3 columns in data input
Optional input: Galaxy mags,memberflag Cluster RA,DEC,Z,rlimit,vlimit,H0
- if the optional Cluster inputs are not given, average values are calculated. It is far better for the user
to calculate their own values and feed them to the module than rely on these estimates. The defaults are
rlimit = 4 Mpc and vlimit = +/- 3500 km/s.
- User can submit a 2D data array if there are additional galaxy attribute columns not offered by default
that can be carried through the operations for later use.
data -- 2d array with columns starting with RA,DEC,Z
"""
def __init__(self):
pass
def run_caustic(self,data,gal_mags=None,gal_memberflag=None,clus_ra=None,
clus_dec=None,clus_z=None,gal_r=None,gal_v=None,r200=None,
clus_vdisp=None,rlimit=4.0,vlimit=3500,q=10.0,H0=h0,xmax=6.0,ymax=5000.0,
cut_sample=True,edge_int_remove=False,gapper=True,mirror=True,absflag=False,
inflection=False,edge_perc=0.1,fbr=0.65):
self.S = CausticSurface()
self.clus_ra = clus_ra
self.clus_dec = clus_dec
self.clus_z = clus_z
self.fbr=fbr
if gal_r is None:
if self.clus_ra == None:
#calculate average ra from galaxies
self.clus_ra = np.average(data[:,0])
if self.clus_dec == None:
#calculate average dec from galaxies
self.clus_dec = np.average(data[:,1])
#Reduce data set to only valid redshifts
data_spec = data[np.where((np.isfinite(data[:,2])) & (data[:,2] > 0.0) & (data[:,2] < 5.0))]
if self.clus_z == None:
#calculate average z from galaxies
self.clus_z = np.average(data_spec[:,2])
#calculate angular diameter distance.
#Variable self.ang_d
self.ang_d,self.lum_d = self.zdistance(self.clus_z,H0)
#calculate the spherical angles of galaxies from cluster center.
#Variable self.angle
self.angle = self.findangle(data_spec[:,0],data_spec[:,1],self.clus_ra,self.clus_dec)
self.r = self.angle*self.ang_d
self.v = c*(data_spec[:,2] - self.clus_z)/(1+self.clus_z)
else:
data_spec = data[np.where(np.isfinite(gal_v))]
self.r = gal_r
self.v = gal_v
#calculate H(z)
self.Hz = H0*np.sqrt(0.25*(1+self.clus_z)**3 + 0.75)
self.hz = self.Hz / 100.0 #little h(z)
#package galaxy data, USE ASTROPY TABLE HERE!!!!!
if gal_memberflag is None:
self.data_table = np.vstack((self.r,self.v,data_spec.T)).T
else:
self.data_table = np.vstack((self.r,self.v,data_spec.T,gal_memberflag)).T
#reduce sample within limits
if cut_sample == True:
self.data_set = self.set_sample(self.data_table,rlimit=rlimit,vlimit=vlimit)
else:
self.data_set = self.data_table
if self.data_set.shape[0] < 2:
print('Encountered Error: Data set has too few elements. Check the r and v objects. Could indicate wrong cluster/galaxy positions or redshifts')
return 0
#further select sample via shifting gapper
if gapper == True:
self.data_set = self.shiftgapper(self.data_set)
print('DATA SET SIZE',self.data_set[:,0].size)
##tries to identify double groups that slip through the gapper process
#upper_max = np.max(self.data_set[:,1][np.where((self.data_set[:,1]>0.0)&(self.data_set[:,0]<1.0))])
#lower_max = np.min(self.data_set[:,1][np.where((self.data_set[:,1]<0.0)&(self.data_set[:,0]<1.0))])
#if np.max(np.array([upper_max,-lower_max])) > 1000.0+np.min(np.array([upper_max,-lower_max])):
# self.data_set = self.data_set[np.where(np.abs(self.data_set[:,1])<1000.0+np.min(np.array([upper_max,-lower_max])))]
#measure Ngal above mag limit
try:
if absflag:
abs_mag = self.data_table[:,5]
else:
abs_mag = self.data_table[:,7] - magnitudes.distance_modulus(self.clus_z,**fidcosmo)
self.Ngal_1mpc = self.r[np.where((abs_mag < -19.55) & (self.r < 0.5) & (np.abs(self.v) < 3500))].size
except IndexError:
abs_mag = np.zeros(self.data_table[:,0].size)
self.Ngal_1mpc = None
if r200 == None:
vdisp_prelim = astStats.biweightScale(self.data_set[:,1][np.where(self.data_set[:,0]<3.0)],9.0)
if np.sum(abs_mag) == 0:
r200_mean_prelim = 0.002*vdisp_prelim + 0.40
self.r200 = r200_mean_prelim/1.7
else:
self.r200 = self.Ngal_1mpc**0.51*np.exp(-1.86)
##original r200 est
#rclip,vclip = self.shiftgapper(np.vstack((self.r[np.where((self.r<3.0) & (np.abs(self.v)<3500.0))],self.v[np.where((self.r<3.0) & (np.abs(self.v)<3500.0))])).T).T
#vdisp_prelim_1 = astStats.biweightClipped(vclip,9.0,3.0)['biweightScale']
#rclip,vclip = self.shiftgapper(np.vstack((self.r[np.where((self.r<1.5) & (np.abs(self.v)<3500.0))],self.v[np.where((self.r<1.5) & (np.abs(self.v)<3500.0))])).T).T
#vdisp_prelim_2 = astStats.biweightClipped(vclip,9.0,3.0)['biweightScale']
#if vdisp_prelim_2 < 0.6*vdisp_prelim_1: vdisp_prelim = vdisp_prelim_2
#else: vdisp_prelim = vdisp_prelim_1
#r200_mean_prelim = 0.002*vdisp_prelim + 0.40
#self.r200 = r200_mean_prelim/1.7
if self.r200 > 3.0:
self.r200 = 3.0
if 3.0*self.r200 < 6.0:
rlimit = 3.0*self.r200
else:
rlimit = 5.5
else:
self.r200 = r200
if self.r200 > 3.0:
self.r200 = 3.0
print('Pre_r200=',self.r200)
if mirror == True:
print('Calculating Density w/Mirrored Data')
self.gaussian_kernel(np.append(self.data_set[:,0],self.data_set[:,0]),np.append(self.data_set[:,1],-self.data_set[:,1]),self.r200,normalization=self.Hz,scale=q,xmax=xmax,ymax=ymax)
else:
print('Calculating Density')
self.gaussian_kernel(self.data_set[:,0],self.data_set[:,1],self.r200,normalization=self.Hz,scale=q,xmax=xmax,ymax=ymax)
self.img_tot = self.img/np.max(np.abs(self.img))
self.img_grad_tot = self.img_grad/np.max(np.abs(self.img_grad))
self.img_inf_tot = self.img_inf/np.max(np.abs(self.img_inf))
if clus_vdisp is None:
#self.pre_vdisp = 9.15*self.Ngal_1mpc+350.32
#print 'Pre_vdisp=',self.pre_vdisp
#print 'Ngal<1Mpc=',self.Ngal_1mpc
v_cut = self.data_set[:,1][np.where((self.data_set[:,0]<self.r200) & (np.abs(self.data_set[:,1])<vlimit))]
try:
self.pre_vdisp2 = astStats.biweightScale(v_cut[np.where(np.isfinite(v_cut))],9.0)
except:
self.pre_vdisp2 = np.std(v_cut,ddof=1)
print('Vdisp from galaxies=',self.pre_vdisp2)
if self.data_set[:,0].size < 15:
self.v_unc = 0.35
self.c_unc_sys = 0.75
self.c_unc_int = 0.35
elif self.data_set[:,0].size < 25 and self.data_set[:,0].size >= 15:
self.v_unc = 0.30
self.c_unc_sys = 0.55
self.c_unc_int = 0.22
elif self.data_set[:,0].size < 50 and self.data_set[:,0].size >= 25:
self.v_unc = 0.23
self.c_unc_sys = 0.42
self.c_unc_int = 0.16
elif self.data_set[:,0].size < 100 and self.data_set[:,0].size >= 50:
self.v_unc = 0.18
self.c_unc_sys = 0.34
self.c_unc_int = 0.105
else:
self.v_unc = 0.15
self.c_unc_sys = 0.29
self.c_unc_int = 0.09
#if self.pre_vdisp2 > 1.75*self.pre_vdisp: self.pre_vdisp_comb = 9.15*self.Ngal_1mpc+450.32
#else:
self.pre_vdisp_comb = self.pre_vdisp2
#if self.data_set[:,1][np.where(self.data_set[:,0]<self.r200)].size >= 10:
# self.pre_vdisp_comb = astStats.biweightScale(self.data_set[:,1][np.where(self.data_set[:,0]<self.r200)],9.0)
#else:
# self.pre_vdisp_comb = np.std(self.data_set[:,1][np.where(self.data_set[:,0]<self.r200)],ddof=1)
# #self.pre_vdisp_comb = (self.pre_vdisp*(self.pre_vdisp2*self.v_unc)**2+self.pre_vdisp2*118.14**2)/(118.14**2+(self.pre_vdisp2*self.v_unc)**2)
else:
self.pre_vdisp_comb = clus_vdisp
print('Combined Vdisp=',self.pre_vdisp_comb)
self.beta = 0.5*self.x_range/(self.x_range + self.r200/4.0)
#Identify initial caustic surface and members within the surface
print('Calculating initial surface')
if inflection == False:
if gal_memberflag is None:
self.S.findsurface(self.data_set,self.x_range,self.y_range,self.img_tot,r200=self.r200,halo_vdisp=self.pre_vdisp_comb,beta=None,mirror=mirror,edge_perc=edge_perc,Hz=self.Hz,edge_int_remove=edge_int_remove,q=q,plotphase=False)
else:
self.S.findsurface(self.data_set,self.x_range,self.y_range,self.img_tot,memberflags=self.data_set[:,-1],r200=self.r200,mirror=mirror,edge_perc=edge_perc,Hz=self.Hz,q=q)
else:
if gal_memberflag is None:
self.S.findsurface_inf(self.data_set,self.x_range,self.y_range,self.img_tot,self.img_inf,r200=self.r200,halo_vdisp=self.pre_vdisp_comb,beta=None,Hz=self.Hz,q=q)
else:
self.S.findsurface_inf(self.data_set,self.x_range,self.y_range,self.img_tot,self.img_inf,memberflags=self.data_set[:,-1],r200=self.r200,Hz=self.Hz,q=q)
self.caustic_profile = self.S.Ar_finalD
self.caustic_fit = self.S.vesc_fit
self.caustic_edge = np.abs(self.S.Ar_finalE)
self.caustic_fit_edge = self.S.vesc_fit_e
self.gal_vdisp = self.S.gal_vdisp
self.memflag = self.S.memflag
#Estimate the mass based off the caustic profile, beta profile (if given), and concentration (if given)
if clus_z is not None:
self.Mass = MassCalc(self.x_range,self.caustic_profile,self.gal_vdisp,self.clus_z,r200=self.r200,fbr=None,H0=H0)
self.Mass2 = MassCalc(self.x_range,self.caustic_profile,self.gal_vdisp,self.clus_z,r200=self.r200,fbr=fbr,H0=H0)
self.MassE = MassCalc(self.x_range,self.caustic_edge,self.gal_vdisp,self.clus_z,r200=self.r200,fbr=fbr,H0=H0)
self.MassF = MassCalc(self.x_range,self.caustic_fit,self.gal_vdisp,self.clus_z,r200=self.r200,fbr=fbr,H0=H0)
self.MassFE = MassCalc(self.x_range,self.caustic_fit_edge,self.gal_vdisp,self.clus_z,r200=self.r200,fbr=fbr,H0=H0)
self.mprof = self.Mass.massprofile
self.mprof_fbeta = self.Mass2.massprofile
self.mprof_edge = self.MassE.massprofile
self.r200_est = self.Mass.r200_est
self.r200_est_fbeta = self.Mass2.r200_est
self.r200_est_edge = self.MassE.r200_est
self.r500_est = self.Mass.r500_est
self.r500_est_fbeta = self.Mass2.r500_est
self.M200_est = self.Mass.M200_est
self.M200_est_fbeta = self.Mass2.M200_est
self.M200_fbeta = self.Mass2.M200
self.M200_edge = self.MassE.M200
self.M200_edge_est = self.MassE.M200_est
self.M200_fit = self.MassF.M200
self.M200_fit_est = self.MassF.M200_est
self.M200_fit_edge = self.MassFE.M200
self.M200_fit_edge_est = self.MassFE.M200_est
self.M500_est = self.Mass.M500_est
self.M500_est_fbeta = self.Mass2.M500_est
print('r200 estimate: ',self.Mass2.r200_est)
print('M200 estimate: ',self.Mass2.M200_est)
self.Ngal = self.data_set[np.where((self.memflag==1)&(self.data_set[:,0]<=self.r200_est_fbeta))].shape[0]
#calculate velocity dispersion
try:
self.vdisp_gal = astStats.biweightScale(self.data_set[:,1][self.memflag==1],9.0)
except:
try:
self.vdisp_gal = np.std(self.data_set[:,1][self.memflag==1],ddof=1)
except:
self.vdisp_gal = 0.0
return 1
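# Usage sketch (synthetic values; as the class docstring notes, measured cluster
# parameters should be supplied whenever possible; ra, dec, z are hypothetical
# per-galaxy arrays):
#   data = np.column_stack([ra, dec, z])
#   caustic = Caustic()
#   ok = caustic.run_caustic(data, clus_ra=180.0, clus_dec=30.0, clus_z=0.08,
#                            r200=1.5, rlimit=4.0, vlimit=3500)
#   # On success (ok == 1) the mass estimates are available as attributes,
#   # e.g. caustic.M200_est_fbeta and caustic.r200_est_fbeta.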
def zdistance(self,clus_z,H0=h0):
"""
Finds the angular diameter distance for an array of cluster center redshifts.
(Alternatively, a precalculated angular-diameter-distance table could be loaded and used instead.)
"""
cosmo = {'omega_M_0':0.25,'omega_lambda_0':0.75,'h':H0/100.0}
cosmo = cd.set_omega_k_0(cosmo)
ang_d = cd.angular_diameter_distance(clus_z,**cosmo)
lum_d = cd.luminosity_distance(clus_z,**cosmo)
return ang_d,lum_d
def findangle(self,ra,dec,clus_RA,clus_DEC):
"""
Calculates the angles between the galaxies and the estimated cluster center.
The value is returned in radians.
"""
# zsep = np.sin(clus_DEC*np.pi/180.0)*np.sin(np.array(dec)*np.pi/180.0)
# xysep = np.cos(clus_DEC*np.pi/180.0)*np.cos(np.array(dec)*np.pi/180.0)*np.cos(np.pi/180.0*(clus_RA-np.array(ra)))
# angle = np.arccos(zsep+xysep)
# return angle
x0 = np.cos(clus_DEC*np.pi/180.0)*np.cos(clus_RA*np.pi/180.0)
y0 = np.cos(clus_DEC*np.pi/180.0)*np.sin(clus_RA*np.pi/180.0)
z0 = np.sin(clus_DEC*np.pi/180.0)
x1 = np.cos(np.array(dec)*np.pi/180.0)*np.cos(np.array(ra)*np.pi/180.0)
y1 = np.cos(np.array(dec)*np.pi/180.0)*np.sin(np.array(ra)*np.pi/180.0)
z1 = np.sin(np.array(dec)*np.pi/180.0)
cos_a = x0*x1+y0*y1+z0*z1
angle = np.arccos(cos_a)
return angle
def set_sample(self,data,rlimit=4.0,vlimit=3500):
"""
Reduces the sample by selecting only galaxies inside r and v limits.
The default is to use a vlimit = 3500km/s and rlimit = 4.0Mpc.
Specify in parameter file.
"""
data_set = data[np.where((data[:,0] < rlimit) & (np.abs(data[:,1]) < vlimit))]
return data_set
def shiftgapper(self,data):
npbin = 25
gap_prev = 2000.0 #initialize gap size for initial comparison (must be larger to start).
nbins = np.int(np.ceil(data[:,0].size/(npbin*1.0)))
origsize = data[:,0].shape[0]
data = data[np.argsort(data[:,0])] #sort by r to ready for binning
#print 'NBINS FOR GAPPER = ', nbins
for i in range(nbins):
#print 'BEGINNING BIN:',str(i)
databin = data[npbin*i:npbin*(i+1)]
datanew = None
nsize = databin[:,0].size
datasize = nsize-1
if nsize > 5:
while nsize - datasize > 0 and datasize >= 5:
#print ' ITERATING'
nsize = databin[:,0].size
databinsort = databin[np.argsort(databin[:,1])] #sort by v
f = (databinsort[:,1])[databinsort[:,1].size-np.int(np.ceil(databinsort[:,1].size/4.0))]-(databinsort[:,1])[np.int(np.ceil(databinsort[:,1].size/4.0))]
gap = f/(1.349)
#print i,' GAP SIZE', str(gap)
if gap < 500.0: break
#gap = 500.0
#if gap >= 2.0*gap_prev:
# gap = gap_prev
# #print ' Altered gap = %.3f'%(gap)
databelow = databinsort[databinsort[:,1]<=0]
gapbelow =databelow[:,1][1:]-databelow[:,1][:-1]
dataabove = databinsort[databinsort[:,1]>0]
gapabove = dataabove[:,1][1:]-dataabove[:,1][:-1]
try:
if np.max(gapbelow) >= gap: vgapbelow = np.where(gapbelow >= gap)[0][-1]
else: vgapbelow = -1
#print 'MAX BELOW GAP',np.max(gapbelow)
try:
datanew = np.append(datanew,databelow[vgapbelow+1:],axis=0)
except:
datanew = databelow[vgapbelow+1:]
except ValueError:
pass
try:
if np.max(gapabove) >= gap: vgapabove = np.where(gapabove >= gap)[0][0]
else: vgapabove = 99999999
#print 'MAX ABOVE GAP',np.max(gapabove)
try:
datanew = np.append(datanew,dataabove[:vgapabove+1],axis=0)
except:
datanew = dataabove[:vgapabove+1]
except ValueError:
pass
databin = datanew
datasize = datanew[:,0].size
datanew = None
#print 'DATA SIZE OUT', databin[:,0].size
if gap >=500.0:
gap_prev = gap
else:
gap_prev = 500.0
try:
datafinal = np.append(datafinal,databin,axis=0)
except:
datafinal = databin
#print 'GALAXIES CUT =',str(origsize-datafinal[:,0].size)
return datafinal
def gaussian_kernel(self,xvalues,yvalues,r200,normalization=h0,scale=10.0,xres=200,yres=220,xmax=6.0,ymax=5000.0):
"""
Uses a 2D gaussian kernel to estimate the density of the phase space.
As of now, the maximum radius extends to 6Mpc and the maximum velocity allowed is 5000km/s
The "q" parameter is termed "scale" here which we have set to 10 as default, but can go as high as 50.
"normalization" is simply H0
"x/yres" can be any value, but are recommended to be above 150
"adj" is a custom value and changes the size of uniform filters when used (not normally needed)
Parameters
----------
xvalues : x-coordinates of points in phase space
yvalues : y-coordinates of points in phase space
r200 : Required estimate of r200 to calculate a rough dispersion
normalization = h0 : This is equivalent to H0. The default is the module-level h0 = 73.
scale = 10 : "q" parameter in Diaferio 99. Literature says this can be between 10-50
xres = 200 : x-grid resolution
yres = 220 : y-grid resolution
xmax = 6.0 : Maximum x-grid value. If data points exceed this amount either increase
this value or cut sample to be within this value.
ymax = 5000 : Maximum/minimum y-grid value. If data points exceed this amount either increase
this value or cut sample to be within this value.
Returns
-------
self.x_range : array of x-grid values
self.y_range : array of y-grid values
self.img : smoothed density image
self.img_grad : first derivative of img
self.img_inf : second derivative of img
"""
if np.max(xvalues) >= xmax:
raise Exception('Bounding Error: Please either increase your xmax value or trim your sample to be x < '+str(xmax))
if np.max(np.abs(yvalues)) >= ymax:
raise Exception('Bounding Error: Please either increase your ymax value or trim your sample to be y < '+str(ymax))
yvalues = yvalues/(normalization*scale)
self.x_range = np.arange(0,xmax,0.05)
self.x_range_bin = np.arange(0,xmax+0.05,0.05)
xres = self.x_range.size
self.y_range = np.arange(-ymax/(normalization*scale),ymax/(normalization*scale),0.05)*normalization*scale
self.y_range_bin = np.arange(-ymax/(normalization*scale),ymax/(normalization*scale)+0.05,0.05)*normalization*scale
yres = self.y_range.size
self.x_scale = (xvalues/xmax)*xres
self.y_scale = ((yvalues*(normalization*scale)+ymax)/(ymax*2.0))*self.y_range.size
#self.ksize_x = (4.0/(3.0*xvalues.size))**(1/5.0)*np.std(self.x_scale[xvalues<r200])
self.ksize_x = (4.0/(3.0*xvalues.size))**(1/5.0)*np.sqrt((astStats.biweightScale((self.x_scale[xvalues<r200]).copy(),9.0)**2 + astStats.biweightScale((self.y_scale[xvalues<r200]).copy(),9.0)**2)/2.0)
self.ksize_x *= 1.0
self.ksize_y = self.ksize_x#(4.0/(3.0*xvalues.size))**(1/5.0)*np.std(self.y_scale[xvalues<r200])
self.imgr,xedge,yedge = np.histogram2d(xvalues,yvalues,bins=[self.x_range_bin,self.y_range_bin/(normalization*scale)])
self.img = ndi.gaussian_filter(self.imgr, (self.ksize_x,self.ksize_y),mode='reflect')
self.img_grad = ndi.gaussian_gradient_magnitude(self.imgr, (self.ksize_x,self.ksize_y))
self.img_inf = ndi.gaussian_gradient_magnitude(ndi.gaussian_gradient_magnitude(self.imgr, (self.ksize_x,self.ksize_y)), (self.ksize_x,self.ksize_y))
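# Usage sketch (synthetic phase-space points; run_caustic normally calls this):
#   c = Caustic()
#   r = np.random.uniform(0.0, 3.0, 500)      # projected radius [Mpc]
#   v = np.random.normal(0.0, 800.0, 500)     # line-of-sight velocity [km/s]
#   c.gaussian_kernel(r, v, r200=1.5, normalization=h0, scale=10.0)
#   # c.img holds the smoothed density on the (c.x_range, c.y_range) grid,
#   # with c.img_grad and c.img_inf its first- and second-derivative maps.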
class CausticSurface:
"""
- For now if r200 is not supplied I am using a default value of 2Mpc
- If a scale radius is not given for the cluster, then I am using a default value of r200/5.0 with uncertainty 0.01Mpc
CausticSurface(self,r,v,ri,vi,Zi,memberflags=None,r200=2.0,maxv=5000,halo_scale_radius=None,halo_scale_radius_e=0.01,halo_vdisp=None,bin=None):
r/v - rvalues/vvalues of galaxies
ri/vi - x_range/y_range of grid
Zi - density map
memberflags = None - indices of known member galaxies to calculate a velocity dispersion
r200 = 2.0 - critical radius of the cluster
maxv = 5000km/s - maximum velocity allowed
halo_scale_radius - scale radius (default is r200/5.0)
halo_scale_radius_e = 0.01 - uncertainty in scale radius
halo_vdisp = None - velocity dispersion
bin = None - if doing multiple halos, can assign an ID number
"""
def __init__(self):
pass
def findsurface(self,data,ri,vi,Zi,memberflags=None,r200=2.0,maxv=5000.0,halo_scale_radius=None,halo_scale_radius_e=0.01,halo_vdisp=None,bin=None,plotphase=False,beta=None,mirror=True,q=10.0,Hz = h0,edge_perc=0.1,edge_int_remove=False):
kappaguess = np.max(Zi) #first guess at the level
#self.levels = np.linspace(0.00001,kappaguess,100)[::-1] #create levels (kappas) to try out
self.levels = 10**(np.linspace(np.log10(np.min(Zi[Zi>0]/5.0)),np.log10(kappaguess),200)[::-1])
fitting_radii = np.where((ri>=r200/3.0) & (ri<=r200)) #when fitting an NFW (later), this defines the r range to fit within
self.r200 = r200
if halo_scale_radius is None:
self.halo_scale_radius = self.r200/5.0
else:
self.halo_scale_radius = halo_scale_radius
self.halo_scale_radius_e = halo_scale_radius_e
if beta is None:
self.beta = 0.2+np.zeros(ri.size)
else: self.beta = beta
self.gb = (3-2.0*self.beta)/(1-self.beta)
#Calculate velocity dispersion with either members, fed value, or estimate using 3.5sigma clipping
if memberflags is not None:
vvarcal = data[:,1][np.where(memberflags==1)]
try:
self.gal_vdisp = astStats.biweightScale(vvarcal[np.where(np.isfinite(vvarcal))],9.0)
print('O ya! membership calculation!')
except:
self.gal_vdisp = np.std(vvarcal,ddof=1)
self.vvar = self.gal_vdisp**2
elif halo_vdisp is not None:
self.gal_vdisp = halo_vdisp
self.vvar = self.gal_vdisp**2
else:
#Variable self.gal_vdisp
try:
self.findvdisp(data[:,0],data[:,1],r200,maxv)
except:
self.gal_vdisp = np.std(data[:,1][np.where((data[:,0]<r200) & (np.abs(data[:,1])<maxv))],ddof=1)
self.vvar = self.gal_vdisp**2
##initilize arrays
#self.vesc = np.zeros(self.levels.size)
#self.Ar_final_opt = np.zeros((self.levels.size,ri[np.where((ri<r200) & (ri>=0))].size))
#
##find the escape velocity for all level (kappa) guesses
#for i in range(self.vesc.size):
# self.vesc[i],self.Ar_final_opt[i] = self.findvesc(self.levels[i],ri,vi,Zi,r200)
#
##optimization equation to search for minimum value
#self.skr = (self.vesc-4.0*self.vvar)**2
#try:
# self.level_elem = np.where(self.skr == np.min(self.skr[np.isfinite(self.skr)]))[0][0]
# self.level_final = self.levels[self.level_elem]
# self.Ar_finalD = np.zeros(ri.size)
# for k in range(self.Ar_finalD.size):
# self.Ar_finalD[k] = self.findAofr(self.level_final,Zi[k],vi)
# if k != 0:
# self.Ar_finalD[k] = self.restrict_gradient2(np.abs(self.Ar_finalD[k-1]),np.abs(self.Ar_finalD[k]),ri[k-1],ri[k])
#
##This exception occurs if self.skr is entirely NAN. A flag should be raised for this in the output table
#except ValueError:
# self.Ar_finalD = np.zeros(ri.size)
#
#find contours (new)
self.Ar_finalD = self.findcontours(Zi,self.levels,ri,vi,r200,self.vvar,Hz,q)
data_e = data
#remove outliers from edge calculation
if edge_int_remove:
try:
data_e = self.edge_outlier_clip(data_e,ri,vi,Zi)
print('completed edge_outlier_clip')
except:
data_e = data
#Identify sharp phase-space edge
numbins = 6
perc_top = edge_perc #what percent of top velocity galaxies per/bin used to identify surface
numrval = (data_e[:,0][data_e[:,0]< r200]).size #number of galaxies less than r200
size_bin = int(np.ceil(numrval*1.0/numbins)) #how many galaxies are in each bin
rsort = data_e[:,0][np.argsort(data_e[:,0])] #sort r positions
if mirror == True:
vsort = np.abs(data_e[:,1][np.argsort(data_e[:,0])]) #sort absolute value of velocities by r position
else:
vsort = data_e[:,1][np.argsort(data_e[:,0])] #same as above but not abs
self.data_e = data_e
mid_rbin = np.array([])
avgmax = np.array([])
avgmin = np.array([])
mincomp = np.array([])
for nn in range(numbins):
vbin = vsort[nn*size_bin:(nn+1)*size_bin] #pick velocities in bin # nn
if vbin.size==0:
if nn >= 4: break
rbin = rsort[nn*size_bin:(nn+1)*size_bin] #pick radii in bin # nn
vemax = (vbin[np.argsort(vbin)][::-1])[:int(np.ceil(vbin[vbin>0.0].size*perc_top))] #sort by velocity -> flip array from max-min -> take first edge_perc values where v>0
vemin = (vbin[np.argsort(vbin)])[:int(np.ceil(vbin[vbin<0.0].size*perc_top))] #sort by velocity -> take first edge_perc values where v<0
avgmax = np.append(avgmax,np.average(vemax)) #add average of top edge_perc velocities to max array
avgmin = np.append(avgmin,np.average(vemin)) #same as above but min array
#take the minimum of either the above || below zero caustic
if np.isnan(avgmax)[-1] == True: break
if np.min(vbin) >= 0: mincomp = np.append(mincomp,avgmax[nn]) #if no negative velocities (aka, mirrored)
else: mincomp = np.append(mincomp,np.min([np.abs(avgmin[nn]),avgmax[nn]])) #else take the minimum extreme
mid_rbin = np.append(mid_rbin,np.median(rbin)) #take median rvalue of bin
chi = np.array([])
#loop through contours and find the median absolute difference with the edge extreme
for nn in range(len(self.contours)):
fint = interp1d(ri[ri<r200],self.contours[nn][ri<r200]) #interpolate contour
Ar_comp = fint(mid_rbin[mid_rbin<np.max(ri[ri<r200])]) #interpolated contour
chi = np.append(chi,np.median(np.abs(Ar_comp-mincomp[mid_rbin<np.max(ri[ri<r200])]))) #measure median absolute distance
try:
self.Ar_finalE = np.array(self.contours)[np.isfinite(chi)][np.where(chi[np.isfinite(chi)] == np.min(chi[np.isfinite(chi)]))][0] #find level with min chi value
#self.level_finalE = ((self.levels[np.isfinite(chi)])[np.where(chi[np.isfinite(chi)] == np.min(chi[np.isfinite(chi)]))])[0] #find level with min chi value
#self.Ar_finalE = np.zeros(ri.size)
#for k in range(self.Ar_finalE.size):
# self.Ar_finalE[k] = self.findAofr(self.level_finalE,Zi[k],vi)
# if k != 0:
# self.Ar_finalE[k] = self.restrict_gradient2(np.abs(self.Ar_finalE[k-1]),np.abs(self.Ar_finalE[k]),ri[k-1],ri[k])
except ValueError:
self.Ar_finalE = np.zeros(ri.size)
#fit an NFW to the resulting caustic profile.
self.vesc_fit = self.NFWfit(ri[fitting_radii],self.Ar_finalD[fitting_radii]*np.sqrt(self.gb[fitting_radii]),self.halo_scale_radius,ri,self.gb)
self.vesc_fit_e = self.NFWfit(ri[fitting_radii],self.Ar_finalE[fitting_radii]*np.sqrt(self.gb[fitting_radii]),self.halo_scale_radius,ri,self.gb)
#set first element (which is NaN) equal to the second value
self.vesc_fit[0] = self.vesc_fit[1]
self.vesc_fit_e[0] = self.vesc_fit_e[1]
if plotphase == True:
s,ax = subplots(1,figsize=(10,7))
#ax.pcolormesh(ri,vi,Zi.T)
ax.plot(data[:,0],data[:,1],'k.',markersize=0.5,alpha=0.8)
for t,con in enumerate(self.contours):
ax.plot(ri,con,c='0.4',alpha=0.5)
ax.plot(ri,-con,c='0.4',alpha=0.5)
ax.plot(ri,self.Ar_finalD,c='red')
ax.plot(ri,-self.Ar_finalD,c='red')
ax.plot(ri,self.Ar_finalE,c='blue')
#ax.plot(mid_rbin,avgmax,c='r')
ax.set_ylim(0,5000)
ax.set_xlim(0,4)
s.savefig('plotphase.png')
close()
#show()
##Output galaxy membership
kpc2km = 3.09e16
try:
fitfunc = lambda x,a,b: np.sqrt(2*4*np.pi*6.67e-20*a*(b*kpc2km)**2*np.log(1+x/b)/(x/b))
self.popt,self.pcov = curve_fit(fitfunc,ri,self.Ar_finalD,p0=[5e14,1])
self.Arfit = fitfunc(ri,self.popt[0],self.popt[1])
except:
fitfunc = lambda x,a: np.sqrt(2*4*np.pi*6.67e-20*a*(30.0*kpc2km)**2*np.log(1+x/30.0)/(x/30.0))
self.popt,pcov = curve_fit(fitfunc,ri,self.Ar_finalD)
self.Arfit = fitfunc(ri,self.popt[0])
self.memflag = np.zeros(data.shape[0])
#fcomp = interp1d(ri,self.Ar_finalD)
#print ri.size, self.vesc_fit.size
fcomp = interp1d(ri,self.vesc_fit)
for k in range(self.memflag.size):
vcompare = fcomp(data[k,0])
if np.abs(vcompare) >= np.abs(data[k,1]):
self.memflag[k] = 1
def edge_outlier_clip(self,data_e,ri,vi,Zi):
r_inside = []
v_inside = []
i = 0
while ri[i] <= np.max(data_e[:,0]):
inner_el = i
outer_el = i + 5
inner_r = ri[inner_el]
outer_r = ri[outer_el]
'''
dens = np.average(Zi[inner_el:outer_el],axis=0)
roots = np.sort(np.abs(vi[dens>0.05]))
databinned = data_e[np.where((data_e[:,0]>=inner_r)&(data_e[:,0]<outer_r))]
if len(roots) == 0:
root = 2 * astStats.biweightScale(databinned[:,1].copy(),9.0)
elif np.abs(roots[-1]) < 500.0:
root = 2 * astStats.biweightScale(databinned[:,1].copy(),9.0)
elif np.abs(roots[-1]) > 3500.0:
root = 3500.0
else:
root = np.abs(roots[-1])
r_inside.extend(databinned[:,0][np.where(np.abs(databinned[:,1])<root)])
v_inside.extend(databinned[:,1][np.where(np.abs(databinned[:,1])<root)])
i += 5
data_e = np.vstack((np.array(r_inside),np.array(v_inside))).T
return data_e
'''
#derivative of the shell-averaged density profile with respect to velocity
deriv = (np.average(Zi[inner_el:outer_el],axis=0)[1:]-np.average(Zi[inner_el:outer_el],axis=0)[:-1]) \
/(vi[1:]-vi[:-1])
#velocities where the derivative changes sign (local extrema of the density profile)
roots = np.sort(np.abs(vi[deriv[1:]*deriv[:-1] < 0]))
databinned = data_e[np.where((data_e[:,0]>=inner_r)&(data_e[:,0]<outer_r))]
if len(roots) > 1:
if roots[1] < 1000.0:
if len(roots) > 2:
if roots[2] < 1000.0:
root = 3 * astStats.biweightScale(databinned[:,1].copy(),9.0)
else:
root = roots[2]
else: root = 3 * astStats.biweightScale(databinned[:,1].copy(),9.0)
else: root = roots[1]
else: root = 3500.0
r_inside.extend(databinned[:,0][np.where(np.abs(databinned[:,1])<root)])
v_inside.extend(databinned[:,1][np.where(np.abs(databinned[:,1])<root)])
i += 5
data_e = np.vstack((np.array(r_inside),np.array(v_inside))).T
return data_e
def findsurface_inf(self,data,ri,vi,Zi,Zi_inf,memberflags=None,r200=2.0,maxv=5000.0,halo_scale_radius=None,halo_scale_radius_e=0.01,halo_vdisp=None,beta=None):
"""
Identifies the caustic surface using the iso-density contours in phase space,
as well as the second derivative of the density (aptly named the inflection technique).
This technique attempts to rid the caustic technique of the dreaded velocity dispersion
calibration that is used to pick a surface.
Parameters
----------
data : first and second columns must be radius and velocity
ri : x-grid values
vi : y-grid values
Zi : density image
Zi_inf : second derivative of the density image
memberflags = None : array of 1's if member 0's if not
r200 = 2.0 : r200 value
maxv = 5000.0 : maximum y-value
halo_scale_radius = None : The default is actually a concentration of 5.0
which is applied later if None is given.
halo_scale_radius_e=0.01 : error in halo_scale_radius
halo_vdisp = None : supply cluster velocity dispersion
beta = None : The default is actually 0.2 which is applied later in the code
although as of now beta is not used in this function
Variables
---------
"""
kappaguess = np.max(Zi) #first thing is to guess at the level
self.levels = np.linspace(0.00001,kappaguess,100)[::-1] #create levels (kappas) to try out
fitting_radii = np.where((ri>=r200/3.0) & (ri<=r200))
self.r200 = r200
if halo_scale_radius is None:
self.halo_scale_radius = self.r200/5.0
else:
self.halo_scale_radius = halo_scale_radius
self.halo_scale_radius_e = halo_scale_radius_e
#c_guess = np.array([halo_srad])#np.linspace(1.0,12.0,100)
#density_guess = np.linspace(1e13,5e16,1000)
if beta is None:
self.beta = 0.2+np.zeros(ri.size)
else: self.beta = beta
self.gb = (3-2.0*self.beta)/(1-self.beta)
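#g(beta) = (3 - 2*beta)/(1 - beta): the anisotropy factor of the caustic technique, which relates
#the caustic amplitude to the escape velocity, <v_esc^2(r)> = g(beta) * A^2(r) (Diaferio 1999).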
#Calculate velocity dispersion with either members, fed value, or estimate using 3.5sigma clipping
if memberflags is not None:
vvarcal = data[:,1][np.where(memberflags==1)]
try:
self.gal_vdisp = astStats.biweightScale(vvarcal[np.where(np.isfinite(vvarcal))],9.0)
print('O ya! membership calculation!')
except:
self.gal_vdisp = np.std(vvarcal,ddof=1)
self.vvar = self.gal_vdisp**2
elif halo_vdisp is not None:
self.gal_vdisp = halo_vdisp
self.vvar = self.gal_vdisp**2
else:
#Variable self.gal_vdisp
try:
self.findvdisp(data[:,0],data[:,1],r200,maxv)
except:
self.gal_vdisp = np.std(data[:,1][np.where((data[:,0]<r200) & (np.abs(data[:,1])<maxv))],ddof=1)
self.vvar = self.gal_vdisp**2
self.Ar_final_opt = np.zeros((self.levels.size,ri[np.where((ri<r200) & (ri>=0))].size)) #2D array: density levels x velocity profile
self.inf_vals = np.zeros((self.levels.size,ri[np.where((ri<r200) & (ri>=0))].size)) #2D array: density levels x inflection profile
#s = figure()
#ax = s.add_subplot(111)
for i in range(self.levels.size): # find the escape velocity for all level (kappa) guesses
self.Ar_final_opt[i],self.inf_vals[i] = self.findvesc2(self.levels[i],ri,vi,Zi,Zi_inf,r200)
#ax.plot(ri[np.where((ri<r200) & (ri>=0))],np.abs(self.Ar_final_opt[i]),c='black',alpha=0.4) #plot each density contour
self.inf_avg = np.average(self.inf_vals.T[fitting_radii],axis=0) #average inflection along each contour surface
self.Ar_avg = np.average(self.Ar_final_opt,axis=1) #average velocity along each contour surface inside r200
#Need to identify the maximum average inflection, so smooth the measurement. Might want to do this in a non-parametric way
#tryfit = np.polyfit(self.levels,self.inf_avg,7)
#self.infyvals = tryfit[0]*self.levels**7+tryfit[1]*self.levels**6+tryfit[2]*self.levels**5+tryfit[3]*self.levels**4+tryfit[4]*self.levels**3+tryfit[5]*self.levels**2+tryfit[6]*self.levels+tryfit[7]
tryfit = np.polyfit(self.Ar_avg,self.inf_avg,7)
self.infyvals = tryfit[0]*self.Ar_avg**7+tryfit[1]*self.Ar_avg**6+tryfit[2]*self.Ar_avg**5+tryfit[3]*self.Ar_avg**4+tryfit[4]*self.Ar_avg**3+tryfit[5]*self.Ar_avg**2+tryfit[6]*self.Ar_avg+tryfit[7]
self.inf_std = np.std(self.inf_vals.T[fitting_radii],axis=0) #std of inflection along each caustic surface
#self.level_elem = (self.levels[Ar_avg>np.sqrt(vvar)])[np.where(self.inf_avg[Ar_avg>np.sqrt(vvar)] == np.max(self.inf_avg[Ar_avg>np.sqrt(vvar)]))]
self.level_elem = self.levels[np.where(self.inf_avg == np.max(self.inf_avg))][0]
#low_zone = np.where((np.average(np.abs(self.Ar_final_opt),axis=1)>np.max(v)/2.0) & (np.average(np.abs(self.Ar_final_opt),axis=1)<np.max(v)))
high_zone = np.where((np.average(np.abs(self.Ar_final_opt),axis=1)>np.max(data[:,1])/2.0))
#level_elem_low = self.levels[low_zone][np.where(self.inf_avg[low_zone] == np.min(self.inf_avg[low_zone]))][-1]
#level_elem_high = self.levels[high_zone][np.where(self.inf_avg[high_zone] == np.max(self.inf_avg[high_zone]))][-1]
try:
self.level_elem_high = (self.levels[1:-1][np.where((self.infyvals[1:-1]>self.infyvals[2:])&(self.infyvals[1:-1]>self.infyvals[:-2]))])[-1]
except IndexError:
self.level_elem_high = self.levels[0]
self.Ar_final_high = np.zeros(ri.size)
#self.Ar_final_low = np.zeros(ri.size)
for i in range(ri.size):
self.Ar_final_high[i] = self.findAofr(self.level_elem_high,Zi[i],vi)
#self.Ar_final_low[i] = self.findAofr(level_elem_low,Zi[i],vi)
if i > 0:
self.Ar_final_high[i] = self.restrict_gradient2(np.abs(self.Ar_final_high[i-1]),np.abs(self.Ar_final_high[i]),ri[i-1],ri[i])
#self.Ar_final_low[i] = self.restrict_gradient2(np.abs(self.Ar_final_low[i-1]),np.abs(self.Ar_final_low[i]),ri[i-1],ri[i])
#Ar_final = self.Ar_final_opt[np.where(self.inf_avg == np.max(self.inf_avg))][0]
#self.Ar_final = (self.Ar_final_high+self.Ar_final_low)/2.0
self.Ar_finalD = self.Ar_final_high
##Output galaxy membership
kpc2km = 3.09e16
try:
fitfunc = lambda x,a,b: np.sqrt(2*4*np.pi*6.67e-20*a*(b*kpc2km)**2*np.log(1+x/b)/(x/b))
self.popt,self.pcov = curve_fit(fitfunc,ri,self.Ar_finalD) #fit the final caustic profile
self.vesc_fit = fitfunc(ri,self.popt[0],self.popt[1])
except:
fitfunc = lambda x,a: np.sqrt(2*4*np.pi*6.67e-20*a*(30.0*kpc2km)**2*np.log(1+x/30.0)/(x/30.0))
self.popt,self.pcov = curve_fit(fitfunc,ri,self.Ar_finalD)
self.vesc_fit = fitfunc(ri,self.popt[0])
self.memflag = np.zeros(data.shape[0])
#fcomp = interp1d(ri,self.Ar_finalD)
#print ri.size, self.vesc_fit.size
fcomp = interp1d(ri,self.vesc_fit)
for k in range(self.memflag.size):
vcompare = fcomp(data[k,0])
if np.abs(vcompare) >= np.abs(data[k,1]):
self.memflag[k] = 1
#ax.plot(ri,np.abs(self.Ar_final),c='red',lw=2)
#ax.plot(ri,vesc_fit,c='green',lw=2)
#ax.plot(r,v,'k.')
#pcolormesh(ri,vi,Zi_inf.T)
#ax.set_ylim(0,3500)
#savefig('/nfs/christoq_ls/giffordw/flux_figs/surfacetests/nideal/'+str(bin-1)+'.png')
#close()
def causticmembership(self,data,ri,caustics):
self.memflag = np.zeros(data.shape[0])
for k in range(self.memflag.size):
diff = data[k,0]-ri
xrange_up = ri[np.where(ri > data[k,0])][0]
xrange_down = ri[np.where(ri <= data[k,0])][-1]
c_up = np.abs(caustics[np.where(ri > data[k,0])])[0]
c_down = np.abs(caustics[np.where(ri<= data[k,0])])[-1]
slope = (c_up-c_down)/(xrange_up-xrange_down)
intercept = c_up - slope*xrange_up
vcompare = slope*data[k,0]+intercept
if vcompare >= np.abs(data[k,1]):
self.memflag[k] = 1
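#A minimal equivalent of the per-galaxy interpolation above, as a sketch (assumes ri is sorted
#ascending and every data[:,0] falls inside ri's range; not the author's implementation):
# member = (np.abs(data[:, 1]) <= np.interp(data[:, 0], ri, np.abs(caustics))).astype(int)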
def findvdisp(self,r,v,r200,maxv):
"""
Use astLib.astStats biweight sigma clipping Scale estimator for the velocity dispersion
"""
v_cut = v[np.where((r<r200) & (np.abs(v)<maxv))]
try:
self.gal_vdisp = astStats.biweightScale(v_cut[np.where(np.isfinite(v_cut))],9.0)
except:
self.gal_vdisp = np.std(v_cut,ddof=1)
def findvesc(self,level,ri,vi,Zi,r200):
"""
Calculate vesc^2 by first calculating the integrals in Diaferio (1999) which are not labeled but appear
between Eqn 18 and 19
"""
useri = ri[np.where((ri<r200) & (ri>=0))] #look only inside r200
Ar = np.zeros(useri.size)
phir = np.zeros(useri.size)
#loop through each dr and find the caustic amplitude for the given level (kappa) passed to this function
for i in range(useri.size):
Ar[i] = self.findAofr(level,Zi[np.where((ri<r200) & (ri>=0))][i],vi)
if i > -1: #to fix the fact that the first row of Zi may be 'nan'
#The Serra paper also restricts the gradient when the ln gradient is > 2. We use > 3
Ar[i] = self.restrict_gradient2(np.abs(Ar[i-1]),np.abs(Ar[i]),useri[i-1],useri[i])
philimit = np.abs(Ar[i]) #phi integral limits
phir[i] = self.findphir(Zi[i][np.where((vi<philimit) & (vi>-philimit))],vi[np.where((vi<philimit) & (vi>-philimit))])
return (np.trapz(Ar**2*phir,useri)/np.trapz(phir,useri),Ar)
def findvesc2(self,level,ri,vi,Zi,Zi_inf,r200):
"""
Used by findsurface_inf to identify caustic surfaces
Parameters
----------
level = density value
ri = x-grid values
vi = y-grid values
Zi = density image
Zi_inf = second derivative of density image
r200 = r200 of cluster
Returns
-------
(Ar,inf_val)
Ar = caustic surface
inf_val = inflection values along caustic surface
"""
useri = ri[np.where((ri<r200) & (ri>=0))] #look only inside r200
Ar = np.zeros(useri.size)
inf_val = np.zeros(useri.size)
for i in range(useri.size):
Ar[i] = self.findAofr(level,Zi[np.where((ri<r200) & (ri>=0))][i],vi)
if i >0:
Ar[i] = self.restrict_gradient2(np.abs(Ar[i-1]),np.abs(Ar[i]),useri[i-1],useri[i])
inf_val[i] = Zi_inf[i][np.where(np.abs(vi-Ar[i]) == np.min(np.abs(vi-Ar[i])))][0]
return Ar,inf_val
def findphir(self,shortZi,shortvi):
short2Zi = np.ma.masked_array(shortZi)
vi = shortvi[np.ma.where(np.ma.getmaskarray(short2Zi)==False)]
Zi = short2Zi[np.ma.where(np.ma.getmaskarray(short2Zi)==False)]
vi = vi[np.isfinite(Zi)]
Zi = Zi[np.isfinite(Zi)]
x = np.trapz(Zi.compressed(),vi)
return x
def findAofr(self,level,Zi,vgridvals):
"""
Finds the velocity (caustic amplitude) at which the density profile Zi crosses the given level (kappa)
"""
#dens0 = Zi[np.where(vgridvals>=0)][0]
dens0 = np.max(Zi)
|
import numpy as np
import pytest
from scipy import ndimage as ndi
from scipy.signal import convolve2d
from skimage import restoration, util
from skimage._shared import filters
from skimage._shared._warnings import expected_warnings
from skimage._shared.testing import fetch
from skimage._shared.utils import _supported_float_type
from skimage.color import rgb2gray
from skimage.data import astronaut, camera
from skimage.restoration import uft
test_img = util.img_as_float(camera())
def _get_rtol_atol(dtype):
rtol = 1e-3
atol = 0
if dtype == np.float16:
rtol = 1e-2
atol = 1e-3
elif dtype == np.float32:
atol = 1e-5
return rtol, atol
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_wiener(dtype):
psf = np.ones((5, 5), dtype=dtype)
|
import numpy as np
"""least_squares.py performs least-squares fitting."""
def fit(Q, s, sig=None):
renorm = False
if sig is None:
sig = 1
renorm = True
N, n = np.shape(Q)
if n==1 or N==1:
Q = Q[:]
(N, n) = np.shape(Q)
if n > N:
raise ValueError("Not enough data points to fit the curve.")
try:
n_sig = len(sig)
except TypeError:
n_sig = 1
if n_sig == 1:
sig = sig * np.ones((N,1))
elif n_sig != N:
raise ValueError("len(sig) must be equal to number of data points.")
sig2 = np.asarray(sig) ** 2.0
e = sig2 ** -1
E = e*np.ones((1,n))
G = np.mat(np.array(Q)*np.array(E))
NDF = N - n
if n == N:
NDF = 1
V = np.linalg.inv(np.mat(Q).transpose() * G)
t = V*G.transpose()
dR = np.sqrt(np.diag(V))
T = np.mat(Q)*t
dy2 = np.mat(np.asarray(T)**2)*sig2
dy = np.sqrt(np.asarray(dy2))
|
import torch
import torch.nn as nn
import numpy as np
from tqdm.autonotebook import tqdm
# Continuous models
class Hodgkin_Huxley():
r"""
Hodgkin-Huxley model via Euler integration
"""
def __init__(self, G_na=120, G_k=36, G_l=0.3, E_na=50, E_k=-77, E_l=-54.4):
r"""
units are in mV, microS, nF, mA, ms
"""
self.G_na = G_na
self.G_k = G_k
self.G_l = G_l
self.E_na = E_na
self.E_k = E_k
self.E_l = E_l
def euler_int(self, T, runs, I_ext, ic, dt=0.001, prin=1000):
r"""
Integrate the HH dynamics, the state array (v, m, h, n) is represented by 4 floating-point values
:param int T: timesteps to run the simulation for
:param int runs: number of trials to run (I_ext and i.c. can differ per run)
:param np.array I_ext: external input current, with shape (runs, timesteps)
:param np.array ic: neuron initial conditions, with shape (runs, 4)
:returns: neuron state over the simulation
:rtype: np.array
"""
alpha_m = lambda V: (2.5-0.1*(V+65)) / (np.exp(2.5-0.1*(V+65)) -1)
beta_m = lambda V: 4.0 * np.exp(-(V+65)/18)
alpha_h = lambda V: 0.07 * np.exp(-(V+65)/20)
beta_h = lambda V: 1.0 / (np.exp(3.0-0.1*(V+65)) + 1)
alpha_n = lambda V: (0.1-0.01*(V+65)) / (np.exp(1-0.1*(V+65)) - 1)
beta_n = lambda V: 0.125 * np.exp(-(V+65)/80)
state = np.zeros((runs, T, 4)) # vector v, m, h, n
for k in range(runs):
state[k, 0, :] = ic[k, :]#[-6.49997224e+01, 5.29342176e-02, 5.96111046e-01, 3.17681168e-01]
ds = np.zeros((runs, 4))
iterator = tqdm(range(T-1))
for t in iterator:
ds[:, 0] = -(self.G_l*(state[:, t, 0] - self.E_l) + \
self.G_k*np.power(state[:, t, 3], 4)*(state[:, t, 0] - self.E_k) + \
self.G_na*np.power(state[:, t, 1], 3)*state[:, t, 2]*(state[:, t, 0] - self.E_na)) + I_ext[:, t]
ds[:, 1] = alpha_m(state[:, t, 0]) * (1 - state[:, t, 1]) - beta_m(state[:, t, 0]) * state[:, t, 1]
ds[:, 2] = alpha_h(state[:, t, 0]) * (1 - state[:, t, 2]) - beta_h(state[:, t, 0]) * state[:, t, 2]
ds[:, 3] = alpha_n(state[:, t, 0]) * (1 - state[:, t, 3]) - beta_n(state[:, t, 0]) * state[:, t, 3]
state[:, t+1] = state[:, t] + ds * dt
return state
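# Minimal usage sketch (parameter values are illustrative assumptions, not taken from this file):
# hh = Hodgkin_Huxley()
# I_ext = 10.0 * np.ones((1, 10000)) # one trial, constant injected current
# ic = np.array([[-65.0, 0.05, 0.6, 0.32]]) # near-resting (v, m, h, n)
# states = hh.euler_int(T=10000, runs=1, I_ext=I_ext, ic=ic, dt=0.01)
# # states has shape (1, 10000, 4); states[0, :, 0] is the voltage trace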
class FitzHugh_Nagumo():
r"""
A 2D reduction of the Hodgkin-Huxley model to the phase plane.
"""
def __init__(self, b_0, b_1, tau_u, tau_w):
r"""
units are in mV, microS, nF, mA, ms
"""
self.b_0 = b_0
self.b_1 = b_1
self.tau_u = tau_u
self.tau_w = tau_w
def euler_int(self, T, runs, I_ext, ic, dt=0.001, prin=1000):
r"""
Integrate the FitzHugh-Nagumo dynamics; the state array (u, w) is represented by 2 floating-point values
:param int T: timesteps to run the simulation for
:param int runs: number of trials to run (I_ext and i.c. can differ per run)
:param np.array I_ext: external input current, with shape (runs, timesteps)
:param np.array ic: neuron initial conditions, with shape (runs, 2)
:returns: neuron state over the simulation
:rtype: np.array
"""
state = np.zeros((runs, T, 2)) # vector u, w
for k in range(runs):
state[k, 0, :] = ic[k, :]#[-6.49997224e+01, 5.29342176e-02, 5.96111046e-01, 3.17681168e-01]
ds = np.zeros((runs, 2))
iterator = tqdm(range(T-1))
for t in iterator:
ds[:, 0] = 1/self.tau_u * (state[:, t, 0] - state[:, t, 0]**3/3. - state[:, t, 1] + I_ext)
ds[:, 1] = 1/self.tau_w * (self.b_0 + self.b_1*state[:, t, 0] - state[:, t, 1])
state[:, t+1] = state[:, t] + ds * dt
return state
class Morris_Lecar():
r"""
A 2D reduction of the Hodgkin-Huxley model to the phase plane.
"""
def __init__(self, G_na=120, G_k=36, G_l=0.3, E_na=50, E_k=-77, E_l=-54.4):
r"""
units are in mV, microS, nF, mA, ms
"""
self.G_na = G_na
self.G_k = G_k
self.G_l = G_l
self.E_na = E_na
self.E_k = E_k
self.E_l = E_l
def euler_int(self, T, runs, I_ext, ic, dt=0.001, prin=1000):
r"""
Integrate the HH dynamics, the state array (v, m, h, n) is represented by 4 floating-point values
:param int T: timesteps to run the simulation for
:param int runs: number of trials to run (I_ext and i.c. can differ per run)
:param np.array I_ext: external input current, with shape (runs, timesteps)
:param np.array ic: neuron initial conditions, with shape (runs, 4)
:returns: neuron state over the simulation
:rtype: np.array
"""
alpha_m = lambda V: (2.5-0.1*(V+65)) / (np.exp(2.5-0.1*(V+65)) -1)
beta_m = lambda V: 4.0 * np.exp(-(V+65)/18)
alpha_h = lambda V: 0.07 * np.exp(-(V+65)/20)
beta_h = lambda V: 1.0 / (np.exp(3.0-0.1*(V+65)) + 1)
alpha_n = lambda V: (0.1-0.01*(V+65)) / (np.exp(1-0.1*(V+65)) - 1)
beta_n = lambda V: 0.125 * np.exp(-(V+65)/80)
state = np.zeros((runs, T, 4)) # vector v, m, h, n
for k in range(runs):
state[k, 0, :] = ic[k, :]#[-6.49997224e+01, 5.29342176e-02, 5.96111046e-01, 3.17681168e-01]
ds = np.zeros((runs, 4))
iterator = tqdm(range(T-1))
for t in iterator:
ds[:, 0] = -(self.G_l*(state[:, t, 0] - self.E_l) + \
self.G_k*np.power(state[:, t, 3], 4)*(state[:, t, 0] - self.E_k) + \
self.G_na*np.power(state[:, t, 1], 3)*state[:, t, 2]*(state[:, t, 0] - self.E_na)) + I_ext[:, t]
ds[:, 1] = alpha_m(state[:, t, 0]) * (1 - state[:, t, 1]) - beta_m(state[:, t, 0]) * state[:, t, 1]
ds[:, 2] = alpha_h(state[:, t, 0]) * (1 - state[:, t, 2]) - beta_h(state[:, t, 0]) * state[:, t, 2]
ds[:, 3] = alpha_n(state[:, t, 0]) * (1 - state[:, t, 3]) - beta_n(state[:, t, 0]) * state[:, t, 3]
state[:, t+1] = state[:, t] + ds * dt
return state
def count_APs(V, lim=20.0):
r"""
Action potential counter
"""
idx = (V > lim).astype(float)
idf = np.diff(idx) == 1
return idf.sum()
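# Quick sanity check (sketch): two upward crossings of the default 20 mV threshold
# count_APs(np.array([0., 30., 0., 40., 0.])) # -> 2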
# Integrate-and-fire models
class Izhikevich():
r"""
Biophysically inspired Izhikevich model (2003/2004) [1], a nonlinear integrate-and-fire model.
References:
[1]
"""
def __init__(self, a, b, c, d):
self.a = a
self.b = b
self.c = c
self.d = d
def euler_int(self, T, runs, I_ext, ic, dt=0.1, prin=1000):
r"""
Euler integration of the dynamics, with state array (v, u)
"""
state = np.zeros((runs, T, 2)) # vector v, u
spiketrain = np.zeros((runs, T))
reset_state = np.empty((runs, 2))
reset_state[:, 0].fill(self.c)
for k in range(runs):
state[k, 0, :] = ic[k, :]
ds = np.zeros((runs, 2))
iterator = tqdm(range(T-1))
for t in iterator:
ds[:, 0] = 0.04*state[:, t, 0]**2 + 5.*state[:, t, 0] + 140. - state[:, t, 1] + I_ext[:, t]
ds[:, 1] = self.a*(self.b*state[:, t, 0] - state[:, t, 1])
reset = (state[:, t, 0] >= 30.)
if reset.sum() > 0:
reset_state[:, 1] = (state[:, t, 1] + self.d)
state[:, t+1] = reset[:, None]*reset_state + (1-reset)[:, None]*(state[:, t] + ds * dt)
spiketrain[:, t+1] = reset
else:
state[:, t+1] = state[:, t] + ds * dt
return state, spiketrain
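# Usage sketch with the regular-spiking parameters of Izhikevich (2003); values are assumptions,
# not taken from this file:
# iz = Izhikevich(a=0.02, b=0.2, c=-65.0, d=8.0)
# I_ext = 10.0 * np.ones((1, 1000))
# ic = np.array([[-65.0, -65.0 * 0.2]]) # (v, u) with u = b*v
# state, spiketrain = iz.euler_int(T=1000, runs=1, I_ext=I_ext, ic=ic, dt=0.1)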
class AdExIF():
r"""
Adaptive exponential integrate-and-fire model. [1]
References:
[1] `Neuronal Dynamics`, <NAME>, <NAME>, <NAME> and <NAME>.
"""
def __init__(self, a, b, c, d):
self.a = a
self.b = b
self.c = c
self.d = d
def euler_int(self, T, runs, I_ext, ic, dt=0.001, prin=1000):
r"""
Euler integration of the dynamics, with state array (v, u)
"""
state = np.zeros((runs, T, 2)) # vector v, u
|
# coding=utf-8
from __future__ import (division, print_function, absolute_import,
unicode_literals)
'''
Chemical Evolution - chem_evol.py
Functionality
=============
This is the superclass inherited by the SYGMA and the OMEGA modules. It provides
common functions for initialization and for the evolution of one single timestep.
Made by
=======
MAY2015: <NAME>
The core of this superclass is a reorganization of the functions previously found in
earlier versions of SYGMA:
v0.1 NOV2013: <NAME>, <NAME>
v0.2 JAN2014: <NAME>
v0.3 APR2014: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME> & the
NuGrid collaboration
v0.4 FEB2015: <NAME>, <NAME>
v0.5 OCT2016: <NAME>, <NAME>, <NAME>
Stop keeping track of versions from now on.
MARCH2018: <NAME>
- Switched to Python 3
- Capability to include radioactive isotopes
JULY2018: <NAME> & <NAME>
- Re-wrote (improved) yield and lifetime treatment (B. Cote)
- PopIII IMF and yields update (R. Sarmento)
JAN2019: B. Cote
- Re-included radioactive isotopes with the new (improved) yield treatment
FEB2019: <NAME>, <NAME>
- Optimized the code to run faster (integration method)
Usage
=====
See sygma.py and omega.py
'''
# Standard packages
import numpy as np
import time as t_module
import copy
import os
import sys
import re
from pylab import polyfit
from scipy.integrate import quad
from scipy.integrate import dblquad
# Define where is the working directory
nupy_path = os.path.dirname(os.path.realpath(__file__))
# Import NuPyCEE codes
import NuPyCEE.read_yields as ry
class chem_evol(object):
'''
Input parameters (chem_evol.py)
================
special_timesteps : integer
Number of special timesteps. This option (already activated by default)
is activated when special_timesteps > 0. It uses a logarithm timestep
scheme which increases the duration of timesteps throughout the simulation.
Default value : 30
dt : float
Duration of the first timestep [yr] if special_timesteps is activated.
Duration of each timestep if special_timesteps is deactivated.
Default value : 1.0e6
tend : float
Total duration of the simulation [yr].
Default value : 13.0e9
dt_split_info : numpy array
Information regarding the creation of a timestep array with varying step size.
Array format : dt_split_info[number of conditions][0->dt,1->upper time limit]
Example : dt_split_info = [[1e6,40e6],[1e8,13e9]] means the timesteps will be
of 1 Myr until the time reaches 40 Myr, after which the timesteps
will be of 100 Myr until the time reaches 13 Gyr. The number of
"split" is unlimited, but the array must be in chronological order.
Default value : [] --> Not taken into account
imf_bdys : list
Upper and lower mass limits of the initial mass function (IMF) [Mo].
Default value : [0.1,100]
imf_yields_range : list
Initial mass of stars that contribute to stellar ejecta [Mo].
Default value : [1,30]
imf_type : string
Choices : 'salpeter', 'chabrier', 'kroupa', 'alphaimf', 'lognormal'
'alphaimf' creates a custom IMF with a single power-law covering imf_bdys.
'lognormal' creates an IMF of the form Exp[-1/(2*1^2) * Log[x/charMass]^2]
Default value : 'kroupa'
alphaimf : float
Alpha index of the custom IMF, dN/dM = Constant * M^-alphaimf
Default value : 2.35
imf_bdys_pop3 : list
Upper and lower mass limits of the IMF of PopIII stars [Mo].
Default value : [0.1,100]
imf_yields_range_pop3 : list
Initial mass of stars that contribute to PopIII stellar ejecta [Mo].
PopIII stars ejecta taken from Heger et al. (2010)
Default value : [10,30]
imf_pop3_char_mass : float
The characteristic mass in a log normal IMF distribution.
Default value : 40.0
high_mass_extrapolation : string
Extrapolation technique used to extrapolate yields for stars more
massive than the most massive model (MMM) present in the yields table.
Choices:
"copy" --> This will apply the yields of the most massive model
to all more massive stars.
"scale" --> This will scale the yields of the most massive model
using the relation between the total ejected mass and
the initial stellar mass. The latter relation is taken
from the interpolation of the two most massive models.
"extrapolate" --> This will extrapolate the yields of the most massive
model using the interpolation coefficients taken from
the interpolation of the two most massive models.
Default value : "copy"
iniZ : float
Initial metallicity of the gas in mass fraction (e.g. Solar Z = 0.02).
Choices : 0.0, 0.0001, 0.001, 0.006, 0.01, 0.02
(-1.0 to use non-default yield tables)
Default value : 0.0
Z_trans : float
Variable used when interpolating stellar yields as a function of Z.
Transition Z below which PopIII yields are used, and above which default
yields are used.
Default value : -1 (not active)
mgal : float
Initial mass of gas in the simulation [Mo].
Default value : 1.6e11
sn1a_on : boolean
True or False to include or exclude the contribution of SNe Ia.
Default value : True
sn1a_rate : string
SN Ia delay-time distribution function used to calculate the SN Ia rate.
Choices :
'power_law' - custom power law, set parameter with beta_pow (similar to Maoz & Mannucci 2012)
'gauss' - gaussian DTD, set parameter with gauss_dtd
'exp' - exponential DTD, set parameter with exp_dtd
'maoz' - specific power law from Maoz & Mannucci (2012)
Default value : 'power_law'
sn1a_energy : float
Energy ejected by single SNIa event. Units in erg.
Default value : 1e51
ns_merger_on : boolean
True or False to include or exclude the contribution of neutron star mergers.
Note : If t_nsm_coal or nsm_dtd_power is not used (see below), the delay time
distribution of neutron star mergers is given by the standard population synthesis
models of Dominik et al. (2012), using Z = 0.002 and Z = 0.02. In this case, the
total number of neutron star mergers can be tuned using f_binary and f_merger
(see below).
Default value : False
f_binary : float
Binary fraction for massive stars used to determine the total number of neutron
star mergers in a simple stellar population.
Default value : 1.0
f_merger : float
Fraction of massive star binary systems that lead to neutron star mergers in a
simple stellar population.
Default value : 0.0008
beta_pow : float
Slope of the power law for custom SN Ia rate, R = Constant * t^-beta_pow.
Default value : -1.0
gauss_dtd : list
Contains parameter for the gaussian DTD: first the characteristic time [yrs] (gaussian center)
and then the width of the distribution [yrs].
Default value : [3.3e9,6.6e8]
exp_dtd : float
Characteristic delay time [yrs] for the e-folding DTD.
nb_1a_per_m : float
Number of SNe Ia per stellar mass formed in a simple stellar population.
Default value : 1.0e-03
direct_norm_1a : float
Normalization coefficient for SNIa rate integral.
Default: deactivated, but it replaces the usage of the nb_1a_per_m parameter when its value is larger than zero.
transitionmass : float
Initial mass which marks the transition from AGB to massive stars [Mo].
Default value : 8.0
exclude_masses : list
Contains initial masses in yield tables to be excluded from the simulation;
Default value : []
table : string
Path pointing toward the stellar yield tables for massive and AGB stars.
Default value : 'yield_tables/agb_and_massive_stars_nugrid_MESAonly_fryer12delay.txt' (NuGrid)
sn1a_table : string
Path pointing toward the stellar yield table for SNe Ia.
Default value : 'yield_tables/sn1a_t86.txt' (Tielemann et al. 1986)
nsmerger_table : string
Path pointing toward the r-process yield tables for neutron star mergers
Default value : 'yield_tables/r_process_rosswog_2014.txt' (Rosswog et al. 2013)
iniabu_table : string
Path pointing toward the table of initial abundances in mass fraction.
Default value : 'yield_tables/iniabu/iniab2.0E-02GN93.ppn'
yield_tables_dir : string
Path to a custom directory that includes yields.
!! It needs to point to the directory where the yields directory is !!
This will bypass the default yields directory.
Default value : '' --> Deactivated
yield_interp : string
if 'None' : no yield interpolation, no interpolation of total ejecta
if 'lin' - Simple linear yield interpolation.
if 'wiersma' - Interpolation method which makes use of net yields
as used e.g. in Wiersma+ (2009); Does not require net yields.
if netyields_on is true than makes use of given net yields
else calculates net yields from given X0 in yield table.
Default : 'lin'
netyields_on : boolean
if true assumes that yields (input from table parameter)
are net yields.
Default : false.
total_ejecta_interp : boolean
if true then interpolates total ejecta given in yield tables
over initial mass range.
Default : True
stellar_param_on : boolean
if true reads in additional stellar parameter given in table stellar_param_table.
Default : true in sygma and false in omega
stellar_param_table: string
Path pointing toward the table hosting the evolution of stellar parameters
derived from stellar evolution calculations.
Default table : 'yield_tables/isotope_yield_table_MESA_only_param.txt'
iolevel : int
Specifies the amount of output for testing purposes (up to 3).
Default value : 0
poly_fit_dtd : list
Array of polynomial coefficients of a customized delay-time distribution
function for SNe Ia. The polynome can be of any order.
Example : [0.2, 0.3, 0.1] for rate_snIa(t) = 0.2*t**2 + 0.3*t + 0.1
Note : Must be used with the poly_fit_range parameter (see below)
Default value : np.array([]) --> Deactivated
poly_fit_range : list --> [t_min,t_max]
Time range where the customized delay-time distribution function for
SNe Ia will be applied for a simple stellar population.
Default value : np.array([]) --> Deactivated
mass_sampled : list
Stellar masses that are sampled to eject yields in a stellar population.
Warning : The use of this parameter bypasses the IMF calculation and
does not ensure a correlation with the star formation rate. Each sampled
mass will eject the exact amount of mass given in the stellar yields.
Default value : np.array([]) --> Deactivated
scale_cor : 2D list
Determine the fraction of yields ejected for any given stellar mass bin.
Example : [ [1.0,8], [0.5,100] ] means that stars with initial mass between
0 and 8 Msun will eject 100% of their yields, and stars with initial mass
between 8 and 100 will eject 50% of their yields. There is no limit for
the number of [%,M_upper_limit] arrays used.
Default value : np.array([]) --> Deactivated
t_nsm_coal : float
When greater than zero, t_nsm_coal sets the delay time (since star formation)
after which all neutron star mergers occur in a simple stellar population.
Default value : -1 --> Deactivated
nsm_dtd_power : 3-index array --> [t_min, t_max, slope_of_the_power_law]
When used, nsm_dtd_power defines a delay time distribution for neutron
star mergers in the form of a power law, for a simple stellar population.
Example: [1.e7, 1.e10, -1.] --> t^-1 from 10 Myr to 10 Gyr
Default value : [] --> Deactivated
nb_nsm_per_m : float
Number of neutron star mergers per stellar mass formed in a simple
stellar population.
Note : This parameter is only considered when t_nsm_coal or nsm_dtd_power
is used to define the delay time of neutron star mergers.
Default value : -1 --> Deactivated
m_ej_nsm : float
Mass ejected per neutron star merger event.
Default value : 2.5e-02
Delayed extra source
Adding source that requires delay-time distribution (DTD) functions
-------------------------------------------------------------------
delayed_extra_dtd : multi-D Numpy array --> [nb_sources][nb_Z]
nb_sources is the number of different input astrophysical site (e.g.,
SNe Ia, neutron star mergers).
nb_Z is the number of available metallicities.
delayed_extra_dtd[i][j] is a 2D array in the form of
[ number_of_times ][ 0-time, 1-rate ].
Default value : np.array([]), deactivated
delayed_extra_dtd_norm : multi-D Numpy array --> [nb_sources]
Total number of delayed sources occurring per Msun formed,
for each source and each metallicity.
Default value : np.array([]), deactivated
delayed_extra_yields : Numpy array of strings
Path to the yields table for each source.
Default value : np.array([]), deactivated
delayed_extra_yields_norm : multi-D Numpy array --> [nb_sources][nb_Z]
Fraction of the yield table (float) that will be ejected per event,
for each source and each metallicity. This will be the mass ejected
per event if the yields are in mass fraction (normalized to 1).
Default value : np.array([]), deactivated
delayed_extra_stochastic : Numpy array of Boolean --> [nb_sources]
Determine whether the DTD provided as an input needs to be
stochastically sampled using a Monte Carlo technique.
Default value : np.array([]), deactivated
Run example
===========
See sygma.py and omega.py
'''
##############################################
## Constructor ##
##############################################
def __init__(self, imf_type='kroupa', alphaimf=2.35, imf_bdys=[0.1,100], \
sn1a_rate='power_law', iniZ=0.02, dt=1e6, special_timesteps=30, \
nsmerger_bdys=[8, 100], tend=13e9, mgal=1.6e11, transitionmass=8, iolevel=0, \
ini_alpha=True, \
table='yield_tables/agb_and_massive_stars_nugrid_MESAonly_fryer12delay.txt', \
use_decay_module=False, f_network='isotopes_modified.prn', f_format=1, \
table_radio='', decay_file='', sn1a_table_radio='',\
bhnsmerger_table_radio='', nsmerger_table_radio='',\
hardsetZ=-1, sn1a_on=True, sn1a_table='yield_tables/sn1a_t86.txt',\
sn1a_energy=1e51, ns_merger_on=False, bhns_merger_on=False,\
f_binary=1.0, f_merger=0.0008, t_merger_max=1.3e10,\
m_ej_nsm = 2.5e-02, nb_nsm_per_m=-1.0, \
t_nsm_coal=-1.0, m_ej_bhnsm=2.5e-02, nsm_dtd_power=[],\
bhnsmerger_table = 'yield_tables/r_process_arnould_2007.txt', \
nsmerger_table = 'yield_tables/r_process_arnould_2007.txt',\
iniabu_table='', extra_source_on=False, \
extra_source_table=['yield_tables/extra_source.txt'], \
f_extra_source=[1.0], pre_calculate_SSPs=False,\
extra_source_mass_range=[[8,30]], \
extra_source_exclude_Z=[[]], radio_refinement=100, \
pop3_table='yield_tables/popIII_heger10.txt', \
imf_bdys_pop3=[0.1,100], imf_yields_range_pop3=[10,30], \
imf_pop3_char_mass=40.0,\
high_mass_extrapolation='copy',\
use_external_integration=False,\
starbursts=[], beta_pow=-1.0,gauss_dtd=[3.3e9,6.6e8],\
exp_dtd=2e9,nb_1a_per_m=1.0e-3,direct_norm_1a=-1,Z_trans=0.0, \
f_arfo=1, imf_yields_range=[1,30],exclude_masses=[],\
netyields_on=False,wiersmamod=False,yield_interp='lin',\
print_off=False, yield_tables_dir='',\
total_ejecta_interp=True, tau_ferrini=False,\
input_yields=False,t_merge=-1.0,stellar_param_on=False,\
stellar_param_table='yield_tables/stellar_feedback_nugrid_MESAonly.txt',\
popIII_info_fast=True, out_follows_E_rate=False, \
t_dtd_poly_split=-1.0, delayed_extra_log=False, \
delayed_extra_yields_log_int=False, \
delayed_extra_log_radio=False, delayed_extra_yields_log_int_radio=False, \
pritchet_1a_dtd=[], ism_ini=np.array([]), ism_ini_radio=np.array([]),\
nsmerger_dtd_array=np.array([]),\
bhnsmerger_dtd_array=np.array([]),\
ytables_in=np.array([]), zm_lifetime_grid_nugrid_in=np.array([]),\
isotopes_in=np.array([]), ytables_pop3_in=np.array([]),\
zm_lifetime_grid_pop3_in=np.array([]), ytables_1a_in=np.array([]),\
ytables_nsmerger_in=np.array([]), dt_in_SSPs=np.array([]),\
dt_in=np.array([]),dt_split_info=np.array([]),\
ej_massive=np.array([]), ej_agb=np.array([]),\
ej_sn1a=np.array([]), ej_massive_coef=np.array([]),\
ej_agb_coef=np.array([]), ej_sn1a_coef=np.array([]),\
dt_ssp=np.array([]), poly_fit_dtd_5th=np.array([]),\
mass_sampled_ssp=np.array([]), scale_cor_ssp=np.array([]),\
poly_fit_range=np.array([]), SSPs_in=np.array([]),\
delayed_extra_dtd=np.array([]), delayed_extra_dtd_norm=np.array([]), \
delayed_extra_yields=np.array([]), delayed_extra_yields_norm=np.array([]), \
delayed_extra_yields_radio=np.array([]), \
delayed_extra_yields_norm_radio=np.array([]), \
delayed_extra_stochastic=np.array([]), \
ytables_radio_in=np.array([]), radio_iso_in=np.array([]), \
ytables_1a_radio_in=np.array([]), ytables_nsmerger_radio_in=np.array([]),\
test_clayton=np.array([]), inter_Z_points=np.array([]),\
nb_inter_Z_points=np.array([]), y_coef_M=np.array([]),\
y_coef_M_ej=np.array([]), y_coef_Z_aM=np.array([]),\
y_coef_Z_bM=np.array([]), y_coef_Z_bM_ej=np.array([]),\
tau_coef_M=np.array([]), tau_coef_M_inv=np.array([]),\
tau_coef_Z_aM=np.array([]), tau_coef_Z_bM=np.array([]),\
tau_coef_Z_aM_inv=np.array([]), tau_coef_Z_bM_inv=np.array([]),\
y_coef_M_pop3=np.array([]), y_coef_M_ej_pop3=np.array([]),\
tau_coef_M_pop3=np.array([]), tau_coef_M_pop3_inv=np.array([]),\
inter_lifetime_points_pop3=np.array([]),\
inter_lifetime_points_pop3_tree=np.array([]),\
nb_inter_lifetime_points_pop3=np.array([]),\
inter_lifetime_points=np.array([]), inter_lifetime_points_tree=np.array([]),\
nb_inter_lifetime_points=np.array([]), nb_inter_M_points_pop3=np.array([]),\
inter_M_points_pop3_tree=np.array([]), nb_inter_M_points=np.array([])
|
"""
An advancing front grid generator for use with unstructured_grid
Largely a port of paver.py.
"""
from __future__ import print_function
import math
import numpy as np
from collections import defaultdict
import time
from scipy import optimize as opt
import pdb
import logging
log=logging.getLogger(__name__)
from shapely import geometry
from . import (unstructured_grid,
exact_delaunay,
shadow_cdt)
from .. import utils
try:
import matplotlib.pyplot as plt
except ImportError:
log.warning("Plotting not available - no matplotlib")
plt=None
def circumcenter_py(p1,p2,p3):
""" Compute circumcenter of a single triangle using pure python.
For small input sizes, this is much faster than using the vectorized
numpy version in utils.
"""
ref = p1
p1x = 0
p1y = 0
p2x = p2[0] - ref[0]
p2y = p2[1] - ref[1]
p3x = p3[0] - ref[0]
p3y = p3[1] - ref[1]
# taken from TRANSFORMER_gang.f90
dd=2.0*((p1x-p2x)*(p1y-p3y) -(p1x-p3x)*(p1y-p2y))
b_com=p1x*p1x+p1y*p1y
b1=b_com-p2x*p2x-p2y*p2y
b2=b_com-p3x*p3x-p3y*p3y
# avoid division by zero if the points are collinear
dd=max(dd,1e-40)
return [ (b1*(p1y-p3y)-b2*(p1y-p2y))/dd + ref[0] ,
(b2*(p1x-p2x)-b1*(p1x-p3x))/dd + ref[1] ]
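# Quick check (sketch): for a right triangle with its legs on the axes the circumcenter is the
# midpoint of the hypotenuse, e.g. circumcenter_py([0,0],[4,0],[0,3]) -> [2.0, 1.5]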
# from numba import jit, int32, float64
# @jit(nopython=True)
# @jit
# @jit(float64(float64[:],float64[:,:,:],float64),nopython=True)
def one_point_cost(pnt,edges,target_length=5.0):
# pnt is intended to complete a triangle with each
# pair of points in edges, and should be to the left
# of each edge
penalty = 0
max_angle = 85.0*np.pi/180.
# all_edges[triangle_i,{ab,bc,ca},{x,y}]
all_edges = np.zeros( (edges.shape[0], 3 ,2), np.float64 )
# get the edges:
all_edges[:,0,:] = edges[:,0] - pnt # ab
all_edges[:,1,:] = edges[:,1] - edges[:,0] # bc
all_edges[:,2,:] = pnt - edges[:,1] # ca
i = np.arange(3)
im1 = (i-1)%3
#--# cost based on angle:
abs_angles = np.arctan2( all_edges[:,:,1], all_edges[:,:,0] )
all_angles = (np.pi - (abs_angles[:,i] - abs_angles[:,im1]) % (2*np.pi)) % (2*np.pi)
if 1:
# 60 is what it's been for a while, but I think in one situation
# this put too much weight on small angles.
# tried considering just large angles, but that quickly blew up.
# even just changing this to 50 still blows up.
# how about a small tweak - s/60/58/ ??
worst_angle = np.abs(all_angles - 60*np.pi/180.).max()
alpha = worst_angle /(max_angle - 60*np.pi/180.0)
# 10**alpha: edges got very short...
# 5**alpha - 1: closer, but overall still short edges.
# alpha**5: angles look kind of bad
angle_penalty = 10*alpha**5
# Seems like it doesn't try hard enough to get rid of almost bad angles.
# in one case, there is a small angle of 33.86 degrees, and another angle
# of 84.26 degrees. so the cost function only cares about the small angle
# because it is slightly more deviant from 60deg, but we may be in a cell
# where the only freedom we have is to change the larger angles.
# so add this in:
if 1:
# extra exponential penalty for nearly bad triangles:
# These values mean that 3 degrees before the triangle is invalid
# the exponential cuts in and will add a factor of e by the time the
# triangles is invalid.
scale_rad = 3.0*np.pi/180. # radians - e-folding scale of the cost
# max_angle - 2.0*scale_rad works..
thresh = max_angle - 1.0*scale_rad # angle at which the exponential 'cuts in'
big_angle_penalty = np.exp( (all_angles.max() - thresh) / scale_rad)
else:
alphas = (all_angles - 60*np.pi/180.) / (max_angle - 60*np.pi/180.)
alphas = 10*alphas**4
angle_penalty = alphas.sum()
penalty += angle_penalty + big_angle_penalty
#--# Length penalties:
if 1:
ab_lens = (all_edges[:,0,:]**2).sum(axis=1)
ca_lens = (all_edges[:,2,:]**2).sum(axis=1)
min_ab=ab_lens.min() # min(ab_lens)
min_ca=ca_lens.min() # min(ca_lens)
else:
# maybe better for numba?
min_ab=np.inf
min_ca=np.inf
for idx in range(edges.shape[0]):
l_ab=(all_edges[idx,0,:]**2).sum()
l_ca=(all_edges[idx,2,:]**2).sum()
if l_ab<min_ab:
min_ab=l_ab
if l_ca<min_ca:
min_ca=l_ca
# had been using ab_lens.min(), but numba didn't like that.
# okay - the problem is that numba doesn't understand the sum
# above, and thinks that ab_lens is a scalar.
min_len = min( min_ab,min_ca )
max_len = max( min_ab,min_ca )
undershoot = target_length**2 / min_len
overshoot = max_len / target_length**2
length_penalty = 0
length_factor = 2
length_penalty += length_factor*(max(undershoot,1) - 1)
length_penalty += length_factor*(max(overshoot,1) - 1)
# paver had two other approachs, effectively commented out
penalty += length_penalty
return penalty
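# Sanity-check sketch: completing an equilateral triangle at the target scale is nearly free,
# e.g. one_point_cost(np.array([2.5, 2.5*np.sqrt(3)]), np.array([[[0., 0.], [5., 0.]]]),
# target_length=5.0) is ~1e-3, dominated by the small exponential big-angle term.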
class Curve(object):
"""
Boundaries which can be open or closed, indexable
by a floating point value (including modulo arithmetic).
By default, indexes by distance along each segment.
"""
class CurveException(Exception):
pass
def __init__(self,points,closed=True,ccw=None):
"""
points: [N,2]
closed: if True, treat this as a closed ring
ccw: if True, make sure the order is ccw,
False - make sure cw
None - leave as is.
"""
if ccw is not None:
area=utils.signed_area(points)
if (area>0) != bool(ccw):
points=points[::-1,:]
self.points=np.asarray(points)
self.closed=bool(closed)
if self.closed:
if np.all(self.points[0]==self.points[-1]):
pass # already duplicated
else:
self.points = np.concatenate( (self.points,
self.points[:1,:] ) )
else:
assert not np.all(self.points[0]==self.points[-1])
self.distances=utils.dist_along(self.points)
def __call__(self,f,metric='distance'):
if metric=='distance':
if self.closed:
# wraps around
# double mod in case f==-eps
f=(f % self.distances[-1]) % self.distances[-1]
# side='right' ensures that f=0 works
# it's unfortunately possible to get f=-eps, which rounds in
# a way such that (f % distances[-1]) == distances[-1]
# the double mod above might solve that
idxs=np.searchsorted(self.distances,f,side='right') - 1
assert not np.any( f>self.distances[-1] ),"Curve: Range or round off problem"
idxs=idxs.clip(0,len(self.distances)-2) # to be sure equality doesn't push us off the end
alphas = (f - self.distances[idxs]) / (self.distances[idxs+1]-self.distances[idxs])
if not np.isscalar(alphas):
alphas = alphas[:,None]
return (1-alphas)*self.points[idxs] + alphas*self.points[idxs+1]
else:
assert False
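# Example (sketch, assuming utils.dist_along returns cumulative arc length starting at 0):
# sq = Curve(np.array([[0., 0.], [10., 0.], [10., 10.], [0., 10.]]), closed=True)
# sq(5.0) # -> array([5., 0.]), halfway along the first edge
# sq(45.0) # wraps around: 45 % 40 == 5, so the same point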
def total_distance(self):
return self.distances[-1]
def upsample(self,scale,return_sources=False):
"""
return_sources: return a second array having the distance values for each
return point, if this is true.
"""
# def upsample_linearring(points,density,closed_ring=1,return_sources=False):
new_segments = []
sources = []
for i,(A,B) in enumerate(zip( self.points[:-1,:],
self.points[1:,:] ) ):
l = utils.dist(B-A)
local_scale = scale( 0.5*(A+B) )
npoints = max(1,round( l/local_scale ))
alphas = np.arange(npoints) / float(npoints)
alphas=alphas[:,None]
new_segment = (1.0-alphas)*A + alphas*B
new_segments.append(new_segment)
if return_sources:
sources.append(self.distances[i] + alphas*l)
new_points = np.concatenate( new_segments )
if return_sources:
sources = np.concatenate(sources)
return new_points,sources
else:
return new_points
def distance_away(self,anchor_f,signed_distance,rtol=0.05):
""" Find a point on the curve signed_distance away from the
point corresponding to anchor_f, within the given relative tolerance.
returns new_f,new_x.
If a point could not be found within the requested tolerance, raises
a self.CurveException.
"""
sign=int(np.sign(signed_distance))
abs_dist=np.abs(signed_distance)
anchor_pnt=self(anchor_f)
anchor_idx_a=np.searchsorted(self.distances,anchor_f,side='right') - 1
anchor_idx_b=(anchor_idx_a+1)%(len(self.points)-1)
if sign<0:
anchor_idx_a,anchor_idx_b=anchor_idx_b,anchor_idx_a
# How many segment of the curve are we willing to examine? all of them,
# but no more.
Npnts=len(self.points)-1 # duplicate for closed ring
max_segs=Npnts
for segi in range(max_segs):
idxa=anchor_idx_a+sign*segi
idxb=idxa+sign # +-1
idxa=idxa%Npnts
idxb=idxb%Npnts
if segi==0:
# only care about the portion of the first segment
# "ahead" of anchor (TODO: handle sign<0)
pnta=anchor_pnt
else:
pnta=self.points[idxa]
pntb=self.points[idxb]
dista=utils.dist(pnta - anchor_pnt)
distb=utils.dist(pntb - anchor_pnt)
# as written, this may bail out of the iteration with an
# inferior solution (i.e. stop when the error is 5%, rather
# than go to the next segment where we could get an exact
# answer). It's not too bad though.
if (dista<(1-rtol)*abs_dist) and (distb<(1-rtol)*abs_dist):
# No way this segment is good.
continue
else:
break
else:
# i.e. checked everybody, could never get far enough
# away
raise self.CurveException("Could not get far enough away")
assert dista<distb
assert dista<(1+rtol)*abs_dist
assert distb>(1-rtol)*abs_dist
if segi==0:
close_f=anchor_f
else:
close_f=self.distances[idxa]
far_f=self.distances[idxb]
if sign*far_f<sign*close_f:
far_f+=sign*self.distances[-1]
# explicitly check the far end point
if abs(distb-abs_dist) / abs_dist < rtol:
# good enough
result=far_f,self(far_f)
else:
# if there are large disparities in adjacent edge lengths
# it's possible that it takes many iterations here.
for maxit in range(20):
mid_f=0.5*(close_f+far_f)
pnt_mid=self(mid_f)
dist_mid=utils.dist(pnt_mid - anchor_pnt)
rel_err = (dist_mid-abs_dist)/abs_dist
if rel_err < -rtol:
close_f=mid_f
elif rel_err > rtol:
far_f=mid_f
else:
result=mid_f,pnt_mid
break
else:
assert False
return result
def point_to_f(self,x,f_start=0,direction=1,rel_tol=1e-4):
"""
Return the ring_f which yields a point close to x.
This scans the points in the curve, starting with f_start
and proceeding in the given direction.
if direction is 0, both directions will be attempted and
the first valid result returned.
rel_tol: stop when a point is found within rel_tol*len(segment)
of a segment
"""
# Walk along the curve, looking for a segment which approximately
# contains x.
# Have to be careful about exact matches. distances[i] should always
# yield idx_start=i.
# But anything in between depends on the direction
if direction==1:
idx_start=np.searchsorted(self.distances,f_start,side='right') - 1
elif direction==-1:
idx_start=np.searchsorted(self.distances,f_start,side='left')
elif direction==0:
# try either, accept any hit.
try:
return self.point_to_f(x,f_start=f_start,direction=1,rel_tol=rel_tol)
except self.CurveException:
return self.point_to_f(x,f_start=f_start,direction=-1,rel_tol=rel_tol)
else:
raise Exception("direction must be +-1")
# Start traversing the segments:
seg_idx_a=idx_start
for i in range(len(self.points)): # max possible traversal
seg_idx_b=seg_idx_a + direction
# Wrapping
if seg_idx_b<0:
if self.closed:
seg_idx_b += len(self.points)
else:
break
if seg_idx_b>=len(self.points):
if self.closed:
seg_idx_b -= len(self.points)
else:
break
seg=self.points[ [seg_idx_a,seg_idx_b] ]
seg_len=utils.dist(seg[0],seg[1])
dist,alpha = utils.point_segment_distance(x,seg,return_alpha=True)
if dist/seg_len < rel_tol:
# How to get to an f from this?
new_f=self.distances[seg_idx_a] + direction*alpha*seg_len
if not self.closed:
new_f=max(0,min(new_f,self.distances[-1]))
return new_f
seg_idx_a=seg_idx_b
raise self.CurveException("Failed to find a point within tolerance")
def is_forward(self,fa,fb,fc):
""" return true if fa,fb and fc are distinct and
ordered CCW around the curve
"""
if fa==fb or fb==fc or fc==fa:
return False
if self.closed:
d=self.total_distance()
return ((fb-fa) % d) < ((fc-fa)%d)
else:
return fa<fb<fc
# return ( (fb-fa) < (fc-fa) )
def is_reverse(self,fa,fb,fc):
# for closed curves, is_reverse=not is_forward, but
# for open curves, that's not necessarily true.
# when degenerate situations are included, then they
# are not opposites even for closed curves.
if fa==fb or fb==fc or fc==fa:
return False
if self.closed:
d=self.total_distance()
return ((fb-fc) % d) < ((fa-fc)%d)
else:
# return (fa-fb) < (fa-fc)
return fc<fb<fa
def is_ordered(self,fa,fb,fc):
"""
Non-robust check for fb falling between fa and fc. For a closed
curve, this resorts to the heuristic of whether fb falls
between fa and fc on the shorter way around.
"""
if self.closed:
tdist=self.total_distance()
if (fa-fc) % tdist < tdist/2:
if self.is_forward(fc,fb,fa):
return True
else:
if self.is_forward(fa,fb,fc):
return True
return False
else:
return (fa<fb<fc) or (fa>fb>fc)
def signed_area(self):
assert self.closed
return utils.signed_area(self.points)
def reverse(self):
return Curve(points=self.points[::-1,:],
closed=self.closed)
def plot(self,ax=None,**kw):
ax=ax or plt.gca()
return ax.plot(self.points[:,0],self.points[:,1],**kw)[0]
def internal_angle(A,B,C):
BA=A-B
BC=C-B
theta_BA = np.arctan2( BA[1], BA[0] )
theta_BC = np.arctan2( BC[1], BC[0] )
return (theta_BA - theta_BC) % (2*np.pi)
class StrategyFailed(Exception):
pass
class Strategy(object):
def metric(self,site,scale_factor):
assert False
def execute(self,site):
"""
Apply this strategy to the given Site.
Returns a dict with nodes,cells which were modified
"""
assert False
class WallStrategy(Strategy):
"""
Add two edges and a new triangle to the forward side of the
site.
"""
def __str__(self):
return "<Wall>"
def metric(self,site):
# rough translation from paver
theta=site.internal_angle * 180/np.pi
scale_factor = site.edge_length / site.local_length
# Wall can be applied in a wide variety of situations
# angles greater than 90, Wall may be the only option
# angles less than 60, and we can't do a wall.
# np.clip( (120 - theta) / 30, 0,np.inf)
# at 90, we can try, but a bisect would be better.
# at 180, this is the only option.
return (180-theta) / 180
def execute(self,site):
na,nb,nc= site.abc
grid=site.grid
b,c = grid.nodes['x'][ [nb,nc] ]
bc=c-b
new_x = b + utils.rot(np.pi/3,bc)
nd=grid.add_node(x=new_x,fixed=site.af.FREE)
# new_c=grid.add_cell_and_edges( [nb,nc,nd] )
j0=grid.nodes_to_edge(nb,nc)
unmesh2=[grid.UNMESHED,grid.UNMESHED]
# the correct unmeshed will get overwritten in
# add cell.
j1=grid.add_edge(nodes=[nc,nd],cells=unmesh2)
j2=grid.add_edge(nodes=[nb,nd],cells=unmesh2)
new_c=grid.add_cell(nodes=[nb,nc,nd],
edges=[j0,j1,j2])
return {'nodes': [nd],
'cells': [new_c] }
class BisectStrategy(Strategy):
"""
Add three edges and two new triangles.
"""
def __str__(self):
return "<Bisect>"
def metric(self,site):
# rough translation from paver
theta=site.internal_angle * 180/np.pi
scale_factor = site.edge_length / site.local_length
# Ideal is 120 degrees for a bisect
# Can't bisect when it's nearing 180.
if theta> 2*89:
return np.inf # not allowed
else:
ideal=120 + (1-scale_factor)*30
return np.abs( (theta-ideal)/ 50 ).clip(0,1)
def execute(self,site):
na,nb,nc= site.abc
grid=site.grid
b,c = grid.nodes['x'][ [nb,nc] ]
bc=c-b
new_x = b + utils.rot(np.pi/3,bc)
nd=grid.add_node(x=new_x,fixed=site.af.FREE)
# new_c=grid.add_cell_and_edges( [nb,nc,nd] )
j_ab=grid.nodes_to_edge(na,nb)
j_bc=grid.nodes_to_edge(nb,nc)
unmesh2=[grid.UNMESHED,grid.UNMESHED]
# the correct unmeshed will get overwritten in
# add cell.
j_cd=grid.add_edge(nodes=[nc,nd],cells=unmesh2)
j_bd=grid.add_edge(nodes=[nb,nd],cells=unmesh2)
j_ad=grid.add_edge(nodes=[na,nd],cells=unmesh2)
new_c1=grid.add_cell(nodes=[nb,nc,nd],
edges=[j_bc,j_cd,j_bd])
new_c2=grid.add_cell(nodes=[na,nb,nd],
edges=[j_ab,j_bd,j_ad])
return {'nodes': [nd],
'cells': [new_c1,new_c2],
'edges': [j_cd,j_bd,j_ad] }
class ResampleStrategy(Strategy):
""" TESTING: resample one step beyond.
"""
def __str__(self):
return "<Resample>"
def nodes_beyond(self,site):
he=site.grid.nodes_to_halfedge(site.abc[0],site.abc[1])
pre_a=he.rev().node_rev()
post_c=he.fwd().fwd().node_fwd()
return pre_a,post_c
def distances(self,site):
"return pair of distances from the site to next node"
pre_a,post_c = self.nodes_beyond(site)
p_pa,p_a,p_c,p_pc=site.grid.nodes['x'][ [pre_a,
site.abc[0],
site.abc[2],
post_c] ]
dists=[utils.dist( p_pa - p_a ),
utils.dist( p_c - p_pc )]
return dists
def metric(self,site):
dists=self.distances(site)
# return a good low score when those distances are short relative
# scale
scale=site.local_length
return min( dists[0]/scale,dists[1]/scale )
def execute(self,site):
grid=site.grid
scale=site.local_length
metric0=self.metric(site)
def maybe_resample(n,anchor,direction):
if n in site.abc:
# went too far around! Bad!
return n
# Is this overly restrictive? What if the edge is nice
# and long, and just wants a node in the middle?
# That should be allowed, until there is some way of annotating
# edges as rigid.
# But at the moment that breaks things.
# it shouldn't though. And the checks here duplicate checks in
# af.resample(). So skip the test, and go for it.
# if grid.nodes['fixed'][n] in [site.af.HINT,site.af.SLIDE]:
try:
n=site.af.resample(n=n,anchor=anchor,scale=scale,
direction=direction)
except Curve.CurveException as exc:
pass
return n
# execute one side at a time, since it's possible for a
# resample on one side to reach into the other side.
he=site.grid.nodes_to_halfedge(site.abc[0],site.abc[1])
pre_a=he.rev().node_rev()
new_pre_a=maybe_resample(pre_a,site.abc[0],-1)
post_c=he.fwd().fwd().node_fwd()
new_post_c=maybe_resample(post_c,site.abc[2],1)
metric=self.metric(site)
if metric>metric0:
# while other nodes may have been modified, these are
# the ones still remaining, and even these are probably of
# no use for optimization. may change this to report no
# optimizable items
return {'nodes':[new_pre_a,new_post_c]}
else:
log.warning("Resample made no improvement (%f => %f)"%(metric0,metric))
raise StrategyFailed("Resample made no improvement")
class CutoffStrategy(Strategy):
def __str__(self):
return "<Cutoff>"
def metric(self,site):
theta=site.internal_angle
scale_factor = site.edge_length / site.local_length
# Cutoff wants a small-ish internal angle
# If the sites edges are long, scale_factor > 1
# and we'd like to be making smaller edges, so ideal angle gets smaller
#
if theta> 89*np.pi/180:
return np.inf # not allowed
else:
ideal=60 + (1-scale_factor)*30
return np.abs(theta - ideal*np.pi/180.)
def execute(self,site):
grid=site.grid
na,nb,nc=site.abc
j0=grid.nodes_to_edge(na,nb)
j1=grid.nodes_to_edge(nb,nc)
j2=grid.nodes_to_edge(nc,na)
if j2 is None:
# typical, but if we're finishing off the last triangle, this edge
# exists.
j2=grid.add_edge(nodes=[nc,na],cells=[grid.UNMESHED,grid.UNMESHED])
c=site.grid.add_cell(nodes=site.abc,
edges=[j0,j1,j2])
return {'cells':[c] }
class JoinStrategy(Strategy):
"""
Given an inside angle, merge the two edges
"""
def __str__(self):
return "<Join>"
def metric(self,site):
theta=site.internal_angle
scale_factor = site.edge_length / site.local_length
# Cutoff wants a small-ish internal angle
# If the sites edges are long, scale_factor > 1
# and we'd like to be making smaller edges, so ideal angle gets smaller
#
if theta> 89*np.pi/180:
return np.inf # not allowed
else:
# as theta goes to 0, a Join has no effect on scale.
#
# at larger theta, a join effectively coarsens
# so if edges are too small, we want to coarsen, scale_factor
# will be < 1
# adding the factor of 2: it was choosing join too often.
return 2*scale_factor * theta
def execute(self,site):
grid=site.grid
na,nb,nc=site.abc
# special case, when na and nc share a second common neighbor,
# forming a quad, that neighbor will be kept in nd
nd=None
# choose the node to move -
mover=None
j_ac=grid.nodes_to_edge(na,nc)
j_ac_oring=0
if j_ac is not None:
# special case: nodes are already joined, but there is no
# cell.
# this *could* be extended to allow the deletion of thin cells,
# but I don't want to get into that yet (since it's modification,
# not creation)
if (grid.edges['cells'][j_ac,0] >=0) or (grid.edges['cells'][j_ac,1]>=0):
raise StrategyFailed("Edge already has real cells")
# remember for tests below:
j_ac_oring=grid.edges['oring'][j_ac]
grid.delete_edge(j_ac)
j_ac=None
# a previous version only checked fixed against HINT and SLIDE
# when the edge j_ac existed. Why not allow this comparison
# even when j_ac doesn't exist?
# need to be more careful than that, though. The only time it's okay
# for a SLIDE or HINT to be the mover is if anchor is on the same ring,
# and the path between them is clear, which means b cannot be on that
# ring.
if grid.nodes['fixed'][na]==site.af.FREE:
mover=na
anchor=nc
elif grid.nodes['fixed'][nc]==site.af.FREE:
mover=nc
anchor=na
elif grid.nodes['oring'][na]>0 and grid.nodes['oring'][nc]>0:
# *might* be legal but requires more checks:
ring=grid.nodes['oring'][na]
if ring!=grid.nodes['oring'][nc]: # this can maybe get relaxed to join onto a fixed node on multiple rings
raise StrategyFailed("Cannot join across rings")
if grid.nodes['oring'][nb]==ring:
# This original check is too lenient. in a narrow
# channel, it's possible to have the three nodes
# on the same ring, straddling the channel, and this
# may allow for a join across the channel.
# # this is a problem if nb falls in between them.
# fa,fb,fc=grid.nodes['ring_f'][ [na,nb,nc] ]
# curve=site.af.curves[ring-1]
#
# if curve.is_ordered(fa,fb,fc):
# raise StrategyFailed("Cannot join across middle node")
# instead, check for an edge between a and c.
if j_ac_oring!=ring:
raise StrategyFailed("Cannot join non-adjacent along ring")
# probably okay, not sure if there are more checks to attempt
if grid.nodes['fixed'][na]==site.af.HINT:
mover,anchor=na,nc
else:
mover,anchor=nc,na
else:
raise StrategyFailed("Neither node can be moved")
he_ab=grid.nodes_to_halfedge(na,nb)
he_da=he_ab.rev()
pre_a=he_da.node_rev()
he_bc=he_ab.fwd()
he_cd=he_bc.fwd()
post_c=he_cd.node_fwd()
if pre_a==post_c:
log.info("Found a quad - proceeding carefully with nd")
nd=pre_a
# figure out external cell markers before the half-edges are invalidated.
# note the cell index on the outside of mover, and record half-edges
# for the anchor side
if mover==na:
cell_opp_mover=he_ab.cell_opp()
cell_opp_dmover=he_da.cell_opp()
he_anchor=he_bc
he_danchor=he_cd
else:
cell_opp_mover=he_bc.cell_opp()
cell_opp_dmover=he_cd.cell_opp()
he_anchor=he_ab
he_danchor=he_da
edits={'cells':[],'edges':[] }
cells_to_replace=[]
def archive_cell(c):
cells_to_replace.append( (c,grid.cells[c].copy()) )
grid.delete_cell(c)
edges_to_replace=[]
def archive_edge(j):
for c in grid.edges['cells'][j]:
if c>=0:
archive_cell(c)
edges_to_replace.append( (j,grid.edges[j].copy()) )
grid.delete_edge(j)
for j in list(grid.node_to_edges(mover)):
archive_edge(j)
grid.delete_node(mover)
for j,data in edges_to_replace:
nodes=data['nodes']
for i in [0,1]:
if nodes[i]==mover:
if (nodes[1-i]==nb) or (nodes[1-i]==nd):
nodes=None # signal that we don't add it
else:
nodes[i]=anchor
break
if nodes is not None:
# need to remember boundary, but any real
# cells get added in the next step, so can
# be -2 here.
cells=data['cells']
if cells[0]>=0:
cells[0]=-2
if cells[1]>=0:
cells[1]=-2
# This can raise Collinear exceptions
# also, it's possible that one of these edges will be a dupe,
# in the case of a quad
try:
# fairly sure there are tests above which prevent
# this from having to populate additional fields, but
# not positive. 2018-02-26: need to think about oring.
jnew=grid.add_edge( nodes=nodes, cells=cells,
oring=data['oring'],ring_sign=data['ring_sign'],
fixed=data['fixed'] )
except exact_delaunay.ConstraintCollinearNode:
raise StrategyFailed("Edge was collinear with existing nodes")
edits['edges'].append(jnew)
for c,data in cells_to_replace:
nodes=data['nodes']
for ni,n in enumerate(nodes):
if n==mover:
nodes[ni]=anchor
cnew=grid.add_cell(nodes=nodes)
edits['cells'].append(cnew)
if cell_opp_mover<0: # need to update boundary markers
j_cells=grid.edges['cells'][he_anchor.j,:].copy()
j_cells[he_anchor.orient]=cell_opp_mover
grid.modify_edge(he_anchor.j,cells=j_cells)
if nd is not None and cell_opp_dmover<0:
j_cells=grid.edges['cells'][he_danchor.j,:].copy()
j_cells[he_danchor.orient]=cell_opp_dmover
grid.modify_edge(he_danchor.j,cells=j_cells)
# This check could also go in unstructured_grid, maybe optionally?
areas=grid.cells_area()
if np.any( areas[edits['cells']]<=0.0 ):
raise StrategyFailed("Join created non-positive area cells")
return edits
class NonLocalStrategy(Strategy):
"""
Add an edge to a nearby, but not locally connected, element.
    Currently, this is not very strong at identifying whether a
    nearby node is actually a good candidate for a nonlocal connection.
"""
def __str__(self):
return "<Nonlocal>"
def nonlocal_pair(self,site):
"""
Nonlocal nodes for a site
"""
af=site.af
best_pair=None,None
best_dist=np.inf
        # skip over neighbors of any of the site's nodes
# take any neighbors in the DT.
each_dt_nbrs=[af.cdt.delaunay_neighbors(n) for n in site.abc]
if 1:
# filter out neighbors which are not within the 'sector'
# defined by the site.
apnt,bpnt,cpnt=af.grid.nodes['x'][site.abc]
ba_angle=np.arctan2(apnt[1] - bpnt[1],
apnt[0] - bpnt[0])
bc_angle=np.arctan2(cpnt[1] - bpnt[1],
cpnt[0] - bpnt[0])
old_each_dt_nbrs=each_dt_nbrs
each_dt_nbrs=[]
for nbrs in old_each_dt_nbrs:
nbrs_pnts=af.grid.nodes['x'][nbrs]
diffs=nbrs_pnts - bpnt
angles=np.arctan2(diffs[:,1], diffs[:,0])
# want to make sure that the angles from b to a,nbr,c
# are consecutive
angle_sum = (angles-bc_angle)%(2*np.pi) + (ba_angle-angles)%(2*np.pi)
valid=(angle_sum < 2*np.pi)
each_dt_nbrs.append(nbrs[valid])
each_nbrs=[af.grid.node_to_nodes(n) for n in site.abc]
# flat list of grid neighbors. note that since a-b-c are connected,
# this will include a,b,c, too.
if 0:
all_nbrs=[n for l in each_nbrs for n in l]
else:
all_nbrs=list(site.abc) # the way it's written, only c will be
# picked up by the loops below.
# HERE - this needs to go back to something similar to the old
# code, where the neighbors to avoid are defined by being connected
# along local edges within the given straight-line distance.
he0=af.grid.nodes_to_halfedge(site.abc[0],site.abc[1])
for incr,node,ref_pnt in [ (lambda x: x.rev(),
lambda x: x.node_rev(),
apnt), # walk along b->a
(lambda x: x.fwd(),
lambda x: x.node_fwd(),
cpnt)]: # walk along b->c
trav=incr(he0)
while trav!=he0: # in case of small loops
ntrav=node(trav)
# some decision here about whether to calculate straight line
# distance from a or b, and whether the threshold is
# local_length or some factor thereof
straight_dist=utils.dist(af.grid.nodes['x'][ntrav] - ref_pnt)
if straight_dist > 1.0*site.local_length:
break
all_nbrs.append(ntrav)
trav=incr(trav)
for n,dt_nbrs in zip(site.abc,each_dt_nbrs):
# DBG: maybe only DT neighbors of 'b' can be considered?
# when considering 'a' and 'c', too many possibilities
# of extraneous connections, which in the past were ruled
# out based on looking only at 'b', and by more explicitly
# enumerating local connections
if n!=site.abc[1]:
continue # TESTING
# most of those we are already connected to, weed them out.
good_nbrs=[nbr
for nbr in dt_nbrs
if nbr not in all_nbrs]
if not good_nbrs:
continue
dists=[utils.dist(af.grid.nodes['x'][n] - af.grid.nodes['x'][nbr])
for nbr in good_nbrs]
idx=np.argmin(dists)
if dists[idx]<best_dist:
best_dist=dists[idx]
best_pair=(n,good_nbrs[idx])
# is the best nonlocal node connection good enough?
# not worrying about angles, just proximity
return best_pair[0],best_pair[1],best_dist
def metric(self,site):
# something high if it's bad.
# 0.0 if it looks good
site_node,nonlocal_node,dist = self.nonlocal_pair(site)
scale=site.local_length
if site_node is not None:
            # score it such that if the nonlocal connection is
            # less than or equal to the target scale away, then
            # it gets the best (lowest) score, increasing linearly
            # the longer the connection is beyond that.
# This may reach too far in some cases, and will need to be
# scaled or have a nonlinear term.
return max(0.0, (dist - scale)/scale)
else:
return np.inf
def execute(self,site):
# as much as it would be nice to blindly execute these
# things, the current state of the cost functions means
# that a really bad nonlocal may not show up in the cost
# function, and that means that best_child() will get tricked
# So until there is a better cost function, this needs to
# be more careful about which edges it will attempt
if self.metric(site) > 0.75:
raise StrategyFailed("NonLocal: too far away")
site_node,nonlocal_node,dist = self.nonlocal_pair(site)
if site_node is None:
raise StrategyFailed()
grid=site.grid
j=grid.add_edge(nodes=[site_node,nonlocal_node],
cells=[grid.UNMESHED,grid.UNMESHED])
return {'nodes': [],
'cells': [],
'edges': [j] }
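# Illustration only: the sector test used in NonLocalStrategy.nonlocal_pair()
# above. A Delaunay neighbor is kept only when it falls angularly between the
# rays b->a and b->c; the points below are made up for the example.
def _demo_sector_filter():
    apnt = np.array([1.0, 1.0])
    bpnt = np.array([0.0, 0.0])
    cpnt = np.array([1.0, -1.0])
    candidates = np.array([[2.0, 0.0],    # inside the a-b-c sector
                           [-1.0, 0.0]])  # behind b, outside the sector
    ba_angle = np.arctan2(apnt[1] - bpnt[1], apnt[0] - bpnt[0])
    bc_angle = np.arctan2(cpnt[1] - bpnt[1], cpnt[0] - bpnt[0])
    diffs = candidates - bpnt
    angles = np.arctan2(diffs[:, 1], diffs[:, 0])
    angle_sum = (angles - bc_angle) % (2 * np.pi) + (ba_angle - angles) % (2 * np.pi)
    return angle_sum < 2 * np.pi  # -> array([ True, False])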
Wall=WallStrategy()
Cutoff=CutoffStrategy()
Join=JoinStrategy()
Bisect=BisectStrategy()
NonLocal=NonLocalStrategy()
Resample=ResampleStrategy()
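# Illustration only: AdvancingFront.advance_at_site() (below) scores every
# candidate strategy with metric() and tries them best-first. With made-up
# metric values, the ordering works like this:
def _demo_strategy_ranking():
    metrics = [2.1, 0.3, np.inf, 1.0]   # smaller is better, np.inf = not allowed
    order = np.argsort(metrics)         # -> array([1, 3, 0, 2])
    return order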
class Site(object):
"""
represents a potential location for advancing the front.
"""
def __init__(self):
pass
def metric(self):
""" Smaller number means more likely to be chosen.
"""
assert False
def actions(self):
return []
class FrontSite(object):
resample_status=None
def metric(self):
assert False
def plot(self,ax=None):
assert False
def actions(self):
assert False
class TriangleSite(FrontSite):
"""
When adding triangles, the heuristic is to choose
tight locations.
"""
def __init__(self,af,nodes):
self.af=af
self.grid=af.grid
assert len(nodes)==3
self.abc = nodes
def metric(self):
return self.internal_angle
def points(self):
return self.grid.nodes['x'][ self.abc ]
@property
def internal_angle(self):
A,B,C = self.points()
return internal_angle(A,B,C)
@property
def edge_length(self):
return utils.dist( np.diff(self.points(),axis=0) ).mean()
@property
def local_length(self):
scale = self.af.scale
return scale( self.points().mean(axis=0) )
def plot(self,ax=None):
ax=ax or plt.gca()
points=self.grid.nodes['x'][self.abc]
return ax.plot( points[:,0],points[:,1],'r-o' )[0]
def actions(self):
theta=self.internal_angle
return [Wall,Cutoff,Join,Bisect,NonLocal,Resample]
def resample_neighbors(self):
""" may update site! used to be part of AdvancingFront, but
probably better here, as part of the site.
"""
a,b,c = self.abc
local_length = self.af.scale( self.points().mean(axis=0) )
grid=self.af.grid
self.resample_status=True
if self.grid.nodes['fixed'][b] == self.af.HINT:
self.grid.modify_node(b,fixed=self.af.SLIDE)
for n,direction in [ (a,-1),
(c,1) ]:
# used to check for SLIDE and degree
# not sure whether we should let SLIDE through...
# probably want to relax this to allow for subdividing
# long edges if the edge itself is not RIGID. But
# we still avoid FREE nodes, since they are not on the boundary
# and cannot be resampled
if grid.nodes['fixed'][n] in [self.af.HINT,self.af.SLIDE,self.af.RIGID]:
try:
n_res=self.af.resample(n=n,anchor=b,scale=local_length,direction=direction)
except Curve.CurveException as exc:
self.resample_status=False
n_res=n
if n!=n_res:
log.info("resample_neighbors changed a node")
if n==a:
self.abc[0]=n_res
else:
self.abc[2]=n_res
n=n_res # so that modify_node below operates on the right one.
# is this the right time to change the fixed status?
if grid.nodes['fixed'][n] == self.af.HINT:
grid.modify_node(n,fixed=self.af.SLIDE)
return self.resample_status
# without a richer way of specifying the scales, have to start
# with marked edges
class QuadCutoffStrategy(Strategy):
def metric(self,site):
# how to get scale here?
# FIX
return 1.0 # ?
def execute(self,site):
"""
Apply this strategy to the given Site.
Returns a dict with nodes,cells which were modified
"""
nodes=[site.abcd[0],site.abcd[3]]
j=site.grid.nodes_to_edge(nodes)
if j is None: # typ. case
# Set cells to unmeshed, and one will be overwritten by add_cell.
j=site.grid.add_edge(nodes=nodes,
para=site.grid.edges['para'][site.js[1]],
cells=[site.grid.UNMESHED,site.grid.UNMESHED])
else:
log.info("Cutoff found edge %d already exists"%j)
cnew=site.grid.add_cell(nodes=site.abcd)
return {'edges': [j],
'cells': [cnew] }
QuadCutoff=QuadCutoffStrategy()
class QuadSite(FrontSite):
def __init__(self,af,nodes):
self.af=af
self.grid=af.grid
assert len(nodes)==4
self.abcd = nodes
self.js=[ self.grid.nodes_to_edge(nodes[:2]),
self.grid.nodes_to_edge(nodes[1:3]),
self.grid.nodes_to_edge(nodes[2:])]
def metric(self):
return 1.0 # ?
def points(self):
return self.grid.nodes['x'][ self.abcd ]
# def internal_angle(self): ...
# def edge_length(self): ...
# def local_length(self): ...
def plot(self,ax=None):
ax=ax or plt.gca()
points=self.grid.nodes['x'][self.abcd]
return ax.plot( points[:,0],points[:,1],'r-o' )[0]
def actions(self):
return [QuadCutoff] # ,FloatLeft,FloatRight,FloatBoth,NonLocal?]
def resample_neighbors(self):
""" may update site!
if resampling failed, returns False. It's possible that some
nodes have been updated, but no guarantee that they are as far
away as requested.
        this is where HINT nodes which are part of the site are set to SLIDE nodes.
"""
a,b,c,d = self.abcd
print("call to QuadSite: resample_neighbors, %d %d %d %d"%(a,b,c,d))
# could extend to something more dynamic, like triangle does
local_para=self.af.para_scale
local_perp=self.af.perp_scale
g=self.af.grid
if g.edges['para'][self.js[1]] == self.af.PARA:
scale=local_perp
else:
scale=local_para
for n in [b,c]:
if self.grid.nodes['fixed'][n] == self.af.HINT:
self.grid.modify_node(n,fixed=self.af.SLIDE)
self.resample_status=True
for n,anchor,direction in [ (a,b,-1),
(d,c,1) ]:
# this used to check SLIDE and degree
# not sure if we should let SLIDE through now...
if self.grid.nodes['fixed'][n] in [self.af.HINT,self.af.SLIDE]:
try:
n_res=self.af.resample(n=n,anchor=anchor,scale=scale,direction=direction)
except Curve.CurveException as exc:
log.warning("Unable to resample neighbors")
self.resample_status=False
continue
# is this the right time to change the fixed status?
if self.grid.nodes['fixed'][n_res] == self.af.HINT:
self.grid.modify_node(n_res,fixed=self.af.SLIDE)
if n!=n_res:
log.info("resample_neighbors changed a node")
if n==a:
self.abcd[0]=n_res
else:
self.abcd[3]=n_res
return self.resample_status
class AdvancingFront(object):
"""
Implementation of advancing front
"""
grid=None
cdt=None
# 'fixed' flags:
    # in order of increasing degrees of freedom in node location.
    # 0 is reserved for UNSET so that uninitialized values are easy to detect.
UNSET=0
RIGID=1 # should not be moved at all
SLIDE=2 # able to slide along a ring.
FREE=3 # not constrained
HINT=4 # slidable and can be removed.
StrategyFailed=StrategyFailed
def __init__(self,grid=None):
"""
"""
self.log = logging.getLogger("AdvancingFront")
if grid is None:
grid=unstructured_grid.UnstructuredGrid()
self.grid = self.instrument_grid(grid)
self.curves=[]
def add_curve(self,curve=None,interior=None,nodes=None,closed=True):
"""
Add a Curve, upon which nodes can be slid.
curve: [N,2] array of point locations, or a Curve instance.
interior: true to force this curve to be an island.
nodes: use existing nodes, given by the indices here.
Any node which is already part of another ring will be set to RIGID,
but will retain its original oring.
The nodes must have existing edges connecting them, and those edges
will be assigned to this ring via edges['oring'] and ['ring_sign']
"""
if nodes is not None:
curve=self.grid.nodes['x'][nodes]
if not isinstance(curve,Curve):
if interior is not None:
ccw=not interior
else:
ccw=None
curve=Curve(curve,ccw=ccw,closed=closed)
elif interior is not None:
assert curve.closed
a=curve.signed_area()
if a>0 and interior:
curve=curve.reverse()
self.curves.append( curve )
oring=len(self.curves) # 1-based
if nodes is not None:
# Update nodes to be on this curve:
on_a_ring=self.grid.nodes['oring'][nodes]>0
self.grid.nodes['oring'][nodes[~on_a_ring]]=oring
# curve.distances has an extra entry when a closed loop
self.grid.nodes['ring_f'][nodes[~on_a_ring]]=curve.distances[:len(nodes)][~on_a_ring]
self.grid.nodes['fixed'][nodes[~on_a_ring]]=self.HINT
self.grid.nodes['fixed'][nodes[on_a_ring]]=self.RIGID
# And update the edges, too:
if closed:
pairs=utils.circular_pairs(nodes)
else:
pairs=zip(nodes[:-1],nodes[1:])
for a,b in pairs:
j=self.grid.nodes_to_edge([a,b])
self.grid.edges['oring'][j]=oring
if self.grid.edges['nodes'][j,0]==a:
self.grid.edges['ring_sign'][j]=1
elif self.grid.edges['nodes'][j,0]==b: # little sanity check
self.grid.edges['ring_sign'][j]=-1
else:
assert False,"Failed invariant"
return oring-1
def instrument_grid(self,g):
"""
Add fields to the given grid to support advancing front
algorithm. Modifies grid in place, and returns it.
Also creates a Triangulation which follows modifications to
the grid, keeping a constrained Delaunay triangulation around.
"""
        # oring is stored 1-based, so that the default value of 0
        # indicates no data / missing.
g.add_node_field('oring',np.zeros(g.Nnodes(),'i4'),on_exists='pass')
g.add_node_field('fixed',np.zeros(g.Nnodes(),'i1'),on_exists='pass')
g.add_node_field('ring_f',-1*np.ones(g.Nnodes(),'f8'),on_exists='pass')
# track a fixed field on edges, too, as it is not always sufficient
# to tag nodes as fixed, since a long edge between two fixed nodes may
# or may not be subdividable. Note that for edges, we are talking about
# topology, not the locations, since locations are part of nodes.
# for starters, support RIGID (cannot subdivide) and 0, meaning no
# additional information beyond existing node and topological constraints.
g.add_edge_field('fixed',np.zeros(g.Nedges(),'i1'),on_exists='pass')
# if nonzero, which curve this edge follows
g.add_edge_field('oring',np.zeros(g.Nedges(),'i4'),on_exists='pass')
# if oring nonzero, then +1 if n1=>n2 is forward on the curve, -1
# otherwise
g.add_edge_field('ring_sign',np.zeros(g.Nedges(),'i1'),on_exists='pass')
# Subscribe to operations *before* they happen, so that the constrained
# DT can signal that an invariant would be broken
self.cdt=self.shadow_cdt_factory(g)
return g
def shadow_cdt_factory(self,g):
try:
klass=shadow_cdt.ShadowCGALCDT
except AttributeError:
klass=shadow_cdt.ShadowCDT
return klass(g)
def initialize_boundaries(self):
for curve_i,curve in enumerate(self.curves):
curve_points,srcs=curve.upsample(self.scale,return_sources=True)
# add the nodes in:
# used to initialize as SLIDE
nodes=[self.grid.add_node(x=curve_points[j],
oring=curve_i+1,
ring_f=srcs[j],
fixed=self.HINT)
for j in range(len(curve_points))]
if curve.closed:
Ne=len(curve_points)
else:
Ne=len(curve_points) - 1
pairs=zip( np.arange(Ne),
(np.arange(Ne)+1)%Ne)
for na,nb in pairs:
self.grid.add_edge( nodes=[nodes[na],nodes[nb]],
cells=[self.grid.UNMESHED,
self.grid.UNDEFINED],
oring=curve_i+1,
ring_sign=1 )
def enumerate_sites(self):
raise Exception("Implement in subclass")
def choose_site(self):
sites=self.enumerate_sites()
if len(sites):
scores=[ site.metric()
for site in sites ]
best=np.argmin( scores )
return sites[best]
else:
return None
def free_span(self,he,max_span,direction):
"""
returns the distance, and the nodes making up the
span, starting from anchor (the rev node of he),
and going until either max_span distance is found,
it wraps around, or encounters a non-SLIDE-able node.
the reason this works with halfedges is that we only
move along nodes which are simply connected (degree 2)
TODO: this reports along edge distances, but it's
used (exclusively?) in walking along boundaries which
might be resampled. It would be better to look at
the distance in discrete jumps.
"""
span=0.0
if direction==1:
trav0=he.node_fwd()
anchor=he.node_rev()
else:
trav0=he.node_rev()
anchor=he.node_fwd()
last=anchor
trav=trav0
nodes=[last] # anchor is included
def pred(n):
# N.B. possible for trav0 to be SLIDE
degree=self.grid.node_degree(n)
return ( (n==trav0) or (self.grid.nodes['fixed'][n]==self.HINT)) and (degree==2)
while pred(trav) and (trav != anchor) and (span<max_span):
span += utils.dist( self.grid.nodes['x'][last] -
self.grid.nodes['x'][trav] )
nodes.append(trav)
if direction==1:
he=he.fwd()
last,trav = trav,he.node_fwd()
elif direction==-1:
he=he.rev()
last,trav = trav,he.node_rev()
else:
assert False
# could use some loop retrofitting..
span += utils.dist( self.grid.nodes['x'][last] -
self.grid.nodes['x'][trav] )
nodes.append(trav)
return span,nodes
max_span_factor=4
def resample(self,n,anchor,scale,direction):
"""
move/replace n, such that from anchor to n/new_n the edge
length is close to scale.
If n has more than 2 neighbors, does nothing and returns n as is.
Used to assume that n was SLIDE or HINT. Now checks for either
nodes['fixed'][n] in (SLIDE,HINT), or that the edge can be subdivided.
normally, a SLIDE node cannot be deleted. in some cases resample will
create a new node for n, and it will be a SLIDE node. in that case, should
n retain SLIDE, too? is it the responsibility of resample(), or the caller?
can we at least guarantee that no other nodes need to be changing status?
in the past, new nodes created here were given fixed=SLIDE. This is
probably better set to HINT, as the SLIDE nodes can get in the way if
they aren't used immediately for a cell.
Returns the resampled node index -- often same as n, but may be a different
node.
"""
#self.log.debug("resample %d to be %g away from %d in the %s direction"%(n,scale,anchor,
# direction) )
        if direction==1: # halfedge runs from anchor to n
he=self.grid.nodes_to_halfedge(anchor,n)
elif direction==-1:
he=self.grid.nodes_to_halfedge(n,anchor)
else:
assert False
n_deg=self.grid.node_degree(n)
# must be able to either muck with n, or split the anchor-n edge
# in the past we assumed that this sort of check was already done
j=he.j
edge_resamplable=( (self.grid.edges['fixed'][he.j]!=self.RIGID)
and (self.grid.edges['cells'][j,0]<0)
and (self.grid.edges['cells'][j,1]<0) )
# node_resamplable=(n_deg==2) and (self.grid.nodes['fixed'][n] in [self.HINT,self.SLIDE])
# it's possible to have a node that, based on the above test, is resamplable,
# but the edge is not (because the edge test includes the possibility of
# a cell on the opposite side).
#if not (node_resamplable or edge_resamplable):
if not edge_resamplable:
self.log.debug("Edge and node are RIGID/deg!=2, no resampling possible")
return n
span_length,span_nodes = self.free_span(he,self.max_span_factor*scale,direction)
# anchor-n distance should be in there, already.
# self.log.debug("free span from the anchor is %g"%span_length)
if span_length < self.max_span_factor*scale:
n_segments = max(1,round(span_length / scale))
target_span = span_length / n_segments
else:
target_span=scale
n_segments = None
def handle_one_segment():
# this is a function because there are two times
# (one proactive, one reactive) it might get used below.
# in tight situations, need to make sure
# that for a site a--b--c we're not trying
# move c all the way on top of a.
# it is not sufficient to just force two
# segments, as that just pushes the issue into
# the next iteration, but in an even worse state.
if direction==-1:
he_other=he.fwd()
opposite_node=he_other.node_fwd()
else:
he_other=he.rev()
opposite_node=he_other.node_rev()
if opposite_node==span_nodes[-1]:
# self.log.info("n_segment=1, but that would be an implicit join")
# rather than force two segments, force it
# to remove all but the last edge.
del span_nodes[-1]
# self.log.debug("Only space for 1 segment")
for d in span_nodes[1:-1]:
cp=self.grid.checkpoint()
try:
self.grid.merge_edges(node=d)
except self.cdt.IntersectingConstraints as exc:
self.log.info("handle_one_segment: cut short by exception")
self.grid.revert(cp)
# only got that far..
return d
return span_nodes[-1]
if n_segments==1:
return handle_one_segment()
# first, find a point on the original ring which satisfies the target_span
anchor_oring=self.grid.nodes['oring'][anchor]-1
n_oring=self.grid.nodes['oring'][n]-1
oring=self.grid.edges['oring'][j]-1
# Default, may be overwritten below
anchor_f = self.grid.nodes['ring_f'][anchor]
n_f = self.grid.nodes['ring_f'][n]
if anchor_oring != oring:
self.log.warning('resample: anchor on different rings. Cautiously resample')
if n_oring==oring:
f_start=n_f # can use n to speed up point_to_f
else:
f_start=0.0 # not valid, so full search in point_to_f
anchor_f = self.curves[oring].point_to_f(self.grid.nodes['x'][anchor],
n_f,
direction=0)
if n_oring != oring:
# anchor_f is valid regardless of its original oring
n_f = self.curves[oring].point_to_f(self.grid.nodes['x'][n],
anchor_f,
direction=0)
# Easing into use of explicit edge orings
assert oring==self.grid.edges['oring'][j]-1
curve = self.curves[oring]
# at any point might encounter a node from a different ring, but want
# to know it's ring_f for this ring.
def node_f(m):
# first two cases are partially to be sure that equality comparisons will
# work.
if m==n:
return n_f
elif m==anchor:
return anchor_f
elif self.grid.nodes['oring'][m]==oring+1:
return self.grid.nodes['ring_f'][m]
else:
return curve.point_to_f(self.grid.nodes['x'][m],
n_f,direction=0)
if 0: # delete this once the new stanza below is trusted
# explicitly record whether the curve has the opposite orientation
# of the edge. Hoping to retire this way.
# This is actually dangerous, as the mid_point does not generally
# fall on the line, and so we have to give it a generous rel_tol.
mid_point = 0.5*(self.grid.nodes['x'][n] + self.grid.nodes['x'][anchor])
mid_f=self.curves[oring].point_to_f(mid_point)
if curve.is_forward(anchor_f,mid_f,n_f):
curve_direction=1
else:
curve_direction=-1
if 1: # "new" way
# logic is confusing
edge_ring_sign=self.grid.edges['ring_sign'][he.j]
curve_direction=(1-2*he.orient)*direction*edge_ring_sign
#assert new_curve_direction==curve_direction
assert edge_ring_sign!=0,"Edge %d has sign %d, should be set"%(he.j,edge_ring_sign)
# a curve forward that bakes in curve_direction
if curve_direction==1:
rel_curve_fwd=lambda a,b,c: curve.is_forward(a,b,c)
else:
rel_curve_fwd=lambda a,b,c: curve.is_reverse(a,b,c)
try:
new_f,new_x = curve.distance_away(anchor_f,curve_direction*target_span)
except Curve.CurveException as exc:
raise
# it's possible that even though the free_span distance yielded
# n_segments>1, distance_away() went too far since it cuts out some
# curvature in the along-curve distance.
# this leads to a liability that new_f is beyond span_nodes[-1], and
# we should follow the same treatment as above for n_segments==1
end_span_f=node_f(span_nodes[-1])
# 2018-02-13: hoping this also changes to curve_direction
if ( rel_curve_fwd(anchor_f,end_span_f,new_f)
and end_span_f!=anchor_f):
self.log.warning("n_segments=%s, but distance_away blew past it"%n_segments)
return handle_one_segment()
# check to see if there are other nodes in the way, and remove them.
# in the past, this started with the node after n, deleting things up
# to, and *including* a node at the location where we want n to be.
# in simple cases, it would be better to delete n, and only move the
# last node. But there is a chance that n cannot be deleted, more likely
# that n cannot be deleted than later nodes. However... free_span
# would not allow those edges, so we can assume anything goes here.
eps=0.001*target_span
nodes_to_delete=[]
trav=he
while True:
# start with the half-edge from anchor to n
# want to loop until trav.node_fwd() (for direction=1)
# is at or beyond our target, and all nodes from n
# until trav.node_rev() are in the list nodes_to_delete.
if direction==1:
n_trav=trav.node_fwd()
else:
n_trav=trav.node_rev()
f_trav=node_f(n_trav)
            # EPS needs some TLC here. The corner cases have not been
            # sufficiently taken care of, i.e. new_f==f_trav, etc.
if rel_curve_fwd(anchor_f, new_f+curve_direction*eps, f_trav ):
break
# that half-edge wasn't far enough
nodes_to_delete.append(n_trav)
if direction==1:
trav=trav.fwd()
else:
trav=trav.rev()
# sanity check.
if trav==he:
self.log.error("Made it all the way around!")
raise Exception("This is probably bad")
# either n was already far enough, in which case we should split
# this edge, or there are some nodes in nodes_to_delete.
# the last of those nodes will be saved, and become the new n
if len(nodes_to_delete):
nnew=nodes_to_delete.pop()
# slide, because it needs to move farther out
method='slide'
else:
# because n is already too far
method='split'
nnew=n
# Maybe better to fix the new node with any sliding necessary,
# and then delete these, but that would require more checks to
# see if it's safe to reposition the node?
for d in nodes_to_delete:
cp=self.grid.checkpoint()
try:
self.grid.merge_edges(node=d)
except self.cdt.IntersectingConstraints as exc:
self.log.info("resample: had to stop short due to intersection")
self.grid.revert(cp)
return d
# on the other hand, it may be that the next node is too far away, and it
# would be better to divide the edge than to shift a node from far away.
# also possible that our neighbor was RIGID and can't be shifted
cp=self.grid.checkpoint()
try:
if method=='slide':
self.grid.modify_node(nnew,x=new_x,ring_f=new_f)
assert self.grid.nodes['oring'][nnew]==oring+1
else: # 'split'
j=self.grid.nodes_to_edge([anchor,nnew])
# get a newer nnew
# This used to set fixed=SLIDE, but since there is no additional
# topology attached to nnew, it probably makes more sense for it
# to be HINT. changed 2018-02-26
jnew,nnew = self.grid.split_edge(j,x=new_x,ring_f=new_f,oring=oring+1,
fixed=self.HINT)
except self.cdt.IntersectingConstraints as exc:
self.log.info("resample - slide() failed. will return node at original loc")
self.grid.revert(cp)
return nnew
def resample_neighbors(self,site):
return site.resample_neighbors()
def cost_function(self,n):
raise Exception("Implement in subclass")
def eval_cost(self,n):
fn=self.cost_function(n)
return (fn and fn(self.grid.nodes['x'][n]))
def optimize_nodes(self,nodes,max_levels=3,cost_thresh=2):
"""
iterate over the given set of nodes, optimizing each location,
and possibly expanding the set of nodes in order to optimize
a larger area.
"""
max_cost=0
for level in range(max_levels):
for n in nodes:
# relax_node can return 0 if there was no cost
# function to optimize
max_cost=max(max_cost,self.relax_node(n) or 0.0)
if max_cost <= cost_thresh:
break
if level==0:
# just try re-optimizing once
pass
else:
# pass
# expand list of nodes one level
new_nodes=set(nodes)
for n in nodes:
new_nodes.update(self.grid.node_to_nodes(n))
nodes=list(new_nodes)
def optimize_edits(self,edits,**kw):
"""
Given a set of elements (which presumably have been modified
and need tuning), jostle nodes around to improve the cost function
Returns an updated edits with any additional changes. No promise
that it's the same object or a copy.
"""
if 'nodes' not in edits:
edits['nodes']=[]
nodes = list(edits.get('nodes',[]))
for c in edits.get('cells',[]):
for n in self.grid.cell_to_nodes(c):
if n not in nodes:
nodes.append(n)
def track_node_edits(g,func_name,n,**k):
if n not in edits['nodes']:
edits['nodes'].append(n)
self.grid.subscribe_after('modify_node',track_node_edits)
self.optimize_nodes(nodes,**kw)
self.grid.unsubscribe_after('modify_node',track_node_edits)
return edits
def relax_node(self,n):
""" Move node n, subject to its constraints, to minimize
the cost function. Return the final value of the cost function
"""
# self.log.debug("Relaxing node %d"%n)
if self.grid.nodes['fixed'][n] == self.FREE:
return self.relax_free_node(n)
elif self.grid.nodes['fixed'][n] == self.SLIDE:
return self.relax_slide_node(n)
else:
# Changed to silent pass because ResampleStrategy currently
# tells the truth about nodes it moves, even though they
# are HINT nodes.
# raise Exception("relax_node with fixed=%s"%self.grid.nodes['fixed'][n])
return 0.0
def relax_free_node(self,n):
cost=self.cost_function(n)
if cost is None:
return None
x0=self.grid.nodes['x'][n]
local_length=self.scale( x0 )
new_x = opt.fmin(cost,
x0,
xtol=local_length*1e-4,
disp=0)
dx=utils.dist( new_x - x0 )
# self.log.debug('Relaxation moved node %f'%dx)
cp=self.grid.checkpoint()
try:
if dx !=0.0:
self.grid.modify_node(n,x=new_x)
return cost(new_x)
except self.cdt.IntersectingConstraints as exc:
self.grid.revert(cp)
self.log.info("Relaxation caused intersection, reverting")
return cost(x0)
def relax_slide_node(self,n):
cost_free=self.cost_function(n)
if cost_free is None:
return
x0=self.grid.nodes['x'][n]
f0=self.grid.nodes['ring_f'][n]
ring=self.grid.nodes['oring'][n]-1
assert np.isfinite(f0)
assert ring>=0
local_length=self.scale( x0 )
slide_limits=self.find_slide_limits(n,3*local_length)
# used to just be f, but I think it's more appropriate to
# be f[0]
def cost_slide(f):
# lazy bounded optimization
f=f[0]
fclip=np.clip(f,*slide_limits)
err=(f-fclip)**2
return err+cost_free( self.curves[ring](fclip) )
new_f = opt.fmin(cost_slide,
[f0],
xtol=local_length*1e-4,
disp=0)
if not self.curves[ring].is_forward(slide_limits[0],
new_f,
slide_limits[1]):
# Would be better to just optimize within bounds.
# still, can check the two bounds, and if the
# cost is lower, return one of them.
self.log.warning("Slide went outside limits")
base_cost=cost_free(x0)
slide_length=(slide_limits[1] - slide_limits[0])
lower_f=0.95*slide_limits[0]+0.05*slide_limits[1]
upper_f=0.05*slide_limits[0]+0.95*slide_limits[1]
lower_cost=cost_slide([lower_f])
upper_cost=cost_slide([upper_f])
if lower_cost<upper_cost and lower_cost<base_cost:
self.log.warning("Truncate slide on lower end")
new_f=[lower_f]
elif upper_cost<base_cost:
new_f=[upper_f]
self.log.warning("Truncate slide on upper end")
else:
self.log.warning("Couldn't truncate slide.")
return base_cost
cp=self.grid.checkpoint()
try:
if new_f[0]!=f0:
self.slide_node(n,new_f[0]-f0)
return cost_slide(new_f)
except self.cdt.IntersectingConstraints as exc:
self.grid.revert(cp)
self.log.info("Relaxation caused intersection, reverting")
return cost_free(x0)
def node_ring_f(self,n,ring0):
"""
return effective ring_f for node n in terms of ring0.
if that's the native ring for n, just return ring_f,
otherwise calculates where n would fall on ring0
"""
if self.grid.nodes['oring'][n]-1==ring0:
return self.grid.nodes['ring_f'][n]
else:
return self.curves[ring0].point_to_f(self.grid.nodes['x'][n])
def find_slide_limits(self,n,cutoff=None):
""" Returns the range of allowable ring_f for n.
limits are exclusive
cutoff: a distance along the curve beyond which we don't
care. note that this is not as the crow flies, but tracing
the segments. So a point which is cutoff away may be much
closer as the crow flies.
"""
n_ring=self.grid.nodes['oring'][n]-1
n_f=self.grid.nodes['ring_f'][n]
curve=self.curves[n_ring]
L=curve.total_distance()
        # find our two neighbors on the ring:
nbrs=[]
for nbr in self.grid.node_to_nodes(n):
j=self.grid.nodes_to_edge([n,nbr])
j_ring=self.grid.edges['oring'][j]
if j_ring==0:
continue
assert j_ring-1==n_ring
# The test below is not robust with intersecting curves,
# and is why edges have to track their own ring.
#if self.grid.nodes['oring'][nbr]-1!=n_ring:
# continue
nbrs.append(nbr)
# With the above check on edge oring, this should not be necessary.
# if len(nbrs)>2:
# # annoying, but happens. one or more edges are internal,
# # and two are along the curve.
# nbrs.append(n)
# # sort them along the ring - HERE this logic is likely not robust for open curves
# all_f=(self.grid.nodes['ring_f'][nbrs]-n_f) % L
# order=np.argsort(all_f)
# nbrs=[ nbrs[order[-1]], nbrs[order[1]] ]
assert len(nbrs)==2
if curve.is_forward(self.node_ring_f(nbrs[0],n_ring),
n_f,
self.node_ring_f(nbrs[1],n_ring) ):
pass # already in nice order
else:
nbrs=[nbrs[1],nbrs[0]]
# Backward then forward
stops=[]
for sgn,nbr in zip( [-1,1], nbrs ):
trav=[n,nbr]
while 1:
# beyond cutoff?
if ( (cutoff is not None) and
(sgn*(self.node_ring_f(trav[1],n_ring) - n_f) )%L > cutoff ):
break
# is trav[1] something which limits the sliding of n?
trav_nbrs=self.grid.node_to_nodes(trav[1])
# if len(trav_nbrs)>2:
# break
# if self.grid.nodes['fixed'][trav[1]] != self.SLIDE:
# break
# the transition to HINT
if self.grid.nodes['fixed'][trav[1]] != self.HINT:
break
for nxt in trav_nbrs:
if nxt not in trav:
break
# before updating, check to see if this edge has
# a cell on it. If it does, then even if the node is degree
# 2, we can't slide through it.
j=self.grid.nodes_to_edge( [trav[1],nxt] )
j_c=self.grid.edges['cells'][j]
if j_c[0]>=0 or j_c[1]>=0:
# adjacent cells, can't slide through here.
break
trav=[trav[1],nxt]
stops.append(trav[1])
limits=[self.node_ring_f(m,n_ring)
for m in stops]
# make sure limits are monotonic increasing. for circular,
# this may require bumping up
if curve.closed and (limits[0]>limits[1]):
limits[1] += curve.total_distance()
assert limits[0] < limits[1]
return limits
def find_slide_conflicts(self,n,delta_f):
""" Find nodes in the way of sliding node n
to a new ring_f=old_oring_f + delta_f.
N.B. this does not appear to catch situations
where n falls exactly on an existing node, though
it should (i.e. it's a bug)
"""
n_ring=self.grid.nodes['oring'][n]-1
n_f=self.grid.nodes['ring_f'][n]
new_f=n_f + delta_f
curve=self.curves[n_ring]
# Want to find edges in the direction of travel
        # it's a little funny to use half-edges, since what we
        # really care about is which way the edge is facing
# would like to use half-edges here, but it's not entirely
# well-defined, so rather than introduce some future pitfalls,
# do things a bit more manually.
to_delete=[]
for nbr in self.grid.node_to_nodes(n):
if self.grid.nodes['oring'][nbr]-1!=n_ring:
continue
nbr_f=self.node_ring_f(nbr,n_ring)
if self.grid.node_degree(nbr)!=2:
continue
if delta_f>0:
# either the nbr is outside our slide area, or could
# be in the opposite direction along the ring
if not curve.is_forward(n_f,nbr_f,n_f+delta_f):
continue
to_delete.append(nbr)
he=self.grid.nodes_to_halfedge(n,nbr)
while 1:
he=he.fwd()
nbr=he.node_fwd()
nbr_f=self.node_ring_f(nbr,n_ring)
if curve.is_forward(n_f,n_f+delta_f,nbr_f):
break
to_delete.append(nbr)
break
else:
if not curve.is_reverse(n_f,nbr_f,n_f+delta_f):
continue
to_delete.append(nbr)
he=self.grid.nodes_to_halfedge(nbr,n)
while 1:
he=he.rev()
nbr=he.node_rev()
nbr_f=self.node_ring_f(nbr,n_ring)
if curve.is_reverse(n_f,n_f+delta_f,nbr_f):
break
to_delete.append(nbr)
break
# sanity checks:
for nbr in to_delete:
assert n_ring==self.grid.nodes['oring'][nbr]-1
# OLD COMMENT:
# For now, depart a bit from paver, and rather than
# having HINT nodes, HINT and SLIDE are both fixed=SLIDE,
# but differentiate based on node degree.
# NEW COMMENT:
# actually, that was a bad idea. better to stick with
# how it was in paver
assert self.grid.nodes['fixed'][nbr]==self.HINT # SLIDE
assert self.grid.node_degree(nbr)==2
return to_delete
def slide_node(self,n,delta_f):
conflicts=self.find_slide_conflicts(n,delta_f)
for nbr in conflicts:
self.grid.merge_edges(node=nbr)
n_ring=self.grid.nodes['oring'][n]-1
n_f=self.grid.nodes['ring_f'][n]
new_f=n_f + delta_f
curve=self.curves[n_ring]
self.grid.modify_node(n,x=curve(new_f),ring_f=new_f)
loop_count=0
def loop(self,count=0):
while 1:
site=self.choose_site()
if site is None:
break
if not self.advance_at_site(site):
self.log.error("Failed to advance. Exiting loop early")
return False
count-=1
self.loop_count+=1
if count==0:
break
return True
def advance_at_site(self,site):
# This can modify site! May also fail.
resampled_success = self.resample_neighbors(site)
actions=site.actions()
metrics=[a.metric(site) for a in actions]
bests=np.argsort(metrics)
for best in bests:
try:
cp=self.grid.checkpoint()
self.log.info("Chose strategy %s"%( actions[best] ) )
edits=actions[best].execute(site)
opt_edits=self.optimize_edits(edits)
failures=self.check_edits(opt_edits)
if len(failures['cells'])>0:
self.log.info("Some cells failed")
raise StrategyFailed("Cell geometry violation")
# could commit?
except self.cdt.IntersectingConstraints as exc:
# arguably, this should be caught lower down, and rethrown
# as a StrategyFailed.
self.log.error("Intersecting constraints - rolling back")
self.grid.revert(cp)
continue
except StrategyFailed as exc:
self.log.error("Strategy failed - rolling back")
self.grid.revert(cp)
continue
break
else:
self.log.error("Exhausted the actions!")
return False
return True
def check_edits(self,edits):
return defaultdict(list)
zoom=None
def plot_summary(self,ax=None,
label_nodes=True,
clip=None):
ax=ax or plt.gca()
ax.cla()
for curve in self.curves:
curve.plot(ax=ax,color='0.5',lw=0.4,zorder=-5)
self.grid.plot_edges(ax=ax,clip=clip,lw=1)
if label_nodes:
labeler=lambda ni,nr: str(ni)
else:
labeler=None
self.grid.plot_nodes(ax=ax,labeler=labeler,clip=clip,sizes=10)
ax.axis('equal')
if self.zoom:
ax.axis(self.zoom)
class AdvancingTriangles(AdvancingFront):
"""
Specialization which roughly mimics tom, creating only triangles
"""
scale=None
def __init__(self,grid=None,scale=None):
super(AdvancingTriangles,self).__init__(grid=grid)
if scale is not None:
self.set_edge_scale(scale)
def set_edge_scale(self,scale):
self.scale=scale
def enumerate_sites(self):
sites=[]
# FIX: This doesn't scale!
valid=(self.grid.edges['cells'][:,:]==self.grid.UNMESHED)
J,Orient = np.nonzero(valid)
for j,orient in zip(J,Orient):
if self.grid.edges['deleted'][j]:
continue
he=self.grid.halfedge(j,orient)
he_nxt=he.fwd()
a=he.node_rev()
b=he.node_fwd()
bb=he_nxt.node_rev()
c=he_nxt.node_fwd()
assert b==bb
sites.append( TriangleSite(self,nodes=[a,b,c]) )
return sites
def check_edits(self,edits):
"""
edits: {'nodes':[n1,n2,...],
'cells': ...,
'edges': ... }
Checks for any elements which fail geometric checks, such
as orthogonality.
"""
failures=defaultdict(list)
cells=set( edits.get('cells',[]) )
for n in edits.get('nodes',[]):
cells.update( self.grid.node_to_cells(n) )
for c in list(cells):
pnts=self.grid.nodes['x'][self.grid.cell_to_nodes(c)]
cc=circumcenter_py(pnts[0],pnts[1],pnts[2])
if not self.grid.cell_polygon(c).contains(geometry.Point(cc)):
failures['cells'].append(c)
return failures
cost_method='cc_py'
def cost_function(self,n):
"""
Return a function which takes an x,y pair, and evaluates
a geometric cost function for node n based on the shape and
scale of triangle cells containing n
"""
local_length = self.scale( self.grid.nodes['x'][n] )
my_cells = self.grid.node_to_cells(n)
if len(my_cells) == 0:
return None
cell_nodes = [self.grid.cell_to_nodes(c)
for c in my_cells ]
# for the moment, can only deal with triangles
cell_nodes=np.array(cell_nodes)
# pack our neighbors from the cell list into an edge
# list that respects the CCW condition that pnt must be on the
# left of each segment
for j in range(len(cell_nodes)):
if cell_nodes[j,0] == n:
cell_nodes[j,:2] = cell_nodes[j,1:]
elif cell_nodes[j,1] == n:
cell_nodes[j,1] = cell_nodes[j,0]
cell_nodes[j,0] = cell_nodes[j,2] # otherwise, already set
edges = cell_nodes[:,:2]
edge_points = self.grid.nodes['x'][edges]
def cost(x,edge_points=edge_points,local_length=local_length):
return one_point_cost(x,edge_points,target_length=local_length)
Alist=[ [ e[0],e[1] ]
for e in edge_points[:,0,:] ]
Blist=[ [ e[0],e[1] ]
for e in edge_points[:,1,:] ]
EPS=1e-5*local_length
def cost_cc_and_scale_py(x0):
C=list(x0)
cc_cost=0
scale_cost=0
for A,B in zip(Alist,Blist):
tri_cc=circumcenter_py(A,B,C)
deltaAB=[ tri_cc[0] - A[0],
tri_cc[1] - A[1]]
ABs=[B[0]-A[0],B[1]-A[1]]
magABs=math.sqrt( ABs[0]*ABs[0] + ABs[1]*ABs[1])
vecAB=[ABs[0]/magABs, ABs[1]/magABs]
leftAB=vecAB[0]*deltaAB[1] - vecAB[1]*deltaAB[0]
deltaBC=[tri_cc[0] - B[0],
tri_cc[1] - B[1]]
BCs=[C[0]-B[0], C[1]-B[1]]
magBCs=math.sqrt( BCs[0]*BCs[0] + BCs[1]*BCs[1] )
vecBC=[BCs[0]/magBCs, BCs[1]/magBCs]
leftBC=vecBC[0]*deltaBC[1] - vecBC[1]*deltaBC[0]
deltaCA=[tri_cc[0] - C[0],
tri_cc[1] - C[1]]
CAs=[A[0]-C[0],A[1]-C[1]]
magCAs=math.sqrt(CAs[0]*CAs[0] + CAs[1]*CAs[1])
vecCA=[CAs[0]/magCAs, CAs[1]/magCAs]
leftCA=vecCA[0]*deltaCA[1] - vecCA[1]*deltaCA[0]
cc_fac=-4. # not bad
# cc_fac=-2. # a little nicer shape
# clip to 100, to avoid overflow in math.exp
if 0:
# this can favor isosceles too much
this_cc_cost = ( math.exp(min(100,cc_fac*leftAB/local_length)) +
math.exp(min(100,cc_fac*leftBC/local_length)) +
math.exp(min(100,cc_fac*leftCA/local_length)) )
else:
# maybe?
this_cc_cost = ( math.exp(min(100,cc_fac*leftAB/magABs)) +
math.exp(min(100,cc_fac*leftBC/magBCs)) +
math.exp(min(100,cc_fac*leftCA/magCAs)) )
# mixture
                # 0.3: lets the scale vary too much between the cells
# adjacent to n
alpha=1.0
avg_length=alpha*local_length + (1-alpha)*(magABs+magBCs+magCAs)/3
this_scale_cost=( (magABs-avg_length)**2
+ (magBCs-avg_length)**2
+ (magCAs-avg_length)**2 )
this_scale_cost/=avg_length*avg_length
cc_cost+=this_cc_cost
scale_cost+=this_scale_cost
# With even weighting between these, some edges are pushed long rather than
# having nice angles.
# 3 is a shot in the dark.
# 50 is more effective at avoiding a non-orthogonal cell
return 50*cc_cost+scale_cost
if self.cost_method=='base':
return cost
elif self.cost_method=='cc_py':
return cost_cc_and_scale_py
else:
assert False
####
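# Illustration only: circumcenter_py(), used by check_edits() and the cc_py
# cost above, is defined elsewhere in this module (not shown here). A
# plain-numpy equivalent for a single triangle looks roughly like this, and is
# the point the per-edge 'left of segment' distances are measured from.
def _demo_circumcenter(A=(0.0, 0.0), B=(1.0, 0.0), C=(0.0, 1.0)):
    ax, ay = A
    bx, by = B
    cx, cy = C
    d = 2 * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))
    ux = ((ax**2 + ay**2) * (by - cy) + (bx**2 + by**2) * (cy - ay)
          + (cx**2 + cy**2) * (ay - by)) / d
    uy = ((ax**2 + ay**2) * (cx - bx) + (bx**2 + by**2) * (ax - cx)
          + (cx**2 + cy**2) * (bx - ax)) / d
    return ux, uy  # the defaults give (0.5, 0.5), the hypotenuse midpoint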
def one_point_quad_cost(x,edge_scales,quads,para_scale,perp_scale):
# orthogonality cost:
ortho_cost=0.0
    base_scale=np.sqrt( para_scale**2 + perp_scale**2 )
#
"""
Utility functions related to tensor shapes.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=no-name-in-module, protected-access, no-member, invalid-name
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest
from tensorflow.python.ops import rnn
from tensorflow.python.framework import ops
__all__ = [
"transpose_batch_time",
"get_batch_size",
"get_rank",
"mask_sequences",
"_mask_sequences_tensor",
"_mask_sequences_py",
"flatten"
]
def transpose_batch_time(inputs):
"""Transposes inputs between time-major and batch-major.
Args:
inputs: A Tensor of shape `[batch_size, max_time, ...]` (batch-major)
or `[max_time, batch_size, ...]` (time-major), or a (possibly
nested) tuple of such elements.
Returns:
A Tensor with transposed batch and time dimensions of inputs.
"""
flat_input = nest.flatten(inputs)
flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
# pylint: disable=protected-access
flat_input = [rnn._transpose_batch_time(input_) for input_ in flat_input]
return nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
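# Illustration only: the helper above just swaps the first two axes of each
# (possibly nested) element. For a single batch-major numpy array the
# equivalent permutation is:
def _demo_transpose_batch_time():
    x = np.zeros([32, 50, 8])                 # [batch_size, max_time, features]
    return np.transpose(x, [1, 0, 2]).shape   # -> (50, 32, 8)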
def get_batch_size(tensor):
"""Returns a unit `Tensor` representing the batch size, i.e.,
the size of the 1st dimension of :attr:`tensor`.
"""
return tf.shape(tensor)[0]
def get_rank(tensor):
"""Returns the tensor rank as a python `int`. The input tensor can also be
a python array.
Args:
tensor: A Tensor or python array.
Returns:
A python `int` representing the rank of :attr:`tensor`. Returns
`None` if the rank cannot be determined.
"""
if tf.contrib.framework.is_tensor(tensor):
shape = tensor.shape
try:
rank = len(shape.as_list())
except ValueError: # when `shape==TensorShape(None)`
rank = None
else:
array = np.asarray(tensor)
rank = array.ndim
return rank
def mask_sequences(sequence,
sequence_length,
dtype=None,
time_major=False,
tensor_rank=2):
"""Masks out sequence entries that are beyond the respective sequence
lengths. Masks along the time dimension.
:attr:`sequence` and :attr:`sequence_length` can either be python
arrays or Tensors, respectively. If both are python arrays (or None), the
return will be a python array as well.
:attr:`tensor_rank` is ignored when :attr:`sequence` and
:attr:`sequence_length` are both python arrays (rather than Tensors).
Args:
sequence: A Tensor or python array of sequence values.
If `time_major=False` (default), this must be a Tensor of shape:
`[batch_size, max_time, ...]`.
If `time_major=True`, this must be a Tensor of shape:
`[max_time, batch_size, ...].`
sequence_length: A Tensor or python array of shape `[batch_size]`.
Time steps beyond the respective sequence lengths will be
made zero.
dtype (dtype): Type of :attr:`sequence`. If `None`, infer from
:attr:`sequence` automatically.
time_major (bool): The shape format of the inputs. If `True`,
:attr:`sequence` must have shape
`[max_time, batch_size, ...]`.
If `False` (default), :attr:`sequence` must have
shape `[batch_size, max_time, ...]`.
tensor_rank (int): The number of dimensions of :attr:`sequence`.
Default is 2, i.e., :attr:`sequence` is a 2D Tensor consisting
of batch and time dimensions. Ignored if both :attr:`sequence`
and :attr:`sequence_length` are python arrays.
Returns:
The masked sequence, i.e., a Tensor or python array of the same shape
as :attr:`sequence` but with masked-out entries (set to zero).
If both :attr:`sequence` and :attr:`sequence_length` are python
arrays, the returned value is a python array as well.
"""
is_tensor = tf.contrib.framework.is_tensor
if is_tensor(sequence) or is_tensor(sequence_length):
return _mask_sequences_tensor(
sequence, sequence_length, dtype, time_major, tensor_rank)
else:
return _mask_sequences_py(
sequence, sequence_length, dtype, time_major)
def _mask_sequences_tensor(sequence,
sequence_length,
dtype=None,
time_major=False,
tensor_rank=2):
"""Masks out sequence entries that are beyond the respective sequence
lengths. Masks along the time dimension.
Args:
sequence: A Tensor of sequence values.
If `time_major=False` (default), this must be a Tensor of shape:
`[batch_size, max_time, d_2, ..., d_rank]`, where the rank of
the Tensor is specified with :attr:`tensor_rank`.
If `time_major=True`, this must be a Tensor of shape:
`[max_time, batch_size, d_2, ..., d_rank].`
sequence_length: A Tensor of shape `[batch_size]`. Time steps beyond
the respective sequence lengths will be made zero.
dtype (dtype): Type of :attr:`sequence`. If `None`, infer from
:attr:`sequence` automatically.
time_major (bool): The shape format of the inputs. If `True`,
:attr:`sequence` must have shape
`[max_time, batch_size, d_2, ..., d_rank]`.
If `False` (default), :attr:`sequence` must have
shape `[batch_size, max_time, d_2, ..., d_rank]`.
tensor_rank (int): The number of dimensions of :attr:`sequence`.
Default is 2, i.e., :attr:`sequence` is a 2D Tensor consisting
of batch and time dimensions.
Returns:
The masked sequence, i.e., a Tensor of the same shape as
:attr:`sequence` but with masked-out entries (set to zero).
"""
if tensor_rank is None:
tensor_rank = 2
if tensor_rank < 2:
raise ValueError(
"tensor_rank must be > 2. Got tensor_rank = {}".format(tensor_rank))
if time_major:
sequence = rnn._transpose_batch_time(sequence)
max_time = tf.to_int32(tf.shape(sequence)[1])
if dtype is None:
dtype = sequence.dtype
mask = tf.sequence_mask(
tf.to_int32(sequence_length), max_time, dtype=dtype)
for _ in range(2, tensor_rank):
mask = tf.expand_dims(mask, axis=-1)
sequence = sequence * mask
if time_major:
sequence = rnn._transpose_batch_time(sequence)
return sequence
def _mask_sequences_py(sequence,
sequence_length,
dtype=None,
time_major=False):
"""Masks out sequence entries that are beyond the respective sequence
lengths. Masks along the time dimension.
This is the numpy version of :func:`texar.utils.mask_sequences`.
Args:
        sequence: A python array of sequence values.
If `time_major=False` (default), this must be an array of shape:
`[batch_size, max_time, ...]`
            If `time_major=True`, this must be an array of shape:
`[max_time, batch_size, ...].`
sequence_length: An array of shape `[batch_size]`. Time steps beyond
the respective sequence lengths will be made zero.
dtype (dtype): Type of :attr:`sequence`. If `None`, infer from
:attr:`sequence` automatically.
time_major (bool): The shape format of the inputs. If `True`,
:attr:`sequence` must have shape
`[max_time, batch_size, ...]`.
If `False` (default), :attr:`sequence` must have
shape `[batch_size, max_time, ...]`.
Returns:
The masked sequence, i.e., an array of the same shape as
:attr:`sequence` but with masked-out entries (set to zero).
"""
sequence = np.array(sequence)
sequence_length = np.array(sequence_length)
rank = sequence.ndim
if rank < 2:
raise ValueError("`sequence` must be 2D or higher order.")
batch_size = sequence.shape[0]
max_time = sequence.shape[1]
dtype = dtype or sequence.dtype
if time_major:
        sequence = np.transpose(sequence, axes=[1, 0, 2])
    # the masking mirrors _mask_sequences_tensor above, in numpy
    steps = np.tile(np.arange(max_time), [batch_size, 1])
    mask = np.asarray(steps < sequence_length[:, None], dtype=dtype)
    for _ in range(2, rank):
        mask = np.expand_dims(mask, -1)
    sequence = sequence * mask
    if time_major:
        sequence = np.transpose(sequence, axes=[1, 0, 2])
    return sequence
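# Illustration only: masking a small rank-2 batch with the numpy helper above.
# Entries past each sequence length are zeroed out.
def _demo_mask_sequences_py():
    seq = [[1, 2, 3, 4],
           [5, 6, 7, 8]]
    # lengths [2, 3] -> [[1, 2, 0, 0], [5, 6, 7, 0]]
    return _mask_sequences_py(seq, [2, 3])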
from mutation import mutate, mutate_replace, mutate_insert, mutate_shrink
from node_set import PrimitiveSet, TerminalSet
from tree import generate_tree, parse_tree
from crossover import one_point_crossover
from functools import partial
from copy import deepcopy
import multiprocess
import numpy as np
import random
import sys
import os
# Set random seed
random.seed(101)
np.random.seed(101)
# Pool size
if sys.platform == 'win32':
num_proc = int(os.environ['NUMBER_OF_PROCESSORS'])
else:
num_proc = int(os.popen('grep -c cores /proc/cpuinfo').read())
POOL_SIZE = max(2, num_proc - 2)
# print("Using {} processes".format(POOL_SIZE))
# Population size
POP_SIZE = 600
# Number of generations
NGEN = 100
# Mutation probability
MUTPB = 0.25
# Crossover probability
CXPB = 0.96
SEED = "add_x_x(div_x_float(mult_x_x(sub_x_float(sub_x_float(pass_x(x), uniform_5_5(0.3330847400917083)), uniform_5_5(4.762321841272186)), mult_x_x(pass_x(x), pass_x(x))), tan_float(add_float_float(pass_2(2.0), pass_1(1.0)))), div_x_float(sin_x(pass_x(x)), div_float_float(cos_float(pass_2(2.0)), mult_float_float(uniform_5_5(4.709478895399462), uniform_5_5(-3.7382713053737957)))))"
SEED2 = "add_x_x(sub_x_x(sub_x_float(div_x_float(add_x_x(mult_x_x(mult_x_float(pass_x(x), uniform_5_5(4.762321841272186)), sub_x_float(pass_x(x), uniform_5_5(4.762321841272186))), mult_x_float(mult_x_float(cos_x(mult_x_float(pass_x(x), uniform_5_5(4.762321841272186))), pass_3(3.0)), uniform_5_5(4.762321841272186))), pass_2(2.0)), sin_float(pass_1(1.0))), tan_x(mult_x_float(div_x_float(pass_x(x), pass_2(2.0)), pass_3(3.0)))), div_x_x(mult_x_x(add_x_float(pass_x(x), uniform_5_5(1.7955839820267636)), add_x_x(tan_x(tan_x(pass_x(x))), pass_x(x))), div_x_float(exp_x(pass_x(x)), pass_2(2.0))))"
# x^4 + x^3 + x^2 + x
def polynomial(x):
return np.power(x, 4) + np.power(x, 3) + np.power(x, 2) + x
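# Quick sanity check of the target: polynomial(2) = 16 + 8 + 4 + 2 = 30,
# and polynomial(np.array([0., 1.])) -> array([0., 4.]).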
def polynomial2(x):
    return np.exp(-1.0 * (np.sin(3 * x)))
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Daily rain estimation experiment with the Air Quality dataset
Using a simple MLP model, we estimate the average daily rain from
the local representations of the time series that are learned
in an unsupervised manner.
"""
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import json
import seaborn as sns
sns.set()
from sklearn.metrics import accuracy_score, roc_auc_score
import sys
import tensorflow as tf
from gl_rep.models import EncoderLocal, EncoderGlobal, WindowDecoder
from gl_rep.glr import GLR
from gl_rep.data_loaders import airq_data_loader
from baselines.gpvae import GPVAE, Decoder
from baselines.vae import VAE, Encoder, Decoder
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
mode = 'glr'
window_size = 1 * 24
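# NOTE: `Predictor`, `block_labels` and `run_epoch` used in main() are
# presumably defined further down in this script (outside the excerpt shown
# here). Purely as an illustrative assumption of what such an MLP head could
# look like (not this project's actual implementation), a Keras sketch is:
def _example_predictor_head(hidden_sizes=(32, 8)):
    layers = [tf.keras.layers.Dense(h, activation='relu') for h in hidden_sizes]
    layers.append(tf.keras.layers.Dense(1))   # scalar daily-rain estimate
    return tf.keras.Sequential(layers)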
def main(args):
with open('configs.json') as config_file:
configs = json.load(config_file)['air_quality']
if args.train:
test_loss = []
for cv in range(3):
model = Predictor([32, 8])
trainset, validset, testset, normalization_specs = airq_data_loader(normalize='mean_zero')
label_blocks_train = block_labels(trainset)
label_blocks_valid = block_labels(validset)
label_blocks_test = block_labels(testset)
if mode=='supervised':
n_epochs = 200
lr = 1e-3
file_name = './ckpt/e2e_rain_prediction'
rep_model_file = 'End to end'
rep_model = None
else:
n_epochs = 50
lr = 1e-4
if mode=='glr':
file_name = './ckpt/glr_rain_predictor'
rep_model_file = './ckpt/glr_air_quality_lambda%.1f'%args.lamda
zt_encoder = EncoderLocal(zl_size=configs["zl_size"],
hidden_sizes=configs["glr_local_encoder_size"])
zg_encoder = EncoderGlobal(zg_size=configs["zg_size"],
hidden_sizes=configs["glr_global_encoder_size"])
dec = WindowDecoder(output_size=configs["feature_size"], output_length=configs["window_size"],
hidden_sizes=configs["glr_decoder_size"])
rep_model = GLR(global_encoder=zg_encoder, local_encoder=zt_encoder, decoder=dec,
window_size=configs["window_size"], time_length=configs["t_len"],
data_dim=configs["feature_size"], kernel_scales=configs["kernel_scales"],
kernel=configs["kernels"], beta=configs["beta"], M=configs["mc_samples"], sigma=.5,
lamda=args.lamda, length_scale=configs["length_scale"], p=15)
elif mode=='gpvae':
file_name = './ckpt/gpvae%d_rain_predictor'%args.rep_size
rep_model_file = './ckpt/gpvae%d_air_quality'%args.rep_size
encoder = EncoderLocal(zl_size=args.rep_size, hidden_sizes=configs["baseline_encoder_size"])
decoder = Decoder(output_size=configs["feature_size"],
output_length=configs["window_size"],
hidden_sizes=configs["baseline_decoder_size"])
rep_model = GPVAE(encoder, decoder, time_length=configs["t_len"], data_dim=configs["feature_size"],
window_size=configs["window_size"], kernel=['cauchy'], beta=1., M=1,
sigma=1.0, length_scale=2.0, kernel_scales=4, p=100)
elif mode=='vae':
file_name = './ckpt/vae%d_rain_predictor'%args.rep_size
rep_model_file = './ckpt/vae%d_air_quality'%args.rep_size
encoder = Encoder(zl_size=args.rep_size, hidden_sizes=configs["baseline_encoder_size"])
decoder = Decoder(output_size=configs["feature_size"],
output_length=configs["window_size"],
hidden_sizes=configs["baseline_decoder_size"])
rep_model = VAE(encoder=encoder, decoder=decoder, data_dim=configs["feature_size"], beta=1.,
M=configs["mc_samples"], sample_len=configs["t_len"])
rep_model.load_weights(rep_model_file)
                print('Training ', rep_model_file)
if mode=='glr':
model(tf.random.normal(shape=(5, 10, zt_encoder.zl_size), dtype=tf.float32),
tf.random.normal(shape=(5, zt_encoder.zl_size), dtype=tf.float32),
x_lens=tf.ones(shape=(5,))*10)
optimizer = tf.keras.optimizers.Adam(lr)
elif mode == 'supervised':
model(tf.random.normal(shape=(5, 10, configs["feature_size"]), dtype=tf.float32), None,
x_lens=tf.ones(shape=(5,))*10)
optimizer = tf.keras.optimizers.Adam(lr)
else:
model(tf.random.normal(shape=(5, 10, encoder.zl_size), dtype=tf.float32), None,
x_lens=tf.ones(shape=(5,))*10)
optimizer = tf.keras.optimizers.Adam(lr)
trainable_vars = model.trainable_variables
losses_train, acc_train, auroc_train = [], [], []
losses_val, acc_val, auroc_val = [], [], []
for epoch in range(n_epochs+1):
epoch_loss = run_epoch(model, trainset, rep_model, optimizer=optimizer, label_blocks = label_blocks_train,
train=True, trainable_vars=trainable_vars)
if epoch % 10 == 0:
print('=' * 30)
print('Epoch %d' % epoch, '(Learning rate: %.5f)' % (lr))
losses_train.append(epoch_loss)
print("Training loss = %.3f" % (epoch_loss))
epoch_loss = run_epoch(model, validset, rep_model, label_blocks = label_blocks_valid, train=False)
losses_val.append(epoch_loss)
print("Validation loss = %.3f" % (epoch_loss))
print('Test loss = %.3f'%run_epoch(model, testset, rep_model, label_blocks = label_blocks_test, train=False))
test_loss.append(run_epoch(model, testset, rep_model, label_blocks = label_blocks_test, train=False))
print("\n\n Final performance \t loss = %.3f +- %.3f" % (np.mean(test_loss), np.std(test_loss)))
plt.figure()
plt.plot(losses_train, label='Train loss')
plt.plot(losses_val, label='Validation loss')
plt.legend()
plt.savefig('./plots/evaluations/rain_prediction_loss_%s.pdf' % (mode))
model.save_weights(file_name)
else:
trainset, validset, testset, normalization_specs = airq_data_loader(normalize='mean_zero')
if mode == 'supervised':
file_name = './ckpt/e2e_rain_prediction'
rep_model = None
else:
if mode == 'glr':
file_name = './ckpt/glr_rain_predictor'
rep_model_file = './ckpt/glr_air_quality_lambda%.1f' %(args.lamda)
zt_encoder = EncoderLocal(zl_size=configs["zl_size"],
hidden_sizes=configs["glr_local_encoder_size"])
zg_encoder = EncoderGlobal(zg_size=configs["zg_size"],
hidden_sizes=configs["glr_global_encoder_size"])
dec = WindowDecoder(output_size=configs["feature_size"], output_length=configs["window_size"],
hidden_sizes=configs["glr_decoder_size"])
rep_model = GLR(global_encoder=zg_encoder, local_encoder=zt_encoder, decoder=dec,
window_size=configs["window_size"], time_length=configs["t_len"],
data_dim=configs["feature_size"], kernel_scales=configs["kernel_scales"],
kernel=configs["kernels"], beta=configs["beta"], M=configs["mc_samples"], sigma=.5,
lamda=args.lamda, length_scale=configs["length_scale"], p=15)
elif mode == 'gpvae':
file_name = './ckpt/gpvae%d_rain_predictor'%args.rep_size
rep_model_file = './ckpt/gpvae%d_air_quality'%args.rep_size
encoder = EncoderLocal(zl_size=args.rep_size, hidden_sizes=configs["baseline_encoder_size"])
decoder = Decoder(output_size=configs["feature_size"],
output_length=configs["window_size"],
hidden_sizes=configs["baseline_decoder_size"])
rep_model = GPVAE(encoder, decoder, time_length=configs["t_len"], data_dim=configs["feature_size"],
window_size=configs["window_size"], kernel=['cauchy'], beta=1., M=1,
sigma=1.0, length_scale=2.0, kernel_scales=4, p=100)
elif mode=='vae':
file_name = './ckpt/vae%d_rain_predictor'%args.rep_size
rep_model_file = './ckpt/vae%d_air_quality'%args.rep_size
encoder = Encoder(zl_size=args.rep_size, hidden_sizes=configs["baseline_encoder_size"])
decoder = Decoder(output_size=configs["feature_size"],
output_length=configs["window_size"],
hidden_sizes=configs["baseline_decoder_size"])
rep_model = VAE(encoder=encoder, decoder=decoder, data_dim=configs["feature_size"], beta=1.,
M=configs["mc_samples"], sample_len=configs["t_len"])
rep_model.load_weights(rep_model_file)
model = Predictor([32, 8])
        if mode != 'supervised':
rep_model.load_weights(rep_model_file)#.expect_partial()
model.load_weights(file_name).expect_partial()
# Plot the estimated daily rain for a random sample
for batch in testset:
x_seq = batch[0]
mask_seq, x_lens = batch[1], batch[2]
rnd_sample = tf.math.argmax(x_lens)
all_labels = batch[3][:, :, 1]
if mode=='glr':
global_sample_len = int(x_seq.shape[1] * 0.3)
rnd_t_g = np.random.randint(0, x_seq.shape[1] - global_sample_len)
z_g = rep_model.global_encoder(x_seq[:, rnd_t_g:rnd_t_g + global_sample_len, :],
mask=mask_seq[:, rnd_t_g:rnd_t_g + global_sample_len, :]).sample()
pz_t = rep_model.local_encoder(x_seq,mask=mask_seq, window_size=window_size)
z_t = tf.transpose(pz_t.sample(), perm=(0, 2, 1))
pred = model(z_t, z_g, x_lens)[rnd_sample]
all_labels_blocks = tf.split(all_labels, num_or_size_splits=all_labels.shape[1] // window_size, axis=1)
labels = tf.stack([tf.math.reduce_sum(block, axis=1) for block in all_labels_blocks], axis=-1)[rnd_sample]
elif mode=='gpvae':
z_t, _ = rep_model.encode(x_seq, mask_seq)
z_g = None
pred = model(z_t, z_g, x_lens)[rnd_sample]
all_labels_blocks = tf.split(all_labels, num_or_size_splits=all_labels.shape[1] // window_size, axis=1)
labels = tf.stack([tf.experimental.numpy.nanmean(block, axis=1) for block in all_labels_blocks], axis=-1)[rnd_sample]
elif mode=='vae':
z_t = rep_model.encoder(x_seq, mask_seq, rep_model.window_size).sample()
z_g = None
pred = model(z_t, z_g, x_lens)[rnd_sample]
all_labels_blocks = tf.split(all_labels, num_or_size_splits=all_labels.shape[1] // window_size, axis=1)
labels = tf.stack([tf.experimental.numpy.nanmean(block, axis=1) for block in all_labels_blocks], axis=-1)[rnd_sample]
elif mode == 'supervised':
pred = model(x_seq, None, x_lens)[rnd_sample]
labels = all_labels[rnd_sample]
plt.figure(figsize=(10,4))
plt.plot(labels, label='Average Daily Rain')
plt.plot(pred, label='Estimated Daily Rain')
plt.legend()
plt.savefig('./plots/evaluations/estimated_rain_%s.pdf'%mode)
break
class Predictor(tf.keras.Model):
"""Simple classifier layer to classify the subgroup of data
Args:
fc_sizes: Hidden size of the predictor MLP
"""
def __init__(self, fc_sizes):
super(Predictor, self).__init__()
self.fc_sizes = fc_sizes
self.fc = tf.keras.Sequential([tf.keras.layers.Dense(h, activation=tf.nn.relu, dtype=tf.float32)
for h in fc_sizes])
self.prob = tf.keras.layers.Dense(1, activation=tf.nn.relu, dtype=tf.float32)
def __call__(self, local_encs, global_encs, x_lens):
        if global_encs is not None:
h = tf.concat([local_encs, tf.tile(tf.expand_dims(global_encs, axis=1), [1, local_encs.shape[1], 1])], axis=-1)
h = tf.keras.layers.BatchNormalization()(h)
else:
h = local_encs
logits = (self.fc(h))
probs = tf.keras.layers.Dropout(rate=0.3)(self.prob(logits))
return probs[...,0]
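# Illustrative sketch (not part of the original script): the call signature and
# output shape of Predictor. All shapes below are assumptions for illustration.
def _example_predictor_shapes():
    local = tf.random.normal((2, 7, 8))    # (batch, n_windows, local representation size)
    glob = tf.random.normal((2, 8))        # (batch, global representation size)
    lens = tf.ones((2,)) * 7
    predictor = Predictor([32, 8])
    out = predictor(local, glob, lens)     # one rain estimate per window
    assert out.shape == (2, 7)
    return out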
def run_epoch(model, dataset, glr_model, optimizer=None, label_blocks=None, train=False , trainable_vars=None):
"Training epoch for training the classifier"
mse_loss = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
mae_loss = tf.keras.losses.MeanAbsoluteError(reduction=tf.keras.losses.Reduction.NONE)
epoch_loss, epoch_acc, epoch_auroc = [], [], []
if label_blocks is None:
all_labels_blocks = tf.concat([b[3][:, :, 1] for b in dataset], 0)
all_labels_blocks = tf.split(all_labels_blocks, num_or_size_splits=all_labels_blocks.shape[1] // window_size, axis=1)
label_blocks = tf.stack([tf.math.reduce_sum(block, axis=1) for block in all_labels_blocks], axis=-1)
b_start = 0
for batch_i, batch in dataset.enumerate():
x_seq = batch[0]
mask_seq, x_lens = batch[1], batch[2]
all_labels = batch[3][:, :, 1]
if mode=='supervised':
labels = all_labels
else:
labels = label_blocks[b_start:b_start+len(x_seq)]
b_start += len(x_seq)
labels = tf.where(tf.math.is_nan(labels), tf.zeros_like(labels), labels)
if mode=='glr':
global_sample_len = int(x_seq.shape[1] * 0.3)
            rnd_t_g = np.random.randint(0, x_seq.shape[1] - global_sample_len)
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 17 15:59:25 2016
@author: timothyb0912
"""
import unittest
import warnings
from collections import OrderedDict
from copy import deepcopy
import mock
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
import numpy.testing as npt
import pylogit.mixed_logit_calcs as mlc
import pylogit.mixed_logit as mixed_logit
try:
# in Python 3 range returns an iterator instead of list
# to maintain backwards compatibility use "old" version of range
from past.builtins import xrange
except ImportError:
pass
# Use the following to always show the warnings
np.seterr(all='warn')
warnings.simplefilter("always")
def temp_utility_transform(sys_utility_array, *args, **kwargs):
"""
Parameters
----------
sys_utility_array : numpy array.
        Should be 1D or 2D. Should have been created by the dot product of a
design matrix and an array of index coefficients.
Returns
-------
2D numpy array.
The returned array will contain a representation of the
`sys_utility_array`. If `sys_utility_array` is 2D, then
`sys_utility_array` will be returned unaltered. Else, the function will
return `sys_utility_array[:, None]`.
"""
# Return a 2D array of systematic utility values
if len(sys_utility_array.shape) == 1:
systematic_utilities = sys_utility_array[:, np.newaxis]
else:
systematic_utilities = sys_utility_array
return systematic_utilities
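# Illustrative sketch (not part of the original tests): temp_utility_transform
# promotes a 1D systematic-utility array to a column vector and returns 2D
# input unaltered.
def _example_temp_utility_transform_shapes():
    one_d = temp_utility_transform(np.array([1.0, 2.0]))
    two_d = temp_utility_transform(np.ones((4, 3)))
    assert one_d.shape == (2, 1)   # 1D input gains a trailing axis
    assert two_d.shape == (4, 3)   # 2D input is returned unaltered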
class NormalDrawsTests(unittest.TestCase):
def test_return_format(self):
n_obs = 10
n_draws = 5
n_vars = 3
random_draws = mlc.get_normal_draws(n_obs, n_draws, n_vars)
self.assertIsInstance(random_draws, list)
self.assertEqual(len(random_draws), n_vars)
for draws in random_draws:
self.assertIsInstance(draws, np.ndarray)
            self.assertEqual(draws.shape, (n_obs, n_draws))
return None
class MixingNamesToPositions(unittest.TestCase):
def test_convert_mixing_names_to_positions(self):
fake_index_vars = ["foo", "bar", "cake", "cereal"]
fake_mixing_vars = ["bar", "cereal"]
args = (fake_mixing_vars, fake_index_vars)
mix_pos = mlc.convert_mixing_names_to_positions(*args)
self.assertIsInstance(mix_pos, list)
self.assertEqual(len(mix_pos), len(fake_mixing_vars))
for pos, idx_val in enumerate(mix_pos):
current_var = fake_mixing_vars[pos]
self.assertEqual(idx_val, fake_index_vars.index(current_var))
return None
class MixedLogitCalculations(unittest.TestCase):
# Note that for this set up, we will consider a situation with the
# following parameters:
# 3 Alternatives per individual
# 2 Individuals
# Individual 1 has 2 observed choice situations
# Individual 2 has 1 observed choice situation
# The true systematic utility depends on ASC_1, ASC_2, and a single X
# The X coefficient is randomly distributed
def setUp(self):
# Fake random draws where Row 1 is for observation 1 and row 2 is
# for observation 2. Column 1 is for draw 1 and column 2 is for draw 2
self.fake_draws = np.array([[0.4, 0.8], [0.6, 0.2]])
# Create the betas to be used during the tests
self.fake_betas = np.array([0.3, -0.6, 0.2])
self.fake_std = 1
self.fake_betas_ext = np.concatenate((self.fake_betas,
np.array([self.fake_std])),
axis=0)
# Create the fake design matrix with columns denoting ASC_1, ASC_2, X
self.fake_design = np.array([[1, 0, 1],
[0, 1, 2],
[0, 0, 3],
[1, 0, 1.5],
[0, 1, 2.5],
[0, 0, 3.5],
[1, 0, 0.5],
[0, 1, 1.0],
[0, 0, 1.5]])
# Record what positions in the design matrix are being mixed over
self.mixing_pos = [2]
# Create the arrays that specify the choice situation, individual id
# and alternative ids
self.situation_ids = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
self.individual_ids = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2])
        self.alternative_ids = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
from math import pi, sqrt, sin, cos
from pulse.preprocessing.acoustic_element import DOF_PER_NODE
import numpy as np
from pulse.preprocessing.node import Node, distance, DOF_PER_NODE_STRUCTURAL
NODES_PER_ELEMENT = 2
DOF_PER_ELEMENT = DOF_PER_NODE_STRUCTURAL * NODES_PER_ELEMENT
ENTRIES_PER_ELEMENT = DOF_PER_ELEMENT ** 2
decoupling_matrix = np.ones((DOF_PER_ELEMENT,DOF_PER_ELEMENT), dtype=int)
|
import domains.gym_taxi
from domains.gym_taxi.simulator.taxi_world import TaxiWorldSimulator
import domains.gym_taxi.utils.representations as convert
import cv2
from matplotlib import pyplot as plt
import numpy as np
# test small -> big -> small conversion fidelity
img2 = np.random.randint(0, 2, [4, 39, 39], dtype=np.uint8)
for i in range(1000):
    img = np.random.randint(0, 2, [4, 39, 39], dtype=np.uint8)
#!/usr/bin/env python3
# -*-coding: utf-8-*-
# Author : LiangjunFeng
# Blog : http://my.csdn.net/Liangjun_Feng
# GitHub : https://www.github.com/LiangjunFeng
# File : FastICA.py
# Date : 2017/10/15 6:25
# Version: 0.1
# Description: FastICA algorithm, splits voice signals
import wave
import os
import numpy as np
import math
import matplotlib.pyplot as plt
from numpy import random
from sklearn import preprocessing
import scipy
import scipy.io as sio
def LoadSoundSet(path):
filename= os.listdir(path)
data = []
for i in range(len(filename)):
f = wave.open(path+filename[i],'rb')
params = f.getparams()
nchannels, sampwidth, framerate, nframes = params[:4]
strData = f.readframes(nframes)
waveData = np.fromstring(strData,dtype=np.int16)
waveData = waveData*1.0/(max(abs(waveData)))
data += waveData.tolist()
time = np.arange(0,nframes*len(filename))*(1.0 / framerate)
return time.tolist(),data
def LoadSound(path):
f = wave.open(path,'rb')
params = f.getparams()
nchannels, sampwidth, framerate, nframes = params[:4]
strData = f.readframes(nframes) #read the wav file
waveData = np.fromstring(strData,dtype=np.int16)
waveData = waveData*1.0/(max(abs(waveData))) #normalize the sound wave
time = np.arange(0,nframes*nchannels)*(1.0 / framerate)
return time.tolist(),waveData.tolist()
def ShowRes(data):
print("//==========================================================//")
x = np.linspace(0,1,data.shape[1])
plt.figure()
fig = plt.gcf()
fig.set_size_inches(6.5, 1*data.shape[0])
for i in range(data.shape[0]):
axes = plt.subplot(data.shape[0],1,i+1)
axes.set_frame_on(False)
axes.set_axis_off()
plt.plot(x,data[i,:].T,color = 'black')
plt.show()
print("//==========================================================//")
def getRandomW(length,height): #make a random matrix
W = random.random(size=(length,height))
return W
def eigOrth(Data): #eigen-normalize (whiten) the data
data = Data.copy()
D,E = np.linalg.eig(data.dot(data.T))
for i in range(len(D)):
if D[i] < 1e-7:
D[i] = 0.01
D[i] = D[i]**0.5
D = np.mat(np.diag(D))
D = D.I
data = E*D*E.T*data
return data.real
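# Illustrative sketch (not part of the original script): eigOrth whitens its
# input, so for data with a well-conditioned covariance the output Y satisfies
# Y.dot(Y.T) ~ identity. The random input below is an assumption for the demo.
def _example_eigorth_is_whitening():
    rng = np.random.RandomState(0)
    X = rng.randn(3, 500)
    Y = eigOrth(np.mat(X))
    print(np.round(Y.dot(Y.T), 2))   # approximately the 3x3 identity matrix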
def GFunction(data): #the first derivative function used in ICA
def G(x):
y = x*math.exp(-0.5*(x**2))
return y
length,bordth = data.shape
output = np.zeros((length,bordth))
for i in range(length):
for j in range(bordth):
output[i,j] = G(data[i,j])
return output
def gFunction(data): #the second derivative function used in ICA
def g(x):
y = -1*(x**2)*math.exp(-0.5*(x**2))
return y
length,bordth = data.shape
output = np.zeros((length,bordth))
for i in range(length):
for j in range(bordth):
output[i,j] = g(data[i,j])
return output
def distance(W,oldW): #used for judging convergence
return abs(abs(float(W.T*oldW)) - 1)
class ICA: #ICA
def __init__(self,conponent = -1):
self._W = []
self._conponent = conponent
self._data = 0
def fit_transform(self,data):
data = preprocessing.scale(data.T)
data = np.mat(eigOrth(data.T))
self._data = data
if self._conponent == -1:
self._conponent = data.shape[0]
W = getRandomW(data.shape[0],self._conponent)
W = eigOrth(W.T).T
MAX_T = 10000
for i in range(W.shape[1]):
w = W[:,i]
j,t = 0,1
while (j < MAX_T) and (t > 1e-8):
oldw = w.copy()
w = np.mean(data*GFunction(w.T*data).T,1) - np.mean(gFunction(w.T*data))*w
temp = np.zeros((W.shape[0],1))
for k in range(i):
temp += float(w.T*W[:,k])*W[:,k]
w = w - temp
w = w/math.sqrt(w.T*w)
W[:,i] = w
t = distance(w,oldw)
print(i+1,t)
j += 1
self._W = W
return (self._W.T*data)
def transfer(self,data):
data = preprocessing.scale(data.T)
data = np.mat(eigOrth(data.T))
return (self._W.T*data)
def calculateObj(self):
data = self._data
firstPart = np.mean(GFunction(self._W.T.dot(data)),1)
x = np.arange(-data.shape[1]/2000,data.shape[1]/2000,0.001)
y = np.mat(np.mean(scipy.stats.norm.pdf(x,0,1)))
K = np.mean(GFunction(y))
ICAPart = np.multiply((firstPart - K),(firstPart - K))
diffData = makeDiff(data)
SlowPart = np.zeros((1,self._W.shape[0]))
for i in range(self._W.shape[0]):
w = self._W[:,i]
secondPart = (w.T*diffData*diffData.T*w)
SlowPart[0,i] = float(secondPart)
print("ICA ICAPart:\n",ICAPart)
print("ICA SlowPart:\n",np.ravel(SlowPart))
if __name__ == '__main__':
#========================================================================
#Load the data and make them the same size
file1 = "/Users/zhuxiaoxiansheng/Desktop/SICA_data/LDC2017S07.clean.wav"
file2 = "/Users/zhuxiaoxiansheng/Desktop/SICA_data/LDC2017S10.embed.wav"
file3 = "/Users/zhuxiaoxiansheng/Desktop/SICA_data/LDC93S1.wav"
noise1 = sio.loadmat(u'/Users/zhuxiaoxiansheng/Desktop/SICA_data/noise2.mat')['noise2']
time2,noise2 = LoadSound(file2)
time1,data1 = LoadSound(file1)
time2,data2 = LoadSound(file3)
data3 = sio.loadmat(u"/Users/zhuxiaoxiansheng/Desktop/SICA_data/voice.mat")['voice']
time1 = time1[1000:-1000]
    data1 = np.mat(data1[1000:-1000])
import numpy
from numpy.testing import assert_equal, assert_almost_equal
# OE surface in form of conic equation:
# ccc[0]*X^2 + ccc[1]*Y^2 + ccc[2]*Z^2 +
# ccc[3]*X*Y + ccc[4]*Y*Z + ccc[5]*X*Z +
# ccc[6]*X + ccc[7]*Y + ccc[8]*Z + ccc[9] = 0
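# Illustrative sketch (not part of the original module): evaluating the conic
# expression above at a point (X, Y, Z) for a given coefficient array ccc.
# For the plane returned by initialize_as_plane(), ccc = [0,...,0,-1.,0], the
# expression reduces to -Z, i.e. the surface Z = 0.
def _evaluate_conic(ccc, X, Y, Z):
    return (ccc[0]*X**2 + ccc[1]*Y**2 + ccc[2]*Z**2 +
            ccc[3]*X*Y + ccc[4]*Y*Z + ccc[5]*X*Z +
            ccc[6]*X + ccc[7]*Y + ccc[8]*Z + ccc[9])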
class SurfaceConic(object):
def __init__(self, ccc=numpy.zeros(10)):
if ccc is not None:
self.ccc = ccc.copy()
else:
self.ccc = numpy.zeros(10)
@classmethod
def initialize_from_coefficients(cls, ccc):
if numpy.array(ccc).size != 10:
raise Exception("Invalid coefficients (dimension must be 10)")
return SurfaceConic(ccc=ccc)
@classmethod
def initialize_as_plane(cls):
return SurfaceConic(numpy.array([0,0,0,0,0,0,0,0,-1.,0]))
#
# initializers from focal distances
#
@classmethod
def initialize_as_sphere_from_focal_distances(cls,p, q, theta1, cylindrical=0, cylangle=0.0, switch_convexity=0):
ccc = SurfaceConic()
ccc.set_sphere_from_focal_distances(p,q,theta1)
if cylindrical:
ccc.set_cylindrical(cylangle)
if switch_convexity:
ccc.switch_convexity()
return ccc
@classmethod
def initialize_as_ellipsoid_from_focal_distances(cls,p, q, theta1, cylindrical=0, cylangle=0.0, switch_convexity=0):
ccc = SurfaceConic()
ccc.set_ellipsoid_from_focal_distances(p,q,theta1)
if cylindrical:
ccc.set_cylindrical(cylangle)
if switch_convexity:
ccc.switch_convexity()
return ccc
@classmethod
def initialize_as_paraboloid_from_focal_distances(cls,p, q, theta1, cylindrical=0, cylangle=0.0, switch_convexity=0):
ccc = SurfaceConic()
ccc.set_paraboloid_from_focal_distances(p,q,theta1)
if cylindrical:
ccc.set_cylindrical(cylangle)
if switch_convexity:
ccc.switch_convexity()
return ccc
@classmethod
def initialize_as_hyperboloid_from_focal_distances(cls,p, q, theta1, cylindrical=0, cylangle=0.0, switch_convexity=0):
ccc = SurfaceConic()
ccc.set_hyperboloid_from_focal_distances(p,q,theta1)
if cylindrical:
ccc.set_cylindrical(cylangle)
if switch_convexity:
ccc.switch_convexity()
return ccc
#
    # initializers from surface parameters
#
@classmethod
def initialize_as_sphere_from_curvature_radius(cls, radius, cylindrical=0, cylangle=0.0, switch_convexity=0):
ccc = SurfaceConic()
ccc.set_sphere_from_curvature_radius(radius)
if cylindrical:
ccc.set_cylindrical(cylangle)
if switch_convexity:
ccc.switch_convexity()
return ccc
def duplicate(self):
return SurfaceConic.initialize_from_coefficients(self.ccc.copy())
#
# getters
#
def get_coefficients(self):
return self.ccc.copy()
#
# setters
#
def set_coefficients(self,ccc):
if numpy.array(ccc).size != 10:
raise Exception("Invalid coefficients (dimension must be 10)")
self.ccc = ccc
def vector_reflection(self,v1,normal):
tmp = v1 * normal
tmp2 = tmp[0,:] + tmp[1,:] + tmp[2,:]
tmp3 = normal.copy()
for jj in (0,1,2):
tmp3[jj,:] = tmp3[jj,:] * tmp2
v2 = v1 - 2 * tmp3
v2mod = numpy.sqrt(v2[0,:]**2 + v2[1,:]**2 + v2[2,:]**2)
v2 /= v2mod
return v2
def get_normal(self,x2):
# ;
# ; Calculates the normal at intercept points x2 [see shadow's normal.F]
# ;
normal = numpy.zeros_like(x2)
normal[0,:] = 2 * self.ccc[1-1] * x2[0,:] + self.ccc[4-1] * x2[1,:] + self.ccc[6-1] * x2[2,:] + self.ccc[7-1]
normal[1,:] = 2 * self.ccc[2-1] * x2[1,:] + self.ccc[4-1] * x2[0,:] + self.ccc[5-1] * x2[2,:] + self.ccc[8-1]
normal[2,:] = 2 * self.ccc[3-1] * x2[2,:] + self.ccc[5-1] * x2[1,:] + self.ccc[6-1] * x2[0,:] + self.ccc[9-1]
normalmod = numpy.sqrt( normal[0,:]**2 + normal[1,:]**2 + normal[2,:]**2 )
normal[0,:] /= normalmod
normal[1,:] /= normalmod
normal[2,:] /= normalmod
return normal
def apply_specular_reflection_on_beam(self,newbeam):
# ;
# ; TRACING...
# ;
x1 = newbeam.get_columns([1,2,3]) # numpy.array(a3.getshcol([1,2,3]))
v1 = newbeam.get_columns([4,5,6]) # numpy.array(a3.getshcol([4,5,6]))
flag = newbeam.get_column(10) # numpy.array(a3.getshonecol(10))
t,iflag = self.calculate_intercept(x1,v1)
x2 = x1 + v1 * t
for i in range(flag.size):
if iflag[i] < 0: flag[i] = -100
# ;
# ; Calculates the normal at each intercept [see shadow's normal.F]
# ;
normal = self.get_normal(x2)
# ;
# ; reflection
# ;
v2 = self.vector_reflection(v1,normal)
# ;
# ; writes the mirr.XX file
# ;
newbeam.set_column(1, x2[0])
newbeam.set_column(2, x2[1])
newbeam.set_column(3, x2[2])
newbeam.set_column(4, v2[0])
newbeam.set_column(5, v2[1])
newbeam.set_column(6, v2[2])
newbeam.set_column(10, flag )
return newbeam
def calculate_intercept(self,XIN,VIN,keep=0):
# FUNCTION conicintercept,ccc,xIn1,vIn1,iflag,keep=keep
#
#
# ;+
# ;
# ; NAME:
# ; CONICINTERCEPT
# ;
# ; PURPOSE:
# ; This function Calculates the intersection of a
# ; conic (defined by its 10 coefficients in ccc)
# ; with a straight line, defined by a point xIn and
# ; an unitary direction vector vIn
# ;
# ; CATEGORY:
# ; SHADOW tools
# ;
# ; CALLING SEQUENCE:
# ; t = conicIntercept(ccc,xIn,vIn,iFlag)
# ;
# ; INPUTS:
# ; ccc: the array with the 10 coefficients defining the
# ; conic.
# ; xIn: a vector DblArr(3) or stack of vectors DblArr(3,nvectors)
# ; vIn: a vector DblArr(3) or stack of vectors DblArr(3,nvectors)
# ;
# ; OUTPUTS
# ; t the "travelled" distance between xIn and the surface
# ;
# ; OUTPUT KEYWORD PARAMETERS
# ; IFLAG: A flag (negative if no intersection)
# ;
# ; KEYWORD PARAMETERS
# ; keep: 0 [default] keep the max t from both solutions
# ; 1 keep the MIN t from both solutions
# ; 2 keep the first solution
# ; 3 keep the second solution
# ; ALGORITHM:
# ; Adapted from SHADOW/INTERCEPT
# ;
# ; Equation of the conic:
# ;
# ; c[0]*X^2 + c[1]*Y^2 + c[2]*Z^2 +
# ; c[3]*X*Y + c[4]*Y*Z + c[5]*X*Z +
# ; c[6]*X + c[7]*Y + c[8]*Z + c[9] = 0
# ;
# ; NOTE that the vectors, that are usually DblArr(3) can be
# ; stacks of vectors DblArr(3,nvectors). In such a case,
# ; the routine returns t
# ;
# ;
# ; AUTHOR:
# ; <NAME> <EMAIL>, Sept. 29, 2009
# ;
# ; MODIFICATION HISTORY:
# ;
# ;-
#
#
# ;CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
# ;C
# ;C subroutine intercept ( xin, vin, tpar, iflag)
# ;C
# ;C purpose computes the intercepts onto the mirror surface
# ;C
# ;C arguments xin ray starting position mirror RF
# ;C vin ray direction mirror RF
# ;C tpar distance from start of
# ;C intercept
# ;C iflag input 1 ordinary case
# ;C -1 ripple case
# ;C iflag output 0 success
# ;C -1 complex sol.
# ;C
# ;CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
#
CCC = self.ccc
if XIN.shape==(3,):
XIN.shape = (3,1)
if VIN.shape==(3,):
VIN.shape = (3,1)
AA = CCC[1-1]*VIN[1-1,:]**2 \
+ CCC[2-1]*VIN[2-1,:]**2 \
+ CCC[3-1]*VIN[3-1,:]**2 \
+ CCC[4-1]*VIN[1-1,:]*VIN[2-1,:] \
+ CCC[5-1]*VIN[2-1,:]*VIN[3-1,:] \
+ CCC[6-1]*VIN[1-1,:]*VIN[3-1,:]
BB = CCC[1-1] * XIN[1-1,:] * VIN[1-1,:]*2 \
+ CCC[2-1] * XIN[2-1,:] * VIN[2-1,:]*2 \
+ CCC[3-1] * XIN[3-1,:] * VIN[3-1,:]*2 \
+ CCC[4-1] * (XIN[2-1,:] * VIN[1-1,:] \
+ XIN[1-1,:] * VIN[2-1,:]) \
+ CCC[5-1]*(XIN[3-1,:]*VIN[2-1,:] \
+ XIN[2-1,:]*VIN[3-1,:]) \
+ CCC[6-1]*(XIN[1-1,:]*VIN[3-1,:] \
+ XIN[3-1,:]*VIN[1-1,:]) \
+ CCC[7-1] * VIN[1-1,:] \
+ CCC[8-1] * VIN[2-1,:] \
+ CCC[9-1] * VIN[3-1,:]
CC = CCC[1-1] * XIN[1-1,:]**2 \
+ CCC[2-1] * XIN[2-1,:]**2 \
+ CCC[3-1] * XIN[3-1,:]**2 \
+ CCC[4-1] * XIN[2-1,:] * XIN[1-1,:] \
+ CCC[5-1] * XIN[2-1,:] * XIN[3-1,:] \
+ CCC[6-1] * XIN[1-1,:] * XIN[3-1,:] \
+ CCC[7-1] * XIN[1-1,:] \
+ CCC[8-1] * XIN[2-1,:] \
+ CCC[9-1] * XIN[3-1,:] \
+ CCC[10-1]
# ;C
# ;C Solve now the second deg. equation **
# ;C
DENOM = AA*0.0
DETER = AA*0.0
TPAR1 = AA*0.0
TPAR2 = AA*0.0
IFLAG = numpy.ones(AA.size) # int(AA*0)+1
#itest1 = numpy.argwhere( numpy.abs(AA) > 1e-15)
#if len(itest1) > 0:
# DENOM[itest1] = 0.5 / AA[itest1]
# DETER[itest1] = BB[itest1]**2 - CC[itest1] * AA[itest1] * 4
# TMP = DETER[itest1]
# ibad = numpy.argwhere(TMP < 0)
# if len(ibad) == 0:
# IFLAG[itest1[ibad]] = -1
# igood = numpy.argwhere(TMP >= 0)
# if len(igood) > 0:
# itmp = itest1[igood]
# TPAR1[itmp] = -(BB[itmp] + numpy.sqrt(DETER[itmp])) * DENOM[itmp]
# TPAR2[itmp] = -(BB[itmp] - numpy.sqrt(DETER[itmp])) * DENOM[itmp]
# if keep == 0:
# TPAR = numpy.maximum(TPAR1,TPAR2)
# elif keep == 1:
# TPAR = numpy.minimum(TPAR1,TPAR2)
# elif keep == 2:
# TPAR = TPAR1
# elif keep == 3:
# TPAR = TPAR2
# else:
# TPAR = TPAR1
#else:
# TPAR = - CC / BB
TPAR = numpy.zeros_like(AA)
T_SOURCE = 10.0
# TODO: remove loop!
for i in range(AA.size):
if numpy.abs(AA[i]) < 1e-15:
TPAR1[i] = - CC[i] / BB[i]
TPAR2[i] = TPAR1[i]
else:
DENOM = 0.5 / AA[i]
DETER = BB[i] ** 2 - CC[i] * AA[i] * 4
if DETER < 0.0:
TPAR[i] = 0.0
IFLAG[i] = -1
else:
TPAR1[i] = -(BB[i] + numpy.sqrt(DETER)) * DENOM
TPAR2[i] = -(BB[i] - numpy.sqrt(DETER)) * DENOM
#if ( numpy.abs(TPAR1-T_SOURCE) <= numpy.abs(TPAR2-T_SOURCE)):
# TPAR[i] = TPAR1
#else:
# TPAR[i] = TPAR2
return TPAR1, TPAR2, IFLAG
def set_cylindrical(self,CIL_ANG):
COS_CIL = numpy.cos(CIL_ANG)
SIN_CIL = numpy.sin(CIL_ANG)
A_1 = self.ccc[1-1]
A_2 = self.ccc[2-1]
A_3 = self.ccc[3-1]
A_4 = self.ccc[4-1]
A_5 = self.ccc[5-1]
A_6 = self.ccc[6-1]
A_7 = self.ccc[7-1]
A_8 = self.ccc[8-1]
A_9 = self.ccc[9-1]
A_10 = self.ccc[10-1]
self.ccc[1-1] = A_1 * SIN_CIL**4 + A_2 * COS_CIL**2 * SIN_CIL**2 - A_4 * COS_CIL * SIN_CIL**3
self.ccc[2-1] = A_2 * COS_CIL**4 + A_1 * COS_CIL**2 * SIN_CIL**2 - A_4 * COS_CIL**3 * SIN_CIL
self.ccc[3-1] = A_3 # Z^2
self.ccc[4-1] = - 2*A_1 * COS_CIL * SIN_CIL**3 - 2 * A_2 * COS_CIL**3 * SIN_CIL + 2 * A_4 * COS_CIL**2 *SIN_CIL**2 # X Y
self.ccc[5-1] = A_5 * COS_CIL**2 - A_6 * COS_CIL * SIN_CIL # Y Z
self.ccc[6-1] = A_6 * SIN_CIL**2 - A_5 * COS_CIL * SIN_CIL # X Z
self.ccc[7-1] = A_7 * SIN_CIL**2 - A_8 * COS_CIL * SIN_CIL # X
self.ccc[8-1] = A_8 * COS_CIL**2 - A_7 * COS_CIL * SIN_CIL # Y
self.ccc[9-1] = A_9 # Z
self.ccc[10-1]= A_10
def switch_convexity(self):
self.ccc[5-1] = - self.ccc[5-1]
self.ccc[6-1] = - self.ccc[6-1]
self.ccc[9-1] = - self.ccc[9-1]
def set_sphere_from_focal_distances(self, ssour, simag, theta_grazing):
# todo: implement also sagittal bending
print("Theta grazing is: %f" %(theta_grazing))
theta = (numpy.pi/2) - theta_grazing
print("Theta is: %f" %(theta))
print('>>>> set_sphere_from_focal_distances: Angle with respect to the surface normal [rad]:',theta)
rmirr = ssour * simag * 2 / numpy.cos(theta) / (ssour + simag)
self.ccc[1-1] = 1.0 # X^2 # = 0 in cylinder case
self.ccc[2-1] = 1.0 # Y^2
self.ccc[3-1] = 1.0 # Z^2
self.ccc[4-1] = .0 # X*Y # = 0 in cylinder case
self.ccc[5-1] = .0 # Y*Z
self.ccc[6-1] = .0 # X*Z # = 0 in cylinder case
self.ccc[7-1] = .0 # X # = 0 in cylinder case
self.ccc[8-1] = .0 # Y
self.ccc[9-1] = -2 * rmirr # Z
self.ccc[10-1] = .0 # G
print(">>>> set_sphere_from_focal_distances: Spherical radius: %f \n"%(rmirr))
def set_sphere_from_curvature_radius(self,rmirr):
self.ccc[1-1] = 1.0 # X^2 # = 0 in cylinder case
self.ccc[2-1] = 1.0 # Y^2
self.ccc[3-1] = 1.0 # Z^2
self.ccc[4-1] = .0 # X*Y # = 0 in cylinder case
self.ccc[5-1] = .0 # Y*Z
self.ccc[6-1] = .0 # X*Z # = 0 in cylinder case
self.ccc[7-1] = .0 # X # = 0 in cylinder case
self.ccc[8-1] = .0 # Y
self.ccc[9-1] = -2 * rmirr # Z
self.ccc[10-1] = .0 # G
def set_ellipsoid_from_focal_distances(self, ssour, simag, theta_grazing):
theta = (numpy.pi/2) - theta_grazing
COSTHE = numpy.cos(theta)
SINTHE = numpy.sin(theta)
AXMAJ = ( ssour + simag )/2
AXMIN = numpy.sqrt( simag * ssour) * COSTHE
AFOCI = numpy.sqrt( AXMAJ**2 - AXMIN**2 )
ECCENT = AFOCI/AXMAJ
# ;C
# ;C The center is computed on the basis of the object and image positions
# ;C
YCEN = (ssour - simag) * 0.5 / ECCENT
ZCEN = -numpy.sqrt( 1 - YCEN**2 / AXMAJ**2) * AXMIN
# ;C
# ;C Computes now the normal in the mirror center.
# ;C
RNCEN = numpy.zeros(3)
RNCEN[1-1] = 0.0
RNCEN[2-1] = -2 * YCEN / AXMAJ**2
RNCEN[3-1] = -2 * ZCEN / AXMIN**2
# ;CALL NORM(RNCEN,RNCEN)
RNCEN = RNCEN / numpy.sqrt((RNCEN**2).sum())
# ;C
# ;C Computes the tangent versor in the mirror center.
# ;C
RTCEN = numpy.zeros(3)
RTCEN[1-1] = 0.0
RTCEN[2-1] = RNCEN[3-1]
RTCEN[3-1] = -RNCEN[2-1]
# txt = [txt, $
# String('Rev Ellipsoid a: ', $
# AXMAJ, Format='(A40,G20.15)'), $
# String('Rev Ellipsoid b: ', $
# AXMIN, Format='(A40,G20.15)'), $
# String('Rev Ellipsoid c=sqrt(a^2-b^2): ', $
# AFOCI, Format='(A40,G20.15)'), $
# String('Rev Ellipsoid focal discance c^2: ', $
# AFOCI^2, Format='(A40,G20.15)'), $
# String('Rev Ellipsoid excentricity: ', $
# ECCENT, Format='(A40,G20.15)'),$
# 'Mirror center at: '+vect2string([0,YCEN,ZCEN]), $
# 'Mirror normal: '+vect2string(RNCEN), $
# 'Mirror tangent: '+vect2string(RTCEN) ]
# ;C Computes now the quadric coefficient with the mirror center
# ;C located at (0,0,0) and normal along (0,0,1)
# ;C
A = 1 / AXMIN**2
B = 1 / AXMAJ**2
C = A
self.ccc[0] = A
self.ccc[1] = B * RTCEN[2-1]**2 + C * RTCEN[3-1]**2
self.ccc[2] = B * RNCEN[2-1]**2 + C * RNCEN[3-1]**2
self.ccc[3] = 0.0
self.ccc[4] = 2 * (B * RNCEN[2-1] * RTCEN[2-1] + C * RNCEN[3-1] * RTCEN[3-1])
self.ccc[5] = 0.0
self.ccc[6] = 0.0
self.ccc[7] = 0.0
self.ccc[8] = 2 * (B * YCEN * RNCEN[2-1] + C * ZCEN * RNCEN[3-1])
self.ccc[9] = 0.0
def set_paraboloid_from_focal_distance(self, SSOUR, SIMAG, theta_grazing, infinity_location):
# ;C
# ;C Computes the parabola
# ;C
theta = (numpy.pi/2) - theta_grazing
COSTHE = numpy.cos(theta)
SINTHE = numpy.sin(theta)
if infinity_location=="q":
PARAM = 2 * SSOUR * COSTHE**2
YCEN = -SSOUR * SINTHE**2
ZCEN = -2 * SSOUR * SINTHE * COSTHE
fact = -1.0
elif infinity_location == "p":
PARAM = 2 * SIMAG * COSTHE**2
YCEN = - SIMAG * SINTHE**2
ZCEN = - 2 * SIMAG * SINTHE * COSTHE
fact = 1.0
# txt = [txt, $
# String('Parabolois p: ', $
# PARAM, Format='(A40,G20.15)')]
self.ccc[0] = 1.0
self.ccc[1] = COSTHE**2
self.ccc[2] = SINTHE**2
self.ccc[3] = 0.0
self.ccc[4] = 2 * fact * COSTHE * SINTHE
self.ccc[5] = 0.0
self.ccc[6] = 0.0
self.ccc[7] = 0.0
self.ccc[8] = 2 * ZCEN * SINTHE - 2 * PARAM * COSTHE
self.ccc[9] = 0.0
def set_hyperboloid_from_focal_distances(self, SSOUR, SIMAG, theta_grazing):
theta = (numpy.pi/2) - theta_grazing
COSTHE = numpy.cos(theta)
SINTHE = numpy.sin(theta)
AXMAJ = (SSOUR - SIMAG)/2
# ;C
# ;C If AXMAJ > 0, then we are on the left branch of the hyp. Else we
# ;C are onto the right one. We have to discriminate between the two cases
# ;C In particular, if AXMAJ.LT.0 then the hiperb. will be convex.
# ;C
AFOCI = 0.5 * numpy.sqrt( SSOUR**2 + SIMAG**2 + 2 * SSOUR * SIMAG * numpy.cos(2 * theta) )
# ;; why this works better?
# ;; AFOCI = 0.5D0*SQRT( SSOUR^2 + SIMAG^2 - 2*SSOUR*SIMAG*COS(2*THETA) )
AXMIN = numpy.sqrt( AFOCI**2 - AXMAJ**2 )
ECCENT = AFOCI / numpy.abs( AXMAJ )
BRANCH = -1.0 #; branch=+1,-1
# ;C
# ;C Computes the center coordinates in the hiperbola RF.
# ;C
# ;IF AXMAJ GT 0.0D0 THEN BEGIN
# ; YCEN = ( AXMAJ - SSOUR )/ECCENT ; < 0
# ;ENDIF ELSE BEGIN
# ; YCEN = ( SSOUR - AXMAJ )/ECCENT ; > 0
# ;ENDELSE
if AXMAJ>0:
YCEN = (SSOUR - AXMAJ) / ECCENT
else:
YCEN = (SSOUR - AXMAJ) / ECCENT
#YCEN = numpy.abs( SSOUR - AXMAJ ) / ECCENT * BRANCH
        ZCEN_ARG = numpy.abs( YCEN**2 / AXMAJ**2 - 1.0)
from __future__ import print_function
"""Neuron class for making neuron object and extracting the features."""
import numpy as np
from numpy import mean, dot, transpose
from numpy import linalg as LA
import math
from scipy.sparse import csr_matrix, lil_matrix
from builtins import str
from copy import deepcopy
import matplotlib.pyplot as plt
from numpy.linalg import inv
import sys
#np.random.seed(0)
# -*- coding: utf-8 -*-
class Neuron(object):
"""Neuron Class
    This class represents the neuron by a list of `Node`s. Borrowed from the swc format, each node indicates a point on the neuron. Each node has a parent and children (at most two children), and the set of all nodes with their parents forms a tree structure; a connected graph without loops. The soma is represented by a few nodes, one of which is called the root node and is an ancestor of all the nodes in the neuron (including the other soma nodes). Notice that all nodes from index 0 to index `n_soma` in the `nodes_list` are soma.
    This class contains the attributes to calculate different features of the neuron. The input file can be an swc file or a list of nodes.
    All indexing for non-soma nodes (or nodes that are very close to the soma) comes from self.nodes_list.
    Attributes
    ----------
    n_soma : int
        The number of the nodes that represent the soma.
    n_node : int
        The number of all the nodes in the neuron.
    nodes_list : list of Node
        The list of all the nodes in the neuron.
    root : Node
        The node representing the root.
    location : array of shape = [3, n_node]
        Three-dimensional location of the nodes.
    parent_index : array of shape = [n_node]
        The index of the parent of each node in the nodes_list.
    child_index : array of shape = [2, n_node]
        first row: The index of the first child of the node (the ordering of the children is arbitrary).
        second row: nan if the node has at most one child, and the index of the second child if it is a branching node.
    branch_order : array of shape = [n_node]
        The number of children of the nodes. It can be any integer for the root (first element) and only 0, 1 or 2 for other nodes.
    ext_red_list : array of shape = [3, n_node]
        first row: end points and order-one nodes (for extension)
        second row: end points (for removing)
        third row: end points whose parents are order-one nodes (for extension)
    connection : array of shape = [n_node, n_node]
        The matrix of connectivity of the nodes. The element (i, j) of the matrix is not np.nan if node i is a descendant of node j. The value stored is the distance of j to its parent. It is useful for calculating the ratio of neural distance to Euclidean distance.
    frustum : array of shape = [n_node] !!!NOT IMPLEMENTED!!!
        The value of the frustum from the node toward its parent.
    branch_order : array of shape = [n_node]
        The number of children for each node. Notice that for nodes other than the root it should be 0, 1 or 2. For the root it can be any integer.
    rall_ratio : array of shape = [n_node] !!!NOT IMPLEMENTED!!!
        It is not nan only at branching nodes, where its value is the Rall ratio.
    distance_from_root : array of shape = [n_node]
        Euclidean distance to the root.
    distance_from_parent : array of shape = [n_node]
        Euclidean distance to the parent of the node.
    slope : array of shape = [n_node]
        Ratio of the Euclidean distance to the parent of the node over their diameter difference.
    branch_angle : array of shape [3, n_nodes]
        The angles at the branching nodes: the first row is the angle between the two outward segments at the branching point; the second and third rows are the angles between each outward segment and the previous segment at the branching, in arbitrary order (nan at other nodes).
    angle_global : array of shape = [n_node]
        The angle between the line linking the node to the root and the line linking it to its parent.
    local_angle : array of shape = [n_node]
        The angle between the lines linking the node to its parent and to its child (nan otherwise).
References
----------
.. [1] R.Farhoodi, K.P.Kording, "Generating Neuron Morphologies using naive Bayes MCMC"
"""
def __init__(self, file_format = None, input_file = None):
"""
Default constructor.
Parameters
-----------
file_format : string, optional (default=None)
            - In 'swc' the swc file is given and the output is a Neuron class
            with all of its attributes.
            - In 'swc without attributes' the swc file is given and the output
            is a Neuron class without its attributes. It is useful for the case
            that only the nodes are important, e.g. fast visualization of the
            neuron.
            - In 'only list of nodes' the list of all the nodes of the neuron is
            given.
            - In 'Matrix of swc' a numpy array of the shape n*7 is presented,
            and the Neuron will be made accordingly.
        input_file : string or list
            - In 'swc' it contains a .swc file
            - In 'swc without attributes' it contains a .swc file
            - In 'only list of nodes' it contains the list of all the nodes of
            the neuron.
            If it is a string, the swc file will be read from that address;
            if it is a list, the elements of the list should be objects of the Node class
            and the corresponding Tree is created.
"""
if(file_format == 'swc'): # swc is given
self.read_swc(input_file)
self.ratio_red_to_ext = 1.
self.n_node = len(self.nodes_list)
#self.set_n_soma_n_node()
self.set_parent()
self.parent_index = self.parent_index.astype(int)
#self.set_loc_diam()
#self.fit()
if (file_format=='eswc'):
self.read_eswc(input_file)
# self.parent_indexs = int(self.parent_indexs)
nodes = self.cauclate_child_num(self.nodes_list, self.parent_indexs)
print(nodes)
if(file_format == 'swc without attributes'):
self.read_swc(input_file)
self.set_parent()
self.parent_index = self.parent_index.astype(int)
if(file_format == 'only list of nodes'):
self.nodes_list = input_file
self.root = self.find_root(self.nodes_list[0])
self.set_n_soma_n_node()
self.set_parent()
self.parent_index = self.parent_index.astype(int)
self.set_loc_diam()
self.set_location()
self.set_branch_order()
if(file_format == 'Matrix of swc'):
# the n*7 array is given.
self.read_swc_matrix(input_file)
self.set_parent()
self.parent_index = self.parent_index.astype(int) # transform data type -->int
self.set_branch_order()
            self.set_distance_from_parent() # Euclidean distance from each child to its parent
#self.set_sholl()
def __str__(self):
"""
        Description of the neuron.
        """
        return ("Neuron found with " + str(self.n_node) + " nodes and " +
                str(self.n_soma) + " nodes representing the soma.")
def fit(self):
"""
dependency:
self.nodes_list
self.n_soma
self.location
self.diameter
self.parent_index
self.child_index
self.root
self.n_nodes
output attributes are:
self.branch_order
self.connection
self.ext_red_list
self.rall_ratio
self.distance_from_root
self.distance_from_parent
self.branch_angle
self.global_angle
self.local_angle
self.frustum
"""
self.set_branch_order()
self.set_distance_from_root()
self.set_distance_from_parent()
# self.set_connection2() ----not used-------
#self.set_rall_ratio()
self.set_branch_angle()
self.set_global_angle()
self.set_local_angle()
#self.sholl_r = np.array([0]) # the position of the jumps for sholl analysis
#self.sholl_n = np.array([0]) # the value at the jumping (the same size as self.sholl_x)
#self.set_sholl()
self.ratio_red_to_ext = 1.
self.set_ext_red_list()
# self.set_features() -----not used---------
def set_n_soma_n_node(self):
self.n_soma = 0
for n in self.nodes_list:
            if n.type == 'soma':
self.n_soma += 1
if(self.n_soma == 0): # for no soma representation
self.n_soma = 1
self.n_node = len(self.nodes_list)
def set_features(self):
self.features = {}
self.features['Nnodes'] = np.array([self.n_node - self.n_soma])
#self.features['asymetric']
(num_branches,) = np.where(self.branch_order[self.n_soma:] == 2)
self.features['Nbranch'] = np.array([len(num_branches)])
self.features['initial_segments'] = np.array([len(self.root.children)])
self.features['global_angle'] = np.pi - self.global_angle[self.n_soma:]
#self.features['diameter'] = self.diameter[self.n_soma:]
self.features['distance_from_parent'] = self.distance_from_parent[self.n_soma:]
self.features['distance_from_root'] = self.distance_from_root[self.n_soma:]
self.features['ratio_euclidian_neuronal'] = np.nansum(self.connection[self.n_soma:,self.n_soma:],axis = 1)/self.distance_from_root[self.n_soma:]
x = np.abs(self.branch_angle[0,self.n_soma:])
self.features['branch_angle'] = x[~np.isnan(x)]
x = self.local_angle[self.n_soma:]
self.features['local_angle'] = x[~np.isnan(x)]
if(len(self.features['local_angle'])==0):
self.features['local_angle'] = np.array([0])
if(len(self.features['branch_angle']) == 0):
self.features['branch_angle'] = np.array([0])
self.features['discrepancy_space'] = np.array([self.discrepancy(10.,10.,10.)])
#self.features['repellent'] = np.array([self.repellent(10.,10.,10.)])
self.features['curvature'] = self.set_curvature()
important_node = self.get_index_main_nodes()
parent_important = self.parent_index_for_node_subset(important_node)
(neural, euclidan) = self.get_neural_and_euclid_lenght(important_node, parent_important)
self.features['neural_important'] = neural
self.features['euclidian_important'] = euclidan
self.features['ratio_neural_euclidian_important'] = neural/euclidan
self.features['branch_angle_segment'] = self.set_branch_angle_segment(important_node, parent_important)
def get_index_main_nodes(self):
"""
        Returning the index of end points and branching points.
Returns
-------
important_node: numpy array
the index of main points.
"""
(branch_index, ) = np.where(self.branch_order[self.n_soma:] == 2)
(end_nodes, ) = np.where(self.branch_order[self.n_soma:] == 0)
        important_node = np.append(branch_index, end_nodes)
#!/usr/local/bin/python
#
# sound-card APRS decoder
#
# <NAME>, AB1HL
#
import numpy
import wave
import weakaudio
import weakutil
import time
import scipy
import sys
import os
import math
from scipy.signal import lfilter, filtfilt
import numpy.lib.stride_tricks
# optimizable tuning parameters.
smoothwindow = 2.0 # symbols, 1.0 0.8 0.7 1.7(for hamming smoother)
slicewindow = 25.0 # symbols, 20 30 20
tonegain = 2.0 # 2.0 (useful for track 02)
advance = 8.0 # symbols 1.0 8.0
# http://gordoncluster.wordpress.com/2014/02/13/python-numpy-how-to-generate-moving-averages-efficiently-part-2/
def smooth(values, window):
#weights = numpy.repeat(1.0, window)/window
weights = numpy.hamming(window)
    sma = numpy.convolve(values, weights, 'valid')
import numpy as np
from .get_number_default_boxes import get_number_default_boxes
from utils.bbox_utils import center_to_corner, corner_to_center
def generate_default_boxes_for_feature_map(
feature_map_size,
image_size,
offset,
scale,
next_scale,
aspect_ratios,
variances,
extra_box_for_ar_1,
clip_boxes=True,
):
""" Generates a 4D Tensor representing default boxes.
Note:
- The structure of a default box is [xmin, ymin, xmax, ymax]
Args:
- feature_map_size: The size of the feature map. (must be square)
- image_size: The size of the input image. (must be square)
- offset: The offset for the center of the default boxes. The order is (offset_x, offset_y)
- scale: The current scale of the default boxes.
- next_scale: The next scale of the default boxes.
- aspect_ratios: A list of aspect ratios representing the default boxes.
- variance: ...
- extra_box_for_ar_1: Whether to add an extra box for default box with aspect ratio 1.
Returns:
- A 4D numpy array of shape (feature_map_size, feature_map_size, num_default_boxes, 8)
Raises:
- offset does not have a len of 2
Code References:
- https://github.com/pierluigiferrari/ssd_keras/blob/master/keras_layers/keras_layer_AnchorBoxes.py
"""
assert len(offset) == 2, "offset must be of len 2"
grid_size = image_size / feature_map_size
offset_x, offset_y = offset
num_default_boxes = get_number_default_boxes(
aspect_ratios,
extra_box_for_ar_1=extra_box_for_ar_1
)
# get all width and height of default boxes
wh_list = []
for ar in aspect_ratios:
if ar == 1.0 and extra_box_for_ar_1:
wh_list.append([
image_size * np.sqrt(scale * next_scale) * np.sqrt(ar),
image_size * np.sqrt(scale * next_scale) * (1 / np.sqrt(ar)),
])
wh_list.append([
image_size * scale * np.sqrt(ar),
image_size * scale * (1 / np.sqrt(ar)),
])
    wh_list = np.array(wh_list, dtype=float)
# get all center points of each grid cells
    cx = np.linspace(offset_x * grid_size, image_size - (offset_x * grid_size), feature_map_size)
"""Class for calculating the strain energy of ellipsoidal inclusions."""
import numpy as np
from cemc_cpp_code import PyEshelbyTensor, PyEshelbySphere
from cemc.tools import rot_matrix, rotate_tensor, to_mandel, to_full_tensor
from cemc.tools import rot_matrix_spherical_coordinates
from cemc.tools import rotate_rank4_mandel
from itertools import product
from scipy.optimize import minimize
class StrainEnergy(object):
"""Class for calculating strain energy of ellipsoidal inclusions.
:param list aspect: Aspect ratio of the ellipsoid.
NOTE: The convention aspect[0] >= aspect[1] >= aspect[2]
is used. If the ellipsoid is oriented in a different way,
it has to be rotated after.
:param misfit: Misfit strain of the inclusion
:type misfit: 3x3 ndarray or list of length 6 (Mandel notation)
:param float poisson: Poisson ratio
"""
def __init__(self, aspect=[1.0, 1.0, 1.0],
misfit=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], poisson=0.3):
"""Initialize Strain energy class."""
aspect = np.array(aspect)
self.eshelby = StrainEnergy.get_eshelby(aspect, poisson)
self.misfit = np.array(misfit)
if len(self.misfit.shape) == 2:
self.misfit = to_mandel(self.misfit)
self.poisson = poisson
@staticmethod
def get_eshelby(aspect, poisson):
"""Return the Eshelby tensor.
:param float poisson: Poisson ratio
"""
tol = 1E-6
if np.all(np.abs(aspect-aspect[0]) < tol):
eshelby = PyEshelbySphere(aspect[0], aspect[0], aspect[0], poisson)
else:
eshelby = PyEshelbyTensor(aspect[0], aspect[1], aspect[2],
poisson)
return eshelby
def make_isotropic(self, C):
"""Convert the elastic tensor to an isotropic tensor by
averaging.
:param np.ndarray C: Elastic tensor in Mandel format
"""
# Calculate Bulk and shear modulus according to
# https://wiki.materialsproject.org/Elasticity_calculations
B = C[0, 0] + C[1, 1] + C[2, 2] + \
2*(C[0, 1] + C[1, 2] + C[0, 2])
B /= 9.0
# NOTE: 0.5 on the last line is because the tensor is assumed
# by given by its Mandel representation and the webpage listed
# above assumes Voigt representation
G = C[0, 0] + C[1, 1] + C[2, 2] - \
(C[0, 1] + C[1, 2] + C[0, 2]) + \
3*0.5*(C[3, 3] + C[4, 4] + C[5, 5])
G /= 15.0
print(B, G)
isotropic = np.zeros((6, 6))
isotropic[0, 0] = isotropic[1, 1] = isotropic[2, 2] = B + 4*G/3
isotropic[0, 1] = isotropic[0, 2] = \
isotropic[1, 0] = isotropic[1, 2] = \
isotropic[2, 0] = isotropic[2, 1] = B - 2*G/3
isotropic[3, 3] = isotropic[4, 4] = isotropic[5, 5] = \
2*G
return isotropic
def _check_ellipsoid(self, ellipsoid):
"""Check that the ellipsoid arguments is correct.
:param dict ellipsoid: Dictionary describing the ellipsoid
"""
required_keys = ["aspect"]
for key in ellipsoid.keys():
if key not in required_keys:
msg = "The ellipsoid dictionary has to "
msg += "include {}".format(required_keys)
raise ValueError(msg)
if len(ellipsoid["aspect"]) != 3:
raise ValueError("aspect ratio should be a list/array of length 3")
def equivalent_eigenstrain(self, C_matrix=None, C_prec=None,
scale_factor=None):
"""Compute the equivalent eigenstrain.
:param ndarray C_matrix: 6x6 elastic tensor of the matrix material
:param ndarray C_prec: 6x6 elastic tensor of the inclusion
        :param float scale_factor: The elastic tensor of the inclusion is assumed to
be scale_factor*elast_matrix
"""
if C_matrix is None:
raise ValueError("Elastic tensor for the matrix material "
"must be passed!")
if C_prec is None and scale_factor is not None:
C_prec = scale_factor*C_matrix
if C_prec is None:
raise ValueError("Elastic tensor or a scale factor for "
"the precipitating material must be "
"passed")
S = np.array(self.eshelby.aslist())
A = (C_prec - C_matrix).dot(S) + C_matrix
b = C_prec.dot(self.misfit)
return np.linalg.solve(A, b)
def stress(self, equiv_strain, C_matrix=None):
"""Compute the stress tensor.
:param list equiv_strain: Equivalent eigenstrain in Mandel notation
:param C_matrix: 6x6 elastic tensor of the matrix material (Mandel)
"""
S = np.array(self.eshelby.aslist())
sigma = C_matrix.dot(S.dot(equiv_strain) - equiv_strain)
return sigma
def strain_energy(self, C_matrix=None, C_prec=None,
scale_factor=None):
"""Compute the strain energy per volume.
:param ndarray C_matrix: 6x6 elastic tensor of the matrix material
:param ndarray C_prec: 6x6 elastic tensor of the precipitate material
:param float scale_factor: If given and C_prec=None,
C_pref = scale_factor*C_matrix
"""
eq_strain = self.equivalent_eigenstrain(
C_matrix=C_matrix, C_prec=C_prec,
scale_factor=scale_factor)
sigma = self.stress(eq_strain, C_matrix)
# Off diagonal elements should be multiplied by sqrt(2)
strain = self.misfit.copy()
return -0.5*sigma.dot(strain)
def is_isotropic(self, matrix, mat_type="mandel"):
"""Check tensor represent an isotropic material."""
factor = 1.0
if mat_type == "mandel":
factor = 2.0
shear = matrix[3, 3]/factor
if not np.allclose(np.diag(matrix)[3:]/factor, shear):
return False
if not np.allclose(np.diag(matrix)[:3], matrix[0, 0]):
return False
if not np.allclose(matrix[:3, 3:], 0.0):
return False
if not np.allclose(matrix[3:, :3], 0.0):
return False
        # Check that all off-diagonal elements in the upper
# 3x3 are the same
for indx in product([0, 1, 2], repeat=2):
if indx[0] == indx[1]:
continue
if abs(matrix[indx[0], indx[1]] - matrix[0, 1]) > 1E-4:
return False
# At this point we know that the material is
# isotropic. Just apply one final consistency check
# bulk_mod = matrix[0, 0] - 4.0*shear/3.0
# expected = bulk_mod - 2.0*shear/3.0
# print(matrix[0,1 ], factor*expected)
# assert abs(matrix[0, 1] - factor*expected) < 1E-6
return True
def explore_orientations(self, ellipsoid, C_matrix, step=10,
fname="", theta_ax="y", phi_ax="z"):
"""Explore the strain energy as a function of ellipse orientation.
:param dict ellipsoid: Dictionary with information of the ellipsoid
The format should be {"aspect": [1.0, 1.0, 1.0], "C_prec": ...,
"scale_factor": 1.0}
C_prec is the elastic tensor of the precipitate material.
If not given, scale_factor has to be given, and the elastic
tensor of the precipitate material is taken as this factor
multiplied by the elastic tensor of the matrix material
:param numpy.ndarray C_matrix: Elastic tensor of the matrix material
:param float step: Angle step size in degree
:param str fname: If given the result of the exploration is
stored in a csv file with this filename
:param str theta_ax: The first rotation is performed
around this axis
:param str phi_ax: The second rotation is performed around this
axis (in the new coordinate system after the first rotation
is performed)
"""
from itertools import product
#self._check_ellipsoid(ellipsoid)
scale_factor = ellipsoid.get("scale_factor", None)
C_prec = ellipsoid.get("C_prec", None)
if C_prec is None:
C_prec = scale_factor*C_matrix
# Convert the tensors to their isotropic representation
C_matrix = self.make_isotropic(C_matrix)
C_prec = self.make_isotropic(C_prec)
aspect = np.array(ellipsoid["aspect"])
self.eshelby = StrainEnergy.get_eshelby(aspect, self.poisson)
result = []
misfit_orig = to_full_tensor(self.misfit)
theta = np.arange(0.0, np.pi, step*np.pi / 180.0)
phi = np.arange(0.0, 2.0 * np.pi, step * np.pi / 180.0)
theta = np.append(theta, [np.pi])
phi = np.append(phi, [2.0*np.pi])
C_matrix_orig = C_matrix.copy()
C_prec_orig = C_prec.copy()
for ang in product(theta, phi):
th = ang[0]
p = ang[1]
theta_deg = th*180/np.pi
phi_deg = p*180/np.pi
seq = [(theta_ax, -theta_deg), (phi_ax, -phi_deg)]
matrix = rot_matrix(seq)
#matrix = rot_matrix_spherical_coordinates(p, th)
# Rotate the strain tensor
strain = rotate_tensor(misfit_orig, matrix)
self.misfit = to_mandel(strain)
# Rotate the elastic tensor of the matrix material
C_matrix = rotate_rank4_mandel(C_matrix_orig, matrix)
# Rotate the elastic tensor of the precipitate material
C_prec = rotate_rank4_mandel(C_prec_orig, matrix)
if abs(p) < 1E-3 and (abs(th-np.pi/4.0) < 1E-3 or abs(th-3.0*np.pi/4.0) < 1E-3):
print(self.eshelby.aslist())
energy = self.strain_energy(C_matrix=C_matrix, C_prec=C_prec)
res = {"energy": energy, "theta": th, "phi": p}
a = matrix.T.dot([1.0, 0.0, 0.0])
b = matrix.T.dot([0.0, 1.0, 0.0])
c = matrix.T.dot([0.0, 0.0, 1.0])
res["half_axes"] = {"a": a, "b": b, "c": c}
res["misfit"] = self.misfit
result.append(res)
if fname != "":
self.save_orientation_result(result, fname)
# Sort the result from low energy to high energy
energies = [res["energy"] for res in result]
sorted_indx = np.argsort(energies)
result = [result[indx] for indx in sorted_indx]
# Reset the strain
self.misfit = to_mandel(misfit_orig)
return result
def save_orientation_result(self, result, fname):
"""Store the orientation result.
:param list result: List with result from exploration
each item is a dictionary with the containing keys
theta, phi and energy.
:param str fname: Filename (csv-file)
"""
theta = [res["theta"] for res in result]
phi = [res["phi"] for res in result]
energy = [res["energy"] for res in result]
data = np.vstack((theta, phi, energy)).T
np.savetxt(fname, data, header="Polar angle, Azm. angle, Energy",
delimiter=",")
print("Orientation results written to {}".format(fname))
def log(self, msg):
"""Log message to screen."""
print(msg)
def plot_explore_result(self, explore_result, latex=False):
"""Plot a diagonistic plot over the exploration result."""
from matplotlib import pyplot as plt
from scipy.interpolate import griddata
energy = []
phi = []
theta = []
for res in explore_result:
energy.append(res["energy"]*1000.0)
phi.append(res["phi"])
theta.append(res["theta"])
th_fine = np.linspace(0.0, np.pi, 90)
phi_fine = np.linspace(0.0, 2.0*np.pi, 90)
phi_min = np.min(phi_fine) * 180.0 / np.pi
phi_max = np.max(phi_fine) * 180.0 / np.pi
theta_min = np.min(th_fine) * 180.0 / np.pi
theta_max = np.max(th_fine) * 180.0 / np.pi
# Create plot with griddata
        data = np.vstack((theta, phi))
"""
======================================
Generalized hyperfine component fitter
======================================
.. moduleauthor:: <NAME> <<EMAIL>>
Module API
^^^^^^^^^^
"""
import numpy as np
from astropy import units as u
from astropy import constants
import copy
from . import model
from . import fitter
# should be imported in the future
ckms = 2.99792458e5
hoverk = (constants.h.cgs/constants.k_B.cgs).value
class hyperfinemodel(object):
"""
Wrapper for the hyperfine model class. Specify the offsets and relative
strengths when initializing, then you've got yourself a hyperfine modeler.
There are a wide variety of different fitter attributes, each designed to
free a different subset of the parameters. Their purposes should be
evident from their names.
"""
def __init__(self, line_names, voff_lines_dict, freq_dict,
line_strength_dict, relative_strength_total_degeneracy):
"""
Initialize the various parameters defining the hyperfine transitions
Parameters
----------
line_names: list
list of the line names to be used as indices for the dictionaries
voff_lines_dict: dict
a linename:v_off dictionary of velocity offsets for the hyperfine
components. Technically, this is redundant with freq_dict
freq_dict: dict
            frequencies of the individual transitions
line_strength_dict: dict
Relative strengths of the hyperfine components, usually determined
by their degeneracy and Einstein A coefficients
"""
self.line_names = tuple(line_names)
self.voff_lines_dict = voff_lines_dict
self.freq_dict = freq_dict
self.line_strength_dict = line_strength_dict
self.relative_strength_total_degeneracy = relative_strength_total_degeneracy
self.fitter = model.SpectralModel(self,4,
parnames=['Tex','tau','center','width'],
parlimited=[(False,False), (True,False), (False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0)],
# specify the parameter names (LaTeX is OK)
shortvarnames=("T_{ex}","\\tau","v","\\sigma"),
guess_types=['amplitude+2.73', 1.0, 'center', 'width'],
fitunit='Hz')
self.nlines = len(line_names)
self.varyhf_fitter = model.SpectralModel(self.hyperfine_varyhf,3+self.nlines,
parnames=['Tex','center','width']+['tau%s' % k for k in self.line_names],
parlimited=[(False,False), (False,False), (True,False)]
+ [(True,False),]*self.nlines,
parlimits=[(0,0), (0,0), (0,0)]+[(0,0),]*self.nlines,
shortvarnames=("T_{ex}","v","\\sigma") +
tuple(("\\tau(\\mathrm{%s})" % k for k in self.line_names)),
fitunit='Hz')
self.varyhf_amp_fitter = model.SpectralModel(self.hyperfine_varyhf_amp, 2+self.nlines,
parnames=['center','width']+['amp%s' % k for k in self.line_names],
parlimited=[(False,False), (True,False)] + [(True,False),]*self.nlines,
parlimits=[(0,0), (0,0)]+[(0,0),]*self.nlines,
shortvarnames=("v","\\sigma") +
tuple(("amp(\\mathrm{%s})" % k for k in self.line_names)),
fitunit='Hz')
self.varyhf_amp_width_fitter = model.SpectralModel(self.hyperfine_varyhf_amp_width,1+self.nlines*2,
parnames=['center']+['amp%s' % k for k in self.line_names]+['width%s' % k for k in self.line_names],
parlimited=[(False,False)] + [(True,False),]*self.nlines + [(True,False)]*self.nlines,
parlimits=[(0,0)]+[(0,0),]*self.nlines*2,
shortvarnames=(("v",) +
tuple(("amp(\\mathrm{%s})" % k for k in self.line_names)) +
tuple(("\\sigma(\\mathrm{%s})" % k for k in self.line_names))),
# specify the parameter names (TeX is OK)
fitunit='Hz' )
self.vheight_fitter = model.SpectralModel(fitter.vheightmodel(self),5,
parnames=['height','Tex','tau','center','width'],
parlimited=[(False,False), (False,False), (True,False), (False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0), (0,0)],
shortvarnames=("H","T_{ex}","\\tau","v","\\sigma"), # specify the parameter names (TeX is OK)
guess_types=[0.0, 'amplitude+2.73', 1.0, 'center', 'width'],
fitunit='Hz' )
self.background_fitter = model.SpectralModel(self.hyperfine_addbackground,5,
parnames=['Tbackground','Tex','tau','center','width'],
parlimited=[(True,False), (False,False), (True,False), (False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0), (0,0)],
shortvarnames=('T_{BG}',"T_{ex}","\\tau","v","\\sigma"), # specify the parameter names (TeX is OK)
guess_types=[2.73, 'amplitude+2.73', 1.0, 'center', 'width'],
fitunit='Hz')
self.background_contsub_fitter = model.SpectralModel(self.hyperfine_background,5,
parnames=['Tbackground','Tex','tau','center','width'],
parlimited=[(True,False), (False,False), (True,False), (False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0), (0,0)],
shortvarnames=('T_{BG}',"T_{ex}","\\tau","v","\\sigma"), # specify the parameter names (TeX is OK)
guess_types=[0.0, 'amplitude+2.73', 1.0, 'center', 'width'],
fitunit='Hz')
self.ampfitter = model.SpectralModel(self.hyperfine_amp,3,
parnames=['amp','center','width'],
parlimited=[(False,False), (False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0)],
shortvarnames=("amp","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunit='Hz' )
self.taufitter = model.SpectralModel(self.hyperfine_tau,3,
parnames=['tau','center','width'],
parlimited=[(True,False), (False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0)],
shortvarnames=(r'\tau',"v","\\sigma"), # specify the parameter names (TeX is OK)
guess_types=[1.0, 'center', 'width'],
fitunit='Hz')
self.totaltaufitter = model.SpectralModel(self.hyperfine_tau_total,3,
parnames=['tau','center','width'],
parlimited=[(True,False), (False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0)],
shortvarnames=(r'\tau',"v","\\sigma"), # specify the parameter names (TeX is OK)
guess_types=[1.0, 'center', 'width'],
fitunit='Hz')
def __copy__(self):
# http://stackoverflow.com/questions/1500718/what-is-the-right-way-to-override-the-copy-deepcopy-operations-on-an-object-in-p
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
# A deep copy of the hyperfine model is OK to just do regular copies of
# all attributes, since none of them are meant to be modified
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def __call__(self, *args, **kwargs):
"""
Generate a model spectrum given an excitation temperature, optical depth, offset velocity, and velocity width.
"""
return self.hyperfine(*args,**kwargs)
def hyperfine_amp(self, xarr, amp=None, xoff_v=0.0, width=1.0,
return_hyperfine_components=False, Tbackground=2.73,
Tex=5.0, tau=0.1):
"""
wrapper of self.hyperfine with order of arguments changed
"""
return self.hyperfine(xarr, amp=amp, Tex=Tex, tau=tau, xoff_v=xoff_v,
width=width, return_hyperfine_components=return_hyperfine_components,
Tbackground=Tbackground)
def hyperfine_tau(self, xarr, tau, xoff_v, width, **kwargs):
""" same as hyperfine, but with arguments in a different order, AND
tau is returned instead of exp(-tau)"""
return self.hyperfine(xarr, tau=tau, xoff_v=xoff_v, width=width,
return_tau=True, **kwargs)
def hyperfine_tau_total(self, xarr, tau_total, xoff_v, width, **kwargs):
""" same as hyperfine, but with arguments in a different order, AND
tau is returned instead of exp(-tau), AND the *peak* tau is used"""
return self.hyperfine(xarr, tau_total=tau_total, xoff_v=xoff_v, width=width,
return_tau=True, **kwargs)
def hyperfine_varyhf(self, xarr, Tex, xoff_v, width, *args, **kwargs):
""" Wrapper of hyperfine for using a variable number of peaks with specified
tau """
return self.hyperfine(xarr, Tex=Tex, xoff_v=xoff_v, width=width,
tau=dict(zip(self.line_names,args)),
vary_hyperfine_tau=True, **kwargs)
def hyperfine_varyhf_amp(self, xarr, xoff_v, width, *args, **kwargs):
""" Wrapper of hyperfine for using a variable number of peaks with specified
amplitude (rather than tau). Uses some opaque tricks: Tex is basically ignored,
and return_tau means you're actually returning the amplitude,
which is just passed in as tau"""
return self.hyperfine(xarr, xoff_v=xoff_v, width=width,
tau=dict(zip(self.line_names,args)),
vary_hyperfine_tau=True,
return_tau=True, **kwargs)
def hyperfine_varyhf_amp_width(self, xarr, xoff_v, *args, **kwargs):
""" Wrapper of hyperfine for using a variable number of peaks with specified
amplitude (rather than tau). Uses some opaque tricks: Tex is basically ignored,
and return_tau means you're actually returning the amplitude,
which is just passed in as tau"""
if len(args) % 2 != 0:
raise ValueError("Incorrect number of arguments for varying amplitude"
" and width. Need N amplitudes, N widths.")
nargs = int(len(args)/2)
return self.hyperfine(xarr, xoff_v=xoff_v,
tau=dict(zip(self.line_names,args[:nargs])),
width=dict(zip(self.line_names,args[nargs:])),
vary_hyperfine_tau=True,
vary_hyperfine_width=True,
return_tau=True, **kwargs)
def hyperfine_addbackground(self, xarr, Tbackground=2.73, Tex=5.0, tau=0.1,
xoff_v=0.0, width=1.0, return_tau=False,
**kwargs):
"""
Identical to hyperfine, but adds Tbackground as a constant continuum
level
"""
if return_tau:
raise ValueError("Cannot return tau when adding a continuum background.")
return (self.hyperfine(xarr, Tbackground=Tbackground, Tex=Tex, tau=tau,
xoff_v=xoff_v, width=width, return_tau=False,
**kwargs)
+ Tbackground)
def hyperfine_background(self, xarr, Tbackground=2.73, Tex=5.0, tau=0.1,
xoff_v=0.0, width=1.0, return_tau=False,
**kwargs):
"""
Identical to hyperfine, but with Tbackground free. Assumes already
background-subtracted
"""
if return_tau:
raise ValueError("Cannot return tau when adding a continuum background.")
return self.hyperfine(xarr, Tbackground=Tbackground, Tex=Tex, tau=tau,
xoff_v=xoff_v, width=width, return_tau=False,
**kwargs)
def hyperfine(self, xarr, Tex=5.0, tau=0.1, xoff_v=0.0, width=1.0,
return_hyperfine_components=False, Tbackground=2.73, amp=None,
return_tau=False, tau_total=None, vary_hyperfine_tau=False,
vary_hyperfine_width=False):
"""
Generate a model spectrum given an excitation temperature, optical
depth, offset velocity, and velocity width.
Parameters
----------
return_tau : bool
If specified, return just the tau spectrum, ignoring Tex
tau_total : float, optional
If specified, use this *instead of tau*, and it tries to normalize
to the *peak of the line*
vary_hyperfine_tau : bool
If set to true, allows the hyperfine transition amplitudes to vary and
does not use the line_strength_dict. If set, `tau` must be a dict
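Examples
--------
Hypothetical calls on an instance ``mod`` (``xarr`` is assumed to be a
spectroscopic axis convertible to Hz)::
    spec = mod.hyperfine(xarr, Tex=6.0, tau=0.5, xoff_v=0.0, width=1.0)
    tau_only = mod.hyperfine(xarr, tau=0.5, xoff_v=0.0, width=1.0,
                             return_tau=True)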
"""
# Convert X-units to frequency in Hz
try:
xarr = xarr.as_unit('Hz').value
except AttributeError:
xarr = xarr.to('Hz').value
# Ensure parameters are scalar / have no extra dims
if not np.isscalar(Tex): Tex = Tex.squeeze()
if not np.isscalar(xoff_v): xoff_v = xoff_v.squeeze()
if vary_hyperfine_width:
if not isinstance(width, dict):
raise TypeError("If varying the amplitude of the hyperfine lines, must specify tau as a dict")
else:
if not np.isscalar(width): width = width.squeeze()
if vary_hyperfine_tau:
if not isinstance(tau, dict):
raise TypeError("If varying the amplitude of the hyperfine lines, must specify tau as a dict")
else:
if not np.isscalar(tau): tau = tau.squeeze()
# Generate an optical depth spectrum as a function of the X-axis
tau_nu_cumul = np.zeros(len(xarr))
# Error check: inputting NaNs results in meaningless output - return without computing a model
if (np.any(np.isnan((Tex,xoff_v))) or
((not vary_hyperfine_tau) and np.isnan(tau)) or
((not vary_hyperfine_width) and np.isnan(width))):
if return_hyperfine_components:
return [tau_nu_cumul] * len(self.line_names)
else:
return tau_nu_cumul
if tau_total is not None:
tau = 1
components = []
for linename in self.line_names:
voff_lines =
|
np.array(self.voff_lines_dict[linename])
|
numpy.array
|
import numpy as np
import xarray as xr
from ark.segmentation import signal_extraction
from ark.utils import synthetic_spatial_datagen
from skimage.measure import regionprops
def test_positive_pixels_extraction():
# sample params
size_img = (1024, 1024)
cell_radius = 10
nuc_radius = 3
memb_thickness = 5
nuc_signal_strength = 10
memb_signal_strength = 100
nuc_uncertainty_length = 0
memb_uncertainty_length = 0
# generate sample segmentation mask and channel data
sample_segmentation_mask, sample_channel_data = \
synthetic_spatial_datagen.generate_two_cell_chan_data(
size_img=size_img,
cell_radius=cell_radius,
nuc_radius=nuc_radius,
memb_thickness=memb_thickness,
nuc_signal_strength=nuc_signal_strength,
memb_signal_strength=memb_signal_strength,
nuc_uncertainty_length=nuc_uncertainty_length,
memb_uncertainty_length=memb_uncertainty_length
)
# extract the cell regions for cells 1 and 2
coords_1 = np.argwhere(sample_segmentation_mask == 1)
coords_2 = np.argwhere(sample_segmentation_mask == 2)
# test default extraction (threshold == 0)
channel_counts_1 = signal_extraction.positive_pixels_extraction(
cell_coords=coords_1,
image_data=xr.DataArray(sample_channel_data)
)
channel_counts_2 = signal_extraction.positive_pixels_extraction(
cell_coords=coords_2,
image_data=xr.DataArray(sample_channel_data)
)
# test signal counts for different channels
assert np.all(channel_counts_1 == [25, 0])
assert np.all(channel_counts_2 == [0, 236])
# test with new threshold == 10
kwargs = {'threshold': 10}
channel_counts_1 = signal_extraction.positive_pixels_extraction(
cell_coords=coords_1,
image_data=xr.DataArray(sample_channel_data),
**kwargs
)
channel_counts_2 = signal_extraction.positive_pixels_extraction(
cell_coords=coords_2,
image_data=xr.DataArray(sample_channel_data),
**kwargs
)
assert np.all(channel_counts_1 == [0, 0])
assert np.all(channel_counts_2 == [0, 236])
# test for multichannel thresholds
kwargs = {'threshold': np.array([0, 10])}
channel_counts_1 = signal_extraction.positive_pixels_extraction(
cell_coords=coords_1,
image_data=xr.DataArray(sample_channel_data),
**kwargs
)
channel_counts_2 = signal_extraction.positive_pixels_extraction(
cell_coords=coords_2,
image_data=xr.DataArray(sample_channel_data),
**kwargs
)
assert np.all(channel_counts_1 == [25, 0])
assert np.all(channel_counts_2 == [0, 236])
def test_center_weighting_extraction():
# sample params
size_img = (1024, 1024)
cell_radius = 10
nuc_radius = 3
memb_thickness = 5
nuc_signal_strength = 10
memb_signal_strength = 10
nuc_uncertainty_length = 1
memb_uncertainty_length = 1
# generate sample segmentation mask and channel data
sample_segmentation_mask, sample_channel_data = \
synthetic_spatial_datagen.generate_two_cell_chan_data(
size_img=size_img,
cell_radius=cell_radius,
nuc_radius=nuc_radius,
memb_thickness=memb_thickness,
nuc_signal_strength=nuc_signal_strength,
memb_signal_strength=memb_signal_strength,
nuc_uncertainty_length=nuc_uncertainty_length,
memb_uncertainty_length=memb_uncertainty_length
)
# extract the cell regions for cells 1 and 2
coords_1 = np.argwhere(sample_segmentation_mask == 1)
coords_2 = np.argwhere(sample_segmentation_mask == 2)
# extract the centroids and coords
region_info = regionprops(sample_segmentation_mask.astype(np.int16))
kwarg_1 = {'centroid': region_info[0].centroid}
kwarg_2 = {'centroid': region_info[1].centroid}
coords_1 = region_info[0].coords
coords_2 = region_info[1].coords
channel_counts_1_center_weight = signal_extraction.center_weighting_extraction(
cell_coords=coords_1,
image_data=xr.DataArray(sample_channel_data),
**kwarg_1
)
channel_counts_2_center_weight = signal_extraction.center_weighting_extraction(
cell_coords=coords_2,
image_data=xr.DataArray(sample_channel_data),
**kwarg_2
)
channel_counts_1_base_weight = signal_extraction.total_intensity_extraction(
cell_coords=coords_1,
image_data=xr.DataArray(sample_channel_data)
)
channel_counts_2_base_weight = signal_extraction.total_intensity_extraction(
cell_coords=coords_2,
image_data=xr.DataArray(sample_channel_data)
)
# cell 1 and cell 2 nuclear signal should be lower for weighted than default
assert channel_counts_1_center_weight[0] < channel_counts_1_base_weight[0]
assert channel_counts_2_center_weight[1] < channel_counts_2_base_weight[1]
# assert effect of "bleeding" membrane signal is less with weighted than default
assert channel_counts_1_center_weight[1] < channel_counts_1_base_weight[1]
def test_total_intensity_extraction():
# sample params
size_img = (1024, 1024)
cell_radius = 10
nuc_radius = 3
memb_thickness = 5
nuc_signal_strength = 10
memb_signal_strength = 10
nuc_uncertainty_length = 0
memb_uncertainty_length = 0
# generate sample segmentation mask and channel data
sample_segmentation_mask, sample_channel_data = \
synthetic_spatial_datagen.generate_two_cell_chan_data(
size_img=size_img,
cell_radius=cell_radius,
nuc_radius=nuc_radius,
memb_thickness=memb_thickness,
nuc_signal_strength=nuc_signal_strength,
memb_signal_strength=memb_signal_strength,
nuc_uncertainty_length=nuc_uncertainty_length,
memb_uncertainty_length=memb_uncertainty_length
)
# extract the cell regions for cells 1 and 2
coords_1 = np.argwhere(sample_segmentation_mask == 1)
coords_2 = np.argwhere(sample_segmentation_mask == 2)
channel_counts_1 = signal_extraction.total_intensity_extraction(
cell_coords=coords_1,
image_data=xr.DataArray(sample_channel_data)
)
channel_counts_2 = signal_extraction.total_intensity_extraction(
cell_coords=coords_2,
image_data=xr.DataArray(sample_channel_data)
)
# test signal counts for different channels
assert
|
np.all(channel_counts_1 == [250, 0])
|
numpy.all
|
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Primary container for radio interferometer flag manipulation."""
import numpy as np
import os
import warnings
import h5py
import pathlib
from ..uvbase import UVBase
from .. import parameter as uvp
from ..uvdata import UVData
from ..uvcal import UVCal
from .. import utils as uvutils
from .. import telescopes as uvtel
__all__ = ["UVFlag", "flags2waterfall", "and_rows_cols", "lst_from_uv"]
def and_rows_cols(waterfall):
"""Perform logical and over rows and cols of a waterfall.
For a 2D flag waterfall, flag pixels only if fully flagged along
time and/or frequency
Parameters
----------
waterfall : 2D boolean array of shape (Ntimes, Nfreqs)
Returns
-------
wf : 2D array
A 2D array (size same as input) where only times/integrations
that were fully flagged are flagged.
"""
wf = np.zeros_like(waterfall, dtype=np.bool)
Ntimes, Nfreqs = waterfall.shape
wf[:, (np.sum(waterfall, axis=0) / Ntimes) == 1] = True
wf[(np.sum(waterfall, axis=1) / Nfreqs) == 1] = True
return wf
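# Illustrative example (added for clarity, not part of the original module):
# only the column that is flagged at every time survives; isolated flags drop.
#   >>> wf_in = np.array([[True, False, True],
#   ...                   [True, True, False]])
#   >>> and_rows_cols(wf_in)
#   array([[ True, False, False],
#          [ True, False, False]])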
def lst_from_uv(uv):
"""Calculate the lst_array for a UVData or UVCal object.
Parameters
----------
uv : a UVData or UVCal object.
Object from which lsts are calculated
Returns
-------
lst_array: array of float
lst_array corresponding to time_array and at telescope location.
Units are radian.
"""
if not isinstance(uv, (UVCal, UVData)):
raise ValueError(
"Function lst_from_uv can only operate on " "UVCal or UVData object."
)
tel = uvtel.get_telescope(uv.telescope_name)
lat, lon, alt = tel.telescope_location_lat_lon_alt_degrees
lst_array = uvutils.get_lst_for_time(uv.time_array, lat, lon, alt)
return lst_array
def flags2waterfall(uv, flag_array=None, keep_pol=False):
"""Convert a flag array to a 2D waterfall of dimensions (Ntimes, Nfreqs).
Averages over baselines and polarizations (in the case of visibility data),
or antennas and jones parameters (in the case of calibration data).
Parameters
----------
uv : A UVData or UVCal object
Object defines the times and frequencies, and supplies the
flag_array to convert (if flag_array not specified)
flag_array : Optional,
flag array to convert instead of uv.flag_array.
Must have same dimensions as uv.flag_array.
keep_pol : bool
Option to keep the polarization axis intact.
Returns
-------
waterfall : 2D array or 3D array
Waterfall of averaged flags, for example fraction of baselines
which are flagged for every time and frequency (in the case of UVData input).
Size is (Ntimes, Nfreqs) or (Ntimes, Nfreqs, Npols).
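Examples
--------
A hypothetical sketch (``uvd`` is assumed to be an existing UVData object)::
    wf = flags2waterfall(uvd)                     # shape (uvd.Ntimes, uvd.Nfreqs)
    wf_pol = flags2waterfall(uvd, keep_pol=True)  # shape (Ntimes, Nfreqs, Npols)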
"""
if not isinstance(uv, (UVData, UVCal)):
raise ValueError(
"flags2waterfall() requires a UVData or UVCal object as "
"the first argument."
)
if flag_array is None:
flag_array = uv.flag_array
if uv.flag_array.shape != flag_array.shape:
raise ValueError("Flag array must align with UVData or UVCal object.")
if isinstance(uv, UVCal):
if keep_pol:
waterfall = np.swapaxes(np.mean(flag_array, axis=(0, 1)), 0, 1)
else:
waterfall = np.mean(flag_array, axis=(0, 1, 4)).T
else:
if keep_pol:
waterfall = np.zeros((uv.Ntimes, uv.Nfreqs, uv.Npols))
for i, t in enumerate(np.unique(uv.time_array)):
waterfall[i, :] = np.mean(
flag_array[uv.time_array == t, 0, :, :], axis=0
)
else:
waterfall = np.zeros((uv.Ntimes, uv.Nfreqs))
for i, t in enumerate(np.unique(uv.time_array)):
waterfall[i, :] = np.mean(
flag_array[uv.time_array == t, 0, :, :], axis=(0, 2)
)
return waterfall
class UVFlag(UVBase):
"""Object to handle flag arrays and waterfalls for interferometric datasets.
Supports reading/writing, and stores all relevant information to combine
flags and apply to data.
Initialization of the UVFlag object requires some parameters. Metadata is
copied from indata object. If indata is subclass of UVData or UVCal,
the weights_array will be set to all ones.
Lists or tuples are iterated through, treating each entry with an
individual UVFlag init.
Parameters
----------
indata : UVData, UVCal, str, pathlib.Path, list of compatible combination
Input to initialize UVFlag object. If str, assumed to be path to previously
saved UVFlag object. UVData and UVCal objects cannot be directly combined,
unless waterfall is True.
mode : {"metric", "flag"}, optional
The mode determines whether the object has a floating point metric_array
or a boolean flag_array.
copy_flags : bool, optional
Whether to copy flags from indata to new UVFlag object
waterfall : bool, optional
Whether to immediately initialize as a waterfall object, with flag/metric
axes: time, frequency, polarization.
history : str, optional
History string to attach to object.
label: str, optional
String used for labeling the object (e.g. 'FM').
run_check : bool
Option to check for the existence and proper shapes of parameters
after creating UVFlag object.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
creating UVFlag object.
Attributes
----------
UVParameter objects :
For full list see the UVFlag Parameters Documentation.
(https://pyuvdata.readthedocs.io/en/latest/uvflag_parameters.html)
Some are always required, some are required for certain phase_types
and others are always optional.
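Examples
--------
A minimal, hypothetical sketch (``uvd`` is assumed to be an existing UVData
object; the output file name is a placeholder)::
    uvf = UVFlag(uvd, mode="flag")     # baseline-type flag object
    uvf.to_waterfall(method="or")      # collapse to (Ntimes, Nfreqs, Npols)
    uvf.write("example_flags.h5")      # save in the HDF5-based UVFlag format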
"""
def __init__(
self,
indata=None,
mode="metric",
copy_flags=False,
waterfall=False,
history="",
label="",
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""Initialize the object."""
# standard angle tolerance: 10 mas in radians.
# Should perhaps be decreased to 1 mas in the future
radian_tol = 10 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0)
desc = (
"The mode determines whether the object has a "
"floating point metric_array or a boolean flag_array. "
'Options: {"metric", "flag"}. Default is "metric".'
)
self._mode = uvp.UVParameter(
"mode",
description=desc,
form="str",
expected_type=str,
acceptable_vals=["metric", "flag"],
)
desc = (
"String used for labeling the object (e.g. 'FM'). "
"Default is empty string."
)
self._label = uvp.UVParameter(
"label", description=desc, form="str", expected_type=str
)
desc = (
"The type of object defines the form of some arrays "
" and also how metrics/flags are combined. "
'Accepted types:"waterfall", "baseline", "antenna"'
)
self._type = uvp.UVParameter(
"type",
description=desc,
form="str",
expected_type=str,
acceptable_vals=["antenna", "baseline", "waterfall"],
)
self._Ntimes = uvp.UVParameter(
"Ntimes", description="Number of times", expected_type=int
)
desc = "Number of baselines. " 'Only Required for "baseline" type objects.'
self._Nbls = uvp.UVParameter(
"Nbls", description=desc, expected_type=int, required=False
)
self._Nblts = uvp.UVParameter(
"Nblts",
description="Number of baseline-times "
"(i.e. number of spectra). Not necessarily "
"equal to Nbls * Ntimes",
expected_type=int,
)
self._Nspws = uvp.UVParameter(
"Nspws",
description="Number of spectral windows "
"(ie non-contiguous spectral chunks). "
"More than one spectral window is not "
"currently supported.",
expected_type=int,
required=False,
)
self._Nfreqs = uvp.UVParameter(
"Nfreqs", description="Number of frequency channels", expected_type=int
)
self._Npols = uvp.UVParameter(
"Npols", description="Number of polarizations", expected_type=int
)
desc = (
"Floating point metric information, only availble in metric mode. "
"shape (Nblts, Nspws, Nfreq, Npols)."
)
self._metric_array = uvp.UVParameter(
"metric_array",
description=desc,
form=("Nblts", "Nspws", "Nfreqs", "Npols"),
expected_type=np.float,
required=False,
)
desc = (
"Boolean flag, True is flagged, only availble in flag mode. "
"shape (Nblts, Nspws, Nfreq, Npols)."
)
self._flag_array = uvp.UVParameter(
"flag_array",
description=desc,
form=("Nblts", "Nspws", "Nfreqs", "Npols"),
expected_type=np.bool,
required=False,
)
desc = "Floating point weight information, shape (Nblts, Nspws, Nfreq, Npols)."
self._weights_array = uvp.UVParameter(
"weights_array",
description=desc,
form=("Nblts", "Nspws", "Nfreqs", "Npols"),
expected_type=np.float,
)
desc = (
"Floating point weight information about sum of squares of weights"
" when weighted data converted from baseline to waterfall mode."
)
self._weights_square_array = uvp.UVParameter(
"weights_square_array",
description=desc,
form=("Nblts", "Nspws", "Nfreqs", "Npols"),
expected_type=np.float,
required=False,
)
desc = (
"Array of times, center of integration, shape (Nblts), " "units Julian Date"
)
self._time_array = uvp.UVParameter(
"time_array",
description=desc,
form=("Nblts",),
expected_type=np.float,
tols=1e-3 / (60.0 * 60.0 * 24.0),
) # 1 ms in days
desc = "Array of lsts, center of integration, shape (Nblts), " "units radians"
self._lst_array = uvp.UVParameter(
"lst_array",
description=desc,
form=("Nblts",),
expected_type=np.float,
tols=radian_tol,
)
desc = (
"Array of first antenna indices, shape (Nblts). "
'Only available for "baseline" type objects. '
"type = int, 0 indexed"
)
self._ant_1_array = uvp.UVParameter(
"ant_1_array", description=desc, expected_type=int, form=("Nblts",)
)
desc = (
"Array of second antenna indices, shape (Nblts). "
'Only available for "baseline" type objects. '
"type = int, 0 indexed"
)
self._ant_2_array = uvp.UVParameter(
"ant_2_array", description=desc, expected_type=int, form=("Nblts",)
)
desc = (
"Array of antenna numbers, shape (Nants_data), "
'Only available for "antenna" type objects. '
"type = int, 0 indexed"
)
self._ant_array = uvp.UVParameter(
"ant_array", description=desc, expected_type=int, form=("Nants_data",)
)
desc = (
"Array of baseline indices, shape (Nblts). "
'Only available for "baseline" type objects. '
"type = int; baseline = 2048 * (ant1+1) + (ant2+1) + 2^16"
)
self._baseline_array = uvp.UVParameter(
"baseline_array", description=desc, expected_type=int, form=("Nblts",)
)
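# Worked example of the convention quoted above (added for clarity, not in
# the original source): for ant1 = 0 and ant2 = 1,
#   2048 * (0 + 1) + (1 + 1) + 2**16 = 67586
# so baseline_array stores 67586 for the (0, 1) pair.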
desc = (
"Array of frequencies, center of the channel, "
"shape (Nspws, Nfreqs), units Hz"
)
self._freq_array = uvp.UVParameter(
"freq_array",
description=desc,
form=("Nspws", "Nfreqs"),
expected_type=np.float,
tols=1e-3,
) # mHz
desc = (
"Array of polarization integers, shape (Npols). "
"AIPS Memo 117 says: pseudo-stokes 1:4 (pI, pQ, pU, pV); "
"circular -1:-4 (RR, LL, RL, LR); linear -5:-8 (XX, YY, XY, YX). "
"NOTE: AIPS Memo 117 actually calls the pseudo-Stokes polarizations "
'"Stokes", but this is inaccurate as visibilities cannot be in '
"true Stokes polarizations for physical antennas. We adopt the "
"term pseudo-Stokes to refer to linear combinations of instrumental "
"visibility polarizations (e.g. pI = xx + yy)."
)
self._polarization_array = uvp.UVParameter(
"polarization_array",
description=desc,
expected_type=int,
acceptable_vals=list(np.arange(-8, 0)) + list(np.arange(1, 5)),
form=("Npols",),
)
self._history = uvp.UVParameter(
"history",
description="String of history, units English",
form="str",
expected_type=str,
)
# ---antenna information ---
desc = (
"Number of antennas in the array. "
'Only available for "baseline" type objects. '
"May be larger than the number of antennas with data."
)
self._Nants_telescope = uvp.UVParameter(
"Nants_telescope", description=desc, expected_type=int, required=False
)
desc = (
"Number of antennas with data present. "
'Only available for "baseline" or "antenna" type objects.'
"May be smaller than the number of antennas in the array"
)
self._Nants_data = uvp.UVParameter(
"Nants_data", description=desc, expected_type=int, required=True
)
# --extra information ---
desc = (
"Orientation of the physical dipole corresponding to what is "
'labelled as the x polarization. Options are "east" '
'(indicating east/west orientation) and "north" (indicating '
"north/south orientation)"
)
self._x_orientation = uvp.UVParameter(
"x_orientation",
description=desc,
required=False,
expected_type=str,
acceptable_vals=["east", "north"],
)
# initialize the underlying UVBase properties
super(UVFlag, self).__init__()
self.history = "" # Added to at the end
self.label = "" # Added to at the end
if isinstance(indata, (list, tuple)):
self.__init__(
indata[0],
mode=mode,
copy_flags=copy_flags,
waterfall=waterfall,
history=history,
label=label,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
if len(indata) > 1:
for i in indata[1:]:
fobj = UVFlag(
i,
mode=mode,
copy_flags=copy_flags,
waterfall=waterfall,
history=history,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
self.__add__(
fobj,
run_check=run_check,
inplace=True,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
del fobj
elif issubclass(indata.__class__, (str, pathlib.Path)):
# Given a path, read indata
self.read(
indata,
history,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
elif issubclass(indata.__class__, UVData):
self.from_uvdata(
indata,
mode=mode,
copy_flags=copy_flags,
waterfall=waterfall,
history=history,
label=label,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
elif issubclass(indata.__class__, UVCal):
self.from_uvcal(
indata,
mode=mode,
copy_flags=copy_flags,
waterfall=waterfall,
history=history,
label=label,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
elif indata is not None:
raise ValueError(
"input to UVFlag.__init__ must be one of: "
"list, tuple, string, pathlib.Path, UVData, or UVCal."
)
@property
def _data_params(self):
"""List of strings giving the data-like parameters."""
if not hasattr(self, "mode") or self.mode is None:
return None
elif self.mode == "flag":
return ["flag_array"]
elif self.mode == "metric":
if self.weights_square_array is None:
return ["metric_array", "weights_array"]
else:
return ["metric_array", "weights_array", "weights_square_array"]
else:
raise ValueError(
"Invalid mode. Mode must be one of "
+ ", ".join(["{}"] * len(self._mode.acceptable_vals)).format(
*self._mode.acceptable_vals
)
)
@property
def data_like_parameters(self):
"""Return iterator of defined parameters which are data-like."""
for key in self._data_params:
if hasattr(self, key):
yield getattr(self, key)
@property
def pol_collapsed(self):
"""Determine if this object has had pols collapsed."""
if not hasattr(self, "polarization_array") or self.polarization_array is None:
return False
elif isinstance(self.polarization_array.item(0), str):
return True
else:
return False
def _check_pol_state(self):
if self.pol_collapsed:
# collapsed pol objects have a different type for
# the polarization array.
self._polarization_array.expected_type = str
self._polarization_array.acceptable_vals = None
else:
self._polarization_array.expected_type = int
self._polarization_array.acceptable_vals = list(np.arange(-8, 0)) + list(
np.arange(1, 5)
)
def _set_mode_flag(self):
"""Set the mode and required parameters consistent with a flag object."""
self.mode = "flag"
self._flag_array.required = True
self._metric_array.required = False
self._weights_array.required = False
if self.weights_square_array is not None:
self.weights_square_array = None
return
def _set_mode_metric(self):
"""Set the mode and required parameters consistent with a metric object."""
self.mode = "metric"
self._flag_array.required = False
self._metric_array.required = True
self._weights_array.required = True
if self.weights_array is None and self.metric_array is not None:
self.weights_array = np.ones_like(self.metric_array, dtype=float)
return
def _set_type_antenna(self):
"""Set the type and required propertis consistent with an antenna object."""
self.type = "antenna"
self._ant_array.required = True
self._baseline_array.required = False
self._ant_1_array.required = False
self._ant_2_array.required = False
self._Nants_telescope.required = False
self._Nants_data.required = True
self._Nbls.required = False
self._Nspws.required = True
self._Nblts.required = False
desc = (
"Floating point metric information, "
"has shape (Nants_data, Nspws, Nfreqs, Ntimes, Npols)."
)
self._metric_array.desc = desc
self._metric_array.form = ("Nants_data", "Nspws", "Nfreqs", "Ntimes", "Npols")
desc = (
"Boolean flag, True is flagged, "
"has shape (Nants_data, Nspws, Nfreqs, Ntimes, Npols)."
)
self._flag_array.desc = desc
self._flag_array.form = ("Nants_data", "Nspws", "Nfreqs", "Ntimes", "Npols")
desc = (
"Floating point weight information, "
"has shape (Nants_data, Nspws, Nfreqs, Ntimes, Npols)."
)
self._weights_array.desc = desc
self._weights_array.form = ("Nants_data", "Nspws", "Nfreqs", "Ntimes", "Npols")
desc = (
"Array of unique times, center of integration, shape (Ntimes), "
"units Julian Date"
)
self._time_array.form = ("Ntimes",)
desc = (
"Array of unique lsts, center of integration, shape (Ntimes), "
"units radians"
)
self._lst_array.form = ("Ntimes",)
desc = (
"Array of frequencies, center of the channel, "
"shape (Nspws, Nfreqs), units Hz"
)
self._freq_array.form = ("Nspws", "Nfreqs")
def _set_type_baseline(self):
"""Set the type and required propertis consistent with a baseline object."""
self.type = "baseline"
self._ant_array.required = False
self._baseline_array.required = True
self._ant_1_array.required = True
self._ant_2_array.required = True
self._Nants_telescope.required = True
self._Nants_data.required = True
self._Nbls.required = True
self._Nblts.required = True
self._Nspws.required = True
if self.time_array is not None:
self.Nblts = len(self.time_array)
desc = "Floating point metric information, shape (Nblts, Nspws, Nfreqs, Npols)."
self._metric_array.desc = desc
self._metric_array.form = ("Nblts", "Nspws", "Nfreqs", "Npols")
desc = "Boolean flag, True is flagged, shape (Nblts, Nfreqs, Npols)"
self._flag_array.desc = desc
self._flag_array.form = ("Nblts", "Nspws", "Nfreqs", "Npols")
desc = "Floating point weight information, has shape (Nblts, Nfreqs, Npols)."
self._weights_array.desc = desc
self._weights_array.form = ("Nblts", "Nspws", "Nfreqs", "Npols")
desc = (
"Array of unique times, center of integration, shape (Ntimes), "
"units Julian Date"
)
self._time_array.form = ("Nblts",)
desc = (
"Array of unique lsts, center of integration, shape (Ntimes), "
"units radians"
)
self._lst_array.form = ("Nblts",)
desc = (
"Array of frequencies, center of the channel, "
"shape (Nspws, Nfreqs), units Hz"
)
self._freq_array.form = ("Nspws", "Nfreqs")
def _set_type_waterfall(self):
"""Set the type and required propertis consistent with a waterfall object."""
self.type = "waterfall"
self._ant_array.required = False
self._baseline_array.required = False
self._ant_1_array.required = False
self._ant_2_array.required = False
self._Nants_telescope.required = False
self._Nants_data.required = False
self._Nbls.required = False
self._Nspws.required = False
self._Nblts.required = False
desc = "Floating point metric information, shape (Ntimes, Nfreqs, Npols)."
self._metric_array.desc = desc
self._metric_array.form = ("Ntimes", "Nfreqs", "Npols")
desc = "Boolean flag, True is flagged, shape (Ntimes, Nfreqs, Npols)"
self._flag_array.desc = desc
self._flag_array.form = ("Ntimes", "Nfreqs", "Npols")
desc = "Floating point weight information, has shape (Ntimes, Nfreqs, Npols)."
self._weights_array.desc = desc
self._weights_array.form = ("Ntimes", "Nfreqs", "Npols")
desc = (
"Floating point weight information about sum of squares of weights"
" when weighted data converted from baseline to waterfall mode."
" Has shape (Ntimes, Nfreqs, Npols)."
)
self._weights_square_array.desc = desc
self._weights_square_array.form = ("Ntimes", "Nfreqs", "Npols")
desc = (
"Array of unique times, center of integration, shape (Ntimes), "
"units Julian Date"
)
self._time_array.form = ("Ntimes",)
desc = (
"Array of unique lsts, center of integration, shape (Ntimes), "
"units radians"
)
self._lst_array.form = ("Ntimes",)
desc = (
"Array of frequencies, center of the channel, " "shape (Nfreqs), units Hz"
)
self._freq_array.form = ("Nfreqs",)
def clear_unused_attributes(self):
"""Remove unused attributes.
Useful when changing type or mode or to save memory.
Will set all non-required attributes to None, except x_orientation and
weights_square_array.
"""
for p in self:
attr = getattr(self, p)
if (
not attr.required
and attr.value is not None
and attr.name != "x_orientation"
and attr.name != "weights_square_array"
):
attr.value = None
setattr(self, p, attr)
def __eq__(self, other, check_history=True, check_extra=True):
"""Check Equality of two UVFlag objects.
Parameters
----------
other: UVFlag
object to check against
check_history : bool
Include the history keyword when comparing UVFlag objects.
check_extra : bool
Include non-required parameters when comparing UVFlag objects.
"""
if check_history:
return super(UVFlag, self).__eq__(other, check_extra=check_extra)
else:
# initial check that the classes are the same
# then strip the histories
if isinstance(other, self.__class__):
_h1 = self.history
self.history = None
_h2 = other.history
other.history = None
truth = super(UVFlag, self).__eq__(other, check_extra=check_extra)
self.history = _h1
other.history = _h2
return truth
else:
print("Classes do not match")
return False
def __ne__(self, other, check_history=True, check_extra=True):
"""Not Equal."""
return not self.__eq__(
other, check_history=check_history, check_extra=check_extra
)
def antpair2ind(self, ant1, ant2):
"""Get blt indices for given (ordered) antenna pair.
Parameters
----------
ant1 : int or array_like of int
Number of the first antenna
ant2 : int or array_like of int
Number of the second antenna
Returns
-------
ndarray of int
Indices along the baseline-time (blt) axis at which the given antenna
pair appears.
"""
if self.type != "baseline":
raise ValueError(
"UVFlag object of type " + self.type + " does not "
"contain antenna pairs to index."
)
return np.where((self.ant_1_array == ant1) & (self.ant_2_array == ant2))[0]
def baseline_to_antnums(self, baseline):
"""Get the antenna numbers corresponding to a given baseline number.
Parameters
----------
baseline : int
baseline number
Returns
-------
tuple
Antenna numbers corresponding to baseline.
"""
assert self.type == "baseline", 'Must be "baseline" type UVFlag object.'
return uvutils.baseline_to_antnums(baseline, self.Nants_telescope)
def get_baseline_nums(self):
"""Return numpy array of unique baseline numbers in data."""
assert self.type == "baseline", 'Must be "baseline" type UVFlag object.'
return np.unique(self.baseline_array)
def get_antpairs(self):
"""Return list of unique antpair tuples (ant1, ant2) in data."""
assert self.type == "baseline", 'Must be "baseline" type UVFlag object.'
return [self.baseline_to_antnums(bl) for bl in self.get_baseline_nums()]
def get_ants(self):
"""
Get the unique antennas that have data associated with them.
Returns
-------
ndarray of int
Array of unique antennas with data associated with them.
"""
if self.type == "baseline":
return np.unique(np.append(self.ant_1_array, self.ant_2_array))
elif self.type == "antenna":
return np.unique(self.ant_array)
elif self.type == "waterfall":
raise ValueError("A waterfall type UVFlag object has no sense of antennas.")
def get_pols(self):
"""
Get the polarizations in the data.
Returns
-------
list of str
list of polarizations (as strings) in the data.
"""
return uvutils.polnum2str(
self.polarization_array, x_orientation=self.x_orientation
)
def parse_ants(self, ant_str, print_toggle=False):
"""
Get antpair and polarization from parsing an aipy-style ant string.
Used to support the select function.
This function is only useable when the UVFlag type is 'baseline'.
Generates two lists of antenna pair tuples and polarization indices based
on parsing of the string ant_str. If no valid polarizations (pseudo-Stokes
params, or combinations of [lr] or [xy]) or antenna numbers are found in
ant_str, ant_pairs_nums and polarizations are returned as None.
Parameters
----------
ant_str : str
String containing antenna information to parse. Can be 'all',
'auto', 'cross', or combinations of antenna numbers and polarization
indicators 'l' and 'r' or 'x' and 'y'. Minus signs can also be used
in front of an antenna number or baseline to exclude it from being
output in ant_pairs_nums. If ant_str has a minus sign as the first
character, 'all,' will be appended to the beginning of the string.
See the tutorial for examples of valid strings and their behavior.
print_toggle : bool
Boolean for printing parsed baselines for a visual user check.
Returns
-------
ant_pairs_nums : list of tuples of int or None
List of tuples containing the parsed pairs of antenna numbers, or
None if ant_str is 'all' or a pseudo-Stokes polarization.
polarizations : list of int or None
List of desired polarizations or None if ant_str does not contain a
polarization specification.
"""
if self.type != "baseline":
raise ValueError(
"UVFlag objects can only call 'parse_ants' function "
"if type is 'baseline'."
)
return uvutils.parse_ants(
self,
ant_str=ant_str,
print_toggle=print_toggle,
x_orientation=self.x_orientation,
)
def collapse_pol(
self,
method="quadmean",
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""Collapse the polarization axis using a given method.
If the original UVFlag object has more than one polarization,
the resulting polarization_array will be a single element array with a
comma separated string encoding the original polarizations.
Parameters
----------
method : str, {"quadmean", "absmean", "mean", "or", "and"}
How to collapse the dimension(s).
run_check : bool
Option to check for the existence and proper shapes of parameters
after collapsing polarizations.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
collapsing polarizations.
"""
method = method.lower()
if self.mode == "flag":
darr = self.flag_array
else:
darr = self.metric_array
if len(self.polarization_array) > 1:
if self.mode == "metric":
_weights = self.weights_array
else:
_weights = np.ones_like(darr)
# Collapse pol dimension. But note we retain a polarization axis.
d, w = uvutils.collapse(
darr, method, axis=-1, weights=_weights, return_weights=True
)
darr = np.expand_dims(d, axis=d.ndim)
if self.mode == "metric":
self.weights_array = np.expand_dims(w, axis=w.ndim)
self.polarization_array = np.array(
[",".join(map(str, self.polarization_array))], dtype=np.str_
)
self.Npols = len(self.polarization_array)
self._check_pol_state()
else:
warnings.warn(
"Cannot collapse polarization axis when only one pol present."
)
return
if ((method == "or") or (method == "and")) and (self.mode == "flag"):
self.flag_array = darr
else:
self.metric_array = darr
self._set_mode_metric()
self.clear_unused_attributes()
self.history += "Pol axis collapse. "
if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):
self.history += self.pyuvdata_version_str
if run_check:
self.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
def to_waterfall(
self,
method="quadmean",
keep_pol=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
return_weights_square=False,
):
"""Convert an 'antenna' or 'baseline' type object to waterfall.
Parameters
----------
method : str, {"quadmean", "absmean", "mean", "or", "and"}
How to collapse the dimension(s).
keep_pol : bool
Whether to also collapse the polarization dimension
If keep_pol is False, and the original UVFlag object has more
than one polarization, the resulting polarization_array
will be a single element array with a comma separated string
encoding the original polarizations.
run_check : bool
Option to check for the existence and proper shapes of parameters
after converting to waterfall type.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
converting to waterfall type.
return_weights_square: bool
Option to compute the sum of the squares of the weights when
collapsing a baseline object to waterfall. Ignored unless the object is
of "baseline" type; if it is, the optional weights_square_array is filled.
"""
method = method.lower()
if self.type == "waterfall" and (
keep_pol or (len(self.polarization_array) == 1)
):
warnings.warn("This object is already a waterfall. Nothing to change.")
return
if (not keep_pol) and (len(self.polarization_array) > 1):
self.collapse_pol(method)
if self.mode == "flag":
darr = self.flag_array
else:
darr = self.metric_array
if self.type == "antenna":
d, w = uvutils.collapse(
darr,
method,
axis=(0, 1),
weights=self.weights_array,
return_weights=True,
)
darr =
|
np.swapaxes(d, 0, 1)
|
numpy.swapaxes
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
import unittest
import anndata
import numpy as np
import pandas as pd
import scipy.sparse
import wot
class TestGeneSetScores(unittest.TestCase):
def test_score_gene_set_command(self):
subprocess.call(args=['wot', 'gene_set_scores',
'--matrix',
os.path.abspath(
'inputs/score_gene_sets/matrix.txt'),
'--gene_sets', os.path.abspath(
'inputs/score_gene_sets/gene_sets.gmx'),
'--out', 'test_gene_set_test_output',
'--method', 'mean', '--format', 'txt'],
cwd=os.getcwd(),
stderr=subprocess.STDOUT)
set_names = ['s1', 's2', 's3']
scores = np.array([[1, 0, 1.5], [4, 0, 4.5]])
for i in range(len(set_names)):
output_file = 'test_gene_set_test_output_' + set_names[i] + '.txt'
output = pd.read_csv(output_file, index_col=0, sep='\t')
np.testing.assert_array_equal(output[set_names[i]].values, scores[:, i])
os.remove(output_file)
def test_p_value1(self):
ds = anndata.AnnData(X=np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]), obs=None, var=None)
gs = anndata.AnnData(X=np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8).T, obs=None, var=None)
result = wot.score_gene_sets(ds=ds, gs=gs, method=None, permutations=10,
random_state=1234)
np.testing.assert_array_equal(result['p_value'][0], 11.0 / 12.0)
# def test_p_value2(self):
# ds = anndata.AnnData(X=np.array([[1, 2, 3], [4, 5, 6]]), obs=None, var=None)
# gs = anndata.AnnData(X=np.array([[1, 1, 1]], dtype=np.uint8).T, obs=None, var=None)
# result = wot.score_gene_sets(ds=ds, gs=gs, method=None, permutations=100, smooth_p_values=False,
# random_state=1234)
def test_score_gene_sets_sparse_ds(self):
ds = anndata.AnnData(
X=scipy.sparse.csr_matrix(np.array([[0, 0, 0, 4, 5, 6, 7, 8, 9, 10]])),
obs=None,
var=None)
gs = anndata.AnnData(X=np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=np.uint8).T, obs=None, var=None)
result = wot.score_gene_sets(ds=ds, gs=gs, method=None, permutations=100,
random_state=1234,
smooth_p_values=False)
self.assertEqual(result['k'][0], 100)
def test_score_gene_sets_sparse_ds_zscore(self):
ds = anndata.AnnData(
X=scipy.sparse.csr_matrix(np.array([[0, 0, 0, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 5, 6, 7, 9, 9, 19, 11]])),
obs=None,
var=None)
gs = anndata.AnnData(X=np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=np.uint8).T, obs=None, var=None)
result = wot.score_gene_sets(ds=ds, gs=gs, method='mean_z_score', permutations=100,
random_state=1234,
smooth_p_values=False)
self.assertEqual(result['k'][0], 100)
def test_score_gene_sets_sparse_gs(self):
ds = anndata.AnnData(
X=np.array([[0, 0, 0, 4, 5, 6, 7, 8, 9, 10]]),
obs=None,
var=None)
gs = anndata.AnnData(X=scipy.sparse.csr_matrix(np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=np.uint8).T),
obs=None, var=None)
result = wot.score_gene_sets(ds=ds, gs=gs, method=None,
permutations=100,
random_state=1234,
smooth_p_values=False)
self.assertEqual(result['k'][0], 100)
def test_score_gene_sets_basic(self):
ds = anndata.AnnData(X=np.array([[1.0, 2.0, 3, 0], [4, 5, 6.0, 0]]),
obs=pd.DataFrame(
index=['c1', 'c2']),
var=pd.DataFrame(
index=['g1', 'g2',
'g3', 'g4']))
gs = anndata.AnnData(X=np.array([[1, 0, 1], [0, 0, 1], [0, 0, 0], [0, 1, 0]], dtype=np.uint8),
obs=pd.DataFrame(
index=['g1', 'g2', 'g3', 'g4']),
var=pd.DataFrame(
index=['s1', 's2', 's3']))
expected =
|
np.array([[1, 0, 1.5], [4, 0, 4.5]])
|
numpy.array
|
import concurrent.futures
import re
from functools import cached_property
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Union
import albumentations
import cv2
import numpy as np
import tifffile
from ..base import MaskDataset
from ..types import BundledPath
class BBBC020(MaskDataset):
"""Murine bone-marrow derived macrophages
The image set consists of 25 images, each consisting of three channels. The
samples were stained with DAPI and CD11b/APC. In addition to this, a merged
image is provided. DAPI labels the nuclei and CD11b/APC the cell surface.
Parameters
----------
root_dir : str
Path to root directory
output : {'both', 'image', 'mask'}, default: 'both'
Change outputs. 'both' returns {'image': image, 'mask': mask}.
transforms : albumentations.Compose, optional
An instance of Compose (albumentations pkg) that defines augmentation in
sequence.
num_samples : int, optional
Useful when ``transforms`` is set. Define the total length of the
dataset. If it is set, it overwrites ``__len__``.
grayscale : bool, default: False
Convert images to grayscale
grayscale_mode : {'equal', 'cv2', Sequence[float]}, default: 'equal'
How to convert to grayscale. If set to 'cv2', it follows opencv
implementation. Else if set to 'equal', it sums up values along channel
axis, then divides it by the number of expected channels.
image_ch : {'nuclei', 'cells'}, default: ('nuclei', 'cells')
Which channel(s) to load as image. Make sure to give it as a Sequence
when choosing a single channel.
anno_ch : {'nuclei', 'cells'}, default: ('nuclei',)
Which channel(s) to load as annotation. Make sure to give it as a
Sequence when choosing a single channel.
drop_missing_pairs : bool, default: True
Valid only if `output='both'`. It will drop images that do not have mask
pairs.
Warnings
--------
5 annotations are missing: ind={17,18,19,20,21}
[jw-30min 1, jw-30min 2, jw-30min 3, jw-30min 4, jw-30min 5]
- ./BBBC020_v1_images/jw-30min 1/jw-30min 1_(c1+c5).TIF
- ./BBBC020_v1_images/jw-30min 2/jw-30min 2_(c1+c5).TIF
- ./BBBC020_v1_images/jw-30min 3/jw-30min 3_(c1+c5).TIF
- ./BBBC020_v1_images/jw-30min 4/jw-30min 4_(c1+c5).TIF
- ./BBBC020_v1_images/jw-30min 5/jw-30min 5_(c1+c5).TIF
- BBC020_v1_outlines_nuclei/jw-15min 5_c5_43.TIF exists but corrupted
Notes
-----
- Annotations are instance segmented where each of them is saved as a single
image file. It loads and aggregates them as a single array. Label loaded
after will override the one loaded before. If you do not want this
behavior, make a subclass out of this class and override ``get_mask()``
method, accordingly.
- 2 channels; R channel is the same as G, R==G!=B
Assign 0 to red channel
- BBBC has received a complaint that "BBB020_v1_outlines_nuclei" appears
incomplete and we have been unable to obtain the missing images from the
original contributor.
- Nuclei anno looks good
- Should separate nuclei and cells annotation; if ``anno_ch=None``,
``anno_dict`` becomes a mess.
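Examples
--------
A minimal, hypothetical usage sketch (the root path is a placeholder)::
    ds = BBBC020('path/to/BBBC020', output='both',
                 image_ch=('nuclei',), anno_ch=('nuclei',))
    sample = ds[0]
    image, mask = sample['image'], sample['mask']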
References
----------
.. [1] https://bbbc.broadinstitute.org/BBBC020
See Also
--------
MaskDataset : Super class
Dataset : Base class
DatasetInterface : Interface
"""
# Dataset's acronym
acronym = 'BBBC020'
def __init__(
self,
root_dir: str,
*,
output: str = 'both',
transforms: Optional[albumentations.Compose] = None,
num_samples: Optional[int] = None,
grayscale: bool = False,
grayscale_mode: Union[str, Sequence[float]] = 'equal',
# specific to this dataset
image_ch: Sequence[str] = ('nuclei', 'cells'),
anno_ch: Sequence[str] = ('nuclei',),
drop_missing_pairs: bool = True,
**kwargs
):
self._root_dir = root_dir
self._output = output
self._transforms = transforms
self._num_samples = num_samples
self._grayscale = grayscale
self._grayscale_mode = grayscale_mode
# specific to this dataset
self._num_channels = 2 # explicit for `grayscale`
self.image_ch = image_ch
self.anno_ch = anno_ch
if not any([ch in ('nuclei', 'cells') for ch in image_ch]):
raise ValueError("Set `image_ch` in ('nuclei', 'cells') in sequence")
if not any([ch in ('nuclei', 'cells') for ch in anno_ch]):
raise ValueError("Set `anno_ch` in ('nuclei', 'cells') in sequence")
self.drop_missing_pairs = drop_missing_pairs
if self.output == 'both' and self.drop_missing_pairs:
self.file_list, self.anno_dict = self._drop_missing_pairs()
def get_image(self, p: Path) -> np.ndarray:
img = tifffile.imread(p)
# R==G, zero 0
img[..., 0] = 0
if len(ch := self.image_ch) == 1:
if ch[0] == 'cells':
return cv2.cvtColor(img[..., 1], cv2.COLOR_GRAY2RGB)
elif ch[0] == 'nuclei':
return cv2.cvtColor(img[..., 2], cv2.COLOR_GRAY2RGB)
else:
raise ValueError
return img
def get_mask(self, lst_p: Union[BundledPath, List[BundledPath]]) -> np.ndarray:
def _assign_index(
mask: np.ndarray,
fn: Union[str, Path],
ind: int
):
"""For threading"""
tif: np.ndarray = tifffile.imread(fn)
idx_nz = tif.nonzero()
mask[idx_nz] = ind
if len(self.anno_ch) == 1:
tif: np.ndarray = tifffile.imread(lst_p[0])
mask =
|
np.zeros_like(tif)
|
numpy.zeros_like
|
import os
import re
import torch
import torch.nn as nn
from collections import OrderedDict
import numpy as np
from torch.nn.modules.loss import _Loss
import PIL
from PIL import Image
from torchvision import transforms, datasets
import torch.nn.functional as F
import cv2
from torch.utils.data import Sampler
import random
import math
from SGD import SGD
class AvgrageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
self.val = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0/batch_size))
return res
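# Illustrative usage (added for clarity): `output` holds logits with shape
# (batch_size, num_classes) and `target` holds integer class labels, e.g.
#   top1, top5 = accuracy(output, target, topk=(1, 5))
# returns the batch top-1 and top-5 accuracies in percent.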
def save_checkpoint(model, iters, path, optimizer=None, scheduler=None):
if not os.path.exists(path):
os.makedirs(path)
print("Saving checkpoint to file {}".format(path))
state_dict = {}
new_state_dict = OrderedDict()
for k, v in model.state_dict().items():
key = k
if k.split('.')[0] == 'module':
key = k[7:]
new_state_dict[key] = v
state_dict['model'] = new_state_dict
state_dict['iteration'] = iters
if optimizer is not None:
state_dict['optimizer'] = optimizer.state_dict()
if scheduler is not None:
state_dict['scheduler'] = scheduler.state_dict()
filename = os.path.join("{}/checkpoint.pth".format(path))
try:
torch.save(state_dict, filename)
except:
print('save {} failed, continue training'.format(path))
def sgd_optimizer(model, base_lr, momentum, weight_decay):
params = []
for key, value in model.named_parameters():
params.append(value)
param_group = [{'params': params,
'weight_decay': weight_decay}]
optimizer = SGD(param_group, lr = base_lr, momentum=momentum)
return optimizer
## data augmentation functions
class OpencvResize(object):
def __init__(self, size=256):
self.size = size
def __call__(self, img):
assert isinstance(img, PIL.Image.Image)
img = np.asarray(img) # (H,W,3) RGB
img = img[:,:,::-1] # 2 BGR
img = np.ascontiguousarray(img)
H, W, _ = img.shape
target_size = (int(self.size/H * W + 0.5), self.size) if H < W else (self.size, int(self.size/W * H + 0.5))
img = cv2.resize(img, target_size, interpolation=cv2.INTER_LINEAR)
img = img[:,:,::-1] # 2 RGB
img = np.ascontiguousarray(img)
img = Image.fromarray(img)
return img
class ToBGRTensor(object):
def __call__(self, img):
assert isinstance(img, (np.ndarray, PIL.Image.Image))
if isinstance(img, PIL.Image.Image):
img = np.asarray(img)
img = img[:,:,::-1] # 2 BGR
img = np.transpose(img, [2, 0, 1]) # 2 (3, H, W)
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).float()
return img
class RandomResizedCrop(object):
def __init__(self, scale=(0.08, 1.0), target_size:int=224, max_attempts:int=10):
assert scale[0] <= scale[1]
self.scale = scale
assert target_size > 0
self.target_size = target_size
assert max_attempts >0
self.max_attempts = max_attempts
def __call__(self, img):
assert isinstance(img, PIL.Image.Image)
img = np.asarray(img, dtype=np.uint8)
H, W, C = img.shape
well_cropped = False
for _ in range(self.max_attempts):
crop_area = (H*W) * random.uniform(self.scale[0], self.scale[1])
crop_edge = round(math.sqrt(crop_area))
dH = H - crop_edge
dW = W - crop_edge
crop_left = random.randint(min(dW, 0), max(dW, 0))
crop_top = random.randint(min(dH, 0), max(dH, 0))
if dH >= 0 and dW >= 0:
well_cropped = True
break
crop_bottom = crop_top + crop_edge
crop_right = crop_left + crop_edge
if well_cropped:
crop_image = img[crop_top:crop_bottom,:,:][:,crop_left:crop_right,:]
else:
roi_top = max(crop_top, 0)
padding_top = roi_top - crop_top
roi_bottom = min(crop_bottom, H)
padding_bottom = crop_bottom - roi_bottom
roi_left = max(crop_left, 0)
padding_left = roi_left - crop_left
roi_right = min(crop_right, W)
padding_right = crop_right - roi_right
roi_image = img[roi_top:roi_bottom,:,:][:,roi_left:roi_right,:]
crop_image = cv2.copyMakeBorder(roi_image, padding_top, padding_bottom, padding_left, padding_right,
borderType=cv2.BORDER_CONSTANT, value=0)
target_image = cv2.resize(crop_image, (self.target_size, self.target_size), interpolation=cv2.INTER_LINEAR)
target_image = PIL.Image.fromarray(target_image.astype('uint8'))
return target_image
class LighteningJitter(object):
def __init__(self, eigen_vecs, eigen_values, max_eigen_jitter=0.1):
self.eigen_vecs = np.array(eigen_vecs, dtype=np.float32)
self.eigen_values = np.array(eigen_values, dtype=np.float32)
self.max_eigen_jitter = max_eigen_jitter
def __call__(self, img):
assert isinstance(img, PIL.Image.Image)
img = np.asarray(img, dtype=np.float32)
img = np.ascontiguousarray(img/255)
cur_eigen_jitter =
|
np.random.normal(scale=self.max_eigen_jitter, size=self.eigen_values.shape)
|
numpy.random.normal
|
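# --- Added illustration (assumption, not the repository's own code) -----------
# A minimal sketch of how the eigen jitter sampled in LighteningJitter above is
# typically applied in AlexNet-style PCA lighting augmentation: project the
# per-eigenvalue noise back through the eigenvectors to get an RGB offset.
import numpy as np

def apply_pca_lighting(img, eigen_vecs, eigen_values, jitter):
    """img: float32 HxWx3 scaled to [0, 1]; jitter: one sample per eigenvalue."""
    offset = np.asarray(eigen_vecs) @ (np.asarray(jitter) * np.asarray(eigen_values))
    return np.clip(img + offset, 0.0, 1.0).astype(np.float32)
# ------------------------------------------------------------------------------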
import numpy
import array
import copy
import re, os, sys
from glob import glob
from scipy.interpolate import griddata
from scipy.integrate import simps,quad
from scipy.optimize import leastsq, fsolve
#from sm_functions import read_ised,read_ised2,calc_lyman,calc_beta
from astropy import units as U
from astropy import constants as C
from astropy import cosmology as cos
cosmo = cos.FlatLambdaCDM(H0=70,Om0=0.3)
f = open("error.log", "w")
original_stderr = sys.stderr
sys.stderr = f
class ised(object):
def __init__(self,path):
self.file = path
self.read_ised(self.file)
def read_ised(self,filename):
"""
This function reads data from Bruzual & Charlot binary format
SSP files and returns the necessary data in an array. The input files
should be '.ised' files, either 2003 or 2007.
'ks' in the binary files differs slightly between the '03 and '07 files,
so the read length and index must be set appropriately; the function
therefore tries the '03 format first and retries with the '07 format
if the returned number of ages isn't as expected (e.g. 221 ages).
"""
with open(filename,'rb') as f:
check = array.array('i')
check.fromfile(f,2)
if check[1] == 221:
ksl, ksi = 2, 1
F_l, F_i = 3, 2
else:
ksl, ksi = 3, 2
F_l, F_i = 5, 4
with open(filename,'rb') as f:
ks = array.array('i')
ks.fromfile(f,ksl)
ta = array.array('f')
ta.fromfile(f,ks[ksi])
self.ta = numpy.array(ta)
tmp = array.array('i')
tmp.fromfile(f,3)
self.ml,self.mul,iseg = tmp
if iseg > 0:
tmp = array.array('f')
tmp.fromfile(f,iseg*6)
tmp = array.array('f')
tmp.fromfile(f,5)
self.totm, self.totn, self.avs, self.jo, self.tauo = tmp
self.ids= array.array('c')
self.ids.fromfile(f,80)
tmp = array.array('f')
tmp.fromfile(f,4)
self.tcut = tmp[0]
self.ttt = tmp[1:]
ids = array.array('c')
ids.fromfile(f,80)
self.ids = array.array('c')
self.ids.fromfile(f,80)
self.igw = array.array('i')
self.igw.fromfile(f,1)
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.iw = array.array('i')
self.iw.fromfile(f,1)
wave = array.array('f')
wave.fromfile(f,self.iw[0])
self.wave = numpy.array(wave)
#SED Section
self.F = array.array('i')
self.F.fromfile(f,F_l)
self.iw = self.F[F_i] #Number of wavelength elements
self.sed = numpy.zeros((self.iw,ks[ksi]),dtype=numpy.float32)
G = array.array('f')
G.fromfile(f,self.iw)
self.sed[:,0] = G
ik = array.array('i')
ik.fromfile(f,1)
self.h = numpy.empty((ik[0],ks[ksi]),'f')
H = array.array('f')
H.fromfile(f,ik[0])
self.h[:,0] = H
for i in range(1,ks[ksi]): #Fill rest of array with SEDs
F = array.array('i')
F.fromfile(f,F_l)
iw = F[F_i]
G = array.array('f')
G.fromfile(f,iw)
self.sed[:,i] = G
ik = array.array('i')
ik.fromfile(f,1)
H = array.array('f')
H.fromfile(f,ik[0])
self.h[:,i] = H
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.bflx = array.array('f')
self.bflx.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
strm = array.array('f')
strm.fromfile(f,tmp[F_i])
self.strm = numpy.array(strm)
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.evf = array.array('f')
self.evf.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.evf = array.array('f')
self.evf.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.snr = array.array('f')
self.snr.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.pnr = array.array('f')
self.pnr.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.sn = array.array('f')
self.sn.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.bh = array.array('f')
self.bh.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
self.wd = array.array('f')
self.wd.fromfile(f,tmp[F_i])
tmp = array.array('i')
tmp.fromfile(f,F_l)
rmtm = array.array('f')
rmtm.fromfile(f,tmp[F_i])
self.rmtm = numpy.array(rmtm)
class CSP:
def __init__(self,SSPpath = '../ssp/bc03/salpeter/lr/',
age=None,sfh=None,dust=None,metal_ind=None,fesc=None,
sfh_law='exp',dustmodel = 'calzetti',neb_cont=True,neb_met=True):
self.SSPpath = SSPpath
self.files = glob(self.SSPpath + '*.ised')
self.files.sort()
self.iseds = []
self.ta_arr = []
self.metal_arr = []
self.iw_arr = []
self.wave_arr = []
self.sed_arr = []
self.strm_arr = []
self.rmtm_arr = []
#Set up
for file in self.files:
ised_binary = ised(file)
self.ta_arr.append(ised_binary.ta)
self.metal_arr.append(ised_binary.ids)
self.iw_arr.append(ised_binary.iw)
self.wave_arr.append(ised_binary.wave)
self.sed_arr.append(ised_binary.sed)
self.strm_arr.append(ised_binary.strm)
self.rmtm_arr.append(ised_binary.rmtm)
self.iseds.append(ised_binary)
#Find closest match for each tg value in ta - set tg to these values
nebular = numpy.loadtxt('nebular_emission.dat',skiprows=1)
self.neb_cont = nebular[:,1]
self.neb_hlines = nebular[:,2]
self.neb_metal = nebular[:,3:]
self.neb_wave = nebular[:,0]
if None not in (age,sfh,dust,metal_ind):
if fesc is None:
self.build(age,sfh,dust,metal_ind,sfh_law=sfh_law,dustmodel=dustmodel,
neb_cont=neb_cont,neb_met=neb_met)
else:
self.build(age,sfh,dust,metal_ind,fesc,sfh_law,dustmodel,neb_cont,neb_met)
def _sfh_exp(self,t,tau):
sfh = numpy.exp(-1*t/tau)/abs(tau)
return sfh
def _sfh_pow(self,t,alpha):
sfh = numpy.power(t/1.e9,alpha)
return sfh
def _sfh_del(self,t,tau):
sfh = t/(tau**2)*numpy.exp(-t/tau)
return sfh
def _sfh_tru(self,t,tstop):
sfh = numpy.ones_like(t)
sfh[t > tstop*numpy.max(t)] = 0.
sfh /= numpy.trapz(sfh,t)
return sfh
def dust_func(self,lam,ai,bi,ni,li):
"""
Functional form for SMC, LMC and MW extinction curves of
Pei et al. 1992
"""
lam = numpy.array(lam) / 1e4
ki = numpy.power((lam / li),ni) + numpy.power((li / lam),ni) + bi
eta_i = ai / ki
return eta_i
def build(self,age,sfh,dust,metal,fesc=1.,sfh_law='exp',dustmodel = 'calzetti',
neb_cont=True,neb_met=True):
"""
"""
self.tg = age*1.e9
if sfh_law == 'exp':
self.tau = sfh*1.e9
elif sfh_law == 'del':
self.tau = sfh*1.e9
else:
self.tau = sfh
self.tauv = dust
self.mi = int(abs(metal))
self.fesc = fesc
self.sfh_law = sfh_law
self.inc_cont= neb_cont
self.inc_met = neb_met
self.dust_model = dustmodel
mu = 0.3
epsilon = 0.
self.ta = self.ta_arr[self.mi]
self.wave = self.wave_arr[self.mi]
[T1,T2] = numpy.meshgrid(self.tg,self.ta)
tgi = numpy.argmin(numpy.abs(self.tg-self.ta))
self.tg = self.ta[tgi]
if len(self.neb_wave) != len(self.wave):
self.neb_cont = griddata(self.neb_wave,self.neb_cont,self.wave)
self.neb_hlines = griddata(self.neb_wave,self.neb_hlines,self.wave)
neb_metaln = numpy.zeros((len(self.wave),3))
for i in range(3):
neb_metaln[:,i] = griddata(self.neb_wave,self.neb_metal[:,i],self.wave)
self.neb_metal = neb_metaln
self.neb_wave = self.wave
#quietprint("Metallicity "+str(self.mi+1)+":")
#print ".ised file: "+files[abs(SSP)]
sed = self.sed_arr[self.mi]
strm = self.strm_arr[self.mi]
rmtm = self.rmtm_arr[self.mi]
self.iw = self.iw_arr[self.mi]
metal=str((self.metal_arr[self.mi]))[12:-3].strip()
#quietprint(metal[self.mi] + "\nInclude nebular emission: " + str(add_nebular))
SSP_Z = float(re.split("Z=?",metal)[1])
#print SSP_Z,
if SSP_Z <= 0.0004: neb_z = 0
elif SSP_Z > 0.0004 and SSP_Z <= 0.004: neb_z = 1
elif SSP_Z > 0.004: neb_z = 2
#print neb_z
if self.dust_model == "charlot":
ATT = numpy.empty([len(self.wave),len(self.ta)])
tv = ((self.tauv/1.0857)*numpy.ones(len(self.ta)))
tv[self.ta>1e7] = mu*self.tauv
lam = numpy.array((5500/self.wave)**0.7)
ATT[:,:] = (numpy.exp(-1*numpy.outer(lam,tv)))
elif self.dust_model == "calzetti":
ATT = numpy.ones([len(self.wave),len(self.ta)])
k = numpy.zeros_like(self.wave)
w0 = [self.wave <= 1200]
w1 = [self.wave < 6300]
w2 = [self.wave >= 6300]
w_u = self.wave/1e4
x1 = numpy.argmin(numpy.abs(self.wave-1200))
x2 = numpy.argmin(numpy.abs(self.wave-1250))
k[w2] = 2.659*(-1.857 + 1.040/w_u[w2])
k[w1] = 2.659*(-2.156 + (1.509/w_u[w1]) - (0.198/w_u[w1]**2) + (0.011/w_u[w1]**3))
k[w0] = k[x1] + ((self.wave[w0]-1200.) * (k[x1]-k[x2]) / (self.wave[x1]-self.wave[x2]))
k += 4.05
k[k < 0.] = 0.
tv = self.tauv*k/4.05
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*tv)
elif self.dust_model == "calzetti2":
ATT = numpy.ones([len(self.wave),len(self.ta)])
k = numpy.zeros_like(self.wave)
w0 = [self.wave <= 1000]
w1 = [(self.wave > 1000)*(self.wave < 6300)]
w2 = [self.wave >= 6300]
w_u = self.wave/1e4
k[w2] = 2.659*(-1.857 + 1.040/w_u[w2])
k[w1] = 2.659*(-2.156 + (1.509/w_u[w1]) - (0.198/w_u[w1]**2) + (0.011/w_u[w1]**3))
p1 = self.dust_func(self.wave,27,4,5.5,0.08) + self.dust_func(self.wave,185,90,2,0.042)
k[w0] = p1[w0] / (p1[w1][0]/k[w1][0])
k += 4.05
k[k < 0.] = 0.
tv = self.tauv*k/4.05
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*tv)
elif self.dust_model == "smc":
ai = [185., 27., 0.005, 0.01, 0.012, 0.03]
bi = [90., 5.5, -1.95, -1.95, -1.8, 0.]
ni = [2., 4., 2., 2., 2., 2.]
li = [0.042, 0.08, 0.22, 9.7, 18., 25.]
eta = numpy.zeros_like(self.wave)
for i in range(len(ai)):
eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])
Rv = 2.93
Ab = self.tauv * (1 + (1/Rv))
print(numpy.exp(self.tauv*eta))
ATT = numpy.ones([len(self.wave),len(self.ta)])
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*(Ab*eta))
#Offset added to renormalise from B to V band
#ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)
elif self.dust_model == "lmc":
ai = [175., 19., 0.023, 0.005, 0.006, 0.02]
bi = [90., 4.0, -1.95, -1.95, -1.8, 0.]
ni = [2., 4.5, 2., 2., 2., 2.]
li = [0.046, 0.08, 0.22, 9.7, 18., 25.]
eta = numpy.zeros_like(self.wave)
for i in range(len(ai)):
eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])
Rv = 3.16
Ab = self.tauv * (1 + (1/Rv))
ATT = numpy.ones([len(self.wave),len(self.ta)])
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*(Ab*eta))
#Offset added to renormalise from B to V band
#ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)
elif self.dust_model == "mw":
ai = [165., 14., 0.045, 0.002, 0.002, 0.012]
bi = [90., 4., -1.95, -1.95, -1.8, 0.]
ni = [2., 6.5, 2., 2., 2., 2.]
li = [0.047, 0.08, 0.22, 9.7, 18., 25.]
eta = numpy.zeros_like(self.wave)
for i in range(len(ai)):
eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])
Rv = 3.08
Ab = self.tauv * (1 + (1/Rv))
ATT = numpy.ones([len(self.wave),len(self.ta)])
for ti in range(0,len(self.ta)):
ATT[:,ti] *= numpy.power(10,-0.4*(Ab*eta))
#Offset added to renormalise from B to V band
#ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)
"""
SECTION 1
First calculate and store those parameters that are functions of the age array
'ta' only - these are the same for every model to be made. The parameters are
the age array TP, the time interval array DT, the interpolation coefficient
'a' and the interpolation indices J. Each are stored in cell arrays of size ks,
with the data corresponding to the original age array first, and the
interpolated data second.
"""
self.TP = {}
self.A = {}
self.J = {}
self.DT = {}
for ai in range(tgi+1):
#Calculate taux2: the reverse age array; remove those values which
#are less than the first non-zero entry of taux1 - these values
#are treated differently in the original BC code
taux1 = self.ta[:ai+1]
taux2 = self.ta[ai]-self.ta[ai::-1]
if max(taux1) > 0.:
taux2 = numpy.delete(taux2,numpy.where(taux2<taux1[numpy.flatnonzero(taux1)[0]]))
#Remove values common to taux1 and taux2; calculate array TP
[T1,T2] = numpy.meshgrid(taux1,taux2)
[i,j] = numpy.where(T1-T2==0)
taux2 = numpy.delete(taux2, i)
self.TP[ai] = self.ta[ai]-numpy.concatenate((taux1,taux2),axis=0)
l = len(taux2)
#If taux2 has entries, calculate the interpolation parameters a and J.
#The indicies correspond to those values of 'ta' which are just below
#the entries in taux2. They are calculated by taking the difference
#between the two arrays, then finding the last negative entry in the
#resulting array.
if l == 0:
self.J[ai] = numpy.array([])
self.A[ai] = numpy.array([])
if l>0:
[T1,T2] = numpy.meshgrid(self.ta,taux2)
T = T1-T2
T[numpy.where(T<=0)] = 0
T[numpy.where(T!=0)] = 1
T = numpy.diff(T,1,1)
(i,self.J[ai]) = T.nonzero()
self.A[ai] = (numpy.log10(taux2/self.ta[self.J[ai]]) /
numpy.log10(self.ta[self.J[ai]+1]/self.ta[self.J[ai]]))
#Calculate age difference array: the taux arrays are joined and
#sorted, the differences calculated, then rearranged back to the order
#of the original taux values.
taux = numpy.concatenate((taux1,taux2),axis=0)
taux.sort()
b = numpy.searchsorted(taux,taux1)
c = numpy.searchsorted(taux,taux2)
order = numpy.concatenate((b,c))
d = numpy.diff(taux)
dt = numpy.append(d,0) + numpy.append(0,d)
self.DT[ai] = numpy.copy(dt[order])
SED = numpy.empty([len(self.wave)])
Nlyman = numpy.empty([1])
Nlyman_final = numpy.empty([1])
beta = numpy.empty([1])
norm =
|
numpy.empty([1])
|
numpy.empty
|
################################################################################
# Copyright (C) 2014 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Demonstrate categorical Markov chain with hidden Markov model (HMM)
"""
import numpy as np
import matplotlib.pyplot as plt
from bayespy.nodes import Gaussian, \
CategoricalMarkovChain, \
Dirichlet, \
Mixture, \
Categorical
from bayespy.inference.vmp.vmp import VB
import bayespy.plot as bpplt
def hidden_markov_model(distribution, *args, K=3, N=100):
# Prior for initial state probabilities
alpha = Dirichlet(1e-3*np.ones(K),
name='alpha')
# Prior for state transition probabilities
A = Dirichlet(1e-3*np.ones(K),
plates=(K,),
name='A')
# Hidden states (with unknown initial state probabilities and state
# transition probabilities)
Z = CategoricalMarkovChain(alpha, A,
states=N,
name='Z')
# Emission/observation distribution
Y = Mixture(Z, distribution, *args,
name='Y')
Q = VB(Y, Z, alpha, A)
return Q
def mixture_model(distribution, *args, K=3, N=100):
# Prior for state probabilities
alpha = Dirichlet(1e-3*np.ones(K),
name='alpha')
# Cluster assignments
Z = Categorical(alpha,
plates=(N,),
name='Z')
# Observation distribution
Y = Mixture(Z, distribution, *args,
name='Y')
Q = VB(Y, Z, alpha)
return Q
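# Illustrative sketch, assuming fixed emission parameters: three 2-D Gaussian
# components with known means and unit precision, so only the initial-state and
# transition probabilities are inferred. The synthetic observations and the
# Q['Y'].observe(...) call are assumptions made for this sketch; the full demo
# in run() below generates proper data.
def _demo_hmm_sketch(N=50):
    means = np.array([[0.0, 0.0], [3.0, 4.0], [6.0, 0.0]])
    Q = hidden_markov_model(Gaussian, means, np.identity(2), K=3, N=N)
    y = means[np.random.randint(0, 3, size=N)] + np.random.randn(N, 2)
    Q['Y'].observe(y)
    Q.update(repeat=10)
    return Q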
@bpplt.interactive
def run(N=200, maxiter=10, seed=42, std=2.0, plot=True):
# Use deterministic random numbers
if seed is not None:
np.random.seed(seed)
#
# Generate data
#
mu =
|
np.array([ [0,0], [3,4], [6,0] ])
|
numpy.array
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter as P
from torchvision.models.inception import inception_v3
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import numpy as np
import math
from sklearn.linear_model import LinearRegression
import os
import glob
from tqdm import tqdm
from PIL import Image
from scipy import linalg
# randn_sampler below relies on NormalQMCEngine for Sobol sampling; in the
# original module this is assumed to come from botorch:
from botorch.sampling.qmc import NormalQMCEngine
class randn_sampler():
"""
Generates z~N(0,1) using random sampling or scrambled Sobol sequences.
Args:
ndim: (int)
The dimension of z.
use_sobol: (bool)
If True, sample z from scrambled Sobol sequence. Else, sample
from standard normal distribution.
Default: False
use_inv: (bool)
If True, use inverse CDF to transform z from U[0,1] to N(0,1).
Else, use Box-Muller transformation.
Default: True
cache: (bool)
If True, we cache some amount of Sobol points and reorder them.
This is mainly used for training GANs when we use two separate
Sobol generators which helps stabilize the training.
Default: False
Examples::
>>> sampler = randn_sampler(128, True)
>>> z = sampler.draw(10) # Generates [10, 128] vector
"""
def __init__(self, ndim, use_sobol=False, use_inv=True, cache=False):
self.ndim = ndim
self.cache = cache
if use_sobol:
self.sampler = NormalQMCEngine(d=ndim, inv_transform=use_inv)
self.cached_points = torch.tensor([])
else:
self.sampler = None
def draw(self, batch_size):
if self.sampler is None:
return torch.randn([batch_size, self.ndim])
else:
if self.cache:
if len(self.cached_points) < batch_size:
# sample from sampler and reorder the points
self.cached_points = self.sampler.draw(int(1e6))[torch.randperm(int(1e6))]
# Sample without replacement from cached points
samples = self.cached_points[:batch_size]
self.cached_points = self.cached_points[batch_size:]
return samples
else:
return self.sampler.draw(batch_size)
def calculate_FID_infinity(dataloader, gt_m, gt_s, batch_size, num_points=15):
"""
Calculates effectively unbiased FID_inf using extrapolation
Args:
gen_model: (nn.Module)
The trained generator. Generator takes in z~N(0,1) and outputs
an image of [-1, 1].
ndim: (int)
The dimension of z.
batch_size: (int)
The batch size of generator
gt_path: (str)
Path to saved FID statistics of true data.
num_im: (int)
Number of images we are generating to evaluate FID_inf.
Default: 50000
num_points: (int)
Number of FID_N we evaluate to fit a line.
Default: 15
"""
# load pretrained inception model
inception_model = load_inception_net()
# get all activations of generated images
activations = get_activations(dataloader, inception_model).cpu().numpy()
fids = []
# Choose the number of images to evaluate FID_N at regular intervals over N
fid_batches = np.linspace(5000, len(dataloader), num_points).astype('int32')
# Evaluate FID_N
for fid_batch_size in fid_batches:
# sample with replacement
np.random.shuffle(activations)
fid_activations = activations[:fid_batch_size]
fids.append(calculate_FID(fid_activations, gt_m, gt_s))
fids = np.array(fids).reshape(-1, 1)
# Fit linear regression
reg = LinearRegression().fit(1 / fid_batches.reshape(-1, 1), fids)
fid_infinity = reg.predict(np.array([[0]]))[0, 0]
return fid_infinity
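# Illustrative sketch of the extrapolation step above, in isolation: FID_N is
# treated as (approximately) linear in 1/N, so a line fitted to (1/N, FID_N)
# pairs and evaluated at 1/N = 0 gives the FID_infinity estimate. The FID
# values below are made-up placeholders purely to show the mechanics.
def _demo_fid_extrapolation():
    fid_batches = np.array([5000, 10000, 20000, 40000])
    fids = np.array([24.1, 22.8, 22.1, 21.8]).reshape(-1, 1)   # placeholder FID_N values
    reg = LinearRegression().fit(1 / fid_batches.reshape(-1, 1), fids)
    return reg.predict(np.array([[0]]))[0, 0]                   # extrapolated FID at N -> infinity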
def calculate_IS_infinity(gen_model, ndim, batch_size, num_im=50000, num_points=15):
"""
Calculates effectively unbiased IS_inf using extrapolation
Args:
gen_model: (nn.Module)
The trained generator. Generator takes in z~N(0,1) and outputs
an image of [-1, 1].
ndim: (int)
The dimension of z.
batch_size: (int)
The batch size of generator
num_im: (int)
Number of images we are generating to evaluate IS_inf.
Default: 50000
num_points: (int)
Number of IS_N we evaluate to fit a line.
Default: 15
"""
# load pretrained inception model
inception_model = load_inception_net()
# define a sobol_inv sampler
z_sampler = randn_sampler(ndim, True)
# get all activations of generated images
_, logits = accumulate_activations(gen_model, inception_model, num_im, z_sampler, batch_size)
IS = []
# Choose the number of images to evaluate IS_N at regular intervals over N
IS_batches = np.linspace(5000, num_im, num_points).astype('int32')
# Evaluate IS_N
for IS_batch_size in IS_batches:
# sample with replacement
np.random.shuffle(logits)
IS_logits = logits[:IS_batch_size]
IS.append(calculate_inception_score(IS_logits)[0])
IS = np.array(IS).reshape(-1, 1)
# Fit linear regression
reg = LinearRegression().fit(1 / IS_batches.reshape(-1, 1), IS)
IS_infinity = reg.predict(np.array([[0]]))[0, 0]
return IS_infinity
################# Functions for calculating and saving dataset inception statistics ##################
class im_dataset(Dataset):
def __init__(self, data_dir):
self.data_dir = data_dir
self.imgpaths = self.get_imgpaths()
print(self.data_dir)
self.transform = transforms.Compose([
transforms.ToTensor()])
def get_imgpaths(self):
paths = glob.glob(os.path.join(self.data_dir, "**/*.jpg"), recursive=True)
return paths
def __getitem__(self, idx):
img_name = self.imgpaths[idx]
image = self.transform(Image.open(img_name))
return image
def __len__(self):
return len(self.imgpaths)
def load_path_statistics(path):
"""
Given path to dataset npz file, load and return mu and sigma
"""
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
return m, s
else:
raise RuntimeError('Invalid path: %s' % path)
def compute_path_statistics(path, batch_size):
"""
Given path to a dataset, load and compute mu and sigma.
Save to stats to out_path
"""
if not os.path.exists(path):
raise RuntimeError('Invalid path: %s' % path)
model = load_inception_net()
dataloader = torch.utils.data.DataLoader(im_dataset(path), batch_size=batch_size, drop_last=False,
**{'num_workers': 8, 'pin_memory': False})
act = get_activations(dataloader, model).cpu().numpy()
m, s = np.mean(act, axis=0), np.cov(act, rowvar=False)
return m, s
def get_activations(dataloader, model):
"""
Get inception activations from dataset
"""
pool = []
for images in tqdm(dataloader):
images = images.cuda()
with torch.no_grad():
pool_val, logits_val = model(images)
pool += [pool_val]
return torch.cat(pool, 0)
####################### Functions to help calculate FID and IS #######################
def calculate_FID(act, data_m, data_s):
"""
calculate score given activations and path to npz
"""
gen_m, gen_s = np.mean(act, axis=0), np.cov(act, rowvar=False)
FID = numpy_calculate_frechet_distance(gen_m, gen_s, data_m, data_s)
return FID
def calculate_inception_score(pred, num_splits=1):
scores = []
for index in range(num_splits):
pred_chunk = pred[index * (pred.shape[0] // num_splits): (index + 1) * (pred.shape[0] // num_splits), :]
kl_inception = pred_chunk * (np.log(pred_chunk) - np.log(np.expand_dims(np.mean(pred_chunk, 0), 0)))
kl_inception = np.mean(np.sum(kl_inception, 1))
scores.append(np.exp(kl_inception))
return np.mean(scores),
|
np.std(scores)
|
numpy.std
|
#!/usr/local/sci/bin/python2.7
#*****************************
#
# general utilities & classes for Python gridding.
#
#
#************************************************************************
'''
Author: <NAME>
Created: March 2016
Last update: 12 April 2016
Location: /project/hadobs2/hadisdh/marine/PROGS/Build
-----------------------
CODE PURPOSE AND OUTPUT
-----------------------
A set of class definitions and routines to help with the gridding of HadISDH Marine
-----------------------
LIST OF MODULES
-----------------------
None
-----------------------
DATA
-----------------------
None
-----------------------
HOW TO RUN THE CODE
-----------------------
All routines to be called from external scripts.
-----------------------
OUTPUT
-----------------------
None
-----------------------
VERSION/RELEASE NOTES
-----------------------
Version 2 (26 Sep 2016) <NAME>
---------
Enhancements
This now works with doNOWHOLE, which is the BC total run but excluding data with whole-number flags
Changes
Bug fixes
Version 2 (26 Sep 2016) <NAME>
---------
Enhancements
This can now work with the 3 QC iterations and BC options
This has a ShipOnly option in read_qc_data to pull through only ship data --ShipOnly
Bug fixed to work with ship only bias corrected data - platform_meta[:,2] rather than the QConly platform_meta[:,3]
Changes
Bug fixes
Possible bug fix in set_qc_flag_list
The full QC flag list here was incomplete, so the QC flags weren't being matched up correctly.
This is now based on MDS_RWtools standard list.
Possible number of elements mistake in read_qc_data
This was causing an error where it was trying to treat 'None' as an integer. I think it was miscounting the elements.
This is now based on MDS_RWtools standard list.
Version 1 (release date)
---------
Enhancements
Changes
Bug fixes
-----------------------
OTHER INFORMATION
-----------------------
'''
import os
import datetime as dt
import numpy as np
import sys
import argparse
import matplotlib
import struct
import netCDF4 as ncdf
import pdb
#*********************************************
class MetVar(object):
'''
Bare bones class for meteorological variable
'''
def __init__(self, name, long_name):
self.name = name
self.long_name = long_name
def __str__(self):
return "variable: {}, long_name: {}".format(self.name, self.long_name)
__repr__ = __str__
#*********************************************
class TimeVar(object):
'''
Bare bones class for times
'''
def __init__(self, name, long_name, units, standard_name):
self.name = name
self.long_name = long_name
self.units = units
self.standard_name = standard_name
def __str__(self):
return "time: {}, long_name: {}, units: {}".format(self.name, self.long_name, self.units)
__repr__ = __str__
#*****************************************************
# KATE modified - added BC options
#def set_qc_flag_list(doBC = False, doUncert = False):
def set_qc_flag_list(doBC = False, doBCtotal = False, doBChgt = False, doBCscn = False, doNOWHOLE = False, doUncert = False):
# end
'''
Set the QC flags present in the raw data
:param bool doBC: run for bias corrected data
:param bool doUncert: work on files with uncertainty information (not currently used)
:returns: QC_FLAGS - np string array
'''
# KATE modified - added BC options
# if doBC:
if doBC | doBCtotal | doBChgt | doBCscn | doNOWHOLE:
# end
# reduced number of QC flags.
return np.array(["day","land","trk","date1","date2","pos","blklst","dup",\
"SSTbud","SSTclim","SSTnonorm","SSTfreez","SSTrep",\
"ATbud","ATclim","ATnonorm","ATround","ATrep",\
"DPTbud","DPTclim","DPTssat","DPTround","DPTrep","DPTrepsat"])
else:
# KATE modified - this doesn't seem to be working and I can't quite see how the subset listed below would work without any former subsetting of the read in data
# This now uses the complete list from MDS_RWtools.py standard version
# full list
return np.array(["day","land","trk","date1","date2","pos","blklst","dup","POSblank1",\
"SSTbud","SSTclim","SSTnonorm","SSTfreez","SSTnoval","SSTnbud","SSTbbud","SSTrep","SSTblank",\
"ATbud","ATclim","ATnonorm","ATblank1","ATnoval","ATround","ATbbud","ATrep","ATblank2",\
"DPTbud","DPTclim","DPTnonorm","DPTssat","DPTnoval","DPTround","DPTbbud","DPTrep","DPTrepsat",\
"few","ntrk","POSblank2","POSblank3","POSblank4","POSblank5","POSblank6","POSblank7"]) # set_qc_flag_list
# # full number
# return np.array(["day","land","trk","date1","date2","pos","blklst","dup",\
#"SSTbud","SSTclim","SSTnonorm","SSTfreez","SSTrep",\
#"ATbud","ATclim","ATnonorm","ATnoval","ATround","ATrep",\
#"DPTbud","DPTclim","DPTnonorm","DPTssat","DPTnoval","DPTround","DPTrep","DPTrepsat"]) # set_qc_flag_list
# end
# RD - kept original flag array here just in case MDS_RWtools isn't used before next read
#np.array(["day","land","trk","date1","date2","pos","blklst","dup","POSblank1",\
#"SSTbud","SSTclim","SSTnonorm","SSTfreez","SSTnoval","SSTnbud","SSTbbud","SSTrep","SSTblank",\
#"ATbud","ATclim","ATnonorm","ATblank1","ATnoval","ATnbud","ATbbud","ATrep","ATblank2",\
#"DPTbud","DPTclim","DPTnonorm","DPTssat","DPTnoval","DPTnbud","DPTbbud","DPTrep","DPTrepsat",\
#"few","ntrk","DUMblank1","DUMblank2","DUMblank3","DUMblank4","DUMblank5","DUMblank6"])
#*****************************************************
# KATE modified - added BC options
#def read_qc_data(filename, location, fieldwidths, doBC = False):
def read_qc_data(filename, location, fieldwidths, doBC = False, doBCtotal = False, doBChgt = False, doBCscn = False, doNOWHOLE = False, ShipOnly = False):
# end
"""
Read in the QC'd data and return
Expects fixed field format
http://stackoverflow.com/questions/4914008/efficient-way-of-parsing-fixed-width-files-in-python
:param str filename: filename to read
:param str location: location of file
:param str fieldwidths: fixed field widths to use
:param bool doBC: run on the bias corrected data
# KATE modified - added BC options
:param bool doBCtotal: run on the full bias corrected data
:param bool doBChgt: run on the height only bias corrected data
:param bool doBCscn: run on the screen only bias corrected data
# end
:param bool doNOWHOLE: run on the bias corrected data with no whole number flags set
# KATE modified - added BC options
:param bool ShipOnly: select only ship platform (0:5) data
# end
:returns: data - np.array of string data
"""
fmtstring = ''.join('%ds' % f for f in fieldwidths)
parse = struct.Struct(fmtstring).unpack_from
platform_data = []
platform_meta = []
platform_obs = []
platform_qc = []
# pdb.set_trace()
with open(os.path.join(location, filename), 'r') as infile:
for line in infile:
try:
if doBC:
# some lines might not be the correct length
assert len(line) == 751
fields = parse(line)
# now unpack and process
platform_data += [fields[: 8]]
dummy_obs = [fields[8: 8+18]] # used to help counting the fields
platform_obs += [fields[8+18: 8+18+14]] # the ???tbc fields
dummy_obs = [fields[8+18+14: 8+18+14+14+14+14]] # ditto
platform_meta += [fields[8+18+14+14+14+14: 8+18+14+14+14+14+12]]
platform_qc += [fields[8+18+14+14+14+14+12:]]
# KATE modified - added BC options
elif doBCtotal | doNOWHOLE:
# some lines might not be the correct length
assert len(line) == 751
fields = parse(line)
# now unpack and process
platform_data += [fields[: 8]]
dummy_obs = [fields[8: 8+18]] # used to help counting the fields
platform_obs += [fields[8+18: 8+18+14]] # the ???tbc fields
dummy_obs = [fields[8+18+14: 8+18+14+14+14+14]] # ditto
platform_meta += [fields[8+18+14+14+14+14: 8+18+14+14+14+14+12]] # 3rd element is PT
platform_qc += [fields[8+18+14+14+14+14+12:]]
elif doBChgt:
# some lines might not be the correct length
assert len(line) == 751
fields = parse(line)
# now unpack and process
platform_data += [fields[: 8]]
dummy_obs = [fields[8: 8+18+14]] # used to help counting the fields
platform_obs += [fields[8+18+14: 8+18+14+14]] # the ???tbc fields
dummy_obs = [fields[8+18+14+14: 8+18+14+14+14+14]] # ditto
platform_meta += [fields[8+18+14+14+14+14: 8+18+14+14+14+14+12]] # 3rd element is PT
platform_qc += [fields[8+18+14+14+14+14+12:]]
elif doBCscn:
# some lines might not be the correct length
assert len(line) == 751
fields = parse(line)
# now unpack and process
platform_data += [fields[: 8]]
dummy_obs = [fields[8: 8+18+14+14]] # used to help counting the fields
platform_obs += [fields[8+18+14+14: 8+18+14+14+14]] # the ???tbc fields
dummy_obs = [fields[8+18+14+14+14: 8+18+14+14+14+14]] # ditto
platform_meta += [fields[8+18+14+14+14+14: 8+18+14+14+14+14+12]] # 3rd element is PT
platform_qc += [fields[8+18+14+14+14+14+12:]]
# end
else:
# some lines might not be the correct length
assert len(line) == 410
fields = parse(line)
# now unpack and process
platform_data += [fields[: 8]]
platform_obs += [fields[8: 8+17]]
# KATE modified - this seems to be wrong
platform_meta += [fields[8+17: 8+17+30]] # 4th element is PT
platform_qc += [fields[8+17+30:]]
#platform_meta += [fields[8+17: 8+17+20]]
#platform_qc += [fields[8+17+20:]]
# end
except AssertionError:
print("skipping line in {} - malformed data".format(filename))
print(line)
except OSError:
print("file {} missing".format(filename))
sys.exit()
# convert to arrays
platform_qc = np.array(platform_qc)
platform_obs = np.array(platform_obs)
platform_meta =
|
np.array(platform_meta)
|
numpy.array
|
import pickle
import cv2
from skimage.filters import threshold_otsu, threshold_local
from skimage import measure
from scipy import ndimage
import sys
import numpy as np
import matplotlib.pyplot as plt
import os
import glob
import imageio
import scipy
from scipy import signal
from skimage.feature import peak_local_max
from scipy.signal import find_peaks
from skimage.segmentation import watershed
from skimage.measure import label, regionprops
import csv
import pandas as pd
import random
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram, fcluster, leaves_list
import networkx as nx
from scipy.linalg import polar
from numpy import linalg as LA
import time_series as ts
import moviepy.editor as mp
##########################################################################################
# visualization on the images
##########################################################################################
##########################################################################################
def visualize_segmentation(folder_name, gaussian_filter_size=1,frame_num=0,include_eps=False):
"""Visualize the results of z-disk and sarcomere segmentation."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
# --> visualize segmentation
raw_img = np.load('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/frame-%04d.npy'%(frame_num))
# plot of segmented z disks
box = -1
laplacian = cv2.Laplacian(raw_img,cv2.CV_64F)
laplacian = ndimage.gaussian_filter(laplacian, gaussian_filter_size)
contour_thresh = threshold_otsu(laplacian)
contour_image = laplacian
contours = measure.find_contours(contour_image,contour_thresh)
total = 0
contour_list = []
for n, contour in enumerate(contours):
total += 1
if contour.shape[0] >= 8:
contour_list.append(contour)
band_data = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/segmented_bands/frame-%04d_bands.txt'%(frame_num))
z_disc_x = band_data[:,0]
z_disc_y = band_data[:,1]
# --> import sarcomeres
sarc_data = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/segmented_sarc/frame-%04d_sarc_data.txt'%(frame_num))
sarc_x = sarc_data[:,2]
sarc_y = sarc_data[:,3]
fig, axs = plt.subplots(1,2,figsize=(10,5))
axs[0].imshow(raw_img, cmap=plt.cm.gray); axs[0].set_title('z-disks -- frame %i, %i found'%(frame_num,len(contour_list)))
for kk in range(0,len(contour_list)):
cont = contour_list[kk]
axs[0].plot(cont[:,1],cont[:,0])
axs[0].set_xticks([]); axs[0].set_yticks([])
axs[1].imshow(raw_img, cmap=plt.cm.gray); axs[1].set_title('sarcomeres -- frame %i, %i found'%(frame_num,sarc_x.shape[0]))
axs[1].plot(sarc_y,sarc_x,'r*',markersize=3)
axs[1].set_xticks([]); axs[1].set_yticks([])
plt.savefig(out_analysis + '/visualize_segmentation_%04d'%(frame_num))
if include_eps:
plt.savefig(out_analysis + '/visualize_segmentation_%04d.eps'%(frame_num))
return
##########################################################################################
def get_frame_matrix(folder_name, frame):
"""Get the npy matrix for a frame of the movie."""
if frame < 10: file_root = '_matrices/frame-000%i'%(frame)
elif frame < 100: file_root = '_matrices/frame-00%i'%(frame)
else: file_root = '_matrices/frame-0%i'%(frame)
root = 'ALL_MOVIES_MATRICES/' + folder_name + file_root + '.npy'
raw_img = np.load(root)
return raw_img
##########################################################################################
def visualize_contract_anim_movie(folder_name,re_run_timeseries=False, use_re_run_timeseries=False, keep_thresh=0.75,include_eps=False,single_frame=False):
"""Visualize the results of tracking."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
if single_frame:
num_frames = 1
if use_re_run_timeseries:
tag_vis = 'for_plotting_'
if re_run_timeseries:
ts.timeseries_all(folder_name, keep_thresh, True)
else:
tag_vis = ''
plot_info_frames_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/' + tag_vis + 'plotting_all_frames.pkl'
ALL_frames_above_thresh = pickle.load( open( plot_info_frames_fname , "rb" ) )
plot_info_x_pos_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/' + tag_vis + 'plotting_all_x.pkl'
ALL_x_pos_above_thresh = pickle.load( open( plot_info_x_pos_fname , "rb" ) )
plot_info_y_pos_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/' + tag_vis + 'plotting_all_y.pkl'
ALL_y_pos_above_thresh = pickle.load( open( plot_info_y_pos_fname , "rb" ) )
sarc_data_normalized_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/' + tag_vis + 'tracking_results_leng.txt'
all_normalized = np.loadtxt(sarc_data_normalized_fname)
if use_re_run_timeseries:
out_plots = out_analysis + '/for_plotting_contract_anim'
else:
out_plots = out_analysis + '/contract_anim'
if not os.path.exists(out_plots): os.makedirs(out_plots)
# --> plot every frame, plot every sarcomere according to normalized fraction length
color_matrix = np.zeros(all_normalized.shape)
for kk in range(0,all_normalized.shape[0]):
for jj in range(0,all_normalized.shape[1]):
of = all_normalized[kk,jj]
if of < -.2: color_matrix[kk,jj] = 0
elif of > .2: color_matrix[kk,jj] = 1
else: color_matrix[kk,jj] = of*2.5 + .5
img_list = []
for t in range(0,num_frames):
if t < 10: file_root = '/frame-000%i'%(t)
elif t < 100: file_root = '/frame-00%i'%(t)
else: file_root = '/frame-0%i'%(t)
img = get_frame_matrix(folder_name,t)
plt.figure()
plt.imshow(img, cmap=plt.cm.gray)
for kk in range(0,all_normalized.shape[0]):
if t in ALL_frames_above_thresh[kk]:
ix = np.argwhere(np.asarray(ALL_frames_above_thresh[kk]) == t)[0][0]
col = (1-color_matrix[kk,t], 0 , color_matrix[kk,t])
yy = ALL_y_pos_above_thresh[kk][ix]
xx = ALL_x_pos_above_thresh[kk][ix]
plt.scatter(yy,xx,s=15,color=col,marker='o')
ax = plt.gca()
ax.set_xticks([]); ax.set_yticks([])
plt.savefig(out_plots + '/' + file_root + '_length')
if include_eps:
plt.savefig(out_plots + '/' + file_root + '_length.eps')
plt.close()
img_list.append(imageio.imread(out_plots + '/' + file_root + '_length.png'))
if num_frames > 1:
imageio.mimsave(out_plots + '/contract_anim.gif', img_list)
# clip = mp.VideoFileClip(out_plots + '/contract_anim.gif')
# clip.write_videofile( 'Kehan_Tracked_Movies/' + folder_name + '.mp4') # put all movies in one folder
return
##########################################################################################
# plot the spatial graph
##########################################################################################
##########################################################################################
def visualize_spatial_graph(folder_name,include_eps=False):
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
######################################################################################
out_graph = 'ALL_MOVIES_PROCESSED' + '/' + folder_name + '/graph'
with open(out_graph + '/graph.pkl', 'rb') as f: G = pickle.load(f)
with open(out_graph + '/pos.pkl', 'rb') as f: pos = pickle.load(f)
# plot spatial graph
G2 = nx.Graph()
nodes_list = list(G.nodes())
for kk in range(0,len(nodes_list)): G2.add_node(nodes_list[kk])
edges_list = list(G.edges())
orient_list = []
for kk in range(0,len(edges_list)):
# define the angle of the edge
node_1_ix = edges_list[kk][0]
node_2_ix = edges_list[kk][1]
x1 = pos[node_1_ix][0]
x2 = pos[node_2_ix][0]
y1 = pos[node_1_ix][1]
y2 = pos[node_2_ix][1]
rad = ((x1-x2)**2.0 + (y1-y2)**2.0)**0.5
x_val = (x2-x1)/rad
y_val = (y2-y1)/rad
ang = np.abs(np.dot([1,0],[x_val,y_val]))
orient_list.append(ang)
G2.add_edge(node_1_ix,node_2_ix,weight=ang)
# for each node, determine local alignment --
node_val_list = []
for kk in range(0,len(nodes_list)):
ix = nodes_list[kk]
ed_li = list(G.edges(ix))
val = 0
num = 0
for jj in range(0,len(ed_li)):
for ii in range(jj+1,len(ed_li)):
node_1a_ix = ed_li[jj][0]
node_1b_ix = ed_li[jj][1]
node_2a_ix = ed_li[ii][0]
node_2b_ix = ed_li[ii][1]
x1a = pos[node_1a_ix][0]
x1b = pos[node_1b_ix][0]
y1a = pos[node_1a_ix][1]
y1b = pos[node_1b_ix][1]
x2a = pos[node_2a_ix][0]
x2b = pos[node_2b_ix][0]
y2a = pos[node_2a_ix][1]
y2b = pos[node_2b_ix][1]
rad1 = ((x1a-x1b)**2.0 + (y1a-y1b)**2.0)**0.5
rad2 = ((x2a-x2b)**2.0 + (y2a-y2b)**2.0)**0.5
vec1 = [(x1a-x1b)/rad1,(y1a-y1b)/rad1]
vec2 = [(x2a-x2b)/rad2,(y2a-y2b)/rad2]
val += np.abs(np.dot( vec1 , vec2 ))
num += 1
if num > 0:
node_val_list.append(val/num)
else:
node_val_list.append(0)
plt.figure(figsize=(5,5))
edges,weights = zip(*nx.get_edge_attributes(G2,'weight').items())
nx.draw(G2,pos,node_color='k',node_size=10, width=2, edge_color=weights, edge_cmap = plt.cm.rainbow)
x_list = []; y_list = []
mi = np.min(node_val_list); ma = np.max(node_val_list)
for kk in range(0,len(nodes_list)):
ix = nodes_list[kk]
x = pos[ix][0]
y = pos[ix][1]
val = 1 - ((node_val_list[kk] - mi) /(ma - mi)*0.75 + 0.25)
if node_val_list[kk] > .9:
plt.plot(x,y,'.',color=(val,val,val),ms=10)
elif node_val_list[kk] > .75:
plt.plot(x,y,'.',color=(val,val,val),ms=7.5)
else:
plt.plot(x,y,'.',color=(val,val,val),ms=5)
######################################################################################
plt.savefig(out_analysis + '/' + folder_name + '_spatial_graph')
if include_eps:
plt.savefig(out_analysis + '/' + folder_name + '_spatial_graph.eps')
plt.close()
return
##########################################################################################
# time series plots and analysis
##########################################################################################
##########################################################################################
def DTWDistance(s1, s2):
"""Compute distance based on dynamic time warping (DTW)"""
DTW={}
for i in range(len(s1)):
DTW[(i, -1)] = float('inf')
for i in range(len(s2)):
DTW[(-1, i)] = float('inf')
DTW[(-1, -1)] = 0
for i in range(len(s1)):
for j in range(len(s2)):
dist= (s1[i]-s2[j])**2
DTW[(i, j)] = dist + min(DTW[(i-1, j)],DTW[(i, j-1)], DTW[(i-1, j-1)])
return np.sqrt(DTW[len(s1)-1, len(s2)-1])
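##########################################################################################
# Illustrative sketch, not part of the original analysis: DTWDistance() on two
# short, time-shifted traces. Because DTW warps the time axis, the delayed copy
# of the pulse gives a DTW distance of 0 here, while the pointwise Euclidean
# distance is 2. The arrays are placeholders.
def _demo_dtw_distance():
    s1 = np.array([0., 1., 2., 1., 0., 0.])
    s2 = np.array([0., 0., 1., 2., 1., 0.])   # same pulse, delayed by one frame
    return DTWDistance(s1, s2), np.linalg.norm(s1 - s2)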
##########################################################################################
def cluster_timeseries_plot_dendrogram(folder_name,compute_dist_DTW,compute_dist_euclidean=False):
"""Cluster timeseries and plot a dendrogram that shows the clustering."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
if compute_dist_DTW == False and compute_dist_euclidean == False: load_dist_DTW = True
else: load_dist_DTW = False
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
sarc_data_normalized_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_frames.txt'
arr_frames = np.loadtxt(sarc_data_normalized_fname)
sarc_data_normalized_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_leng.txt'
arr_leng = np.loadtxt(sarc_data_normalized_fname)
X = arr_leng
if compute_dist_DTW:
num_sarc = X.shape[0]
dist_mat = np.zeros((num_sarc,num_sarc))
for kk in range(0,num_sarc):
for jj in range(kk+1,num_sarc):
dist_mat[kk,jj] = DTWDistance(X[kk,:],X[jj,:])
np.savetxt( 'ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dist_mat_DTW.txt',dist_mat)
dist_mat = dist_mat + dist_mat.T
elif load_dist_DTW:
dist_mat = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dist_mat_DTW.txt')
dist_mat = dist_mat + dist_mat.T
elif compute_dist_euclidean:
Y = pdist(X, 'euclidean')
dist_mat = squareform(Y)
dist_v = squareform(dist_mat)
Z = linkage(dist_v , method='ward', metric='euclidean')
ll = leaves_list(Z)
# --> plot dendrogram
plt.figure(figsize=(9,30),frameon=False)
plt.subplot(1,2,1)
# dendrogram
dn1 = dendrogram(Z,orientation='left',color_threshold=0, above_threshold_color='k') #,truncate_mode='lastp')
ordered = dn1['leaves'] #from bottom to top
if compute_dist_DTW or load_dist_DTW:
np.savetxt('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dendrogram_order_DTW.txt',np.asarray(ordered))
else:
np.savetxt('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dendrogram_order_euc.txt',np.asarray(ordered))
ax = plt.gca()
ax.xaxis.set_visible(False)
plt.subplot(1,2,2)
ax = plt.gca()
for kk in range(0,len(ordered)):
ix = ordered[kk]
col = (1-kk/len(ordered), kk/len(ordered) , 1- kk/len(ordered))
plt.plot(X[ix,:] + kk*.3,c=col)
plt.tight_layout()
plt.ylim((-.4,kk*.3+.35))
plt.axis('off')
if compute_dist_DTW or load_dist_DTW:
plt.savefig('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dendrogram_DTW.pdf')
else:
plt.savefig('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/dendrogram_euclidean.pdf')
return
##########################################################################################
def plot_normalized_tracked_timeseries(folder_name,include_eps=False):
"""Create a plot of the normalized tracked time series."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
sarc_data_normalized_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_leng.txt'
all_normalized = np.loadtxt(sarc_data_normalized_fname)
plt.figure()
plt.plot(all_normalized.T,linewidth=.25)
plt.plot(np.median(all_normalized.T,axis=1),'k-',linewidth=3,label='median curve')
plt.plot(np.mean(all_normalized.T,axis=1),'--',color=(.5,.5,.5),linewidth=3,label='mean curve')
plt.legend()
plt.xlabel('frame')
plt.ylabel('normalized length')
plt.title('timeseries data, tracked and normalized, %i sarcomeres'%(all_normalized.shape[0]))
plt.ylim((-.1,.1))
plt.tight_layout()
plt.savefig('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/timeseries_tracked_normalized')
if include_eps:
plt.savefig('ALL_MOVIES_PROCESSED/' + folder_name + '/analysis/timeseries_tracked_normalized.eps')
return
##########################################################################################
def plot_untracked_absolute_timeseries(folder_name,include_eps=False):
"""Create a plot of the un-tracked absolute sarcomere lengths."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
ALL_PIX_LEN = []; med = []; ix = []; num_sarc = []
for frame in range(0,num_frames):
if frame < 10: file_root = '/frame-000%i'%(frame)
elif frame < 100: file_root = '/frame-00%i'%(frame)
else: file_root = '/frame-0%i'%(frame)
fname = external_folder_name + '/' + folder_name + '/segmented_sarc/' + file_root + '_sarc_data.txt'
data = np.loadtxt(fname)
pix_len = data[:,4]
ALL_PIX_LEN.append(pix_len)
med.append(np.median(pix_len))
ix.append(frame+1)
num_sarc.append(len(pix_len))
# --> create a violin plot of everything
plt.figure(figsize=(12,6))
plt.subplot(3,1,1)
ax = plt.gca()
ax.violinplot(ALL_PIX_LEN)
plt.plot(ix,med,'ro',label='median')
plt.legend()
plt.xlabel('frame')
plt.ylabel('sarc len in pixels')
plt.title(folder_name + ' absolute sarcomere length untracked')
plt.subplot(3,1,2)
plt.plot(ix,med,'k-')
plt.plot(ix,med,'ro',label='median')
plt.legend()
plt.xlabel('frame')
plt.ylabel('sarc len in pixels')
plt.subplot(3,1,3)
plt.plot(ix,num_sarc,'k-')
plt.plot(ix,num_sarc,'go')
plt.xlabel('frame')
plt.ylabel('# sarc segmented')
plt.savefig( external_folder_name + '/' + folder_name + '/analysis/absolute_sarc_length_untracked')
if include_eps:
plt.savefig( external_folder_name + '/' + folder_name + '/analysis/absolute_sarc_length_untracked.eps')
return
##########################################################################################
def compute_timeseries_individual_parameters(folder_name,include_eps=False):
"""Compute and save timeseries time constants (contraction time, relaxation time, flat time, period, offset, etc.)."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
input_distance = 10; input_width = 5 # <-- might need to adjust?
sarc_data_normalized_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_frames.txt'
arr_frames = np.loadtxt(sarc_data_normalized_fname)
sarc_data_normalized_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_leng.txt'
arr_leng = np.loadtxt(sarc_data_normalized_fname)
sarc_data_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_leng_NOT_NORMALIZED.txt'
arr_leng_not_normalized = np.loadtxt(sarc_data_fname)
pix_leng_median = []; pix_leng_mean = []; pix_leng_min = []; pix_leng_max = []; perc_sarc_short = []
fra_mean_contract_time = []; fra_mean_relax_time = []; fra_mean_flat_time = []; fra_mean_period = []; fra_to_first = []
idx_sarc = []; num_peak_all = []
for zz in range(0,arr_frames.shape[0]):
idx_sarc.append(zz)
x = arr_frames[zz,:]
data_pixels = arr_leng_not_normalized[zz,:]
data = arr_leng[zz,:]
data_med = signal.medfilt(data,5) # optional median filter
deriv = np.gradient(data,x)
# go through and group into category by derivative
count_C = 0; count_R = 0; count_F = 0
thresh_flat = 0.005*(np.max(data_med) - np.min(data_med))/0.2
for kk in range(0,x.shape[0]):
if deriv[kk] > thresh_flat: count_R += 1
elif deriv[kk] < -1.0*thresh_flat: count_C += 1
else: count_F += 1
# detect peaks and valleys
th = .00; di = input_distance; wi = input_width # parameters
# distance: required minimal horizontal distance (>= 1, in samples) between neighbouring peaks; smaller peaks are removed first until the condition is fulfilled for all remaining peaks.
# width: required width of peaks in samples; either a number, None, an array matching x, or a 2-element sequence where the first element is the minimal and the second (if supplied) the maximal required width.
peaks_U, _ = find_peaks(data_med,threshold=th,distance=di,width=wi)
peaks_L, _ = find_peaks(-1.0*data_med,threshold=th,distance=di,width=wi)
#num_peaks = 0.5 * peaks_U.shape[0] + 0.5 * peaks_L.shape[0]
#num_peaks = peaks_L.shape[0]
num_peaks = 0
for kk in range(0,peaks_L.shape[0]):
if data_med[peaks_L[kk]] < np.mean(data_med) - thresh_flat:
num_peaks += 1
if num_peaks == 0: num_peaks = 999999
mean_C = count_C / num_peaks
mean_R = count_R / num_peaks
mean_F = count_F / num_peaks
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# save everything #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
pix_leng_median.append(np.median(data_pixels))
pix_leng_mean.append(np.mean(data_pixels))
mi = np.min(data_pixels); pix_leng_min.append(mi)
ma = np.max(data_pixels); pix_leng_max.append(ma)
perc_sarc_short.append( (ma - mi)/(ma) * 100 )
fra_mean_contract_time.append(mean_C)
fra_mean_relax_time.append(mean_R)
fra_mean_flat_time.append(mean_F)
fra_mean_period.append(x.shape[0] / num_peaks)
if peaks_L.shape[0] > 0:
fra_to_first.append(peaks_L[0])
else:
fra_to_first.append(0)
num_peak_all.append(num_peaks)
### --> plot parameters
plt.figure(figsize=(7,7))
plt.subplot(2,2,1)
plt.hist(fra_mean_contract_time)
plt.plot([np.median(fra_mean_contract_time),np.median(fra_mean_contract_time)],[0,10],'r--')
plt.xlabel('frames')
plt.title('median_contract: %.2f'%(np.median(fra_mean_contract_time)))
plt.tight_layout()
plt.subplot(2,2,2)
plt.hist(fra_mean_relax_time)
plt.plot([np.median(fra_mean_relax_time),np.median(fra_mean_relax_time)],[0,10],'r--')
plt.xlabel('frames')
plt.title('median_relax: %.2f'%(np.median(fra_mean_relax_time)))
plt.tight_layout()
plt.subplot(2,2,3)
plt.hist(fra_mean_flat_time)
plt.plot([np.median(fra_mean_flat_time),np.median(fra_mean_flat_time)],[0,10],'r--')
plt.xlabel('frames')
plt.title('median_flat: %.2f'%(np.median(fra_mean_flat_time)))
plt.tight_layout()
plt.subplot(2,2,4)
plt.hist(fra_mean_period)
plt.plot([np.median(fra_mean_period),np.median(fra_mean_period)],[0,10],'r--')
plt.xlabel('frames')
plt.title('median_period: %.2f'%(np.median(fra_mean_period)))
plt.tight_layout()
plt.savefig(out_analysis + '/histogram_time_constants')
if include_eps:
plt.savefig(out_analysis + '/histogram_time_constants.eps')
num_sarc = len(idx_sarc)
arr = np.zeros((num_sarc,12))
arr[:,0] = np.asarray(idx_sarc)
arr[:,1] = np.asarray(pix_leng_median)
arr[:,2] = np.asarray(pix_leng_mean)
arr[:,3] = np.asarray(pix_leng_min)
arr[:,4] = np.asarray(pix_leng_max)
arr[:,5] = np.asarray(perc_sarc_short)
arr[:,6] = np.asarray(fra_mean_contract_time)
arr[:,7] = np.asarray(fra_mean_relax_time)
arr[:,8] = np.asarray(fra_mean_flat_time)
arr[:,9] = np.asarray(fra_mean_period)
arr[:,10] = np.asarray(fra_to_first)
arr[:,11] = np.asarray(num_peak_all)
np.savetxt(out_analysis + '/timeseries_parameters_info.txt', arr)
# --> save as excel spreadsheet
writer = pd.ExcelWriter(out_analysis + '/timeseries_parameters_info.xlsx', engine='xlsxwriter')
all_col = ['idx', 'pix_leng_median', 'pix_leng_mean', 'pix_leng_min', 'pix_leng_max', 'perc_sarc_short', 'frames_mean_contract', 'frames_mean_relax', 'frames_mean_flat', 'frames_mean_period', 'frames_to_first', 'num_peaks']
df = pd.DataFrame(np.asarray(arr), columns=all_col)
df.to_excel(writer, sheet_name='summary_stats')
arr = arr_leng
df2 = pd.DataFrame(np.asarray(arr))
df2.to_excel(writer, sheet_name='full_time_series', columns = arr_frames[0,:])
writer.save()
return
##########################################################################################
def sample(mu_track,num_track,vals_all):
"""Sample mu from the total population -- match #tracked."""
num_run = 1000
mu_samp = []
for jj in range(0,num_run):
ix = []
for kk in range(0,num_track):
ix.append(random.randint(0,len(vals_all)-1))
samp = vals_all[ix]
mu_samp.append(mu_track - np.mean(samp))
return mu_samp
##########################################################################################
def compute_mu_ang(ang_list):
"""Compute the mean of an angle."""
x_total = 0
y_total = 0
for kk in range(0,len(ang_list)):
ang = ang_list[kk]
x_total += np.cos(ang)
y_total += np.sin(ang)
x_mean = x_total / len(ang_list)
y_mean = y_total / len(ang_list)
ang = np.arctan2(y_mean, x_mean)
r = np.sqrt(x_mean**2.0 + y_mean**2.0)
return ang, r
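##########################################################################################
# Illustrative sketch, not part of the original analysis: compute_mu_ang()
# averages angles on the unit circle, so two angles just either side of +/-pi
# average to ~pi with r close to 1, whereas a naive arithmetic mean of the raw
# values would give ~0.
def _demo_compute_mu_ang():
    ang, r = compute_mu_ang([np.pi - 0.1, -np.pi + 0.1])
    return ang, r    # approximately (pi, cos(0.1) ~ 0.995)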
##########################################################################################
def sample_ang(mu_track_ang, mu_track_r,num_track,vals_all):
"""Sample angle from the total population -- match #tracked."""
num_run = 1000
mu_samp_ang = []
mu_samp_r = []
for jj in range(0,num_run):
ix = []
for kk in range(0,num_track):
ix.append(random.randint(0,len(vals_all)-1))
samp = vals_all[ix]
ang, r = compute_mu_ang(samp)
mu_samp_ang.append(mu_track_ang - ang)
mu_samp_r.append(mu_track_r - r)
return mu_samp_ang, mu_samp_r
##########################################################################################
def compare_tracked_untracked(folder_name,include_eps=False):
"""Compare the tracked and untracked populations by random sampling the untracked population."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
ALL_PIX_LEN = []; ALL_PIX_WID = []; ALL_PIX_ANG = []
med = []; ix = []; num_sarc = []
for frame in range(0,num_frames):
if frame < 10: file_root = '/frame-000%i'%(frame)
elif frame < 100: file_root = '/frame-00%i'%(frame)
else: file_root = '/frame-0%i'%(frame)
fname = external_folder_name + '/' + folder_name + '/segmented_sarc/' + file_root + '_sarc_data.txt'
data = np.loadtxt(fname)
pix_len = data[:,4]; pix_wid = data[:,5]; pix_ang = data[:,6]
ALL_PIX_LEN.append(pix_len); ALL_PIX_WID.append(pix_wid); ALL_PIX_ANG.append(pix_ang)
med.append(np.median(pix_len)); ix.append(frame+1); num_sarc.append(len(pix_len))
# --> import data
sarc_data_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_leng_NOT_NORMALIZED.txt'
tracked_leng = np.loadtxt(sarc_data_fname)
sarc_data_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_wid.txt'
tracked_wid = np.loadtxt(sarc_data_fname)
sarc_data_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_ang.txt'
tracked_ang = np.loadtxt(sarc_data_fname)
# --> compute the mean number NOT tracked
num_not = 0
for kk in range(0,len(ALL_PIX_LEN)): num_not += len(ALL_PIX_LEN[kk])
num_not = num_not / len(ALL_PIX_LEN); num_tracked = tracked_leng.shape[0]
# --> sample the length from the tracked population
mu_samp_ALL_len = []
for frame_num in range(0,num_frames):
len_all = ALL_PIX_LEN[frame_num]
len_tracked = list(tracked_leng[:,frame_num])
mu_track = np.mean(len_tracked)
num_track = len(len_tracked)
vals_all = len_all
mu_samp = sample(mu_track, num_track, vals_all)
mu_samp_ALL_len.append(mu_samp)
plt.figure(figsize=(25,5))
plt.boxplot(mu_samp_ALL_len)
plt.plot([0,num_frames],[-.5,-.5],'k--')
plt.plot([0,num_frames],[.5,.5],'k--')
plt.title('comparison of length in pixels, approx %i untracked, %i tracked'%(num_not,num_tracked))
plt.xlabel('frame number')
plt.ylabel(r'$\mu_{track}-\mu_{all}$')
plt.savefig(out_analysis + '/length_compare_box_plots')
if include_eps:
plt.savefig(out_analysis + '/length_compare_box_plots.eps')
# --> sample the width from the tracked population
mu_samp_ALL_wid = []
for frame_num in range(0,num_frames):
wid_all = ALL_PIX_WID[frame_num]
wid_tracked = list(tracked_wid[:,frame_num])
mu_track = np.mean(wid_tracked)
num_track = len(wid_tracked)
vals_all = wid_all
mu_samp = sample(mu_track, num_track, vals_all)
mu_samp_ALL_wid.append(mu_samp)
plt.figure(figsize=(25,5))
plt.boxplot(mu_samp_ALL_wid)
plt.plot([0,num_frames],[-.5,-.5],'k--')
plt.plot([0,num_frames],[.5,.5],'k--')
plt.title('comparison of width in pixels, approx %i untracked, %i tracked'%(num_not,num_tracked))
plt.xlabel('frame number')
plt.ylabel(r'$\mu_{track}-\mu_{all}$')
plt.savefig(out_analysis + '/width_compare_box_plots')
if include_eps:
plt.savefig(out_analysis + '/width_compare_box_plots.eps')
# --> sample the angle from the tracked population
mu_samp_ALL_ang = []; mu_samp_ALL_rad = []
for frame_num in range(0,num_frames):
ang_all = ALL_PIX_ANG[frame_num]
ang_tracked = list(tracked_ang[:,frame_num])
mu_track_ang, mu_track_r = compute_mu_ang(ang_tracked)
num_track = len(ang_tracked)
vals_all = ang_all
mu_samp_ang, mu_samp_r = sample_ang(mu_track_ang, mu_track_r,num_track,vals_all)
mu_samp_ALL_ang.append(mu_samp_ang)
mu_samp_ALL_rad.append(mu_samp_r)
plt.figure(figsize=(25,10))
plt.subplot(2,1,1)
plt.boxplot(mu_samp_ALL_ang)
plt.plot([0,num_frames],[-1*np.pi/8,-1*np.pi/8],'k--')
plt.plot([0,num_frames],[np.pi/8,np.pi/8],'k--')
plt.title('comparison of angle in radians, approx %i untracked, %i tracked'%(num_not,num_tracked))
plt.xlabel('frame number')
plt.ylabel(r'$\mu_{track}-\mu_{all}$')
plt.subplot(2,1,2)
plt.boxplot(mu_samp_ALL_rad)
plt.plot([0,num_frames],[0,0],'r--',label='uniform')
plt.plot([0,num_frames],[1,1],'k--',label='oriented')
plt.title('comparison of angle radius in pixels, approx %i untracked, %i tracked'%(num_not,num_tracked))
plt.xlabel('frame number')
plt.ylabel(r'$\mu_{track}-\mu_{all}$')
plt.legend()
plt.savefig(out_analysis + '/angle_compare_box_plots')
if include_eps:
plt.savefig(out_analysis + '/angle_compare_box_plots.eps')
return
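# A minimal usage sketch (hypothetical folder name; assumes the segmentation, tracking and
# timeseries steps have already written ALL_MOVIES_PROCESSED/<folder_name>/...):
#   compare_tracked_untracked('sample_movie', include_eps=False)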
##########################################################################################
##########################################################################################
# compute time series correlations -- on graph distance and euclidean distance
##########################################################################################
##########################################################################################
def compute_cross_correlation(sig1, sig2):
"""Compute the normalized cross correlation between two signals."""
sig1_norm = (sig1 - np.mean(sig1)) / (np.std(sig1) * sig1.shape[0])
sig2_norm = (sig2 - np.mean(sig2)) / (np.std(sig2))
val = np.correlate(sig1_norm,sig2_norm)
return val
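# Because each signal is divided by its standard deviation and one of them additionally by
# its length, np.correlate (default 'valid' mode on equal-length inputs) returns the
# zero-lag Pearson correlation: a single value in [-1, 1] rather than a raw dot product.
# A minimal sketch with hypothetical signals:
#   t = np.linspace(0, 2.0*np.pi, 100)
#   compute_cross_correlation(np.sin(t), np.sin(t))    # ~ array([1.0])
#   compute_cross_correlation(np.sin(t), -np.sin(t))   # ~ array([-1.0])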
##########################################################################################
def dist_val2(subgraphs,node_1,node_2):
"""Compute the network distance between two nodes."""
for sg in subgraphs:
node_1_in = sg.has_node(node_1)
node_2_in = sg.has_node(node_2)
if node_1_in and node_2_in:
dist = nx.shortest_path_length(sg,source=node_1,target=node_2)
return dist
return 99999
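# dist_val2() returns the shortest-path length between the two z-disc nodes when they lie
# in the same connected component, and the sentinel 99999 otherwise. Note that `subgraphs`
# is a generator and is consumed by a single call, so the caller rebuilds it before every call.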
##########################################################################################
def get_euclid_dist_from_avg_pos(x_vec_1,y_vec_1,x_vec_2,y_vec_2):
"""Return the average euclidian distance between two sarcomeres."""
dist_vec = (( x_vec_1 - x_vec_2 )**2.0 + ( y_vec_1 - y_vec_2 )**2.0)**(1.0/2.0)
return np.mean(dist_vec)
##########################################################################################
def preliminary_spatial_temporal_correlation_info(folder_name,compute_network_distances=True,include_eps=False):
"""Perform a preliminary analysis of spatial/temporal correlation."""
num_frames = len(glob.glob('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/*.npy'))
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
	# --> import sarcomere length timeseries
sarc_data_normalized_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_frames.txt'
arr_frames = np.loadtxt(sarc_data_normalized_fname)
sarc_data_normalized_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_leng.txt'
arr_leng = np.loadtxt(sarc_data_normalized_fname)
sarc_data_fname = external_folder_name + '/' + folder_name + '/timeseries/tracking_results_leng_NOT_NORMALIZED.txt'
arr_leng_not_normalized = np.loadtxt(sarc_data_fname)
# --> import raw image
raw_img = np.load('ALL_MOVIES_MATRICES/' + folder_name + '_matrices/frame-0000.npy')
# --> import graph
out_graph = external_folder_name + '/' + folder_name + '/graph'
with open(out_graph + '/graph.pkl', 'rb') as f: G = pickle.load(f)
out_graph = folder_name + '/graph/basic_graph.png'
graph = plt.imread(external_folder_name + '/' + folder_name + '/graph/basic_graph.png')
# --> import sarcomere info
sarc_data_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/tracking_results/tracking_results_sarcomeres.txt'
sarc_data = np.loadtxt(sarc_data_fname)
# --> import sarcomere position info
sarc_x_pos_data_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_x_pos.txt'
sarc_x_pos_data = np.loadtxt(sarc_x_pos_data_fname )
sarc_y_pos_data_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_y_pos.txt'
sarc_y_pos_data = np.loadtxt(sarc_y_pos_data_fname )
# --> import z-disc data
zdisc_data_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/tracking_results/tracking_results_zdisks.txt'
zdisc_data = np.loadtxt(zdisc_data_fname)
particle = zdisc_data[:,2]
# --> import index information
sarc_idx_fname = 'ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_sarc_idx_above_thresh.txt'
sarc_idx = np.loadtxt(sarc_idx_fname)
all_frames = sarc_data[:,0]; all_particles = sarc_data[:,2]
all_z_1 = sarc_data[:,5]; all_z_2 = sarc_data[:,6]
unique_particles = np.unique(all_particles).astype('int')
organized_data_z1 = np.zeros((unique_particles.shape[0],num_frames))
organized_data_z2 = np.zeros((unique_particles.shape[0],num_frames))
for kk in range(0,sarc_data.shape[0]):
part = int(all_particles[kk])
frame = int(all_frames[kk])
idx_in_frame = np.where(zdisc_data[:,0] == frame)
disc_data = zdisc_data[idx_in_frame[0],:]
part_idx = np.argmin(np.abs(unique_particles - part))
ZLID1 = int(all_z_1[kk])
ZLID2 = int(all_z_2[kk])
orig_disc_idx = disc_data[:,1].astype(int)
check = np.where(orig_disc_idx == ZLID1)[0]
if check.shape[0] == 0:
continue
else:
ZGID1_idx = check[0]
ZGID1 = int(disc_data[ZGID1_idx,2])
check = np.where(orig_disc_idx == ZLID2)[0]
if check.shape[0] == 0:
continue
else:
ZGID2_idx = check[0]
ZGID2 = int(disc_data[ZGID2_idx,2])
organized_data_z1[part_idx,frame] = ZGID1
organized_data_z2[part_idx,frame] = ZGID2
# --> for each sarcomere identify which z discs it belongs to
Z_disc_1 = []; Z_disc_2 = []
for kk in range(0,sarc_idx.shape[0]):
idx = int(sarc_idx[kk])
z_idx_1 = organized_data_z1[idx,:]
if np.sum(z_idx_1) == 0:
z_idx_1 = z_idx_1
else:
z_idx_1 = z_idx_1[z_idx_1>0]
z_idx_2 = organized_data_z2[idx,:]
if np.sum(z_idx_2) == 0:
z_idx_2 = z_idx_2
else:
z_idx_2 = z_idx_2[z_idx_2>0]
Z_disc_1.append(int(scipy.stats.mode(z_idx_1)[0][0]))
Z_disc_2.append(int(scipy.stats.mode(z_idx_2)[0][0]))
# get graph distances and correlation scores
graph_dist_all = []; corr_score_all = []; euclid_dist_all = []
if compute_network_distances:
for jj in range(0,sarc_idx.shape[0]):
for kk in range(jj+1,sarc_idx.shape[0]):
jj_idx = [Z_disc_1[jj], Z_disc_2[jj]]
kk_idx = [Z_disc_1[kk], Z_disc_2[kk]]
dist_all_combos = []
for j in jj_idx:
for k in kk_idx:
subgraphs = (G.subgraph(c).copy() for c in nx.connected_components(G))
dist = dist_val2(subgraphs,j,k)
dist_all_combos.append(dist)
sig1 = arr_leng[jj,:]
sig2 = arr_leng[kk,:]
corr_score = compute_cross_correlation(sig1, sig2)
corr_score_all.append(corr_score)
graph_dist_all.append( np.min(dist_all_combos) )
x_vec_1 = sarc_x_pos_data[jj,:]; y_vec_1 = sarc_y_pos_data[jj,:]
x_vec_2 = sarc_x_pos_data[kk,:]; y_vec_2 = sarc_y_pos_data[kk,:]
euclid_dist = get_euclid_dist_from_avg_pos(x_vec_1,y_vec_1,x_vec_2,y_vec_2)
euclid_dist_all.append(euclid_dist)
np.savetxt(out_analysis + '/graph_dist_all.txt',np.asarray(graph_dist_all))
np.savetxt(out_analysis + '/euclid_dist_all.txt',np.asarray(euclid_dist_all))
np.savetxt(out_analysis + '/corr_score_all.txt',np.asarray(corr_score_all))
else:
graph_dist_all = np.loadtxt(out_analysis + '/graph_dist_all.txt')
euclid_dist_all = np.loadtxt(out_analysis + '/euclid_dist_all.txt')
corr_score_all = np.loadtxt(out_analysis + '/corr_score_all.txt')
graph_dist_all = np.asarray(graph_dist_all).astype('int')
euclid_dist_all = np.asarray(euclid_dist_all)
corr_score_all = np.asarray(corr_score_all)
########## --> make plot
plt.figure(figsize=(30,4))
# raw image
plt.subplot(1,5,1)
plt.imshow(raw_img)
ax = plt.gca()
ax.set_xticks([]); ax.set_yticks([])
plt.title(folder_name + ' raw image')
plt.tight_layout()
# graph
plt.subplot(1,5,2)
plt.imshow(graph)
ax = plt.gca()
ax.set_xticks([]); ax.set_yticks([])
plt.title(folder_name + ' graph')
plt.tight_layout()
# histogram
plt.subplot(1,5,3)
n, bins, patches = plt.hist(corr_score_all,range=(-1,1),rwidth=.8,color=(.5,.5,.5))
plt.xlim((-1.1,1.1))
plt.xlabel('normalized cross-correlation')
plt.title('timeseries comparison')
ma = np.max(n)
plt.plot([0,0],[0,ma],'g--',label='no correlation')
plt.plot([np.median(corr_score_all),np.median(corr_score_all)],[0,ma],'b-',label='median: %.2f'%(np.median(corr_score_all)))
plt.legend()
plt.tight_layout()
# euclidean
plt.subplot(1,5,4)
x_coord = []; y_coord = []; num_in_bin = []
for kk in range(0,5):
ix_1 = euclid_dist_all > kk*20
ix_2 = euclid_dist_all < (kk +1)*20
ix = []
for jj in range(0,np.asarray(euclid_dist_all).shape[0]):
if ix_1[jj] == True and ix_2[jj] == True:
ix.append(jj)
x_coord.append(kk*20 + 5)
me = np.mean(corr_score_all[ix])
num_in_bin.append(len(corr_score_all[ix]))
y_coord.append(me)
plt.plot(x_coord,y_coord,'.',color=(1.0,.5,.5),markersize=20,label='binned means')
maxi = np.max(x_coord)
plt.plot([0,maxi],[0,0],'g--',label='no correlation')
mean_all = np.mean(corr_score_all)
plt.plot([0,maxi],[mean_all,mean_all],'b-',label='mean all: %.2f'%(mean_all))
	plt.xlabel('euclidean distance (pixels)')
plt.ylabel('normalized cross-correlation')
plt.grid(True)
plt.title('timeseries comparison wrt distance')
plt.legend()
plt.ylim((-1.05,1.05))
plt.tight_layout()
# network
plt.subplot(1,5,5)
dist_bins = []
for kk in range(0,5): dist_bins.append(kk)
x_coord = []; y_coord = []; num_in_bin = [ ]
for di in dist_bins:
ix = graph_dist_all == int(di)
corr_score = corr_score_all[ix]
if corr_score.shape[0] > 3:
x_coord.append(di)
y_coord.append(np.mean(corr_score))
num_in_bin.append(len(corr_score))
ix = graph_dist_all < 9999
corr_score = corr_score_all[ix]
mean_connected = np.mean(corr_score)
mean_all = np.mean(corr_score_all)
plt.plot(x_coord,y_coord,'.',color=(1.0,.5,.5),markersize=20,label='binned means')
maxi = np.max(dist_bins)
plt.plot([0,maxi],[mean_connected, mean_connected],'r--',label='mean connected: %.2f'%(mean_connected))
plt.plot([0,maxi],[0,0],'g--',label='no correlation')
plt.plot([0,maxi],[mean_all,mean_all],'b-',label='mean all: %.2f'%(mean_all))
plt.legend(loc=4)
plt.xlabel('distance along network')
plt.ylabel('normalized cross-correlation')
plt.grid(True)
plt.title('timeseries comparison wrt network distance')
plt.ylim((-1.05,1.05))
plt.tight_layout()
plt.savefig(out_analysis + '/preliminary_spatial_analysis')
if include_eps:
plt.savefig(out_analysis + '/preliminary_spatial_analysis.eps')
return
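# A minimal usage sketch (hypothetical folder name). The first call computes and caches the
# pairwise graph/Euclidean distances and correlation scores as text files; later calls can
# reuse them by passing compute_network_distances=False:
#   preliminary_spatial_temporal_correlation_info('sample_movie', compute_network_distances=True)
#   preliminary_spatial_temporal_correlation_info('sample_movie', compute_network_distances=False)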
##########################################################################################
##########################################################################################
# compute F
##########################################################################################
##########################################################################################
def compute_F_whole_movie(folder_name,include_eps=False):
"""Compute and return the average deformation gradient for the whole movie."""
# set up folders
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
# compute Lambda from x_pos and y_pos
x_pos = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_x_pos.txt')
y_pos = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_y_pos.txt')
num_sarc = x_pos.shape[0]
num_time = x_pos.shape[1]
num_vec = int((num_sarc * num_sarc - num_sarc) / 2.0)
Lambda_list = []
for tt in range(0,num_time):
Lambda = np.zeros((2,num_vec))
ix = 0
for kk in range(0,num_sarc):
for jj in range(kk+1,num_sarc):
x_vec = x_pos[kk,tt] - x_pos[jj,tt]
y_vec = y_pos[kk,tt] - y_pos[jj,tt]
Lambda[0,ix] = x_vec
Lambda[1,ix] = y_vec
ix += 1
Lambda_list.append(Lambda)
F_list = []; F11_list = []; F22_list = []; F12_list = []; F21_list = []
J_list = []
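	# For each frame, the average deformation gradient F is the least-squares fit of
	# Lambda_t ~ F * Lambda_0 over all pairwise sarcomere position vectors:
	#   F = (Lambda_t Lambda_0^T) (Lambda_0 Lambda_0^T)^{-1}
	# which is exactly what term_1 and term_2 compute below. J = det(F) tracks the overall
	# area change relative to frame 0.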
for tt in range(0,num_time):
Lambda_0 = Lambda_list[0]
Lambda_t = Lambda_list[tt]
term_1 = np.dot( Lambda_t , np.transpose(Lambda_0) )
term_2 = np.linalg.inv( np.dot( Lambda_0 , np.transpose(Lambda_0) ) )
F = np.dot(term_1 , term_2)
F_vec = [F[0,0],F[0,1],F[1,0],F[1,1]]
F_list.append(F_vec)
F11_list.append(F[0,0] - 1.0)
F22_list.append(F[1,1] - 1.0)
F12_list.append(F[0,1])
F21_list.append(F[1,0])
J_list.append(F[0,0]*F[1,1] - F[0,1]*F[1,0])
np.savetxt(out_analysis + '/recovered_F.txt',np.asarray(F_list))
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.plot(F11_list,'r--',linewidth=5, label='F11 recovered')
plt.plot(F22_list,'g--',linewidth=4, label='F22 recovered')
plt.plot(F12_list,'c:',label='F12 recovered')
plt.plot(F21_list,'b:',label='F21 recovered')
plt.legend()
plt.title('recovered deformation gradient')
plt.xlabel('frames');
plt.subplot(1,2,2)
plt.plot(J_list,'k-',label='Jacobian')
plt.xlabel('frames');
plt.legend()
plt.title('det of deformation gradient')
plt.savefig(out_analysis + '/recovered_F_plot')
if include_eps:
plt.savefig(out_analysis + '/recovered_F_plot.eps')
return
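# A minimal usage sketch (hypothetical folder name; requires tracking_results_x_pos.txt and
# tracking_results_y_pos.txt from the timeseries step):
#   compute_F_whole_movie('sample_movie', include_eps=False)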
##########################################################################################
def adjust_F_if_movie_starts_not_contracted(folder_name,include_eps=False):
"""Adjust and return the average deformation gradient for the whole movie -- useful if first frame is not the relaxed state."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
F_list = np.loadtxt(out_analysis + '/recovered_F.txt')
	J_list = []  # columns of recovered_F.txt are [F00, F01, F10, F11]
for kk in range(0,F_list.shape[0]):
F00 = F_list[kk,0]; F01 = F_list[kk,1]; F10 = F_list[kk,2]; F11 = F_list[kk,3]
J_list.append(F00*F11 - F01*F10)
arg_max = np.argmax(J_list)
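	# The frame with the largest J = det(F) is the frame closest to the fully relaxed
	# configuration; it is used below as the new reference (Lambda_0) so that F is measured
	# relative to the relaxed state rather than relative to frame 0.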
# compute Lambda from x_pos and y_pos
x_pos = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_x_pos.txt')
y_pos = np.loadtxt('ALL_MOVIES_PROCESSED/' + folder_name + '/timeseries/tracking_results_y_pos.txt')
num_sarc = x_pos.shape[0]
num_time = x_pos.shape[1]
num_vec = int((num_sarc * num_sarc - num_sarc) / 2.0)
Lambda_list = []
for tt in range(0,num_time):
Lambda = np.zeros((2,num_vec))
ix = 0
for kk in range(0,num_sarc):
for jj in range(kk+1,num_sarc):
x_vec = x_pos[kk,tt] - x_pos[jj,tt]
y_vec = y_pos[kk,tt] - y_pos[jj,tt]
Lambda[0,ix] = x_vec
Lambda[1,ix] = y_vec
ix += 1
Lambda_list.append(Lambda)
F_list = []; F11_list = []; F22_list = []; F12_list = []; F21_list = []
J_list = []
for tt in range(0,num_time):
Lambda_0 = Lambda_list[arg_max]
Lambda_t = Lambda_list[tt]
term_1 = np.dot( Lambda_t , np.transpose(Lambda_0) )
term_2 = np.linalg.inv( np.dot( Lambda_0 , np.transpose(Lambda_0) ) )
F = np.dot(term_1 , term_2)
F_vec = [F[0,0],F[0,1],F[1,0],F[1,1]]
F_list.append(F_vec)
F11_list.append(F[0,0] - 1.0)
F22_list.append(F[1,1] - 1.0)
F12_list.append(F[0,1])
F21_list.append(F[1,0])
J_list.append(F[0,0]*F[1,1] - F[0,1]*F[1,0])
np.savetxt(out_analysis + '/recovered_F.txt',np.asarray(F_list))
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.plot(F11_list,'r--',linewidth=5, label='F11 recovered')
plt.plot(F22_list,'g--',linewidth=4, label='F22 recovered')
plt.plot(F12_list,'c:',label='F12 recovered')
plt.plot(F21_list,'b:',label='F21 recovered')
plt.legend()
plt.title('recovered deformation gradient')
plt.xlabel('frames');
plt.subplot(1,2,2)
plt.plot(J_list,'k-',label='Jacobian')
plt.xlabel('frames');
plt.legend()
plt.title('det of deformation gradient')
plt.savefig(out_analysis + '/recovered_F_plot')
if include_eps:
plt.savefig(out_analysis + '/recovered_F_plot.eps')
return
##########################################################################################
def analyze_J_full_movie(folder_name,include_eps=False):
"""Analyze the Jacobian -- report timeseries parmeters. Must first run compute_F_whole_movie()."""
# set up folders
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
# import the deformation gradient.
F_list = np.loadtxt(out_analysis + '/recovered_F.txt')
num_frames = F_list.shape[0]; x = []
J_list = []
for kk in range(0,num_frames):
F00 = F_list[kk,0]; F01 = F_list[kk,1]; F10 = F_list[kk,2]; F11 = F_list[kk,3]
J_list.append(F00*F11 - F01*F10)
x.append(kk)
J_list = np.asarray(J_list)
x = np.asarray(x)
# compute the parameters of the timeseries
plt.figure(figsize=(4,4))
plt.plot(J_list,'k-')
data = J_list
data_med = signal.medfilt(data,5)
deriv = np.gradient(data,x)
count_C = 0; count_R = 0; count_F = 0
thresh_flat = 0.01*(np.max(J_list) - np.min(J_list))
pix_leng_median = []; pix_leng_mean = []; pix_leng_min = []; pix_leng_max = []; perc_sarc_short = []
fra_mean_contract_time = []; fra_mean_relax_time = []; fra_mean_flat_time = []; fra_mean_period = []; fra_to_first = []
idx_sarc = []; num_peak_all = []
for kk in range(0,x.shape[0]):
if deriv[kk] > thresh_flat:
count_R += 1
plt.plot(x[kk],J_list[kk],'o',color=(.5,.5,.5))
elif deriv[kk] < -1.0*thresh_flat:
count_C += 1
plt.plot(x[kk],J_list[kk],'o',color=(.5,0,0))
else:
count_F += 1
plt.plot(x[kk],J_list[kk],'o',color=(0,0,.5))
# detect peaks and valleys
input_distance = 10; input_width = 5
th = .00; di = input_distance; wi = input_width # parameters
peaks_U, _ = find_peaks(data_med,threshold=th,distance=di,width=wi)
peaks_L, _ = find_peaks(-1.0*data_med,threshold=th,distance=di,width=wi)
#num_peaks = 0.5 * peaks_U.shape[0] + 0.5 * peaks_L.shape[0]
num_peaks = peaks_L.shape[0]
if num_peaks == 0: num_peaks = 999999
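	# Valleys of the median-filtered J trace mark full contractions; they are located by
	# negating the signal before find_peaks. The distance/width settings suppress spurious
	# detections, and num_peaks falls back to a large sentinel when no valley is found so
	# the per-peak averages below stay finite.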
mean_C = count_C / num_peaks
mean_R = count_R / num_peaks
mean_F = count_F / num_peaks
plt.grid()
#plt.plot(x[peaks_U],data[peaks_U],'rx',markersize=10)
plt.plot(x[peaks_L],data[peaks_L],'rx',markersize=13)
plt.title('frames contract: %i, relax: %i, flat: %i'%(count_C,count_R,count_F))
plt.xlabel('frame number')
	plt.ylabel('determinant of average F')
plt.tight_layout()
plt.savefig(out_analysis + '/recovered_F_plot_timeseries')
if include_eps:
plt.savefig(out_analysis + '/recovered_F_plot_timeseries.eps')
return
##########################################################################################
def visualize_F_full_movie(folder_name,include_eps=False):
"""Visualize the eigenvalues of F -- plot timeseries next to the movie. Must first run compute_F_whole_movie()."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis/F_movie'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
# import the deformation gradient.
F_list = np.loadtxt(external_folder_name + '/' + folder_name + '/analysis/recovered_F.txt')
num_frames = F_list.shape[0]; x = []
J_list = []
R_list = []
U_list = []
F_list_mat = []
lambda_1_list = []; vec_1_list = []
lambda_2_list = []; vec_2_list = []
th_list = []
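	# Polar decomposition F = R U splits each recovered deformation gradient into a rotation R
	# and a symmetric stretch U. The eigenvalues of U are the principal stretches
	# (lambda_1 <= lambda_2), and the eigenvectors, rotated by R, give the corresponding
	# directions in the image frame.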
for kk in range(0,num_frames):
F00 = F_list[kk,0]; F01 = F_list[kk,1]; F10 = F_list[kk,2]; F11 = F_list[kk,3]
J_list.append(F00*F11 - F01*F10)
x.append(kk)
R, U = polar(np.asarray([[F00,F01],[F10,F11]]))
R_list.append(R); U_list.append(U); F_list_mat.append(np.asarray([[F00,F01],[F10,F11]]))
w, v = LA.eig(U)
lambda_1_list.append(np.min(w)); lambda_2_list.append(np.max(w))
v = np.dot(R, v)
vec_1_list.append(v[:,np.argmin(w)]); vec_2_list.append(v[:,np.argmax(w)])
th_list.append(np.arccos(v[0,0]))
J_list = np.asarray(J_list)
x = np.asarray(x)
J_min = np.min(J_list)
img_list = []
# --> plot
for kk in range(0,num_frames):
raw_img = get_frame_matrix(folder_name, kk)
x_pos_mean = raw_img.shape[0]/2.0; y_pos_mean = raw_img.shape[1]/2.0
plt.figure(figsize=(10*.7,5*.7))
plt.subplot(1,2,1)
plt.imshow(raw_img, cmap=plt.cm.gray)
rad = .2*np.min([raw_img.shape[0],raw_img.shape[1]]); th = np.linspace(0,2.0*np.pi,100)
plt.plot([y_pos_mean-rad*vec_1_list[kk][1],y_pos_mean+rad*vec_1_list[kk][1]],[x_pos_mean-rad*vec_1_list[kk][0],x_pos_mean+rad*vec_1_list[kk][0]],'-',color=(255/255,204/255,203/255),linewidth=0.3)
plt.plot([y_pos_mean-rad*vec_2_list[kk][1],y_pos_mean+rad*vec_2_list[kk][1]],[x_pos_mean-rad*vec_2_list[kk][0],x_pos_mean+rad*vec_2_list[kk][0]],'-',color=(0.5,0.5,0.5),linewidth=0.3)
#plt.plot([y_pos_mean,y_pos_mean],[x_pos_mean-rad,x_pos_mean+rad],'-',color=(255/255,204/255,203/255),linewidth=0.2)
# add in eigenvector directions
x_vec = []; y_vec = [] ; x_vec_circ = []; y_vec_circ = []
scale = np.asarray([[.9,0],[0,.9]])
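		# The reference circle of radius rad is mapped through nine successive applications of F
		# (nest8 = F^9) rather than one -- this deliberately exaggerates the small per-frame
		# deformation so the deformed ellipse is visible next to the undeformed circle.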
for jj in range(0,100):
v = np.asarray([rad*np.cos(th[jj]),rad*np.sin(th[jj])])
#v_def = np.dot(np.dot(F_list_mat[jj],scale),v)
nest1 = np.dot(F_list_mat[kk],F_list_mat[kk])
nest2 = np.dot(F_list_mat[kk],nest1)
nest3 = np.dot(F_list_mat[kk],nest2)
nest4 = np.dot(F_list_mat[kk],nest3)
nest5 = np.dot(F_list_mat[kk],nest4)
nest6 = np.dot(F_list_mat[kk],nest5)
nest7 = np.dot(F_list_mat[kk],nest6)
nest8 = np.dot(F_list_mat[kk],nest7)
v_def = np.dot(nest8,v)
x_vec.append(v_def[0] + x_pos_mean); y_vec.append(v_def[1] + y_pos_mean)
x_vec_circ.append(x_pos_mean + v[0]); y_vec_circ.append(y_pos_mean + v[1])
plt.plot(y_vec_circ,x_vec_circ,'-',color=(255/255,204/255,203/255),linewidth=0.3)
plt.plot(y_vec,x_vec,'-',color=(255/255,204/255,203/255),linewidth=1.0)
ax = plt.gca()
ax.set_xticks([]); ax.set_yticks([]);
plt.subplot(1,2,2)
plt.plot(x,lambda_1_list,'-',color='k',linewidth=1,label='λ1')
plt.plot(x,lambda_2_list,'-',color=(0.5,0.5,0.5),linewidth=1,label='λ2')
plt.plot(x[kk],lambda_1_list[kk],'o',mfc=(.7,0,0),mec=(0,0,0),markersize=7)
plt.plot(x[kk],lambda_2_list[kk],'o',mfc=(.7,0,0),mec=(0.5,0.5,0.5),markersize=7)
plt.xlabel('frame number')
plt.legend()
plt.tight_layout()
plt.savefig(out_analysis + '/frame_%04d'%(kk))
if include_eps:
plt.savefig(out_analysis + '/frame_%i.eps'%(kk))
plt.close()
img_list.append(imageio.imread(out_analysis + '/frame_%04d.png'%(kk)))
imageio.mimsave(out_analysis + '/F_anim.gif', img_list)
return
##########################################################################################
def save_lambda_from_F(folder_name,include_eps=False):
"""Visualize the eigenvalues of F -- plot timeseries next to the movie. Must first run compute_F_whole_movie()."""
external_folder_name = 'ALL_MOVIES_PROCESSED'
if not os.path.exists(external_folder_name):
os.makedirs(external_folder_name)
out_analysis = external_folder_name + '/' + folder_name + '/analysis'
if not os.path.exists(external_folder_name + '/' + folder_name): os.makedirs(external_folder_name + '/' + folder_name)
if not os.path.exists(out_analysis): os.makedirs(out_analysis)
# import the deformation gradient.
F_list = np.loadtxt(external_folder_name + '/' + folder_name + '/analysis/recovered_F.txt')
num_frames = F_list.shape[0]; x = []
J_list = []
R_list = []
U_list = []
F_list_mat = []
lambda_1_list = []; vec_1_list = []
lambda_2_list = []; vec_2_list = []
th_list = []
for kk in range(0,num_frames):
F00 = F_list[kk,0]; F01 = F_list[kk,1]; F10 = F_list[kk,2]; F11 = F_list[kk,3]
J_list.append(F00*F11 - F01*F10)
x.append(kk)
R, U = polar(np.asarray([[F00,F01],[F10,F11]]))
R_list.append(R); U_list.append(U); F_list_mat.append(np.asarray([[F00,F01],[F10,F11]]))
w, v = LA.eig(U)
lambda_1_list.append(np.min(w)); lambda_2_list.append(np.max(w))
v = np.dot(R, v)
vec_1_list.append(v[:,np.argmin(w)]); vec_2_list.append(v[:,np.argmax(w)])
th_list.append(np.arccos(v[0,0]))
J_list = np.asarray(J_list)
x = np.asarray(x)
J_min = np.min(J_list)
# --> plot
kk = np.argmin(J_list)
raw_img = get_frame_matrix(folder_name, kk)
x_pos_mean = raw_img.shape[0]/2.0; y_pos_mean = raw_img.shape[1]/2.0
plt.figure(figsize=(10*.7,5*.7))
plt.subplot(1,2,1)
plt.imshow(raw_img, cmap=plt.cm.gray)
rad = .2*np.min([raw_img.shape[0],raw_img.shape[1]]); th = np.linspace(0,2.0*np.pi,100)
plt.plot([y_pos_mean-rad*vec_1_list[kk][1],y_pos_mean+rad*vec_1_list[kk][1]],[x_pos_mean-rad*vec_1_list[kk][0],x_pos_mean+rad*vec_1_list[kk][0]],'-',color=(255/255,204/255,203/255),linewidth=0.3)
plt.plot([y_pos_mean-rad*vec_2_list[kk][1],y_pos_mean+rad*vec_2_list[kk][1]],[x_pos_mean-rad*vec_2_list[kk][0],x_pos_mean+rad*vec_2_list[kk][0]],'-',color=(0.5,0.5,0.5),linewidth=0.3)
#plt.plot([y_pos_mean,y_pos_mean],[x_pos_mean-rad,x_pos_mean+rad],'-',color=(255/255,204/255,203/255),linewidth=0.2)
# add in eigenvector directions
x_vec = []; y_vec = [] ; x_vec_circ = []; y_vec_circ = []
scale = np.asarray([[.9,0],[0,.9]])
for jj in range(0,100):
		v = np.asarray([rad*np.cos(th[jj]),rad*np.sin(th[jj])])