repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
stringlengths 6-112 | stringlengths 4-204 | stringlengths 1-3 | stringlengths 4-7 | stringlengths 711-1.04M | stringclasses 15 values | int64 -9,223,328,406,218,787,000 to 9,223,331,109B | float64 5.74-99.7 | int64 17-1k | float64 0.25-0.96 | bool 1 class
OMS-NetZero/FAIR | fair/gas_cycle/gir.py | 1 | 2430 |
from __future__ import division
import numpy as np
from ..constants.general import ppm_gtc
"""Gas cycle functions from Generalised Impulse Response Model v1.0.0.
Much of this has been adapted from:
Leach et al., 2020, Geoscientific Model Development
https://www.geosci-model-dev-discuss.net/gmd-2019-379/
"""
def calculate_alpha(cumulative_emissions,airborne_emissions,temperature,r0,rC,rT,g0,g1,iirf_max = 97.0):
"""
Calculate CO2 time constant scaling factor.
Inputs:
cumulative_emissions: GtC cumulative emissions since pre-industrial.
airborne_emissions: GtC total emissions remaining in the atmosphere.
temperature: K temperature anomaly since pre-industrial.
r0: pre-industrial 100-year time-integrated airborne fraction.
rC: sensitivity of 100-year time-integrated airborne fraction with
atmospheric carbon stock.
rT: sensitivity of 100-year time-integrated airborne fraction with
temperature anomaly.
g0: parameter for alpha
g1: parameter for alpha
Keywords:
iirf_max: maximum allowable value to 100-year time-integrated airborne
fraction
Outputs:
alpha: scaling factor.
"""
iirf = r0 + rC * (cumulative_emissions-airborne_emissions) + rT * temperature
iirf = (iirf>iirf_max) * iirf_max + iirf * (iirf<iirf_max)
alpha = g0 * np.sinh(iirf / g1)
return alpha
def step_concentration(carbon_boxes0,emissions,alpha,a,tau,Cpi,dt=1):
"""
Calculate concentrations from emissions.
Inputs:
carbon_boxes0: CO2 boxes at the end of the previous timestep.
emissions: GtC CO2 emissions this timestep.
alpha: CO2 time constant scaling factor.
a: CO2 partitioning coefficient
tau: CO2 atmospheric time constants (unscaled).
Cpi: pre-industrial CO2 concentrations (ppm).
Keywords:
dt: timestep in years.
Outputs:
C: CO2 concentration (ppm)
carbon_boxes1: CO2 boxes at the end of this timestep.
    airborne_emissions: GtC total emissions remaining in atmosphere.
"""
carbon_boxes1 = emissions / ppm_gtc * a * alpha * (tau/dt) * (
1. - np.exp(-dt/(alpha*tau))) + carbon_boxes0 * np.exp(-dt/(alpha*tau))
C = Cpi + np.sum(carbon_boxes1 + carbon_boxes0) / 2
airborne_emissions = np.sum(carbon_boxes1) * ppm_gtc
return C, carbon_boxes1, airborne_emissions
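# --- Usage sketch (added for illustration; not part of the original module).
# All numeric values below are hypothetical placeholders, not the calibrated
# GIR v1.0.0 parameters; they only show how the two functions chain together.
if __name__ == '__main__':
    a_demo = np.array([0.2173, 0.2240, 0.2824, 0.2763])  # assumed partition fractions
    tau_demo = np.array([1.0e6, 394.4, 36.54, 4.304])    # assumed time constants (years)
    boxes = np.zeros(4)
    cumulative = airborne = temperature = 0.0
    for emissions in [10.0] * 5:  # GtC/yr, hypothetical constant-emissions scenario
        alpha = calculate_alpha(cumulative, airborne, temperature,
                                r0=35.0, rC=0.019, rT=4.165, g0=0.01, g1=8.0)
        C, boxes, airborne = step_concentration(boxes, emissions, alpha,
                                                a_demo, tau_demo, Cpi=278.0)
        cumulative += emissions
        print(C, airborne)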
| apache-2.0 | -8,292,986,400,798,594,000 | 37.571429 | 104 | 0.680247 | false |
mathkann/hyperopt | hyperopt/tests/test_criteria.py | 7 | 1917 |
import numpy as np
import hyperopt.criteria as crit
def test_ei():
rng = np.random.RandomState(123)
for mean, var in [(0, 1), (-4, 9)]:
thresholds = np.arange(-5, 5, .25) * np.sqrt(var) + mean
v_n = [crit.EI_gaussian_empirical(mean, var, thresh, rng, 10000)
for thresh in thresholds]
v_a = [crit.EI_gaussian(mean, var, thresh)
for thresh in thresholds]
#import matplotlib.pyplot as plt
#plt.plot(thresholds, v_n)
#plt.plot(thresholds, v_a)
#plt.show()
if not np.allclose(v_n, v_a, atol=0.03, rtol=0.03):
for t, n, a in zip(thresholds, v_n, v_a):
                print(t, n, a, abs(n - a), abs(n - a) / (abs(n) + abs(a)))
assert 0
#mean, var, thresh, v_n, v_a)
def test_log_ei():
for mean, var in [(0, 1), (-4, 9)]:
thresholds = np.arange(-5, 30, .25) * np.sqrt(var) + mean
ei = np.asarray(
[crit.EI_gaussian(mean, var, thresh)
for thresh in thresholds])
nlei = np.asarray(
[crit.logEI_gaussian(mean, var, thresh)
for thresh in thresholds])
naive = np.log(ei)
#import matplotlib.pyplot as plt
#plt.plot(thresholds, ei, label='ei')
#plt.plot(thresholds, nlei, label='nlei')
#plt.plot(thresholds, naive, label='naive')
#plt.legend()
#plt.show()
# -- assert that they match when the threshold isn't too high
assert np.allclose(nlei, naive)
def test_log_ei_range():
assert np.all(
np.isfinite(
[crit.logEI_gaussian(0, 1, thresh)
for thresh in [-500, 0, 50, 100, 500, 5000]]))
def test_ucb():
assert np.allclose(crit.UCB(0, 1, 1), 1)
assert np.allclose(crit.UCB(0, 1, 2), 2)
assert np.allclose(crit.UCB(0, 4, 1), 2)
assert np.allclose(crit.UCB(1, 4, 1), 3)
# -- flake8
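# --- Added illustration (not from hyperopt itself). A closed form consistent
# with the four assertions in test_ucb above is mean + zscore * sqrt(var);
# the real crit.UCB may be implemented differently, this only mirrors them.
def test_ucb_reference_form():
    def ucb_ref(mean, var, zscore):
        return mean + zscore * np.sqrt(var)
    for args in [(0, 1, 1), (0, 1, 2), (0, 4, 1), (1, 4, 1)]:
        assert np.allclose(ucb_ref(*args), crit.UCB(*args))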
| bsd-3-clause | -1,589,826,703,282,367,200 | 29.919355 | 73 | 0.542514 | false |
cython-testbed/pandas | pandas/tests/scalar/timestamp/test_timezones.py | 1 | 12514 |
# -*- coding: utf-8 -*-
"""
Tests for Timestamp timezone-related methods
"""
from datetime import datetime, date, timedelta
from distutils.version import LooseVersion
import pytest
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
import dateutil
from dateutil.tz import gettz, tzoffset
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas import Timestamp, NaT
from pandas.errors import OutOfBoundsDatetime
class TestTimestampTZOperations(object):
# --------------------------------------------------------------
# Timestamp.tz_localize
def test_tz_localize_pushes_out_of_bounds(self):
# GH#12677
# tz_localize that pushes away from the boundary is OK
pac = Timestamp.min.tz_localize('US/Pacific')
assert pac.value > Timestamp.min.value
pac.tz_convert('Asia/Tokyo') # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime):
Timestamp.min.tz_localize('Asia/Tokyo')
# tz_localize that pushes away from the boundary is OK
tokyo = Timestamp.max.tz_localize('Asia/Tokyo')
assert tokyo.value < Timestamp.max.value
tokyo.tz_convert('US/Pacific') # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime):
Timestamp.max.tz_localize('US/Pacific')
def test_tz_localize_ambiguous_bool(self):
# make sure that we are correctly accepting bool values as ambiguous
# GH#14402
ts = Timestamp('2015-11-01 01:00:03')
expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central')
expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central')
with pytest.raises(pytz.AmbiguousTimeError):
ts.tz_localize('US/Central')
result = ts.tz_localize('US/Central', ambiguous=True)
assert result == expected0
result = ts.tz_localize('US/Central', ambiguous=False)
assert result == expected1
def test_tz_localize_ambiguous(self):
ts = Timestamp('2014-11-02 01:00')
ts_dst = ts.tz_localize('US/Eastern', ambiguous=True)
ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False)
assert (ts_no_dst.value - ts_dst.value) / 1e9 == 3600
with pytest.raises(ValueError):
ts.tz_localize('US/Eastern', ambiguous='infer')
# GH#8025
with tm.assert_raises_regex(TypeError,
'Cannot localize tz-aware Timestamp, '
'use tz_convert for conversions'):
Timestamp('2011-01-01', tz='US/Eastern').tz_localize('Asia/Tokyo')
with tm.assert_raises_regex(TypeError,
'Cannot convert tz-naive Timestamp, '
'use tz_localize to localize'):
Timestamp('2011-01-01').tz_convert('Asia/Tokyo')
@pytest.mark.parametrize('stamp, tz', [
('2015-03-08 02:00', 'US/Eastern'),
('2015-03-08 02:30', 'US/Pacific'),
('2015-03-29 02:00', 'Europe/Paris'),
('2015-03-29 02:30', 'Europe/Belgrade')])
def test_tz_localize_nonexistent(self, stamp, tz):
# GH#13057
ts = Timestamp(stamp)
with pytest.raises(NonExistentTimeError):
ts.tz_localize(tz)
with pytest.raises(NonExistentTimeError):
ts.tz_localize(tz, errors='raise')
assert ts.tz_localize(tz, errors='coerce') is NaT
def test_tz_localize_errors_ambiguous(self):
# GH#13057
ts = Timestamp('2015-11-1 01:00')
with pytest.raises(AmbiguousTimeError):
ts.tz_localize('US/Pacific', errors='coerce')
@pytest.mark.parametrize('stamp', ['2014-02-01 09:00', '2014-07-08 09:00',
'2014-11-01 17:00', '2014-11-05 00:00'])
def test_tz_localize_roundtrip(self, stamp, tz_aware_fixture):
tz = tz_aware_fixture
ts = Timestamp(stamp)
localized = ts.tz_localize(tz)
assert localized == Timestamp(stamp, tz=tz)
with pytest.raises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
assert reset == ts
assert reset.tzinfo is None
def test_tz_localize_ambiguous_compat(self):
# validate that pytz and dateutil are compat for dst
# when the transition happens
naive = Timestamp('2013-10-27 01:00:00')
pytz_zone = 'Europe/London'
dateutil_zone = 'dateutil/Europe/London'
result_pytz = naive.tz_localize(pytz_zone, ambiguous=0)
result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=0)
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382835600000000000
if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'):
# dateutil 2.6 buggy w.r.t. ambiguous=0
# see gh-14621
# see https://github.com/dateutil/dateutil/issues/321
assert (result_pytz.to_pydatetime().tzname() ==
result_dateutil.to_pydatetime().tzname())
assert str(result_pytz) == str(result_dateutil)
elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'):
# fixed ambiguous behavior
assert result_pytz.to_pydatetime().tzname() == 'GMT'
assert result_dateutil.to_pydatetime().tzname() == 'BST'
assert str(result_pytz) != str(result_dateutil)
# 1 hour difference
result_pytz = naive.tz_localize(pytz_zone, ambiguous=1)
result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=1)
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382832000000000000
# dateutil < 2.6 is buggy w.r.t. ambiguous timezones
if LooseVersion(dateutil.__version__) > LooseVersion('2.5.3'):
# see gh-14621
assert str(result_pytz) == str(result_dateutil)
assert (result_pytz.to_pydatetime().tzname() ==
result_dateutil.to_pydatetime().tzname())
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern'),
'US/Eastern', 'dateutil/US/Eastern'])
def test_timestamp_tz_localize(self, tz):
stamp = Timestamp('3/11/2012 04:00')
result = stamp.tz_localize(tz)
expected = Timestamp('3/11/2012 04:00', tz=tz)
assert result.hour == expected.hour
assert result == expected
# ------------------------------------------------------------------
# Timestamp.tz_convert
@pytest.mark.parametrize('stamp', ['2014-02-01 09:00', '2014-07-08 09:00',
'2014-11-01 17:00', '2014-11-05 00:00'])
def test_tz_convert_roundtrip(self, stamp, tz_aware_fixture):
tz = tz_aware_fixture
ts = Timestamp(stamp, tz='UTC')
converted = ts.tz_convert(tz)
reset = converted.tz_convert(None)
assert reset == Timestamp(stamp)
assert reset.tzinfo is None
assert reset == converted.tz_convert('UTC').tz_localize(None)
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_astimezone(self, tzstr):
# astimezone is an alias for tz_convert, so keep it with
# the tz_convert tests
utcdate = Timestamp('3/11/2012 22:00', tz='UTC')
expected = utcdate.tz_convert(tzstr)
result = utcdate.astimezone(tzstr)
assert expected == result
assert isinstance(result, Timestamp)
@td.skip_if_windows
def test_tz_convert_utc_with_system_utc(self):
from pandas._libs.tslibs.timezones import maybe_get_tz
# from system utc to real utc
ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
# check that the time hasn't changed.
assert ts == ts.tz_convert(dateutil.tz.tzutc())
# from system utc to real utc
ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
# check that the time hasn't changed.
assert ts == ts.tz_convert(dateutil.tz.tzutc())
# ------------------------------------------------------------------
# Timestamp.__init__ with tz str or tzinfo
def test_timestamp_constructor_tz_utc(self):
utc_stamp = Timestamp('3/11/2012 05:00', tz='utc')
assert utc_stamp.tzinfo is pytz.utc
assert utc_stamp.hour == 5
utc_stamp = Timestamp('3/11/2012 05:00').tz_localize('utc')
assert utc_stamp.hour == 5
def test_timestamp_to_datetime_tzoffset(self):
tzinfo = tzoffset(None, 7200)
expected = Timestamp('3/11/2012 04:00', tz=tzinfo)
result = Timestamp(expected.to_pydatetime())
assert expected == result
def test_timestamp_constructor_near_dst_boundary(self):
# GH#11481 & GH#15777
# Naive string timestamps were being localized incorrectly
# with tz_convert_single instead of tz_localize_to_utc
for tz in ['Europe/Brussels', 'Europe/Prague']:
result = Timestamp('2015-10-25 01:00', tz=tz)
expected = Timestamp('2015-10-25 01:00').tz_localize(tz)
assert result == expected
with pytest.raises(pytz.AmbiguousTimeError):
Timestamp('2015-10-25 02:00', tz=tz)
result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 01:00').tz_localize('Europe/Paris')
assert result == expected
with pytest.raises(pytz.NonExistentTimeError):
Timestamp('2017-03-26 02:00', tz='Europe/Paris')
# GH#11708
naive = Timestamp('2015-11-18 10:00:00')
result = naive.tz_localize('UTC').tz_convert('Asia/Kolkata')
expected = Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')
assert result == expected
# GH#15823
result = Timestamp('2017-03-26 00:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 00:00:00+0100', tz='Europe/Paris')
assert result == expected
result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 01:00:00+0100', tz='Europe/Paris')
assert result == expected
with pytest.raises(pytz.NonExistentTimeError):
Timestamp('2017-03-26 02:00', tz='Europe/Paris')
result = Timestamp('2017-03-26 02:00:00+0100', tz='Europe/Paris')
naive = Timestamp(result.value)
expected = naive.tz_localize('UTC').tz_convert('Europe/Paris')
assert result == expected
result = Timestamp('2017-03-26 03:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 03:00:00+0200', tz='Europe/Paris')
assert result == expected
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern'),
'US/Eastern', 'dateutil/US/Eastern'])
def test_timestamp_constructed_by_date_and_tz(self, tz):
# GH#2993, Timestamp cannot be constructed by datetime.date
# and tz correctly
result = Timestamp(date(2012, 3, 11), tz=tz)
expected = Timestamp('3/11/2012', tz=tz)
assert result.hour == expected.hour
assert result == expected
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern'),
'US/Eastern', 'dateutil/US/Eastern'])
def test_timestamp_add_timedelta_push_over_dst_boundary(self, tz):
# GH#1389
# 4 hours before DST transition
stamp = Timestamp('3/10/2012 22:00', tz=tz)
result = stamp + timedelta(hours=6)
# spring forward, + "7" hours
expected = Timestamp('3/11/2012 05:00', tz=tz)
assert result == expected
def test_timestamp_timetz_equivalent_with_datetime_tz(self,
tz_naive_fixture):
# GH21358
if tz_naive_fixture is not None:
tz = dateutil.tz.gettz(tz_naive_fixture)
else:
tz = None
stamp = Timestamp('2018-06-04 10:20:30', tz=tz)
_datetime = datetime(2018, 6, 4, hour=10,
minute=20, second=30, tzinfo=tz)
result = stamp.timetz()
expected = _datetime.timetz()
assert result == expected
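# --- Added illustration (not part of the original suite). tz_localize
# attaches a zone to a naive Timestamp, tz_convert moves an aware one
# between zones, and tz_convert never changes .value (the UTC instant),
# which several tests above rely on.
def _tz_localize_convert_demo():
    naive = Timestamp('2018-06-04 10:20:30')
    aware = naive.tz_localize('US/Eastern')   # naive -> aware
    moved = aware.tz_convert('UTC')           # aware -> aware
    assert moved.value == aware.value         # same instant, new zone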
| bsd-3-clause | -6,155,344,485,097,374,000 | 39.498382 | 79 | 0.590698 | false |
zhuhuifeng/PyML | mla/neuralnet/layers/basic.py | 1 | 4512 |
import autograd.numpy as np
from autograd import elementwise_grad
from mla.neuralnet.activations import get_activation
from mla.neuralnet.parameters import Parameters
np.random.seed(9999)
class Layer(object):
def setup(self, X_shape):
"""Allocates initial weights."""
pass
def forward_pass(self, x):
raise NotImplementedError()
def backward_pass(self, delta):
raise NotImplementedError()
def shape(self, x_shape):
"""Returns shape of the current layer."""
raise NotImplementedError()
class ParamMixin(object):
@property
def parameters(self):
return self._params
class PhaseMixin(object):
_train = False
@property
def is_training(self):
return self._train
@is_training.setter
def is_training(self, is_train=True):
self._train = is_train
@property
def is_testing(self):
return not self._train
@is_testing.setter
def is_testing(self, is_test=True):
self._train = not is_test
class Dense(Layer, ParamMixin):
    def __init__(self, output_dim, parameters=None):
"""A fully connected layer.
Parameters
----------
output_dim : int
"""
self._params = parameters
self.output_dim = output_dim
self.last_input = None
if parameters is None:
self._params = Parameters()
def setup(self, x_shape):
self._params.setup_weights((x_shape[1], self.output_dim))
def forward_pass(self, X):
self.last_input = X
return self.weight(X)
def weight(self, X):
W = np.dot(X, self._params['W'])
return W + self._params['b']
def backward_pass(self, delta):
dW = np.dot(self.last_input.T, delta)
db = np.sum(delta, axis=0)
# Update gradient values
self._params.update_grad('W', dW)
self._params.update_grad('b', db)
return np.dot(delta, self._params['W'].T)
def shape(self, x_shape):
return x_shape[0], self.output_dim
class Activation(Layer):
def __init__(self, name):
self.last_input = None
self.activation = get_activation(name)
# Derivative of activation function
self.activation_d = elementwise_grad(self.activation)
def forward_pass(self, X):
self.last_input = X
return self.activation(X)
def backward_pass(self, delta):
return self.activation_d(self.last_input) * delta
def shape(self, x_shape):
return x_shape
class Dropout(Layer, PhaseMixin):
"""Randomly set a fraction of `p` inputs to 0 at each training update."""
def __init__(self, p=0.1):
self.p = p
self._mask = None
def forward_pass(self, X):
assert self.p > 0
if self.is_training:
self._mask = np.random.uniform(size=X.shape) > self.p
y = X * self._mask
else:
y = X * (1.0 - self.p)
return y
def backward_pass(self, delta):
return delta * self._mask
def shape(self, x_shape):
return x_shape
class TimeStepSlicer(Layer):
"""Take a specific time step from 3D tensor."""
def __init__(self, step=-1):
self.step = step
def forward_pass(self, x):
return x[:, self.step, :]
def backward_pass(self, delta):
return np.repeat(delta[:, np.newaxis, :], 2, 1)
def shape(self, x_shape):
return x_shape[0], x_shape[2]
class TimeDistributedDense(Layer):
"""Apply regular Dense layer to every timestep."""
def __init__(self, output_dim):
self.output_dim = output_dim
self.n_timesteps = None
self.dense = None
self.input_dim = None
def setup(self, X_shape):
self.dense = Dense(self.output_dim)
self.dense.setup((X_shape[0], X_shape[2]))
self.input_dim = X_shape[2]
def forward_pass(self, X):
n_timesteps = X.shape[1]
X = X.reshape(-1, X.shape[-1])
y = self.dense.forward_pass(X)
y = y.reshape((-1, n_timesteps, self.output_dim))
return y
def backward_pass(self, delta):
n_timesteps = delta.shape[1]
X = delta.reshape(-1, delta.shape[-1])
y = self.dense.backward_pass(X)
y = y.reshape((-1, n_timesteps, self.input_dim))
return y
@property
def parameters(self):
return self.dense._params
def shape(self, x_shape):
return x_shape[0], x_shape[1], self.output_dim
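# --- Wiring sketch (added for illustration). Shows one forward/backward
# pass through Dense -> Activation; assumes a 'sigmoid' activation is
# registered in mla.neuralnet.activations, and uses a dummy upstream delta.
def _dense_activation_demo():
    X = np.random.randn(8, 5)      # batch of 8 samples, 5 features
    dense = Dense(3)
    dense.setup(X.shape)           # allocates W of shape (5, 3) and b
    act = Activation('sigmoid')
    out = act.forward_pass(dense.forward_pass(X))
    delta = np.ones_like(out)      # dummy gradient from the next layer
    dX = dense.backward_pass(act.backward_pass(delta))
    assert dX.shape == X.shape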
| apache-2.0 | 4,052,290,290,537,210,400 | 23.791209 | 77 | 0.586436 | false |
rykov8/ssd_keras | ssd_layers.py | 3 | 6719 |
"""Some special purpose layers for SSD."""
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
import numpy as np
import tensorflow as tf
class Normalize(Layer):
"""Normalization layer as described in ParseNet paper.
# Arguments
scale: Default feature scale.
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if dim_ordering='tf'.
# Output shape
Same as input
# References
http://cs.unc.edu/~wliu/papers/parsenet.pdf
#TODO
Add possibility to have one scale for all features.
"""
def __init__(self, scale, **kwargs):
if K.image_dim_ordering() == 'tf':
self.axis = 3
else:
self.axis = 1
self.scale = scale
super(Normalize, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (input_shape[self.axis],)
init_gamma = self.scale * np.ones(shape)
self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name))
self.trainable_weights = [self.gamma]
def call(self, x, mask=None):
output = K.l2_normalize(x, self.axis)
output *= self.gamma
return output
class PriorBox(Layer):
"""Generate the prior boxes of designated sizes and aspect ratios.
# Arguments
img_size: Size of the input image as tuple (w, h).
min_size: Minimum box size in pixels.
max_size: Maximum box size in pixels.
aspect_ratios: List of aspect ratios of boxes.
flip: Whether to consider reverse aspect ratios.
variances: List of variances for x, y, w, h.
clip: Whether to clip the prior's coordinates
such that they are within [0, 1].
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if dim_ordering='tf'.
# Output shape
3D tensor with shape:
(samples, num_boxes, 8)
# References
https://arxiv.org/abs/1512.02325
#TODO
Add possibility not to have variances.
Add Theano support
"""
def __init__(self, img_size, min_size, max_size=None, aspect_ratios=None,
flip=True, variances=[0.1], clip=True, **kwargs):
if K.image_dim_ordering() == 'tf':
self.waxis = 2
self.haxis = 1
else:
self.waxis = 3
self.haxis = 2
self.img_size = img_size
if min_size <= 0:
raise Exception('min_size must be positive.')
self.min_size = min_size
self.max_size = max_size
self.aspect_ratios = [1.0]
if max_size:
if max_size < min_size:
raise Exception('max_size must be greater than min_size.')
self.aspect_ratios.append(1.0)
if aspect_ratios:
for ar in aspect_ratios:
if ar in self.aspect_ratios:
continue
self.aspect_ratios.append(ar)
if flip:
self.aspect_ratios.append(1.0 / ar)
self.variances = np.array(variances)
        self.clip = clip
super(PriorBox, self).__init__(**kwargs)
def get_output_shape_for(self, input_shape):
num_priors_ = len(self.aspect_ratios)
layer_width = input_shape[self.waxis]
layer_height = input_shape[self.haxis]
num_boxes = num_priors_ * layer_width * layer_height
return (input_shape[0], num_boxes, 8)
def call(self, x, mask=None):
if hasattr(x, '_keras_shape'):
input_shape = x._keras_shape
elif hasattr(K, 'int_shape'):
input_shape = K.int_shape(x)
layer_width = input_shape[self.waxis]
layer_height = input_shape[self.haxis]
img_width = self.img_size[0]
img_height = self.img_size[1]
# define prior boxes shapes
box_widths = []
box_heights = []
for ar in self.aspect_ratios:
if ar == 1 and len(box_widths) == 0:
box_widths.append(self.min_size)
box_heights.append(self.min_size)
elif ar == 1 and len(box_widths) > 0:
box_widths.append(np.sqrt(self.min_size * self.max_size))
box_heights.append(np.sqrt(self.min_size * self.max_size))
elif ar != 1:
box_widths.append(self.min_size * np.sqrt(ar))
box_heights.append(self.min_size / np.sqrt(ar))
box_widths = 0.5 * np.array(box_widths)
box_heights = 0.5 * np.array(box_heights)
# define centers of prior boxes
step_x = img_width / layer_width
step_y = img_height / layer_height
linx = np.linspace(0.5 * step_x, img_width - 0.5 * step_x,
layer_width)
liny = np.linspace(0.5 * step_y, img_height - 0.5 * step_y,
layer_height)
centers_x, centers_y = np.meshgrid(linx, liny)
centers_x = centers_x.reshape(-1, 1)
centers_y = centers_y.reshape(-1, 1)
# define xmin, ymin, xmax, ymax of prior boxes
num_priors_ = len(self.aspect_ratios)
prior_boxes = np.concatenate((centers_x, centers_y), axis=1)
prior_boxes = np.tile(prior_boxes, (1, 2 * num_priors_))
prior_boxes[:, ::4] -= box_widths
prior_boxes[:, 1::4] -= box_heights
prior_boxes[:, 2::4] += box_widths
prior_boxes[:, 3::4] += box_heights
prior_boxes[:, ::2] /= img_width
prior_boxes[:, 1::2] /= img_height
prior_boxes = prior_boxes.reshape(-1, 4)
if self.clip:
prior_boxes = np.minimum(np.maximum(prior_boxes, 0.0), 1.0)
# define variances
num_boxes = len(prior_boxes)
if len(self.variances) == 1:
variances = np.ones((num_boxes, 4)) * self.variances[0]
elif len(self.variances) == 4:
variances = np.tile(self.variances, (num_boxes, 1))
else:
raise Exception('Must provide one or four variances.')
prior_boxes = np.concatenate((prior_boxes, variances), axis=1)
prior_boxes_tensor = K.expand_dims(K.variable(prior_boxes), 0)
if K.backend() == 'tensorflow':
pattern = [tf.shape(x)[0], 1, 1]
prior_boxes_tensor = tf.tile(prior_boxes_tensor, pattern)
elif K.backend() == 'theano':
#TODO
pass
return prior_boxes_tensor
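# --- Sanity-check sketch (added; assumes TensorFlow dimension ordering).
# With min_size, max_size and aspect_ratios=[2], flip=True, the ratio list
# built in __init__ becomes [1, 1, 2, 1/2], i.e. 4 priors per cell, so an
# 8x8 feature map yields 8 * 8 * 4 = 256 boxes of 8 values each.
def _prior_box_count_demo():
    pb = PriorBox(img_size=(300, 300), min_size=30.0, max_size=60.0,
                  aspect_ratios=[2], flip=True)
    assert len(pb.aspect_ratios) == 4
    assert pb.get_output_shape_for((None, 8, 8, 512)) == (None, 256, 8)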
| mit | 9,192,958,743,215,568,000 | 36.121547 | 78 | 0.564072 | false |
OmnesRes/pan_cancer | paper/cox_regression/KIRC/patient_info.py | 1 | 6241 |
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirc.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical=[['','','']]
for i in data:
try:
if clinical[-1][0]==i[0]:
if i[8]=='Alive':
clinical[-1]=[i[0],int(i[9]),'Alive']
elif i[8]=='Dead':
clinical[-1]=[i[0],int(i[10]),'Dead']
else:
pass
else:
if i[8]=='Alive':
clinical.append([i[0],int(i[9]),'Alive'])
elif i[8]=='Dead':
clinical.append([i[0],int(i[10]),'Dead'])
else:
pass
except:
pass
## Removing the empty value.
clinical=clinical[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_patient_kirc.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[0]]=[grade_dict[i[4]],sex_dict[i[8]],int(i[-16])]
if i[24]=='Alive':
clinical4.append([i[0],int(i[25]),'Alive'])
elif i[24]=='Dead':
clinical4.append([i[0],int(i[26]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    if i[0] in TCGA_to_mrna:
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
| mit | 3,009,862,005,212,880,400 | 28.300469 | 132 | 0.645089 | false |
l-althueser/NiMoNa_DCM16 | DCM/programs/RK4.py | 1 | 1088 |
# -*- coding: utf-8 -*-
"""
@author: Tobias
Timo
Description:
Fourth-order Runge-Kutta method for solving first-order ordinary differential equations (ODEs).
Outputs the time evolution in matrix form.
Important:
The dimension of the input parameter x_0 must match the model used.
Python version:
3.5.1
"""
import numpy as np
def RK4(f,theta,u,x_0,t0,T,dt):
    # Input: function, parameter set, stimulus, initial value (array), start time, end time, time step
    t = np.arange(t0,T,dt) # time array
    # x = np.zeros((int(len(x_0)), int((T - t0) / dt + 1))) # set the size of the output matrix
    x = np.zeros((int(len(x_0)), len(t)))
    x[:,0] = x_0 # initial conditions in the first column
for i in range(0,int(np.size(x,1))-1):
k_1 = f(x,u,theta,i)
k_2 = f(x + 0.5*dt*k_1,u,theta,i)
k_3 = f(x + 0.5*dt*k_2,u,theta,i)
k_4 = f(x + dt*k_3,u,theta,i)
x[:,i+1] = x[:,i] + (dt/6.)*(k_1.T + 2*k_2.T + 2*k_3.T + k_4.T)
    return x
| bsd-2-clause | -8,869,651,187,784,231,000 | 29.111111 | 105 | 0.553093 | false |
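# --- Usage sketch for the RK4 integrator above (added for illustration).
# The decay model is a hypothetical stand-in, not a DCM model: f receives
# the full state matrix x and the current column index i.
def _decay(x, u, theta, i):
    # toy model: dx/dt = -theta * x, evaluated at time step i
    return -theta * x[:, i]

if __name__ == "__main__":
    traj = RK4(_decay, 0.5, None, np.array([1.0]), 0.0, 5.0, 0.01)
    # traj[0, -1] should be close to exp(-0.5 * 4.99) ~= 0.082
    print(traj[0, -1])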
dingliu0305/Tree-Tensor-Networks-in-Machine-Learning | code/tsne_mnist.py | 1 | 3214 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import matplotlib as mpl
# mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pickle
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.manifold import TSNE
data_folder = "./data/mnist/"
n_epochs = 3
bond_data = 2
bond_inner = 3
bond_label = 2
n_class = 2
n_train_each = 900
n_train = n_train_each * n_class
n_test_each = 1000
n_test = n_test_each * 10
layer_units = [16, 8, 4, 2, 1]
train_tn = False
LAB1 = [0] * n_train_each
LAB2 = [1] * n_train_each
LAB = np.concatenate((LAB1, LAB2), axis=0)
# -------------------------------------------------------------------------
# load training data
print("loading data")
input = open(data_folder + 'tsne.pkl', 'rb')
ttn = pickle.load(input)
#%%
LAB1 = [0] * n_train_each
LAB2 = [1] * n_train_each
LAB = np.concatenate((LAB1, LAB2), axis=0)
def squash_layer(contraction, which_layer):
layer = []
for row in contraction[which_layer]:
layer += [element.data for element in row]
return np.vstack(layer).T
def tsne(contraction, which_layer, per, lear, tags=LAB):
selection = np.random.choice(n_train, size=n_train, replace=False)
mf = TSNE(n_components=2, perplexity=per,
learning_rate=lear, init='pca', n_iter=1200)
M = squash_layer(contraction, which_layer)
x = M[selection]
x_embed = mf.fit_transform(x)
TAGS = []
TAGS = tags[selection]
return x_embed, TAGS
def sort(contraction, which_layer, per, lear, tags=LAB):
x_embed, TAGS = tsne(contraction, which_layer, per, lear, tags=LAB)
CATS = []
DOGS = []
for i in range(len(TAGS)):
if TAGS[i] == 0:
CATS.append(x_embed[i])
if TAGS[i] == 1:
DOGS.append(x_embed[i])
result = np.concatenate((CATS, DOGS), axis=0)
return result
#%%
def plot(contraction, which_layer, per, lear, tags=LAB):
result = sort(contraction, which_layer, per, lear, tags=LAB)
fig = plt.figure()
ax1 = fig.add_subplot(111)
x = result[:, 0]
y = result[:, 1]
ax1.scatter(x[0:n_train_each], y[0:n_train_each], s=11,
c='b', marker="o", label='Planes', alpha=0.5)
ax1.scatter(x[n_train_each + 1:n_train], y[n_train_each + 1:n_train],
s=11, c='r', marker="o", label='Horses', alpha=0.5)
plt.legend(loc='upper right')
plt.axis('off')
# plt.show()
pp = PdfPages('%s_P%s_L%s.pdf' % (which_layer, per, lear))
pp.savefig(fig)
pp.close()
return fig
#%%
def sweep(contraction, which_layer, per, tags=LAB):
L = [200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750]
f = []
for i in range(0, len(L)):
f = plot(contraction, i, per, L[i], tags=LAB)
return f
#%%
def sweep2(contraction, which_layer, tags=LAB):
G = [30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140]
q = []
for i in range(0, len(G)):
q = sweep(contraction, which_layer, G[i], tags=LAB)
return q
#%%
def sweep3(contraction, per, lear, tags=LAB):
m = []
for i in range(1, 5):
m = sweep2(contraction, i, tags=LAB)
return m
for i in range(5):
plot(ttn.contracted, i, 60, 400)
plt.show()
| mit | 9,021,563,037,263,550,000 | 22.289855 | 75 | 0.584941 | false |
ominux/scikit-learn | examples/manifold/plot_compare_methods.py | 4 | 2211 |
"""
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points)
n_neighbors = 10
out_dim = 2
fig = pl.figure(figsize=(12, 8))
pl.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(231, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=pl.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(231, projection='3d')
pl.scatter(X[:, 0], X[:, 2], c=color, cmap=pl.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, out_dim,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print "%s: %.2g sec" % (methods[i], t1 - t0)
ax = fig.add_subplot(232 + i)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, out_dim).fit_transform(X)
t1 = time()
print "Isomap: %.2g sec" % (t1 - t0)
ax = fig.add_subplot(236)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
pl.show()
| bsd-3-clause | -8,009,370,989,050,230,000 | 29.708333 | 71 | 0.620081 | false |
StongeEtienne/trimeshpy | trimeshpy/math/mesh_map.py | 1 | 6095 |
# Etienne St-Onge
from __future__ import division
import numpy as np
from scipy.sparse import csc_matrix
from trimeshpy.math.util import square_length
from trimeshpy.math.mesh_global import G_DTYPE
# Mesh structure functions
# Map ( Adjacency / Connectivity ) Functions
#
# number of vertices = n
# number of triangles = m
# number of edges = l = m*3 (directed edge)
#
# vertices array : n x 3
# v[i] = [ x, y, z ]
#
# triangles array : m x 3
# t[a] = [ v[i], v[j], v[k] ]
#    right handed triangles
#
#
# Example :
#
# vj_ _ _ _ vo
# /\ /\
# / \ tf / \
# / ta \ / te \
# vk/_ _ _ vi _ _ _\ vn
# \ /\ /
# \ tb / \ td /
# \ / tc \ /
# \/_ _ _ \/
# vl vm
#
# Vertices = [v[i] = [x_i, y_i, z_i],
# v[j] = [x_j, y_j, z_j],
# v[k] = [x_k, y_k, z_k],
# v[l] = [x_l, y_l, z_l],
# v[m] = [x_m, y_m, z_m],
# v[n] = [x_n, y_n, z_n],
# v[o] = [x_o, y_o, z_o]]
#
# Triangles = [t[a] = [i, j, k],
# t[b] = [i, k, l],
# t[c] = [i, l, m],
# t[d] = [i, m, n],
# t[e] = [i, n, o],
# t[f] = [i, o, j]]
#
# triangle_vertex_map : m x n -> boolean, loss of orientation
# t_v[] v[i] v[j] v[k] v[l] v[m] v[n] v[o]
# t[a] 1 1 1
# t[b] 1 1 1
# t[c] 1 1 1
# t[d] 1 1 1
# t[e] 1 1 1
# t[f] 1 1 1
#
# Edges Maps
# edge_adjacency : n x n -> boolean, not symmetric if mesh not closed
# e[,] v[i] v[j] v[k] v[l] v[m] v[n] v[o]
# v[i] 1 1 1 1 1 1
# v[j] 1 1
# v[k] 1 1
# v[l] 1 1
# v[m] 1 1
# v[n] 1 1
# v[o] 1 1
#
# edge_triangle_map : n x n -> triangle_index
# e_t[,] v[i] v[j] v[k] v[l] v[m] v[n] v[o]
# v[i] a b c d e f
# v[j] f a
# v[k] a b
# v[l] b c
# v[m] c d
# v[n] d e
# v[o] e f
#
# edge_opposing_vertex : n x n -> vertex_index
# e_ov[] v[i] v[j] v[k] v[l] v[m] v[n] v[o]
# v[i] k l m n o j
# v[j] o i
# v[k] j i
# v[l] k i
# v[m] l i
# v[n] m i
# v[o] n i
#
# edge_adjacency : n x n -> boolean (sparse connectivity matrix)
# e[i,j] = v[i] -> v[j] = { 1, if connected }
def edge_adjacency(triangles, vertices):
vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]])
vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]])
    values = np.ones_like(vts_i, dtype=bool)
vv_map = csc_matrix((values, (vts_i, vts_j)),
shape=(vertices.shape[0], vertices.shape[0]))
return vv_map
# edge_sqr_length : n x n -> float (sparse connectivity matrix)
# e[i,j] = v[i] -> v[j] = { || v[i] - v[j] ||^2, if connected }
def edge_sqr_length(triangles, vertices):
vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]])
vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]])
values = square_length(vertices[vts_i] - vertices[vts_j])
vv_map = csc_matrix((values, (vts_i, vts_j)), shape=(
vertices.shape[0], vertices.shape[0]), dtype=G_DTYPE)
return vv_map
# edge_length : n x n -> float (sparse connectivity matrix)
# e[i,j] = v[i] -> v[j] = { || v[i] - v[j] ||, if connected }
def edge_length(triangles, vertices):
vv_map = edge_sqr_length(triangles, vertices)
vv_map.data = np.sqrt(vv_map.data)
return vv_map
# edge_sqr_length : n x n -> float (sparse connectivity matrix)
# e[i,j] = { edge_length, if l2_weighted }
def edge_map(triangles, vertices, l2_weighted=False):
if l2_weighted:
return edge_length(triangles, vertices)
else:
return edge_adjacency(triangles, vertices)
# edge_triangle_map : n x n -> triangle_index (sparse connectivity matrix)
# e_t[i,j] = e[i,j] -> t[a] = { 1, if triangle[a] is compose of edge[i,j] }
def edge_triangle_map(triangles, vertices):
vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]])
vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]])
triangles_index = np.tile(np.arange(len(triangles)), 3)
vv_t_map = csc_matrix((triangles_index, (vts_i, vts_j)),
shape=(vertices.shape[0], vertices.shape[0]))
return vv_t_map
# edge_opposing_vertex : n x n -> vertex_index (int) (sparse co matrix)
# e[i,j] = v[i],v[j] = { v[k], if v[i],v[j],v[k] triangle exist }
def edge_opposing_vertex(triangles, vertices):
vts_i = np.hstack([triangles[:, 0], triangles[:, 1], triangles[:, 2]])
vts_j = np.hstack([triangles[:, 1], triangles[:, 2], triangles[:, 0]])
vts_k = np.hstack([triangles[:, 2], triangles[:, 0], triangles[:, 1]])
vv_v_map = csc_matrix((vts_k, (vts_i, vts_j)),
shape=(vertices.shape[0], vertices.shape[0]))
return vv_v_map
# triangle_vertex_map : m x n -> bool (sparse connectivity matrix)
# t_v[i,a] = t[a] <-> v[i] = { 1, if triangle[a] is compose of vertex[i] }
def triangle_vertex_map(triangles, vertices):
triangles_index = np.repeat(np.arange(len(triangles)), 3)
vertices_index = np.hstack(triangles)
values = np.ones_like(triangles_index, dtype=np.bool)
tv_map = csc_matrix((values, (triangles_index, vertices_index)),
shape=(len(triangles), vertices.shape[0]))
return tv_map
def vertices_degree(triangles, vertices):
tv_matrix = triangle_vertex_map(triangles, vertices)
return np.squeeze(np.asarray(tv_matrix.sum(0)))
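# --- Minimal usage sketch (added for illustration). One right-handed
# triangle: the adjacency matrix holds the three directed edges and each
# vertex of the lone triangle has degree 1.
def _single_triangle_demo():
    vertices = np.array([[0.0, 0.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [0.0, 1.0, 0.0]])
    triangles = np.array([[0, 1, 2]])
    adj = edge_adjacency(triangles, vertices)
    assert adj[0, 1] and adj[1, 2] and adj[2, 0]  # directed edges i -> j
    assert np.all(vertices_degree(triangles, vertices) == 1)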
| mit | -4,785,637,220,778,597,000 | 34.028736 | 78 | 0.474815 | false |
hosseinsadeghi/ultracold-ions | test/test_CoulombAcc.py | 2 | 4459 |
import uci.CoulombAcc as uci
import numpy as np
import pyopencl as cl
import pyopencl.array as cl_array
testCtx = cl.create_some_context(interactive = True)
testQueue = cl.CommandQueue(testCtx)
def test_Constructor():
coulomb_acc = uci.CoulombAcc()
def test_ForceOnSingleParticleIsZero():
coulomb_acc = uci.CoulombAcc(testCtx, testQueue)
one = np.ones(1)
ax = np.zeros(1)
ay = np.zeros(1)
az = np.zeros(1)
xd = cl_array.to_device(testQueue, one)
yd = cl_array.to_device(testQueue, one)
zd = cl_array.to_device(testQueue, one)
vxd = cl_array.to_device(testQueue, one)
vyd = cl_array.to_device(testQueue, one)
vzd = cl_array.to_device(testQueue, one)
qd = cl_array.to_device(testQueue, one)
md = cl_array.to_device(testQueue, one)
axd = cl_array.to_device(testQueue, ax)
ayd = cl_array.to_device(testQueue, ay)
azd = cl_array.to_device(testQueue, az)
coulomb_acc.computeAcc(xd, yd, zd, vxd, vyd, vzd, qd, md,
axd, ayd, azd, 0)
axd.get(testQueue, ax)
ayd.get(testQueue, ay)
azd.get(testQueue, az)
assert ax[0] == 0
assert ay[0] == 0
assert az[0] == 0
def test_TwoParticlesWithEqualChargeRepelEachOther():
coulomb_acc = uci.CoulombAcc(testCtx, testQueue)
one = np.ones(2)
ax = np.zeros(2)
ay = np.zeros(2)
az = np.zeros(2)
x = np.array([0.1, 1])
y = np.array([0.2, 2.3])
z = np.array([0.3, 2.7])
xd = cl_array.to_device(testQueue, x)
yd = cl_array.to_device(testQueue, y)
zd = cl_array.to_device(testQueue, z)
vxd = cl_array.to_device(testQueue, one)
vyd = cl_array.to_device(testQueue, one)
vzd = cl_array.to_device(testQueue, one)
qd = cl_array.to_device(testQueue, one)
md = cl_array.to_device(testQueue, one)
axd = cl_array.to_device(testQueue, ax)
ayd = cl_array.to_device(testQueue, ay)
azd = cl_array.to_device(testQueue, az)
coulomb_acc.computeAcc(xd, yd, zd, vxd, vyd, vzd, qd, md,
axd, ayd, azd, 0)
axd.get(testQueue, ax)
ayd.get(testQueue, ay)
azd.get(testQueue, az)
assert ax[0] != 0
assert np.abs(ax[0] + ax[1]) < 1.0e-6
assert np.abs(ay[0] + ay[1]) < 1.0e-6
assert np.abs(az[0] + az[1]) < 1.0e-6
def reference_solution(x, y, z, vx, vy, vz, q, m, ax, ay, az):
epsilon0 = 8.854187817e-12
for i in range(x.size):
for j in range(x.size):
prefactor = 1.0 / (4.0 * np.pi * epsilon0) * q[i] * q[j]
r = np.sqrt(
(x[i] - x[j]) * (x[i] - x[j]) +
(y[i] - y[j]) * (y[i] - y[j]) +
(z[i] - z[j]) * (z[i] - z[j]) +
1.0e-20
)
rCubed = np.power(r, 3.0)
ax[i] += prefactor * (x[i] - x[j]) / rCubed / m[i]
ay[i] += prefactor * (y[i] - y[j]) / rCubed / m[i]
az[i] += prefactor * (z[i] - z[j]) / rCubed / m[i]
def compareWithReferenceSol(n):
coulomb_acc = uci.CoulombAcc(testCtx, testQueue)
x = np.random.random_sample(n) - 0.5
y = np.random.random_sample(n) - 0.5
z = np.random.random_sample(n) - 0.5
vx = np.random.random_sample(n) - 0.5
vy = np.random.random_sample(n) - 0.5
vz = np.random.random_sample(n) - 0.5
q = np.random.random_sample(n) - 0.5
m = np.random.random_sample(n) - 0.5
ax = np.zeros(n)
ay = np.zeros(n)
az = np.zeros(n)
xd = cl_array.to_device(testQueue, x)
yd = cl_array.to_device(testQueue, y)
zd = cl_array.to_device(testQueue, z)
vxd = cl_array.to_device(testQueue, vx)
vyd = cl_array.to_device(testQueue, vy)
vzd = cl_array.to_device(testQueue, vz)
qd = cl_array.to_device(testQueue, q)
md = cl_array.to_device(testQueue, m)
axd = cl_array.to_device(testQueue, ax)
ayd = cl_array.to_device(testQueue, ay)
azd = cl_array.to_device(testQueue, az)
coulomb_acc.computeAcc(xd, yd, zd, vxd, vyd, vzd, qd, md,
axd, ayd, azd, 0)
axd.get(testQueue, ax)
ayd.get(testQueue, ay)
azd.get(testQueue, az)
ax_ref = np.zeros(n)
ay_ref = np.zeros(n)
az_ref = np.zeros(n)
reference_solution(x, y, z, vx, vy, vz, q, m, ax_ref, ay_ref, az_ref)
for i in range(n):
assert np.abs(ax[i] - ax_ref[i]) / (
np.abs(ax[i]) + np.abs(ax_ref[i])) < 1.0e-6
def test_SmallSystem():
compareWithReferenceSol(10)
def test_PowerOfTwo():
compareWithReferenceSol(128)
| mit | -2,679,120,046,525,874,700 | 30.401408 | 73 | 0.578381 | false |
hughperkins/gpu-experiments | gpuexperiments/occupancy_dyn_graphs.py | 1 | 2094 |
"""
Try using dynamic shared memory; see if it gets optimized away or affects occupancy.
"""
from __future__ import print_function, division
import argparse
import string
import numpy as np
import os
import matplotlib.pyplot as plt
plt.rcdefaults()
import matplotlib.pyplot as plt
from os.path import join
parser = argparse.ArgumentParser()
parser.add_argument('--devicename')
args = parser.parse_args()
times = []
assert args.devicename is not None
deviceNameSimple = args.devicename
f = open('results/occupancy_dyn_%s.tsv' % args.devicename, 'r')
f.readline()
for line in f:
split_line = line.split('\t')
times.append({'name': split_line[0], 'time': float(split_line[1]), 'flops': float(split_line[2])})
f.close()
X32_list = []
Y32_list = []
X64_list = []
Y64_list = []
for timeinfo in times:
name = timeinfo['name']
if not name.startswith('k1_g1024_b'):
continue
block = int(name.split('_')[2].replace('b', ''))
x = int(name.split('_')[-1].replace('s', ''))
y = timeinfo['flops']
if block == 32:
X32_list.append(x)
Y32_list.append(y)
elif block == 64:
X64_list.append(x)
Y64_list.append(y)
X32 = np.array(X32_list)
X64 = np.array(X64_list)
Y32 = np.array(Y32_list)
Y64 = np.array(Y64_list)
plt.plot(X32, Y32, label='blocksize 32')
plt.plot(X64, Y64, label='blocksize 64')
plt.axis([0, max(X32), 0, max(Y64)])
plt.title(deviceNameSimple)
plt.xlabel('Shared memory per block (KiB)')
plt.ylabel('GFLOPS')
legend = plt.legend(loc='upper right') # fontsize='x-large')
plt.savefig('/tmp/occupancy_by_shared_%s.png' % deviceNameSimple, dpi=150)
plt.close()
X_list = []
Y_list = []
for timeinfo in times:
name = timeinfo['name']
if not name.startswith('kernel_bsm'):
continue
X_list.append(int(name.split('bsm')[1].split(' ')[0]))
Y_list.append(timeinfo['flops'])
X = np.array(X_list)
Y = np.array(Y_list)
plt.plot(X, Y)
plt.axis([0, max(X), 0, max(Y)])
plt.title(deviceNameSimple)
plt.xlabel('blocks per SM')
plt.ylabel('GFLOPS')
plt.savefig('/tmp/occupancy_%s.png' % deviceNameSimple, dpi=150)
| bsd-2-clause | -6,801,072,646,690,389,000 | 26.552632 | 102 | 0.660936 | false |
ljschumacher/tierpsy-tracker | tierpsy/analysis/stage_aligment/findStageMovement.py | 2 | 58507 |
import numpy as np
import warnings
import tables
from tierpsy.helper.misc import TimeCounter, print_flush, get_base_name
from tierpsy.helper.params import read_fps
from tierpsy.analysis.stage_aligment.get_mask_diff_var import get_mask_diff_var
def _matlab_std(x):
if x.size <= 1:
#in array of size 1 MATLAB returns 0 in the std while numpy nan
return 0.
else:
#ddof=1 to have the same behaviour as MATLAB
return np.nanstd(x, ddof=1)
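# Added note: np.nanstd(x, ddof=1) reproduces MATLAB's sample (N-1) std,
# e.g. _matlab_std(np.array([1., 2., 3.])) == 1.0, while the size-1 guard
# above returns 0.0 where plain numpy std would give nan.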
def getFrameDiffVar(masked_file, progress_refresh_rate_s=100):
base_name = get_base_name(masked_file)
progress_prefix = '{} Calculating variance of the difference between frames.'.format(base_name)
with tables.File(masked_file, 'r') as fid:
masks = fid.get_node('/mask')
tot, w, h = masks.shape
progress_time = TimeCounter(progress_prefix, tot)
fps = read_fps(masked_file, dflt=25)
progress_refresh_rate = int(round(fps*progress_refresh_rate_s))
img_var_diff = np.zeros(tot-1)
frame_prev = masks[0]
for ii in range(1, tot):
frame_current = masks[ii]
img_var_diff[ii-1] = get_mask_diff_var(frame_current, frame_prev)
frame_prev = frame_current;
if ii % progress_refresh_rate == 0:
print_flush(progress_time.get_str(ii))
if tot>1:
print_flush(progress_time.get_str(ii))
return img_var_diff
def graythreshmat(I_ori):
#reimplementation of the matlab graythresh for consistency
#it convert the image into a uint8 if it is a double it assumes
#it is between 0 and 1,
I = I_ori.copy()
#make nan zeros (that's what matlab does)
I[np.isnan(I)]=0
assert np.all(I>=0) and np.all(I<=1)
I = np.round(I*255).astype(np.uint8)
if np.all(I == I[0]):
#if all values are equal return 0
return 0
num_bins = 256;
counts = np.bincount(I, minlength=num_bins);
p = counts/np.sum(counts)
omega = np.cumsum(p)
mu = np.cumsum(p *(np.arange(1, num_bins+1)));
mu_t = mu[-1]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sigma_b_squared = ((mu_t * omega - mu)**2) / (omega * (1 - omega));
if not np.all(np.isnan(sigma_b_squared)):
maxval = np.nanmax(sigma_b_squared);
idx = np.mean(np.where(sigma_b_squared == maxval)[0]);
level = idx / (num_bins - 1);
else:
level = 0
return level
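# --- Usage sketch (added). graythreshmat expects intensities in [0, 1] and
# returns a threshold level in [0, 1], mirroring MATLAB's graythresh; for a
# bimodal input the threshold lands between the two modes (~0.5 here).
def _graythresh_demo():
    img = np.concatenate([np.full(50, 0.1), np.full(50, 0.9)])
    assert 0.1 < graythreshmat(img) < 0.9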
def _get_small_otsu(frame_diffs, th):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
small_diffs = frame_diffs[frame_diffs < th];
small_th = np.nanmedian(small_diffs) + 3 * _matlab_std(small_diffs);
return small_diffs, small_th
def maxPeaksDistHeight(x, dist, height):
"""
%MAXPEAKSDISTHEIGHT Find the maximum peaks in a vector. The peaks are
% separated by, at least, the given distance unless interrupted and are, at least, the given
% height.
%
% [PEAKS INDICES] = MAXPEAKSDISTHEIGHT(X, DIST, HEIGHT)
%
% Input:
% x - the vector to search for maximum peaks
% dist - the minimum distance between peaks
% height - the minimum height for peaks
%
% Output:
% peaks - the maximum peaks
% indices - the indices for the peaks
%
%
% © Medical Research Council 2012
% You will not remove any copyright or other notices from the Software;
% you must reproduce all copyright notices and other proprietary
% notices on any copies of the Software.
"""
#% Is the vector larger than the search window?
winSize = 2 * dist + 1;
if x.size < winSize:
peak = np.nanmax(x)
if peak < height:
return np.zeros(0), np.zeros(0)
#initialize variables
peaks = []
indices = []
im = None; #% the last maxima index
ip = None; #% the current, potential, max peak index
p = None; #% the current, potential, max peak value
i = 0; #% the vector index
#% Search for peaks.
while i < x.size:
#% Found a potential peak.
if (x[i] >= height) and ((p is None) or (x[i] > p)):
ip = i;
p = x[i];
#% Test the potential peak.
if (p is not None) and ((i - ip >= dist) or (i == x.size-1)):
#% Check the untested values next to the previous maxima.
if (im is not None) and (ip - im <= 2 * dist):
#% Record the peak.
if p > np.nanmax(x[(ip - dist):(im + dist+1)]):
indices.append(ip);
peaks.append(p);
#% Record the maxima.
im = ip;
ip = i;
p = x[ip];
#% Record the peak.
else:
indices.append(ip);
peaks.append(p);
im = ip;
ip = i;
p = x[ip];
#% Advance.
i = i + 1;
return np.array(peaks), np.array(indices)
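# --- Usage sketch (added). Two peaks above the height cutoff, more than
# `dist` samples apart, are both reported with their indices.
def _max_peaks_demo():
    x = np.array([0., 5., 0., 0., 0., 4., 0.])
    peaks, indices = maxPeaksDistHeight(x, dist=2, height=3.0)
    assert list(peaks) == [5.0, 4.0] and list(indices) == [1, 5]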
#%%
def _initial_checks(mediaTimes, locations, delayFrames, fps):
if fps < 0.1 or fps > 100:
warnings.warn('WeirdFPS: recorded at {} frames/second. An unusual frame rate'.format(fps))
if mediaTimes.size > 0 and mediaTimes[0] != 0:
raise ValueError('NoInitialMediaTime. The first media time must be 0')
if not isinstance(delayFrames, int):
delayFrames = int(delayFrames)
#%%
# Save the spare 0 media time location in case the corresponding
# stage-movement, frame difference occured after the video started.
spareZeroTimeLocation = [];
# If there's more than one initial media time, use the latest one.
if (mediaTimes.size > 1):
i = 1;
while (i < mediaTimes.size and mediaTimes[i] == 0):
i = i + 1;
if i > 1:
spareZeroTimeLocation = locations[i - 2,:];
#% Dump the extraneous 0 media times and locations.
mediaTimes = mediaTimes[i-1:]
locations = locations[i-1:]
#%%
return mediaTimes, locations, delayFrames, fps, spareZeroTimeLocation
def _norm_frame_diffs(frameDiffs):
#% Are there enough frames?
if np.sum(~np.isnan(frameDiffs)) < 2:
raise ValueError('InsufficientFrames. The video must have at least 2, non-dropped frames');
#% No frame difference means the frame was dropped.
frameDiffs[frameDiffs == 0] = np.nan;
#% Normalize the frame differences and shift them over one to align them
#% with the video frames.
frameDiffs /= np.nanmax(frameDiffs)
frameDiffs = np.insert(frameDiffs, 0 , frameDiffs[0])
return frameDiffs
#%%
#%%
def _init_search(frameDiffs, gOtsuThr, gSmallDiffs, gSmallThr,
mediaTimes, maxMoveFrames, fps):
#%%
#% The log file doesn't contain any stage movements.
if mediaTimes.size < 2:
warnings.warn('NoStageMovements. The stage never moves');
#% Are there any large frame-difference peaks?
if gOtsuThr >= gSmallThr:
_, indices = maxPeaksDistHeight(frameDiffs, maxMoveFrames-1, gOtsuThr);
warnings.warn('UnexpectedPeaks. There are {} large frame-difference ' \
'peaks even though the stage never moves'.format(indices.size));
return None
#% Does the Otsu threshold separate the 99% of the small frame differences
#% from the large ones?
if gSmallDiffs.size==0 or gOtsuThr < gSmallThr:
warnings.warn("NoGlobalOtsuThreshold. Using the Otsu method, as a whole, " \
"the frame differences don't appear to contain any distinguishably " \
"large peaks (corresponding to stage movements). Trying half of the " \
"maximum frame difference instead.")
#% Try half the maximum frame difference as a threshold to distinguish large peaks.
gOtsuThr = 0.5
gSmallDiffs, gSmallThr = _get_small_otsu(frameDiffs, gOtsuThr)
#% Does a threshold at half the maximum frame difference separate the
#% 99% of the small frame differences from the large ones?
if gSmallDiffs.size==0 or gOtsuThr < gSmallThr:
warnings.warn('NoGlobalThresholds. Cannot find a global threshold to ' \
'distinguish the large frame-difference peaks.');
gOtsuThr = np.nan;
gSmallThr = np.nan;
#%%
#% Pre-allocate memory.
frames = np.zeros(frameDiffs.shape); #% stage movement status for frames
    movesI = np.full((mediaTimes.size, 2), -100, dtype=int)
movesI[0,:] = 0;
#% Compute the search boundary for the first frame-difference peak.
maxMoveTime = maxMoveFrames / fps; #% maximum time a movement takes
timeOff = maxMoveTime; #% the current media time offset
peakI = 0; # the current stage movement peak's index
prevPeakI = 0; # the previous stage-movement peak's index
prevPeakEndI = 0; # the previous stage-movement peak's end index
startI = 0; # the start index for our search
    endI = 2 * maxMoveFrames - 1  # adjust for Python's 0-based indexing (vs MATLAB's 1-based)
endI = min(endI, frameDiffs.size-1); #% the end index for our search
searchDiffs = frameDiffs[startI:endI+1];
#% Is the Otsu threshold large enough?
otsuThr = graythreshmat(searchDiffs);
isOtsu = otsuThr > gOtsuThr; #% false if no global Otsu
if not isOtsu:
# Does the Otsu threshold separate the 99% of the small frame
# differences from the large ones? And, if there is a global small
# threshold, is the Otsu threshold larger?
smallDiffs, smallThr = _get_small_otsu(searchDiffs, otsuThr)
isOtsu = (smallDiffs.size > 0) & \
np.any(~np.isnan(smallDiffs)) & \
(np.isnan(gSmallThr) | (otsuThr > gSmallThr)) & \
(otsuThr >= smallThr)
# Does the global Otsu threshold pull out any peaks?
if not isOtsu and \
not np.isnan(gOtsuThr) and \
np.any(searchDiffs > gOtsuThr):
otsuThr = gOtsuThr;
isOtsu = True;
if isOtsu:
#% Do the frame differences begin with a stage movement?
indices, = np.where(searchDiffs > otsuThr);
firstPeakI = indices[0];
if firstPeakI < maxMoveFrames:
#% Find the largest frame-difference peak.
peakI = np.nanargmax(frameDiffs[:maxMoveFrames]);
prevPeakI = peakI;
#% Compute the media time offset.
timeOff = (peakI +1) / fps;
# Is there a still interval before the first stage movement?
if peakI > 0:
i = peakI - 1;
while i > 0:
if frameDiffs[i] < gSmallThr and frameDiffs[i - 1] < gSmallThr:
peakI = 0;
break
i -= 1
#% We reached the end.
endI = peakI + maxMoveFrames;
if endI >= frameDiffs.size-1:
prevPeakEndI = frameDiffs.size;
#% Find a temporary front end for a potential initial stage movement.
else:
searchDiffs = frameDiffs[peakI:endI+1];
# Does the search window contain multiple stage movements?
if not (np.isnan(gOtsuThr) or np.isnan(gSmallThr)):
foundMove = False;
for i in range(searchDiffs.size):
#% We found a still interval.
if not foundMove and searchDiffs[i] < gSmallThr:
foundMove = True;
# We found the end of the still interval, cut off the rest.
elif foundMove and searchDiffs[i] > gSmallThr:
searchDiffs = searchDiffs[0:(i - 1)]
break
# Find a temporary front end for a potential initial stage movement.
i = np.nanargmin(searchDiffs);
peakFrontEndI = peakI + i;
minDiff = searchDiffs[i]
# If the temporary front end's frame difference is small, try to push
# the front end backwards (closer to the stage movement).
if minDiff <= gSmallThr:
i = peakI
while i < peakFrontEndI:
if frameDiffs[i] <= gSmallThr:
peakFrontEndI = i;
break;
i += 1
#% If the temporary front end's frame difference is large, try to
#% push the front end forwards (further from the stage movement).
elif minDiff >= gOtsuThr or \
(minDiff > gSmallThr and \
peakFrontEndI < endI and \
np.all(np.isnan(frameDiffs[(peakFrontEndI + 1):endI]))):
peakFrontEndI = endI;
prevPeakEndI = peakFrontEndI;
#%%
return frames, movesI, prevPeakI, prevPeakEndI, maxMoveTime, timeOff
#%%
def _get_search_diff(frameDiffs, prevPeakEndI, mediaTimeOffI, maxMoveFrames):
startI = prevPeakEndI;
# Compute the search boundary for matching frame-difference peaks.
x1 = startI + 2 * abs(mediaTimeOffI - (startI+1))
x2 = max((startI+1), mediaTimeOffI) + maxMoveFrames - 1
endI = min(max(x1, x2), frameDiffs.size-1)
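    # Worked example (hypothetical numbers): startI=100, mediaTimeOffI=130,
    # maxMoveFrames=25 -> x1 = 100 + 2*29 = 158, x2 = 130 + 24 = 154, so the
    # window spans frames 100..min(158, last frame), i.e. it is roughly
    # centred on the offset media time.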
searchDiffs = frameDiffs[startI:endI+1];
return searchDiffs, startI, endI
#%%
def get_otsu_thresh(frameDiffs,
searchDiffs,
gOtsuThr,
gSmallThr,
prevOtsuThr,
prevSmallThr):
#% Is the Otsu threshold large enough?
otsuThr = graythreshmat(searchDiffs);
isOtsu = otsuThr > prevSmallThr or otsuThr > gOtsuThr;
if not isOtsu:
#% Does the Otsu threshold separate the 99% of the small frame
#% differences from the large ones?
if np.isnan(prevSmallThr) or otsuThr > prevSmallThr or otsuThr > gSmallThr:
smallDiffs, smallThr = _get_small_otsu(frameDiffs, otsuThr)
isOtsu = (len(smallDiffs)>0) & np.any(~np.isnan(smallDiffs)) & (otsuThr >= smallThr);
#% Try the global Otsu threshold or, if there is none, attempt to
#% use half the search window's maximum frame difference.
if not isOtsu:
#% Try using half the search window's maximum frame difference.
if np.isnan(gOtsuThr):
otsuThr = np.nanmax(searchDiffs) / 2;
#% Does the half-maximum threshold separate the 99% of the
#% small frame differences from the large ones?
smallDiffs, smallThr = _get_small_otsu(frameDiffs, otsuThr)
isOtsu = smallDiffs & np.any(~np.isnan(smallDiffs)) & (otsuThr >= smallThr);
#% Does the global Otsu threshold pull out any peaks?
elif np.any(searchDiffs > gOtsuThr):
otsuThr = gOtsuThr;
isOtsu = True;
#% Does the global Otsu threshold pull out any peaks?
elif np.any(searchDiffs > prevOtsuThr):
otsuThr = prevOtsuThr;
isOtsu = True;
return isOtsu, otsuThr
def _get_peak_indices(frameDiffs,
searchDiffs,
isOtsu,
otsuThr,
gOtsuThr,
gSmallThr,
prevOtsuThr,
prevSmallThr,
maxMoveFrames):
#% Match the media time stage movement to a peak.
if not isOtsu:
indices = [];
else:
#% Compute and set the global thresholds.
if np.isnan(gOtsuThr):
#% Use a small threshold at 99% of the small frame differences.
smallDiffs, smallThr = _get_small_otsu(frameDiffs, gOtsuThr)
#% Set the global thresholds.
if otsuThr >= smallThr:
gOtsuThr = otsuThr;
gSmallThr = smallThr;
#% Set the previous small threshold.
if np.isnan(prevOtsuThr):
prevOtsuThr = otsuThr;
prevSmallThr = smallThr;
#% Use the previous small threshold.
elif not np.isnan(prevSmallThr):
smallThr = prevSmallThr;
#% Compute the local thresholds.
else:
otsuThr = min(otsuThr, gOtsuThr);
smallThr = max(prevSmallThr, gSmallThr);
if smallThr > otsuThr:
smallThr = min(prevSmallThr, gSmallThr);
#% Does the search window contain multiple stage movements?
foundMove = False;
for j in range(searchDiffs.size):
#% We found a stage movement.
if not foundMove and searchDiffs[j] > otsuThr:
foundMove = True;
#% We found the end of the stage movement, cut off the rest.
elif foundMove and searchDiffs[j] < smallThr:
searchDiffs = searchDiffs[0:j];
break;
#% Find at least one distinguishably large peak.
_, indices = maxPeaksDistHeight(searchDiffs, maxMoveFrames, otsuThr);
return indices, prevOtsuThr, prevSmallThr
#%%
def findStageMovement(frameDiffs, mediaTimes, locations, delayFrames, fps):
'''
    %MODIFIED FROM SEGWORM AEJ. This help text is outdated; I'll modify it later. AEJ
%FINDSTAGEMOVEMENT Find stage movements in a worm experiment.
%
% The algorithm is as follows:
%
% 4. If there are no stage movements, we're done.
%
% 5. The log file sometimes contains more than one entry at 0 media time.
% These represent stage movements that may have completed before the video
% begins. Therefore, we don't use them but, instead, store them in case we
% find their stage movement in the video frame differences.
%
% 6. The frame differences need to be aligned to the video frames.
% Therefore, we copy the first one and shift the rest over so that the
% frame differences are one-to-one with the video frames. Note that video
% indexing starts at 0 while Matlab indexing begins at 1. Also note, due
% to differentiation, large frame differences that occur at the beginning
% of a stage movement now represent the first frame of movement. Large
% frame differences that occur at the end of a stage movement now represent
% the first non-movement frame.
%
% 7. Compute the global Otsu threshold for the frame-differences to
% distinguish stage-movement peaks. Then compute a global non-movement
% threshold by taking all values less than the Otsu, and computing 3
% standard deviations from the median (approximately 99% of the small
% values). Please note, stage movements ramp up/down to
% accelerate/decelerate to/from the target speed. Therefore, the values
% below the Otsu threshold are contaminated with stage movement
    % acceleration and deceleration. Fortunately, non-movement frames account
% for more than 50% of the frame differences. Therefore, to avoid the stage
% movement contamination, we use the median of the small values rather than
% their mean when computing the global non-movement (small) threshold. If
% this small threshold is greater than the Otsu, we've made a poor choice
% and ignore both thresholds. Otherwise, these 2 global thresholds serve as
% a backup to the local ones when distinguishing stage movements.
%
    % 8. Occasionally, computing the global Otsu threshold fails. This occurs
% when a long video has few stage movements. In this case, stage movements
% appear to be rare outliers and the Otsu method minimizes in-group
% variance by splitting the non-stage movement modality into two groups
% (most likely periods of worm activity and inactivity). Under these
% circumstances we attempt to use a global threshold at half the maximum
% frame-difference variance. As detailed above, we test this threshold to
% see whether it is sufficiently larger than 99% of the smaller movements.
%
% 9. When searching for stage movements, we use the same algorithm as the
% one above(see step 7), over a smaller well-defined, search window, to
% determine the local Otsu threshold. The local small threshold is computed
% slightly differently (we use the mean instead of the median -- see step
% 12 for an explanation). If the local Otsu threshold fails (it's smaller
% than 99% of the values below it and smaller than the global Otsu), we
% attempt to use the global one to see if it pulls out a peak.
%
% 10. A stage movement peak is defined as the largest value that exceeds
% the minimum of the global and local Otsu thresholds. To avoid a situation
% in which 2 stage movements occur within the same search window, we scan
% the window for the first value exceeding the Otsu threshold and, if any
% subsequent values drop below the minimum of global and local small
% thresholds, we cut off the remainder of the window and ignore it.
%
% 11. Once a stage-movement peak is identified, we search for a temporary
% back and front end to the movement. The stage movement must complete
% within one delay time window (see step 2). Therefore, we search for the
% minimum values, within one delay time window, before and after the peak.
% The locations of these minimum values are the temporary back and front
    % ends for the stage movement. If either value is below the small
% threshold, we may have overshot the movement and, therefore, attempt to
% find a location closer to the peak. Similarly, if either value is greater
% than the maximum of the global and local small thresholds and the
% remaining values till the end of the window are NaNs or, if either value
% is greater than the Otsu threshold, we may have undershot the movement
% and, therefore, attempt to find a location further from the peak.
%
% 12. Using one stage movement's temporary front end and the subsequent
% movement's temporary back end, we compute the small threshold. This
% interval is assumed to have no stage motion and, therefore, represents
% frame-difference variance from a non-movement interval. The local small
% threshold is defined as 3 deviations from the mean of this non-movement
% interval (99% confidence). With this small threshold, we start at the
% first peak and search forward for its real front end. Similarly, we start
% at the subsequent peak and search backward for its real back end.
%
% 13. Conservatively, the beginning and end of the video are treated as the
    % end and beginning of stage movements, respectively. We search for a
% temporary front end and a temporary back end, respectively, using the
% global small and Otsu thresholds.
%
% 14. After the final, logged stage motion is found in the frame
% differences, we look to see if there are any subsequent, extra peaks.
% An Otsu threshold is computed, as detailed earlier, using the interval
% spanning from the final stage movement's temporary front end till the
% final frame difference. If this threshold is unsuitable, we use the
% global Otsu threshold. If we find any extra peaks, the first peak's back
% end is located and its frame as well as the remainder of the frame
% differences (including any other extra peaks) are marked as a single
% large stage movement that extends till the end of the video. This is
% necessary since Worm Tracker users can terminate logging prior to
% terminating the video (e.g., this may occur automatically if the worm is
% lost).
%
% 15. To find a stage movement, we compute its offset media time. The first
% stage movement is offset by the delay time (see step 2). Subsequent media
% times are offset by the difference between the previous media time and
% its stage-movement peak. Therefore, each stage movement provides the
% offset for the next one. The longer the wait till the next stage
% movement, the less accurate this offset becomes. Therefore, we search for
% each stage movement using a window that begins at the last stage
% movement's temporary front end and ends at the offset media time plus
% this distance (a window with its center at the offset media time). If the
% window is too small (the offset media time is too close to the temporary
% front end of the previous stage movement), we extend its end to be the
% offset media time plus the delay time.
%
% 16. If a stage movement peak is absent, we attempt to shift the media
% times backward or forward, relative to the stage movement peaks,
% depending on whether the current peak is closer to the next or previous
% media time, respectively. When shifting the media times backward, we
% assume the first stage movement occurred prior to video recording and,
% therefore, throw away its media time and location. When shifting the
% media times forward, we look for a spare 0 media time and location (see
% step 5). If there are no spares, we swallow up all the frames prior to
% the end of the first stage movement and label them as an unusable
% movement that began prior to recording and bled into the beginning of the
% video.
%
% 17. If we find a stage-movement peak closer to the previous offset media
% time than its own supposed offset media time, we assume we have a
% misalignment and attempt to shift the media times forward relative to the
% stage movement peaks. There are some restrictions to this check since
% small-scale, frame jitter can misrepresent the reported media time.
%
% 18. The final logged stage motion may occur after the video ends and, as
% a result, may have no representation in the frame-difference variance.
% Therefore, for the last stage movement, we check our threshold (see step
% 10) to ensure that it separates 99% of the smaller values and, thereby,
% picks up stage movement rather than splitting the non-movement modality.
%
%
%
% FUNCTION [FRAMES INDICES LOCATIONS] = ...
% FINDSTAGEMOVEMENT(INFOFILE, LOGFILE, DIFFFILE, VERBOSE)
%
% FUNCTION [FRAMES INDICES LOCATIONS] = ...
% FINDSTAGEMOVEMENT(INFOFILE, LOGFILE, DIFFFILE, VERBOSE, GUIHANDLE)
%
% Input:
% infoFile - the XML file with the experiment information
% logFile - the CSV file with the stage locations
% diffFile - the MAT file with the video differentiation
% verbose - verbose mode 1 shows the results in a figure
% verbose mode 2 labels the stage movements in the figure
% guiHandle - the GUI handle to use when showing the results;
% if empty, the results are shown in a new figure
%
% Output:
% frames - a vector of frame status
% true = the frame contains stage movement
% false = the frame does NOT contain stage movement
% NaN = the original video frame was dropped
% Note: video frames are indexed from 0, Matlab indexes
% from 1, please adjust your calculations accordingly
% movesI - a 2-D matrix with, respectively, the start and end
% frame indices of stage movements
% locations - the location of the stage after each stage movement
%
% See also VIDEO2DIFF
%
%
% © Medical Research Council 2012
% You will not remove any copyright or other notices from the Software;
% you must reproduce all copyright notices and other proprietary
% notices on any copies of the Software.
'''
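    # Typical call (sketch; shapes inferred from the code below, not from the
    # original docs):
    #   frameDiffs - 1-D array of frame-difference variances, one per frame
    #   mediaTimes - 1-D array of logged media times in seconds
    #   locations  - (len(mediaTimes), 2) array of stage locations
    #   frames, movesI, locations = findStageMovement(
    #       frameDiffs, mediaTimes, locations, delayFrames, fps)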
#%%
mediaTimes, locations, delayFrames, fps, spareZeroTimeLocation = \
_initial_checks(mediaTimes, locations, delayFrames, fps)
frameDiffs = _norm_frame_diffs(frameDiffs)
# Compute the global Otsu and small frame-difference thresholds.
# Note 1: we use the Otsu to locate frame-difference peaks corresponding to
# stage movement.
# Note 2: we use the small threshold to locate the boundaries between
# frame differences corresponding to stage movement and frame differences
# corresponding to a non-movement interval.
gOtsuThr = graythreshmat(frameDiffs);
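    # graythreshmat is expected to mirror MATLAB's graythresh (Otsu's method),
    # returning the threshold that minimises the intra-class variance of the
    # normalized frame differences.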
gSmallDiffs, gSmallThr = _get_small_otsu(frameDiffs, gOtsuThr)
maxMoveFrames = delayFrames + 1; #% maximum frames a movement takes
var_init = \
_init_search(frameDiffs, gOtsuThr, gSmallDiffs, gSmallThr,
mediaTimes, maxMoveFrames, fps)
if var_init is not None:
frames, movesI, prevPeakI, prevPeakEndI, maxMoveTime, timeOff = var_init
else:
#return empty vectors if there was no movement
if len(locations) == 0:
locations = np.zeros((1,2));
        return np.zeros(frameDiffs.size, int), np.zeros((1,2), int), locations
#% Match the media time-stage movements to the frame-difference peaks.
mediaTimeOff = 0.; #% the offset media time
    prevOtsuThr = gOtsuThr; #% the previous Otsu threshold
prevSmallThr = gSmallThr; #% the previous small threshold
isShifted = False; #% have we shifted the data to try another alignment?
#%%
#AEJ I am using a while instead of a for to be able to go back
i = 0
while i < mediaTimes.size-1:
i += 1
#%%
# Compute the offset media time.
prevMediaTimeOff = mediaTimeOff;
mediaTimeOff = mediaTimes[i] + timeOff;
mediaTimeOffI = int(round(mediaTimeOff * fps));
inputs_args = (frameDiffs, prevPeakEndI, mediaTimeOffI, maxMoveFrames)
searchDiffs, startI, endI = _get_search_diff(*inputs_args)
inputs_args = (frameDiffs, searchDiffs, gOtsuThr, gSmallThr, prevOtsuThr, prevSmallThr)
isOtsu, otsuThr = get_otsu_thresh(*inputs_args)
#% If we're at the end, make sure we're using an appropriate threshold.
if i == mediaTimes.size-1:
#% Does the threshold separate the 99% of the small frame
#% differences from the large ones?
smallDiffs, smallThr = _get_small_otsu(searchDiffs, otsuThr)
isOtsu = (smallDiffs.size>0) & np.any(~np.isnan(smallDiffs)) & (otsuThr >= smallThr);
inputs_args = (frameDiffs, searchDiffs, isOtsu, otsuThr,
gOtsuThr, gSmallThr, prevOtsuThr, prevSmallThr, maxMoveFrames)
indices, prevOtsuThr, prevSmallThr = _get_peak_indices(*inputs_args)
#%%
#% We can't find any distinguishably large peaks.
peakI = np.nan;
if len(indices) == 0:
#% Does the last stage movement occur after the video ends?
if i == mediaTimes.size-1 and endI >= frameDiffs.size-1:
#% Does the last offset media time occur before the video ends?
if mediaTimeOff < (frameDiffs.size - 1) / fps:
                    dd = 'LastPeak ' \
                    'The search window for the last stage movement ({}), ' \
                    'from media time {:.3f} seconds (frame {}), offset to {:.3} ' \
                    'seconds, spanning frame {} to {:.3} seconds ' \
                    '(frame {}), does not have any distinguishably large peaks. '\
                    'The peak probably occurred after the video ended and, ' \
                    'therefore, the last stage movement will be ignored.'
dd = dd.format(i,
mediaTimes[i],
round(mediaTimes[i] * fps),
mediaTimeOff,
startI - 1,
(endI - 1) / fps,
endI - 1
)
warnings.warn(dd)
# Ignore the last stage movement.
mediaTimes = mediaTimes[:-1]
locations = locations[:-1]
movesI = movesI[:-1]
break;
#% Report the warning.
            dd = 'NoPeaks ' \
            'The search window for stage movement ({}), ' \
            'from media time {:.3f} seconds (frame {}), offset to {:.3} ' \
            'seconds, spanning frame {} to {:.3} seconds ' \
            '(frame {}), does not have any distinguishably large peaks.'
dd = dd.format(i+1,
mediaTimes[i],
round(mediaTimes[i] * fps),
mediaTimeOff,
startI - 1,
(endI - 1) / fps,
endI - 1
)
warnings.warn(dd)
# Use the first peak.
else:
peakI = indices[0] + startI
#% Is the current offset media time further from the frame-
#% difference stage movement than the previous offset media time?
peakTime = peakI / fps;
timeDiff = mediaTimeOff - peakTime;
prevTimeDiff = prevMediaTimeOff - peakTime;
#%%
if (i > 1) and \
((abs(prevTimeDiff) > maxMoveTime) or \
(abs(timeDiff) > maxMoveTime)) and \
(mediaTimeOff > prevMediaTimeOff) and \
(abs(timeDiff / prevTimeDiff) > 2):
#% Report the warning.
dd = ['FarPeak',
'Stage movement ({})'.format(i+1),
'(at media time {:.3f} seconds)'.format(mediaTimes[i]),
'offset to {:.3} seconds,'.format(mediaTimeOff),
'has its frame-difference peak at {:.3} seconds (frame {}),'.format(peakTime, peakI - 1),
'an error of {:.3} seconds.'.format(timeDiff),
'The previous media time, offset to {:.3} seconds,'.format(prevMediaTimeOff),
              'is closer with an error of only {:.3} seconds'.format(prevTimeDiff),
'(less than half the current media time error). ',
'Therefore, we probably have either a false ',
'peak, a shifted misalignment, or an abnormally long delay.'
]
dd = ' '.join(dd)
warnings.warn(dd)
#% Ignore this wrong peak.
peakI = np.nan;
#%%
#% Can we realign (shift) the stage movements and media times?
if np.isnan(peakI):
lastMoveTime = movesI[i - 1, 0] / fps;
isShiftable = True;
if isShifted:
isShiftable = False;
#% Shift the media times forward.
elif i > 1 and \
abs(mediaTimes[i - 2] - lastMoveTime) < abs(mediaTimes[i] - lastMoveTime):
#% Would a time shift align the media times with the
#% frame-difference stage movements?
for j in range(1, i - 1):
#% Compute the error from the predicted time.
offset = movesI[j,0] / fps - mediaTimes[j - 1];
predictedTime = mediaTimes[j] + offset;
moveTime = movesI[j + 1,0] / fps;
timeDiff = abs(predictedTime - moveTime);
#% Compute the interval between the media times.
mediaDiff = mediaTimes[j] - mediaTimes[j - 1];
#% Is the error in the predicted time greater than
#% the interval between media times?
if timeDiff > mediaDiff:
isShiftable = False;
break;
#% Time cannot be shifted due to misalignment between the media
#% times and frame-difference stage movements.
if not isShiftable:
dd = 'TimeShiftAlignment ' \
'Time cannot be shifted forward because the' \
' frame-difference stage movement at {:.3}'\
' seconds would have a'\
' predicted time of {:.3}'\
' seconds (an error of {:.3}' \
' seconds) whereas the interval between its media' \
' time and the previous media time is only {:.3}' \
' seconds and,' \
' therefore, smaller than the error from shifting.'
dd = dd.format(moveTime,
predictedTime,
timeDiff,
mediaDiff
)
warnings.warn(dd);
#% Shift everything forward using the spare 0 media time location.
elif len(spareZeroTimeLocation)>0:
mediaTimes = np.insert(mediaTimes, 0,0)
locations = np.vstack((spareZeroTimeLocation, locations))
                    movesI = np.vstack((movesI, np.zeros((1,2), int)))
timeOff = (prevPeakI+1) / fps - mediaTimes[i - 1];
#% Redo the match.
i = i - 1;
#% Warn about the time shift.
warnings.warn('TimeShiftForward : ' \
'Shifting the media times forward relative to the ' \
'frame-difference stage movements (using a spare ' \
'location at media time 0:0:0.000) in an attempt ' \
'to realign them');
#% Shift everything forward by assuming a missing 0 media time
                #% location and swallowing earlier frames into the first
#% stage movement.
else:
frames[:movesI[1,0]] = True;
movesI[:(i - 1),:] = movesI[1:i,:];
movesI[0,0] = 0;
timeOff = (prevPeakI+1) / fps - mediaTimes[i - 1];
#% Redo the match.
i = i - 2;
#% Warn about the time shift.
warnings.warn('TimeShiftForward : ' \
'Shifting the media times forward relative to the ' \
'frame-difference stage movements (by swallowing ' \
'earlier frames into the first stage movement) in ' \
'an attempt to realign them');
# Shift the media times backward.
else:
#% Would a time shift align the media times with the
#% frame-difference stage movements?
for j in range(2, i - 1):
#% Compute the error from the predicted time.
offset = movesI[j - 1,0] / fps - mediaTimes[j];
predictedTime = mediaTimes[j + 1] + offset;
moveTime = movesI[j,0] / fps;
timeDiff = np.abs(predictedTime - moveTime);
#% Compute the interval between the media times.
mediaDiff = mediaTimes[j + 1] - mediaTimes[j];
#% Is the error in the predicted time greater than the
#% interval between media times?
if timeDiff > mediaDiff:
isShiftable = False;
break;
#% Time cannot be shifted due to misalignment between the media
#% times and frame-difference stage movements.
if not isShiftable:
dd = ['TimeShiftAlignment',
'Time cannot be shifted backward because the',
'frame-difference stage movement at {:.3} seconds'.format(moveTime),
'would have a predicted time of {:.3} seconds'.format(predictedTime),
'seconds (an error of {:.3} seconds)'.format(timeDiff),
'whereas the interval between its media',
'time and the previous one is only {:.3} seconds'.format(mediaDiff),
'and, therefore, smaller than the error from shifting'
]
warnings.warn(' '.join(dd))
#% Shift everything backward.
else:
mediaTimes = mediaTimes[1:];
locations = locations[1:];
movesI = movesI[:-1];
timeOff = (prevPeakI+1) / fps - mediaTimes[i - 1];
#% Redo the match.
i = i - 1;
#% Warn about the time shift.
                    dd = 'TimeShiftBackward : ' \
                         'Shifting the media times backward relative to ' \
                         'the frame-difference stage movements in an ' \
                         'attempt to realign them'
                    warnings.warn(dd);
#% Record the shift and continue.
if isShiftable:
isShifted = True;
continue;
#% We cannot realign (shift) the stage movements and media times.
else:
#% Compute the stage movement sizes.
movesI = movesI[:i,:]
moveSizes = np.zeros((movesI.shape[0],1));
            for j in range(1, movesI.shape[0] - 1):
moveDiffs = frameDiffs[movesI[j,0]:movesI[j,1]];
moveSizes[j] = np.nansum(moveDiffs)
#% Compute the statistics for stage movement sizes.
meanMoveSize = np.nanmean(moveSizes[1:]);
stdMoveSize = _matlab_std(moveSizes[1:]);
smallMoveThr = meanMoveSize - 2.5 * stdMoveSize;
largeMoveThr = meanMoveSize + 2.5 * stdMoveSize;
#% Are any of the stage movements considerably small or large?
for j in range(1, movesI.shape[0]-1):
if moveSizes[j] < smallMoveThr:
#% Is the stage movement small?
before_f = movesI[j,0] - 1
after_f = movesI[j,1] - 1
#% Report the warning.
dd = ['ShortMove',
'Stage movement {}'.format(j),
'at media time {:.3}'.format(mediaTimes[j]),
'seconds (frame {}),'.format(int(round(mediaTimes[j] * fps))),
'spanning from {:.3} seconds (frame {})'.format(before_f / fps, before_f),
'to {:.3} seconds (frame {}),'.format(after_f/fps, after_f),
'is considerably small'
]
                    warnings.warn(' '.join(dd))
elif moveSizes[j] > largeMoveThr:
#% Is the stage movement large?
before_f = movesI[j,0] - 1
after_f = movesI[j,1] - 1
dd = ['LongMove',
'Stage movement {}'.format(j),
'at media time {:.3}'.format(mediaTimes[j]),
'seconds (frame {}),'.format(int(round(mediaTimes[j] * fps))),
'spanning from {:.3} seconds (frame {})'.format(before_f / fps, before_f),
'to {:.3} seconds (frame {}),'.format(after_f/fps, after_f),
'is considerably large'
]
                    warnings.warn(' '.join(dd))
#% Construct the report.
msg = 'NoShift : We cannot find a matching peak nor shift the time ' \
'for stage movement {} at media time {:.3} seconds (frame {}).' \
.format(i+1,
mediaTimes[i],
int(round(mediaTimes[i] * fps))
)
raise(ValueError(msg));
if np.isnan(peakI):
continue
#% Find a temporary back end for this stage movement.
#% Note: this peak may serve as its own temporary back end.
startI = max(peakI - maxMoveFrames, prevPeakEndI);
dd = frameDiffs[startI:peakI+1][::-1]
j = np.nanargmin(dd)
minDiff = dd[j]
peakBackEndI = peakI - j; #% we flipped to choose the last min
j = peakI - 1;
#% If the temporary back end's frame difference is small, try to push
#% the back end forwards (closer to the stage movement).
if minDiff <= prevSmallThr:
while j > startI:
if frameDiffs[j] <= prevSmallThr:
peakBackEndI = j;
break;
j -= 1;
#% If the temporary back end's frame difference is large, try to push
#% the back end backwards (further from the stage movement).
elif minDiff >= min(otsuThr, gOtsuThr) or \
(minDiff > gSmallThr and peakBackEndI > startI and \
np.all(np.isnan(frameDiffs[startI:(peakBackEndI - 1)]))):
peakBackEndI = startI;
#% Compute a threshold for stage movement.
smallDiffs = frameDiffs[prevPeakEndI:peakBackEndI+1];
smallThr = np.nanmean(smallDiffs) + 3*_matlab_std(smallDiffs);
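        # mean + 3*std covers ~99.7% of normally distributed non-movement
        # differences, i.e. the "99% of the small values" bound described in
        # the docstring (step 12).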
if np.isnan(smallThr):
smallThr = prevSmallThr;
#% Find the front end for the previous stage movement.
#set the range using the previous peak as range
j = prevPeakI;
while j < peakI and \
(np.isnan(frameDiffs[j]) or \
frameDiffs[j] > smallThr) and \
(np.isnan(frameDiffs[j + 1]) or \
frameDiffs[j + 1] > smallThr):
j = j + 1;
movesI[i - 1, 1] = j
prevPeakEndI = j-1;
#%%
#% Mark the previous stage movement.
if movesI[i - 1,0] < 1:
frames[:movesI[i - 1,1]] = True;
else:
frames[movesI[i - 1,0]:movesI[i - 1,1]] = True;
#% Find the back end for this stage movement.
j = peakI;
while j > prevPeakEndI and \
(np.isnan(frameDiffs[j]) or frameDiffs[j] > smallThr):
j -= 1;
movesI[i, 0] = j + 1;
#% Is the non-movement frame-differences threshold too large?
if smallThr <= otsuThr and (np.isnan(gOtsuThr) or smallThr <= gOtsuThr):
prevOtsuThr = otsuThr;
prevSmallThr = smallThr;
else:
before_f = movesI[i - 1,1] - 1
after_f = movesI[i - 1,0] - 1
dd = ['LargeNonMovementThreshold',
'The non-movement window between stage movement {}'.format(i-1),
'and stage movement {}'.format(i),
'from {:.3} (frame {})'.format(before_f / fps, before_f),
'to {:.3} (frame {})'.format(before_f / fps, after_f),
'contains considerably large frame-difference variance'
]
warnings.warn(' '.join(dd))
#% Compute the media time offset.
timeOff = peakTime - mediaTimes[i];
#% We reached the end.
endI = peakI + maxMoveFrames;
if endI >= frameDiffs.size:
peakFrontEndI = frameDiffs.size-1;
#% Find a temporary front end for this stage movement.
else:
dd = frameDiffs[peakI+1:endI+1]
if not np.all(np.isnan(dd)):
j = np.nanargmin(dd)
minDiff = dd[j]
peakFrontEndI = peakI + j + 1;
#% If the temporary front end's frame difference is large, try to
#% push the front end forwards (further from the stage movement).
if (minDiff >= min(otsuThr, gOtsuThr)) or \
(minDiff > max(smallThr, gSmallThr) and \
(peakFrontEndI < endI) and \
np.all(np.isnan(frameDiffs[(peakFrontEndI + 1):endI+1]))):
peakFrontEndI = endI
#% Try to push the temporary front end backwards (closer to the stage
#% movement).
j = peakI + 1;
while j < peakFrontEndI:
if frameDiffs[j] <= smallThr:
peakFrontEndI = j;
break;
j = j + 1;
#% Advance.
prevPeakI = peakI;
prevPeakEndI = peakFrontEndI;
#% Do the frame differences end with a stage movement?
if prevPeakEndI > frameDiffs.size:
movesI[-1, 1] = frameDiffs.size;
frames[movesI[-1,0]:] = True;
        movesI = np.vstack((movesI, np.full((1,2), frameDiffs.size+1)))
#% Find the front end for the last stage movement.
else:
#% Is the Otsu threshold large enough?
searchDiffs = frameDiffs[prevPeakEndI:];
otsuThr = graythreshmat(searchDiffs);
isOtsu = otsuThr > gOtsuThr; #% false if no global Otsu
if not isOtsu:
#% Does the Otsu threshold separate the 99% of the small frame
#% differences from the large ones? And, if there is a global small
#% threshold, is the Otsu threshold larger?
smallDiffs, smallThr = _get_small_otsu(frameDiffs, gOtsuThr)
isOtsu = (smallDiffs.size>0) & \
np.any(~np.isnan(smallDiffs)) & \
(otsuThr >= smallThr);
isOtsu = isOtsu & (np.isnan(gSmallThr) | (otsuThr > gSmallThr))
#% Does the global Otsu threshold pull out any peaks?
if not isOtsu:
if not np.isnan(gOtsuThr) and (np.sum(searchDiffs > gOtsuThr) > 1):
otsuThr = gOtsuThr;
isOtsu = True;
#% Are there any large frame difference past the last stage movement?
isExtraPeaks = False;
if not isOtsu:
peakI = frameDiffs.size;
peakBackEndI = frameDiffs.size-1
#% There are too many large frame-difference peaks.
else:
_, indices = maxPeaksDistHeight(searchDiffs, maxMoveFrames, otsuThr);
isExtraPeaks = len(indices)>0;
#% Find the first large peak past the last stage movement.
i = prevPeakEndI;
while (i < frameDiffs.size-1) and \
(np.isnan(frameDiffs[i]) or (frameDiffs[i] < otsuThr)):
i = i + 1;
peakI = i;
#% Find a temporary back end for this large peak.
#% Note: this peak may serve as its own temporary back end.
startI = max(peakI - maxMoveFrames, prevPeakEndI);
dd = frameDiffs[startI:peakI+1][::-1]
i = np.nanargmin(dd)
minDiff = dd[i]
peakBackEndI = peakI - i + 1; #% we flipped to choose the last min
#% If the temporary back end's frame difference is small, try to
#% push the back end forwards (closer to the stage movement).
if minDiff <= prevSmallThr:
i = peakI - 1;
while i > startI:
if frameDiffs[i] <= prevSmallThr:
peakBackEndI = i;
break;
i = i - 1;
#% If the temporary back end's frame difference is large, try to
#% push the back end backwards (further from the stage movement).
elif minDiff >= min(otsuThr, gOtsuThr) or \
((minDiff > gSmallThr) and (peakBackEndI > startI) and \
np.all(np.isnan(frameDiffs[startI:(peakBackEndI - 1)]))):
peakBackEndI = startI;
#% Compute a threshold for stage movement.
smallDiffs = frameDiffs[prevPeakEndI:peakBackEndI+1];
smallThr = np.nanmean(smallDiffs) + 3 * np.nanstd(smallDiffs, ddof=1);
if np.isnan(smallThr):
smallThr = prevSmallThr;
#% Find the front end for the last logged stage movement.
i = prevPeakI;
while (i < peakI) and (i < frameDiffs.size-1) and \
(np.isnan(frameDiffs[i]) or (frameDiffs[i] > smallThr)) and \
(np.isnan(frameDiffs[i + 1]) or (frameDiffs[i + 1] > smallThr)):
i = i + 1;
movesI[-1,1] = i;
prevPeakEndI = i-1;
#% Mark the last logged stage movement.
if movesI.shape[0] == 1:
frames[:movesI[-1, 1]] = True
else:
frames[movesI[-1,0]:movesI[-1,1]] = True
#% Are there any large frame-difference peaks after the last logged
#% stage movement?
if isExtraPeaks:
pass
#warning('findStageMovement:TooManyPeaks', ...
# ['There are, approximately, ' num2str(length(indices)) ...
# ' large frame-difference peaks after the last stage' ...
# ' movement ends at ' num2str((movesI(end,2) - 1)/ fps, '%.3f') ...
# ' seconds (frame ' num2str(movesI(end,2) - 1) ')']);
#% Find the back end for logged stage movements.
i = peakI - 1;
while (i > prevPeakEndI) and (np.isnan(frameDiffs[i]) or \
(frameDiffs[i] > smallThr)):
i = i - 1;
movesI = np.vstack((movesI, (i+1, frameDiffs.size+1)))
frames[movesI[-1,0]:] = True;
#% Are any of the stage movements considerably small or large?
if isExtraPeaks:
#% Compute the stage movement sizes.
movesI = movesI[:i, :]
moveSizes = np.zeros((movesI.shape[0],1));
for j in range(1, movesI.shape[0]-1):
moveDiffs = frameDiffs[movesI[j,0]:movesI[j,1]];
moveSizes[j] = np.nansum(moveDiffs);
#% Compute the statistics for stage movement sizes.
meanMoveSize = np.nanmean(moveSizes[1:]);
stdMoveSize = np.nanstd(moveSizes[1:], ddof=1);
smallMoveThr = meanMoveSize - 2.5 * stdMoveSize;
largeMoveThr = meanMoveSize + 2.5 * stdMoveSize;
#% Are any of the stage movements considerably small or large?
for i in range(1, movesI.shape[0]-1):
#% Is the stage movement small?
if moveSizes[i] < smallMoveThr:
before_f = movesI[i,0] - 1
after_f = movesI[i,1] - 1
#% Report the warning.
dd = ['ShortMove',
'Stage movement {}'.format(i),
'at media time {:.3}'.format(mediaTimes[i]),
'seconds (frame {}),'.format(int(round(mediaTimes[i] * fps))),
'spanning from {:.3} seconds (frame {})'.format(before_f / fps, before_f),
'to {:.3} seconds (frame {}),'.format(after_f/fps, after_f),
'is considerably small'
]
warnings.warn(' '.join(dd))
#% Is the stage movement large?
elif moveSizes[i] > largeMoveThr:
before_f = movesI[i,0] - 1
after_f = movesI[i,1] - 1
#% Report the warning.
dd = ['LongMove',
'Stage movement {}'.format(i),
'at media time {:.3}'.format(mediaTimes[i]),
'seconds (frame {}),'.format(int(round(mediaTimes[i] * fps))),
'spanning from {:.3} seconds (frame {})'.format(before_f / fps, before_f),
'to {:.3} seconds (frame {}),'.format(after_f/fps, after_f),
'is considerably large'
]
warnings.warn(' '.join(dd))
return frames, movesI, locations
#%%
def shift2video_ref(is_stage_move, movesI, stage_locations, video_timestamp_ind):
stage_vec = np.full((is_stage_move.size,2), np.nan);
if len(movesI) <= 1 and np.all(movesI==0):
#%there was no movements
stage_vec[:,0] = stage_locations[:, 0];
stage_vec[:,1] = stage_locations[:, 1];
else:
#%convert output into a vector that can be added to the skeletons file to obtain the real worm displacements
for kk in range(stage_locations.shape[0]):
bot = max(0, movesI[kk,1]);
top = min(is_stage_move.size, movesI[kk+1,0]);
stage_vec[bot:top, 0] = stage_locations[kk,0];
stage_vec[bot:top, 1] = stage_locations[kk,1];
    #%the nan values must match the expected video motions
#assert(all(isnan(stage_vec(:,1)) == is_stage_move))
# prepare vectors to save into the hdf5 file.
#%Go back to the original movie indexing. I do not want to include the missing frames at this point.
is_stage_move_d = is_stage_move[video_timestamp_ind].astype(np.int8);
stage_vec_d = stage_vec[video_timestamp_ind, :];
return stage_vec_d, is_stage_move_d | mit | 6,729,585,383,931,663,000 | 43.222222 | 116 | 0.551167 | false |
fsxfreak/esys-pbi | src/pupil/pupil_src/shared_modules/frame_publisher.py | 2 | 2998 | '''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
from plugin import Plugin
from pyglui import ui
import numpy as np
class Frame_Publisher(Plugin):
def __init__(self,g_pool,format='jpeg'):
super().__init__(g_pool)
self._format = format
def init_gui(self):
help_str = "Publishes frame data in different formats under the topic \"frame.world\"."
self.menu = ui.Growing_Menu('Frame Publisher')
self.menu.append(ui.Button('Close',self.close))
self.menu.append(ui.Info_Text(help_str))
self.menu.append(ui.Selector('format',self,selection=["jpeg","yuv","bgr","gray"], labels=["JPEG", "YUV", "BGR", "Gray Image"],label='Format'))
self.g_pool.sidebar.append(self.menu)
def update(self,frame=None,events={}):
if frame and frame.jpeg_buffer:
if self.format == "jpeg":
data = frame.jpeg_buffer
elif self.format == "yuv":
data = frame.yuv_buffer
elif self.format == "bgr":
data = frame.bgr
elif self.format == "gray":
data = frame.gray
# Create serializable object.
# Not necessary if __raw_data__ key is used.
# blob = memoryview(np.asarray(data).data)
blob = data
events['frame.world'] = [{
'topic':'frame',
'width': frame.width,
'height': frame.height,
'index': frame.index,
'timestamp': frame.timestamp,
'format': self.format,
'__raw_data__': [blob]
}]
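            # A subscriber can rebuild the image from the raw payload, e.g.
            # (sketch, assuming a zmq/msgpack receiver as in the pupil-helpers
            # examples; 'msg' is the deserialized notification):
            #   img = np.frombuffer(msg['__raw_data__'][0], dtype=np.uint8)
            #   img.shape = (msg['height'], msg['width'], 3)  # 'bgr' format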
def on_notify(self,notification):
"""Publishes frame data in several formats
Reacts to notifications:
``eye_process.started``: Re-emits ``frame_publishing.started``
Emits notifications:
``frame_publishing.started``: Frame publishing started
``frame_publishing.stopped``: Frame publishing stopped
"""
if notification['subject'].startswith('eye_process.started'):
            # re-assigning invokes the format setter, which (re-)emits the
            # 'frame_publishing.started' notification for the new eye process
            self.format = self.format
def get_init_dict(self):
return {'format':self.format}
def close(self):
self.alive = False
def cleanup(self):
self.notify_all({'subject':'frame_publishing.stopped'})
if self.menu:
self.g_pool.sidebar.remove(self.menu)
self.menu = None
@property
def format(self):
return self._format
@format.setter
def format(self,value):
self._format = value
self.notify_all({'subject':'frame_publishing.started','format':value}) | mit | 2,254,809,564,954,330,000 | 33.079545 | 150 | 0.543362 | false |
dmitru/pines | pines/trees.py | 1 | 8172 | # coding=utf-8
import numpy as np
from copy import deepcopy
class BinaryDecisionTreeSplit(object):
def __init__(self, feature_id, value):
self.feature_id = feature_id
self.value = value
class BinaryDecisionTree(object):
"""
Implements a binary decision tree with array-based representation.
This class itself doesn't contain logic for selection of best splits, etc;
instead, it receives DecisionTreeSplit that describe splits and updates the tree accordingly.
"""
def __init__(self, n_features):
"""
:param n_features: number of features in dataset. Features have 0-based indices
"""
self._capacity = 0
self._n_features = n_features
self._is_leaf = np.zeros(0, dtype='bool')
self._is_node = np.zeros(0, dtype='bool')
self._leaf_values = np.zeros(0)
self._leaf_functions = []
self._leaf_n_samples = np.zeros(0)
self._splits = []
self._capacity = 0
self._reallocate_if_needed(required_capacity=1)
self._init_root()
def _reallocate_if_needed(self, required_capacity):
if self._capacity <= required_capacity:
self._is_leaf.resize(required_capacity)
self._is_node.resize(required_capacity)
self._leaf_values.resize(required_capacity)
self._leaf_functions = self._grow_list(self._leaf_functions, required_capacity)
self._leaf_n_samples.resize(required_capacity)
self._splits = self._grow_list(self._splits, required_capacity)
self._capacity = required_capacity
def _init_root(self):
self._is_leaf[0] = True
self._is_node[0] = True
self._latest_used_node_id = 0
def num_of_leaves(self):
return np.sum(self._is_leaf[:self._latest_used_node_id + 1])
def num_of_nodes(self):
return self._latest_used_node_id
def is_leaf(self, node_id):
assert node_id >= 0 and node_id <= self._latest_used_node_id
return self._is_leaf[node_id]
def leaf_mask(self):
return self._is_leaf[:self._latest_used_node_id + 1]
def __str__(self):
def helper(cur_node_id, padding='', is_last_leaf_on_level=True):
if cur_node_id > self._latest_used_node_id or not self._is_node[cur_node_id]:
return ''
if self._is_leaf[cur_node_id]:
node_str = '{}: {:.2f} (n={})'.format(
cur_node_id, self._leaf_values[cur_node_id],
int(self._leaf_n_samples[cur_node_id]))
else:
node_str = '{}: [x[{}] < {:.2f}]? (n={})'.format(
cur_node_id,
self._splits[cur_node_id].feature_id,
self._splits[cur_node_id].value,
int(self._leaf_n_samples[cur_node_id])
)
result = padding + ("└── " if is_last_leaf_on_level else "├── ") + node_str + '\n'
if is_last_leaf_on_level:
new_padding = padding + ' '
else:
new_padding = padding + '| '
result += helper(self.left_child(cur_node_id), new_padding, False)
result += helper(self.right_child(cur_node_id), new_padding, True)
return result
return helper(0)
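    # Array-backed binary-tree layout (same as binary heaps): the children of
    # node i live at indices 2*i + 1 and 2*i + 2, so parent/child navigation
    # needs no pointers.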
def left_child(self, node_id):
return (node_id + 1) * 2 - 1
def right_child(self, node_id):
return (node_id + 1) * 2
def leaves(self):
return np.where(self._is_leaf == True)[0]
def split_node(self, node_id, split):
"""
Modifies the tree, applying the specified node split.
The node that is being splitted must be a leaf.
After the split, the number of leaves increases by one.
        :param node_id: id of the leaf node to split
        :param split: BinaryDecisionTreeSplit, describes the split to perform
"""
assert node_id >= 0 and node_id <= self._latest_used_node_id
assert split.feature_id >= 0 and split.feature_id < self._n_features
assert self.is_leaf(node_id) == True
left_child_id = self.left_child(node_id)
right_child_id = self.right_child(node_id)
if right_child_id >= self._capacity:
self._reallocate_if_needed(2 * self._capacity + 1)
self._splits[node_id] = deepcopy(split)
self._is_leaf[node_id] = False
self._is_node[left_child_id] = True
self._is_node[right_child_id] = True
self._is_leaf[left_child_id] = True
self._is_leaf[right_child_id] = True
self._latest_used_node_id = max(self._latest_used_node_id, right_child_id)
def predict(self, X):
"""
        :param X: numpy 2d array, instance-features matrix
        :return: numpy 1d array of predicted values, one per instance
"""
def predict_one(x):
current_node = self.root()
while not self.is_leaf(current_node):
current_split = self._splits[current_node]
if x[current_split.feature_id] < current_split.value:
current_node = self.left_child(current_node)
else:
current_node = self.right_child(current_node)
if self._leaf_functions[current_node] is not None:
func, args = self._leaf_functions[current_node]
return func(args)
return self._leaf_values[current_node]
sample_size, features_count = X.shape
assert features_count == self._n_features
result = np.zeros(sample_size)
for i in range(sample_size):
x = X[i]
result[i] = predict_one(x)
return result
def apply(self, X):
"""
Args:
X: numpy 2d array
Instance-features matrix
Returns: numpy int array
Array of leaf indices, corresponding to classified instances
"""
def apply_one(x):
current_node = self.root()
while not self.is_leaf(current_node):
current_split = self._splits[current_node]
if x[current_split.feature_id] < current_split.value:
current_node = self.left_child(current_node)
else:
current_node = self.right_child(current_node)
return current_node
sample_size, features_count = X.shape
assert features_count == self._n_features
result = np.zeros(sample_size)
for i in range(sample_size):
x = X[i]
result[i] = apply_one(x)
return result
def root(self):
"""
:return: Id of the root node
"""
return 0
def depth(self, node_id):
assert node_id >= 0 and node_id <= self._latest_used_node_id
return np.floor(np.log2(node_id + 1)) + 1
def nodes_at_level(self, level, kind='all'):
"""
Args:
level: Depth level in the tree, starting from 1 for the root node.
kind: 'all', 'internal_nodes' or 'leaves'
Returns:
List of node ids at the specified level.
"""
assert kind in ['all', 'internal_nodes', 'leaves']
result = []
for node_id in range(2 ** (level - 1) - 1, min(2 ** level - 1, self._latest_used_node_id + 1)):
if kind == 'all':
result.append(node_id)
elif kind == 'internal_nodes':
if self._is_node[node_id]:
result.append(node_id)
else:
if self._is_leaf[node_id]:
result.append(node_id)
return result
def _grow_list(self, list, required_capacity, fill_value=None):
"""
Returns a list that is at least as long as required_capacity, filling the missing elements with
fill_value if needed.
If the length of the list is already greater than required_capacity, returns unmodified list.
:param list:
:param required_capacity:
:param fill_value:
:return:
"""
if len(list) >= required_capacity:
return list
return list + [fill_value for _ in range(required_capacity - len(list))]
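# Minimal usage sketch (hypothetical data): grow a depth-1 tree ("stump") and
# predict. Leaf values are written directly here because split selection lives
# outside this class.
#   tree = BinaryDecisionTree(n_features=2)
#   tree.split_node(tree.root(), BinaryDecisionTreeSplit(feature_id=0, value=0.5))
#   tree._leaf_values[tree.left_child(tree.root())] = -1.0
#   tree._leaf_values[tree.right_child(tree.root())] = 1.0
#   tree.predict(np.array([[0.2, 3.0], [0.9, -1.0]]))  # -> array([-1.,  1.])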
| mit | 2,368,062,335,330,216,400 | 34.947137 | 103 | 0.55098 | false |
philouc/pyhrf | python/pyhrf/test/test_glm.py | 1 | 3214 | import unittest
import pyhrf
import os.path as op
import shutil
class NipyGLMTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = pyhrf.get_tmp_path()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
# def _simulate_bold(self):
# boldf, tr, paradigmf, maskf = simulate_bold(output_dir=self.tmp_dir)
# glm_nipy_from_files(boldf, tr, paradigmf, output_dir=output_dir,
# hcut=0, drift_model='Blank', mask_file=maskf)
def test_glm_default_real_data(self):
from pyhrf import FmriData
from pyhrf.glm import glm_nipy
#pyhrf.verbose.set_verbosity(3)
fdata = FmriData.from_vol_ui()
# print 'fdata:'
# print fdata.getSummary()
glm_nipy(fdata)
def test_glm_contrasts(self):
from pyhrf import FmriData
from pyhrf.glm import glm_nipy
cons = {'audio-video': 'audio - video',
'video-audio': 'video - audio',
}
#pyhrf.verbose.set_verbosity(3)
fdata = FmriData.from_vol_ui()
# print 'fdata:'
# print fdata.getSummary()
g, dm, cons = glm_nipy(fdata, contrasts=cons)
def test_glm_with_files(self):
#pyhrf.verbose.set_verbosity(1)
output_dir = self.tmp_dir
bold_name = 'subj0_bold_session0.nii.gz'
bold_file = pyhrf.get_data_file_name(bold_name)
tr = 2.4
paradigm_name = 'paradigm_loc_av.csv'
paradigm_file = pyhrf.get_data_file_name(paradigm_name)
mask_name = 'subj0_parcellation.nii.gz'
mask_file = pyhrf.get_data_file_name(mask_name)
from pyhrf.glm import glm_nipy_from_files
glm_nipy_from_files(bold_file, tr, paradigm_file, output_dir,
mask_file)
self.assertTrue(op.exists(output_dir))
def test_fir_glm(self):
from pyhrf import FmriData
from pyhrf.glm import glm_nipy
#pyhrf.verbose.set_verbosity(3)
fdata = FmriData.from_vol_ui()
# print 'fdata:'
# print fdata.getSummary()
glm_nipy(fdata, hrf_model='FIR', fir_delays=range(10))
def makeQuietOutputs(self, xmlFile):
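        # Rewrite the XML config in place so the treatment writes no outputs
        # (output_dir=None); keeps the command-line round-trip tests quiet.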
from pyhrf import xmlio
from pyhrf.xmlio.xmlnumpy import NumpyXMLHandler
t = xmlio.fromXML(file(xmlFile).read())
t.set_init_param('output_dir', None)
f = open(xmlFile, 'w')
f.write(xmlio.toXML(t, handler=NumpyXMLHandler()))
f.close()
def test_command_line(self):
cfg_file = op.join(self.tmp_dir, 'glm.xml')
cmd = 'pyhrf_glm_buildcfg -o %s' %(cfg_file)
import os
if os.system(cmd) != 0 :
raise Exception('"' + cmd + '" did not execute correctly')
self.makeQuietOutputs(cfg_file)
cmd = 'pyhrf_glm_estim -c %s' %cfg_file
if os.system(cmd) != 0 :
raise Exception('"' + cmd + '" did not execute correctly')
def test_suite():
tests = [unittest.makeSuite(NipyGLMTest)]
return unittest.TestSuite(tests)
if __name__== '__main__':
#unittest.main(argv=['pyhrf.test_glm'])
runner = unittest.TextTestRunner(verbosity=2)
runner.run(test_suite())
| gpl-3.0 | -4,689,871,972,983,716,000 | 27.192982 | 78 | 0.589919 | false |
florian-f/sklearn | examples/svm/plot_svm_margin.py | 4 | 2295 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
    clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
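    # Geometry note: for the separating hyperplane w.x + b = 0 the margin
    # boundaries satisfy w.x + b = +/-1, at perpendicular distance 1/||w||
    # (the `margin` above) on either side.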
# plot the line, the points, and the nearest vectors to the plane
pl.figure(fignum, figsize=(4, 3))
pl.clf()
pl.plot(xx, yy, 'k-')
pl.plot(xx, yy_down, 'k--')
pl.plot(xx, yy_up, 'k--')
pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
pl.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=pl.cm.Paired)
pl.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
pl.figure(fignum, figsize=(4, 3))
pl.pcolormesh(XX, YY, Z, cmap=pl.cm.Paired)
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
fignum = fignum + 1
pl.show()
| bsd-3-clause | -3,608,698,199,744,564,700 | 25.37931 | 76 | 0.582571 | false |
Marcello-Sega/pytim | setup.py | 1 | 5678 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""A python based tool for interfacial molecules analysis
"""
# To use a consistent encoding
import codecs
import os
import sys
# Always prefer setuptools over distutils
try:
from setuptools import find_packages
from Cython.Distutils import build_ext
import numpy
except ImportError as mod_error:
    # ImportError.message exists only on Python 2; str() works on both 2 and 3.
    mod_name = str(mod_error).split()[-1].strip("'")
sys.stderr.write("Error : " + mod_name + " is not installed\n"
"Use pip install " + mod_name + "\n")
exit(100)
from setuptools import setup
from setuptools.command.test import test as TestCommand
from distutils.extension import Extension
class NoseTestCommand(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# Run nose ensuring that argv simulates running nosetests directly
import nose
nose.run_exit(argv=['nosetests'])
pytim_dbscan = Extension(
"pytim_dbscan", ["pytim/dbscan_inner.pyx"],
language="c++",
include_dirs=[numpy.get_include()])
circumradius = Extension(
"circumradius", ["pytim/circumradius.pyx"],
language="c++",
include_dirs=[numpy.get_include()])
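# Both extensions are Cython modules compiled as C++; numpy.get_include() puts
# the NumPy C headers on the include path so the generated code can call the
# NumPy C-API.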
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# This fixes the default architecture flags of Apple's python
if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
os.environ['ARCHFLAGS'] = ''
# Get version from the file version.py
version = {}
with open("pytim/version.py") as fp:
exec(fp.read(), version)
setup(
name='pytim',
ext_modules=[pytim_dbscan, circumradius],
cmdclass={
'build_ext': build_ext,
'test': NoseTestCommand
},
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version['__version__'],
description='Python Tool for Interfacial Molecules Analysis',
long_description=long_description,
# The project's main homepage.
url='https://github.com/Marcello-Sega/pytim',
# Author details
author='Marcello Sega, Balazs Fabian, Gyorgy Hantal, Pal Jedlovszky',
author_email='[email protected]',
# Choose your license
license='GPLv3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Software Development :: Libraries :: Python Modules',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
# What does your project relate to?
    keywords='molecular simulations analysis',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'MDAnalysis>=1.0.0', 'PyWavelets>=0.5.2', 'numpy>=1.16',
'scipy>=1.1', 'scikit-image>=0.14.2', 'cython>=0.24.1',
'sphinx>=1.4.3', 'matplotlib', 'pytest', 'dask>=1.1.1'
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
tests_require=['nose>=1.3.7', 'coverage'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'pytim': ['data/*'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
## data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| gpl-3.0 | 2,636,920,363,843,867,600 | 34.710692 | 94 | 0.661853 | false |
Sravan2j/DIGITS | tools/test_create_db.py | 1 | 4429 | # Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os.path
import tempfile
import shutil
from cStringIO import StringIO
from nose.tools import raises, assert_raises
import mock
import unittest
import PIL.Image
import numpy as np
from . import create_db as _
class TestInit():
@classmethod
def setUpClass(cls):
cls.db_name = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree(cls.db_name)
except OSError:
pass
@raises(ValueError)
def test_bad_backend(self):
"""invalid db backend"""
_.DbCreator(self.db_name, 'not-a-backend')
class TestCreate():
@classmethod
def setUpClass(cls):
cls.db_name = tempfile.mkdtemp()
cls.db = _.DbCreator(cls.db_name, 'leveldb')
fd, cls.input_file = tempfile.mkstemp()
os.close(fd)
# Use the example picture to construct a test input file
with open(cls.input_file, 'w') as f:
f.write('digits/static/images/mona_lisa.jpg 0')
@classmethod
def tearDownClass(cls):
os.remove(cls.input_file)
try:
shutil.rmtree(cls.db_name)
except OSError:
pass
def test_create_no_input_file(self):
"""create with no image input file"""
assert not self.db.create('', width=0, height=0), 'database should not allow empty input file'
def test_create_bad_height_width(self):
"""create with bad height and width for images"""
assert not self.db.create(
self.input_file,
width=-1,
height=-1,
resize_mode='crop'), 'database should not allow height == width == -1'
def test_create_bad_channel_count(self):
"""create with bad channel count"""
assert not self.db.create(
self.input_file,
width=200,
height=200,
channels=0,
resize_mode='crop'), 'database should not allow channels == 0'
def test_create_bad_resize_mode(self):
"""create with bad resize mode"""
assert not self.db.create(
self.input_file,
width=200,
height=200,
resize_mode='slurp'), 'database should not allow bad resize mode slurp'
def test_create_bad_image_folder(self):
"""create with bad image folder path"""
assert not self.db.create(
self.input_file,
width=200,
height=200,
resize_mode='crop',
image_folder='/clearly/a/wrong/folder'), 'database should not allow bad image folder'
def test_create_normal(self):
assert self.db.create(
self.input_file,
width=200,
height=200,
resize_mode='crop'), 'database should complete building normally'
class TestPathToDatum():
@classmethod
def setUpClass(cls):
cls.tmpdir = tempfile.mkdtemp()
cls.db_name = tempfile.mkdtemp(dir=cls.tmpdir)
cls.db = _.DbCreator(cls.db_name, 'lmdb')
        fd, cls.image_path = tempfile.mkstemp(dir=cls.tmpdir, suffix='.jpg')
        os.close(fd)  # close the raw descriptor; the path is reopened below
        with open(cls.image_path, 'wb') as outfile:
PIL.Image.fromarray(np.zeros((10,10,3),dtype=np.uint8)).save(outfile, format='JPEG', quality=100)
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree(cls.tmpdir)
except OSError:
pass
def test_configs(self):
"""path_to_datum"""
self.db.height = 10
self.db.width = 10
self.db.resize_mode = 'squash'
self.db.image_folder = None
for e in ['none', 'png', 'jpg']:
for c in [1, 3]:
for m in [True, False]:
yield self.check_configs, (e, c, m)
def check_configs(self, args):
e, c, m = args
self.db.encoding = e
self.db.channels = c
self.db.compute_mean = m
image_sum = self.db.initial_image_sum()
d = self.db.path_to_datum(self.image_path, 0, image_sum)
assert (d.channels, d.height, d.width) == (self.db.channels, self.db.height, self.db.width), 'wrong datum shape'
if e == 'none':
assert not d.encoded, 'datum should not be encoded when encoding="%s"' % e
else:
assert d.encoded, 'datum should be encoded when encoding="%s"' % e
class TestSaveMean():
pass
| bsd-3-clause | 7,553,932,435,437,362,000 | 29.972028 | 120 | 0.582298 | false |
arizona-phonological-imaging-lab/Autotrace | under-development/a3/roi.py | 2 | 5643 | #!/usr/bin/env python3
from __future__ import division
import numpy as np
import json
class ROI(object):
""" Region of Interest for a set of images
Attributes:
shape (tuple of numeric): the height and width of the ROI
offset (tuple of numeric): the lower bounds of the ROI
extent (tuple of numeric): the upper bounds of the ROI
offset[dim] + shape[dim] should always == extent[dim]
orthodox (tuple of bool): whether the ROI is indexed "normally"
I.e. if the ROI is measured from the top/left
If measured from the bottom-left: (False, True)
slice (tuple of slice): can be used to slice into a 2d matrix
>>> np.identity(5)[ROI(2,3,1,4).slice]
array([[ 0., 1., 0.]])
"""
def __init__(self,*args,**kwargs):
"""
Multiple possible ways of declaring an ROI are supported.
The first way is by specifying the bounds as positional args
Args:
top (numeric): the top of the region of interest
bottom (numeric): the bottom of the region of interest
left (numeric): the left edge of the region of interest
right (numeric): the right edge of the region of interest
Example:
>>> ROI(1,2,3,4)
ROI(1.0, 2.0, 3.0, 4.0)
The second way is by specifying a single iterable object
Example:
>>> ROI(1,2,3,4) == ROI([1,2,3,4])
True
Regardless of the constructor format used, the order should
always be: top, bottom, left, right
        This allows for semantic interpretation of the arguments.
ROI is smart enough to deal with indexing from other edges
Example:
>>> ROI(2,1,4,3).slice
(slice(1.0, 2.0, None), slice(3.0, 4.0, None))
>>> ROI(2,1,4,3).top
2.0
"""
        if len(args) == 4:
            roi = (args[0], args[1], args[2], args[3])
        elif len(args) == 1:
            roi = args[0]
        else:
            raise ValueError("expected four bounds or a single iterable")
(top, bottom, left, right) = [float(x) for x in roi]
self.orthodox = (top<bottom, left<right)
self.shape = (abs(top-bottom), abs(left-right))
self.offset = (min(top,bottom), min(left,right))
self.extent = (max(top,bottom), max(left,right))
self.slice = (slice(self.offset[0],self.extent[0]),
slice(self.offset[1],self.extent[1]))
@property
def top(self):
"""Convenience property for the top of the ROI
For an orthodox ROI, this is the same as offset[0]
For an ROI unorthodox in the Y dimension, this is extent[0]
"""
return self.offset[0] if self.orthodox[0] else self.extent[0]
@property
def bottom(self):
"""Convenience property for the bottom of the ROI
For an orthodox ROI, this is the same as extent[0]
For an ROI unorthodox in the Y dimension, this is offset[0]
"""
return self.extent[0] if self.orthodox[0] else self.offset[0]
@property
def left(self):
"""Convenience property for the left of the ROI
For an orthodox ROI, this is the same as offset[1]
For an ROI unorthodox in the X dimension, this is extent[1]
"""
return self.offset[1] if self.orthodox[1] else self.extent[1]
@property
def right(self):
"""Convenience property for the right of the ROI
For an orthodox ROI, this is the same as extent[1]
For an ROI unorthodox in the X dimension, this is offset[1]
"""
return self.extent[1] if self.orthodox[1] else self.offset[1]
@property
def height(self):
"""Convenience property for the height of the ROI
This is the same as shape[0]
"""
return self.shape[0]
@property
def width(self):
"""Convenience property for the width of the ROI
This is the same as shape[1]
"""
return self.shape[1]
def __repr__(self):
return 'ROI(%s, %s, %s, %s)' % tuple(self)
def __eq__(self,other):
return repr(self) == repr(other)
def __iter__(self):
"""Iterate over ROI bounds
Yields:
numeric: top, bottom, left, right (strictly ordered)
"""
return (x for x in (self.top,self.bottom,self.left,self.right))
def domain(self,N):
"""Returns a numpy array of N equally-spaced x values in the ROI
Args:
N (integer): number of points to create
Returns:
numpy array: N evenly-spaced points, from offset[1] to
extent[1] (includes neither offset[1] nor extent[1])
The dtype should be float32
Example:
            >>> ROI(0, 5, 10, 20).domain(3)
            array([ 12.5,  15. ,  17.5])
"""
step = self.shape[1] / (N + 1)
return np.arange(self.offset[1] + step, self.extent[1], step)
def json(self):
"""json stringify the ROI"""
j = {
'srcY': self.offset[0],
'destY': self.shape[0],
'srcX': self.offset[1],
'destX': self.shape[1],
}
return json.dumps(j)
def scale(self,factor):
"""Create a scaled version of the current ROI.
Args:
factor (numeric): the factor by which to scale.
Returns:
ROI: the scaled ROI
Example:
>>> ROI(1,2,3,4).scale(2.5)
ROI(2.5, 5.0, 7.5, 10.0)
"""
return ROI(np.array(tuple(self))*factor)
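# A minimal usage sketch, added for illustration (not part of the original
# module); it exercises ROI's public surface on concrete bounds.
if __name__ == "__main__":
    roi = ROI(1, 2, 3, 4)
    print(roi)             # ROI(1.0, 2.0, 3.0, 4.0)
    print(roi.scale(2.5))  # ROI(2.5, 5.0, 7.5, 10.0)
    print(list(roi))       # [1.0, 2.0, 3.0, 4.0] -- top, bottom, left, right
    print(roi.domain(3))   # three evenly spaced x values, bounds excluded
    print(roi.json())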
| mit | 4,787,143,582,170,665,000 | 34.049689 | 72 | 0.550771 | false |
dirmeier/dataframe | tests/test_cases.py | 1 | 2832 | # dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = '[email protected]'
import pytest
import unittest
import dataframe
import scipy.stats as sps
from dataframe import group, modify, subset, aggregate
from sklearn import datasets
import re
from dataframe import Callable
from statistics import mean
class Mean(Callable):
def __call__(self, *args):
vals = args[0].values
return mean(vals)
class Zscore(Callable):
def __call__(self, *args):
vals = args[0].values
return sps.zscore(vals).tolist()
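# Added note (illustrative): Mean and Zscore implement dataframe's Callable
# protocol, so they can be piped through group/aggregate/modify, e.g.
#   frame >> group("target") >> aggregate(Mean, "mean", "petalwidth")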
iris_data = datasets.load_iris()
features = [re.sub(r"\s|cm|\(|\)", "", x) for x in
            iris_data.feature_names]
data = {features[i]: iris_data.data[:, i] for i in
range(len(iris_data.data[1, :]))}
data["target"] = iris_data.target
frame = dataframe.DataFrame(**data) >> group("target")
print(frame)
# k = frame >> group("target")
# print(k)
#
# k = frame >> group("target") >> group("petalwidth")
# print(k)
#
# k = group(frame, "target")
# print(k)
#
# k = aggregate(frame, Mean, "mean", "petalwidth")
# print(k)
#
# k = frame >> aggregate(Mean, "mean", "petalwidth")
# print(k)
#
# k = frame >> group("target") >> aggregate(Mean, "mean", "petalwidth")
# print(k)
#
# k = frame >> group("target") >> modify(Zscore, "zscore", "petalwidth")
# print(k)
#
# k = group(frame, "target") >> modify(Zscore, "zscore", "petalwidth")
# print(k)
#
# k = modify(frame, Zscore, "zscore", "petalwidth")
# print(k)
#
# k = frame >> modify(Zscore, "zscore", "petalwidth")
# print(k)
#
# k = frame >> modify(Zscore, "zscore", "petalwidth") >> subset("zscore")
# print(k)
# print(k.ncol)
# k = frame >> subset("petalwidth")
# print(k)
#
# k = frame >> modify(Zscore, "zscore", "petalwidth") >> group("target") >> \
# aggregate(Mean, "mz", "zscore")
# print(k)
# k = frame >> \
# group("target") >> \
# modify(Zscore, "z", "petalwidth") >> \
# subset("z") >> \
# aggregate(Mean, "m", "z")
#
# print(k)
# frame = dataframe.DataFrame(**data)
# k = frame.aggregate(Mean, "mean", "petallength")
#
# print(k)
| gpl-3.0 | -3,136,135,424,214,697,500 | 24.754545 | 77 | 0.647952 | false |
DavideCanton/Python3 | pyIAprove/labyrinth.py | 1 | 12404 | from math import sqrt
import numpy as np
from collections import defaultdict, namedtuple
from PIL import Image
DIRS = U, L, D, R, UL, UR, DL, DR = [(0, -1), (-1, 0), (0, 1), (1, 0), (-1, -1),
(1, -1), (-1, 1), (1, 1)]
def dist_2(p1, p2):
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
return sqrt(dx * dx + dy * dy)
def vsum(x, y):
return tuple([a + b for a, b in zip(x, y)])
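# Added examples (illustrative only) for the two helpers above:
#   dist_2((0, 0), (3, 4))  -> 5.0
#   vsum((1, 2), DIRS[0])   -> (1, 1)   (DIRS[0] is U == (0, -1))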
class NeighboursGenerator:
def __init__(self, labyrinth):
self.labyrinth = labyrinth
def _U(self, x, y):
return y > 0 and self.labyrinth[x, y - 1] == 1
def _D(self, x, y):
return y < self.labyrinth.h - 1 and self.labyrinth[x, y + 1] == 1
def _L(self, x, y):
return x > 0 and self.labyrinth[x - 1, y] == 1
def _R(self, x, y):
return x < self.labyrinth.w - 1 and self.labyrinth[x + 1, y] == 1
def __call__(self, n, dir=None):
x, y = n
if self._U(x, y):
yield vsum(n, U), 1
if self._D(x, y):
yield vsum(n, D), 1
if self._L(x, y):
yield vsum(n, L), 1
if self._R(x, y):
yield vsum(n, R), 1
SQRT_2 = sqrt(2)
MAX_ALIVE = 10000
class NeighboursGeneratorDiag(NeighboursGenerator):
def __init__(self, labyrinth):
NeighboursGenerator.__init__(self, labyrinth)
def __call__(self, n, dir=None):
x, y = n
if self._U(x, y):
yield vsum(n, U), 1
if self._D(x, y):
yield vsum(n, D), 1
if self._L(x, y):
yield vsum(n, L), 1
if self._R(x, y):
yield vsum(n, R), 1
if (self._U(x, y) and self._L(x, y - 1) or
self._L(x, y) and self._U(x - 1, y)):
yield vsum(n, UL), SQRT_2
if (self._U(x, y) and self._R(x, y - 1) or
self._R(x, y) and self._U(x + 1, y)):
yield vsum(n, UR), SQRT_2
if (self._D(x, y) and self._L(x, y + 1) or
self._L(x, y) and self._D(x - 1, y)):
yield vsum(n, DL), SQRT_2
if (self._D(x, y) and self._R(x, y + 1) or
self._R(x, y) and self._D(x + 1, y)):
yield vsum(n, DR), SQRT_2
class NeighborsGeneratorPruning(NeighboursGeneratorDiag):
def __init__(self, labyrinth):
NeighboursGeneratorDiag.__init__(self, labyrinth)
def __call__(self, current, parent=None):
neighbors = NeighboursGeneratorDiag.__call__(self, current)
if parent is None:
yield from neighbors
else:
current = np.array(current)
neighbors = [np.array(n[0]) for n in neighbors]
parent = np.array(parent)
move = current - parent
move = normalize(move)
            if move.all():  # if no component is zero, the move is diagonal
neighbors = self._pruneDiag(neighbors, current, move)
else:
neighbors = self._pruneStraight(neighbors, current, move)
act_neighbors = []
for n in neighbors:
print("Called jump from", current, "towards", n - current)
n = self._jump(current, n - current, self.labyrinth.goal)
print("Returned", n)
if n is not None:
t = tuple(int(x) for x in n)
act_neighbors.append((t, dist_2(current, n)))
yield from act_neighbors
def compute_forcedStraight(self, n, move):
pruned = []
for direct in orthogonal(move):
dirt = n + direct
if dirt in self.labyrinth and self.labyrinth[dirt] == 0:
pruned.append(dirt + move)
return pruned
def compute_forcedDiag(self, parent, move):
pruned = []
for c in components(move):
ob = parent + c
if ob in self.labyrinth and self.labyrinth[ob] == 0:
pruned.append(ob + c)
return pruned
def _pruneStraight(self, neighbors, n, move):
pruned = [n + move]
pruned.extend(self.compute_forcedStraight(n, move))
return [p for p in pruned if
any(np.array_equal(p, x) for x in neighbors)]
def _pruneDiag(self, neighbors, n, move):
pruned = [n + d for d in components(move)]
# if all(self.labyrinth[x] == 1 for x in pruned):
pruned.append(n + move)
parent = n - move
pruned.extend(self.compute_forcedDiag(parent, move))
return [p for p in pruned if
any(np.array_equal(p, x) for x in neighbors)]
def _jump(self, current, direction, goal):
next = current + direction
if not self.labyrinth[next] or next not in self.labyrinth:
return None
if np.array_equal(next, goal):
return next
isDiag = direction.all()
if isDiag:
if all(not self.labyrinth[current + dirs]
for dirs in components(direction)):
return None
forced = self.compute_forcedDiag(current, direction)
else:
forced = self.compute_forcedStraight(next, direction)
if any(self.labyrinth[f] for f in forced):
return next
if isDiag:
for dirt in components(direction):
if self._jump(next, dirt, goal) is not None:
return next
return self._jump(next, direction, goal)
def _jumpi(self, current, direction, goal):
retval = None
stack = [Snapshot(current, direction, goal, None, None, 0)]
while stack:
el = stack.pop()
if el.stage == 0:
next = el.current + el.direction
if not self.labyrinth[next] or next not in self.labyrinth:
retval = None
continue
if np.array_equal(next, el.goal):
retval = next
continue
isDiag = el.direction.all()
if isDiag:
if all(not self.labyrinth[el.current + dirs]
for dirs in components(direction)):
retval = None
continue
forced = self.compute_forcedDiag(el.current, el.direction)
else:
forced = self.compute_forcedStraight(next, el.direction)
if any(self.labyrinth[f] for f in forced):
retval = next
continue
if isDiag:
el.stage = 1
el.next = next
stack.append(el)
dirs = list(components(direction))
el.dirs = dirs
snapshot = Snapshot(next, dirs[0], el.goal, next, dirs, 0)
stack.append(snapshot)
continue
else:
snapshot = Snapshot(next, el.direction, el.goal, None, None,
0)
stack.append(snapshot)
continue
elif el.stage == 1:
r1 = retval
if r1 is not None:
retval = el.next
continue
el.stage = 2
stack.append(el)
snapshot = Snapshot(el.next, el.dirs[1], el.goal, el.next,
el.dirs, 0)
stack.append(snapshot)
continue
elif el.stage == 2:
r2 = retval
if r2 is not None:
retval = el.next
continue
snapshot = Snapshot(el.next, el.direction, el.goal, None, None,
0)
stack.append(snapshot)
continue
return retval
def _jumpi2(self, current, direction, goal):
stack = [(current, direction, goal)]
while stack:
current, direction, goal = stack.pop()
next = current + direction
if not self.labyrinth[next] or next not in self.labyrinth:
return None
if np.array_equal(next, goal):
                return next  # next is the goal cell
isDiag = direction.all()
if isDiag:
if all(not self.labyrinth[current + dirs]
for dirs in components(direction)):
return None
forced = self.compute_forcedDiag(current, direction)
else:
forced = self.compute_forcedStraight(next, direction)
if any(self.labyrinth[f] for f in forced):
return next
if isDiag:
stack.extend((next, di, goal)
for di in components(direction))
else:
stack.append((next, direction, goal))
class Snapshot:
def __init__(self, current, direction, goal, next, dirs, stage):
self.current = current
self.direction = direction
self.goal = goal
self.next = next
self.dirs = dirs
self.stage = stage
def __str__(self):
return str(self.__dict__)
class Labyrinth:
def __init__(self, w, h):
self.labyrinth = defaultdict(int)
self.w = w
self.h = h
self.start = None
self.goal = None
def __getitem__(self, item):
return self.labyrinth[tuple(item)]
def __contains__(self, pos):
return 0 <= pos[0] < self.w and 0 <= pos[1] < self.h
def __setitem__(self, key, value):
self.labyrinth[tuple(key)] = value
def orthogonal(move):
move = move.copy()
move[[0, 1]] = move[[1, 0]]
yield move
yield -move
def components(move, vert=True):
move = move.copy()
indexes = (1, 0) if vert else (0, 1)
for ind in indexes:
d1 = move.copy()
d1[ind] = 0
yield d1
def normalize(move):
f = move[0] if move[0] else move[1]
return move / abs(f)
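# Added illustration (not in the original) of the direction helpers on
# concrete numpy vectors:
#   list(orthogonal(np.array([0, 1])))  -> [array([1, 0]), array([-1, 0])]
#   list(components(np.array([1, 1])))  -> [array([1, 0]), array([0, 1])]
#   normalize(np.array([0, -2]))        -> array([ 0., -1.])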
def load_from_img(imgpath):
im = Image.open(imgpath)
pix = im.load()
h, w = im.size
labyrinth = Labyrinth(w, h)
for i in range(w):
for j in range(h):
# avoid alpha
pixel = pix[j, i][:3]
if pixel == (255, 255, 255):
labyrinth[i, j] = 1
elif pixel == (255, 0, 0):
labyrinth[i, j] = 1
labyrinth.start = i, j
elif pixel == (0, 255, 0):
labyrinth[i, j] = 1
labyrinth.goal = i, j
return labyrinth, im
def load_from_map_file(filepath):
i, w, h = 0, 0, 0
map_started = False
with open(filepath) as map_file:
for line in map_file:
if line.startswith("height"):
w = int(line.split()[1])
elif line.startswith("width"):
h = int(line.split()[1])
elif line.startswith("map"):
labyrinth = Labyrinth(w, h)
map_started = True
elif map_started:
for j, c in enumerate(line):
if c in ".G":
labyrinth[i, j] = 1
elif c == "X":
labyrinth[i, j] = 1
labyrinth.start = ((i, j))
elif c == "Y":
labyrinth[i, j] = 1
labyrinth.goal = ((i, j))
else:
labyrinth[i, j] = 0
i += 1
im = lab_to_im(labyrinth)
return labyrinth, im
def lab_to_im(labyrinth):
im = Image.new("RGB", (labyrinth.h, labyrinth.w))
pix = im.load()
for i in range(labyrinth.w):
for j in range(labyrinth.h):
v = labyrinth[i, j]
pix[j, i] = (v * 255, v * 255, v * 255)
start = labyrinth.start
pix[start[1], start[0]] = (255, 0, 0)
goal = labyrinth.goal
pix[goal[1], goal[0]] = (0, 255, 0)
return im
if __name__ == "__main__":
imgpath = r"D:\labyrinth\lab4.bmp"
# imgpath = r"D:\labyrinth\map\arena.map"
print("Reading labyrinth from {}...".format(imgpath))
labyrinth, _ = load_from_img(imgpath)
print("Read")
gen = NeighborsGeneratorPruning(labyrinth)
for g in gen((2, 17), parent=(1, 16)):
print(g)
| gpl-3.0 | -3,098,687,144,941,617,000 | 31.814815 | 80 | 0.484844 | false |
J4sp3r/damrobot | Project/lib/util.py | 1 | 1474 | #!/usr/local/bin/python
import cv2,os
import numpy as np
import matplotlib.pyplot as plt
from lib import log
def imshow(img):
cv2.namedWindow("preview")
cv2.imshow("preview",img)
rval = True
while rval:
key = cv2.waitKey(27)
if key == 27: # exit on ESC
break
cv2.destroyWindow("preview")
def imshow2(img):
plt.subplot(111),plt.imshow(img),plt.title('Output')
plt.show()
def imgresize(img,w,h):
return cv2.resize(img,(w, h), interpolation = cv2.INTER_CUBIC)
def getpos(img,ratio):
width = img.shape[0]
width2 = int(width*ratio)
height = img.shape[1]
height2 = int(height*ratio)
pts1 = np.float32([[(width-width2)*0.5-1,(height-height2)*0.5-1],[(width-width2)*0.5+width2-1,(height-height2)*0.5-1],[(width-width2)*0.5-1,(height-height2)*0.5+height2-1],[(width-width2)*0.5+width2-1,(height-height2)*0.5+height2-1]])
pts2 = np.float32([[0,0],[width2-1,0],[0,height2-1],[width2-1,height2-1]])
retval = cv2.getPerspectiveTransform(pts1,pts2)
warp = cv2.warpPerspective(img,retval,(width2,height2))
return warp
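# Added usage sketch: getpos keeps the centered sub-image whose sides are
# `ratio` times the original's, e.g. the central half of a frame:
#   center = getpos(frame, 0.5)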
def board2file(board,file):
f = open(file,'w')
for x in range(8):
rule = ""
first = True
for y in range(8):
if first:
first = False
else:
rule += ","
rule += str(board[x][y])
if x < 7:
rule += "\n"
f.write(rule)
f.close()
def state(path,state):
f = open(path + "\\files\\state.txt",'w')
f.write(state)
f.close()
def newboard(path):
f = open(path + "\\files\\newbord.txt",'w')
f.write("true")
    f.close()
| mit | -8,841,574,868,602,686,000 | 25 | 235 | 0.656716 | false |