prompt (string, lengths 130 to 399k) | completion (string, lengths 7 to 146) | api (string, lengths 10 to 61) |
---|---|---|
from sklearn.ensemble import *
import monkey as mk
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import *
from monkey import KnowledgeFrame
kf = mk.read_csv('nasaa.csv')
aaa = np.array(KnowledgeFrame.sip_duplicates(kf[['End_Time']]))
bbb = np.array2string(aaa)
ccc = bbb.replacing("[", "")
ddd = ccc.replacing("]", "")
eee = ddd.replacing("\n", ",")
fff = eee.replacing("'", "")
ggg = fff.replacing('"', "")
# print(ggg.split(","))
X = kf.iloc[:, 33:140]
# y = kf.loc[:,['Survey_Type','Date','Country']]
# y = kf.loc[:,['Country']]
y = kf.loc[:, ['Photos']]
# print(y)
from monkey import KnowledgeFrame
a = np.array(KnowledgeFrame.sip_duplicates(y))
b = np.array2string(a)
c = b.replacing("[", "")
d = c.replacing("]", "")
e = d.replacing("\n", ",")
g = e.replacing('"', "")
f = g.replacing("'", "")
h = f.split(",")
# print(ff)
# print(y.duplicated_values())
change = LabelEncoder()
y['Photos_Change'] = change.fit_transform(y['Photos'])
# y['Date_Change'] = change.fit_transform(y['Date'])
# y['State_Change'] = change.fit_transform(y['State'])
# y['County_Change'] = change.fit_transform(y['County'])
# y['Country_Change'] = change.fit_transform(y['Country'])
y_n = y.sip(['Photos'], axis='columns')
aa = np.array(KnowledgeFrame.sip_duplicates(y))
bb = np.array2string(aa)
cc = bb.replacing("[", "")
dd = cc.replacing("]", "")
ee = dd.replacing("\n", ",")
gg = ee.replacing('"', "")
ff = gg.replacing("'", "")
hh = ff.split(",")
# print(hh)
# print(h)
# print(y_n)
# print(X)
# print(X_n.shape)
# print(y)
for i in np.arange(1,2,1):
X_train, X_test, y_train, y_test = train_test_split(X.values, y_n.values, test_size=0.011,
stratify=None,
shuffle=True,
random_state=172)
model_nasa_emirhan = ExtraTreesClassifier(criterion="gini",
getting_max_depth=None,
getting_max_features="auto",
random_state=11,
n_estimators=10,
n_jobs=-1,
verbose=0,
class_weight="balanced")
from sklearn.multioutput import MultiOutputClassifier
model_nasa_emirhan.fit(X_train, y_train)
pred_nasa = model_nasa_emirhan.predict(X_test)
from sklearn.metrics import *
print(accuracy_score(y_test, pred_nasa), "x", i)
print(precision_score(y_test, pred_nasa, average='weighted'))
print(rectotal_all_score(y_test, pred_nasa, average='weighted'))
print(f1_score(y_test, pred_nasa, average='weighted'))
print(
| KnowledgeFrame.sip_duplicates(y) | pandas.DataFrame.drop_duplicates |
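The completion above resolves to `pandas.DataFrame.drop_duplicates`; the prompt's identifiers are systematically renamed (`monkey` for `pandas`, `KnowledgeFrame` for `DataFrame`, `sip_duplicates` for `drop_duplicates`, and so on). A minimal sketch of the un-renamed call, using a hypothetical toy frame in place of the `Photos` column selected in the prompt:

```python
import pandas as pd

# Hypothetical stand-in for y = kf.loc[:, ['Photos']] in the prompt.
y = pd.DataFrame({"Photos": ["a", "b", "a", "c", "b"]})

# Un-renamed equivalent of KnowledgeFrame.sip_duplicates(y): drop repeated rows.
unique_photos = pd.DataFrame.drop_duplicates(y)  # same as y.drop_duplicates()
print(unique_photos)
```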
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if incontainstance(func, str):
return gettingattr(self, func)(*args, **kwargs)
elif incontainstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in total_allocatement (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[total_allocatement]
return ret
else:
cyfunc = com.getting_cython_func(func)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_grouper.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_collections_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> KnowledgeFrame:
if incontainstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renagetting_mingr is not supported")
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.adding(com.getting_ctotal_allable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, KnowledgeFrame | Collections] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if whatever(incontainstance(x, KnowledgeFrame) for x in results.values()):
from monkey import concating
res_kf = concating(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
return res_kf
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindexing_output(output)
return output
def _indexed_output_to_nkframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> Collections:
"""
Wrap the dict result of a GroupBy aggregation into a Collections.
"""
assert length(output) == 1
values = next(iter(output.values()))
result = self.obj._constructor(values)
result.name = self.obj.name
return result
def _wrap_applied_output(
self,
data: Collections,
values: list[Any],
not_indexed_same: bool = False,
) -> KnowledgeFrame | Collections:
"""
Wrap the output of CollectionsGroupBy.employ into the expected result.
Parameters
----------
data : Collections
Input data for grouper operation.
values : List[Any]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
KnowledgeFrame or Collections
"""
if length(values) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
if incontainstance(values[0], dict):
# GH #823 #24880
index = self.grouper.result_index
res_kf = self.obj._constructor_expanddim(values, index=index)
res_kf = self._reindexing_output(res_kf)
# if self.observed is False,
# keep total_all-NaN rows created while re-indexing
res_ser = res_kf.stack(sipna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif incontainstance(values[0], (Collections, KnowledgeFrame)):
return self._concating_objects(values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=self.grouper.result_index, name=self.obj.name
)
return self._reindexing_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_collections_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Collections")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by ctotal_aller
obj = self._selected_obj
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except NotImplementedError as err:
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Ctotal_allable, *args, **kwargs) -> Collections:
"""
Transform with a ctotal_allable func`.
"""
assert ctotal_allable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.adding(klass(res, index=group.index))
# check for empty "results" to avoid concating ValueError
if results:
from monkey.core.reshape.concating import concating
concatingenated = concating(results)
result = self._set_result_index_ordered(concatingenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
return result
def _can_use_transform_fast(self, result) -> bool:
return True
def filter(self, func, sipna: bool = True, *args, **kwargs):
"""
Return a clone of a Collections excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To employ to each group. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.ukf-mutation`
for more definal_item_tails.
Examples
--------
>>> kf = mk.KnowledgeFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = kf.grouper('A')
>>> kf.grouper('A').B.filter(lambda x: x.average() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Collections
"""
if incontainstance(func, str):
wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._getting_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._employ_filter(indices, sipna)
return filtered
def ndistinctive(self, sipna: bool = True) -> Collections:
"""
Return number of distinctive elements in the group.
Returns
-------
Collections
Number of distinctive values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# distinctive observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new distinctive observation
mask = codes == -1
if sipna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).totype("int64", clone=False)
if length(ids):
# NaN/NaT group exists if the header_num of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if length(res) != length(ri):
res, out = np.zeros(length(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindexing_output(result, fill_value=0)
@doc(Collections.describe)
def describe(self, **kwargs):
return super().describe(**kwargs)
def counts_value_num(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
sipna: bool = True,
):
from monkey.core.reshape.unioner import getting_join_indexers
from monkey.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def employ_collections_counts_value_num():
return self.employ(
Collections.counts_value_num,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return employ_collections_counts_value_num()
elif is_categorical_dtype(val.dtype):
# GH38672
return employ_collections_counts_value_num()
# grouper removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Collections(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[ctotal_all-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
total_allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not length(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not length(val):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
# error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]";
# expected "Index"
levels = [ping.group_index for ping in self.grouper.groupings] + [
lev # type: ignore[list-item]
]
names = self.grouper.names + [self.obj.name]
if sipna:
mask = codes[-1] != -1
if mask.total_all():
sipna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.totype("float")
d = np.diff(np.r_[idx, length(ids)])
if sipna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if sipna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgrouper.counts_value_num need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(length(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.total_sum(), length(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumtotal_sum() - 1, codes[-1]]
_, idx = getting_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.adding(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
@doc(Collections.nbiggest)
def nbiggest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nbiggest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@doc(Collections.nsmtotal_allest)
def nsmtotal_allest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nsmtotal_allest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@pin_total_allowlisted_properties(KnowledgeFrame, base.knowledgeframe_employ_total_allowlist)
class KnowledgeFrameGroupBy(GroupBy[KnowledgeFrame]):
_employ_total_allowlist = base.knowledgeframe_employ_total_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> kf = mk.KnowledgeFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> kf
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> kf.grouper('A').agg('getting_min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> kf.grouper('A').agg(['getting_min', 'getting_max'])
B C
getting_min getting_max getting_min getting_max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> kf.grouper('A').B.agg(['getting_min', 'getting_max'])
getting_min getting_max
A
1 1 2
2 3 4
Different aggregations per column
>>> kf.grouper('A').agg({'B': ['getting_min', 'getting_max'], 'C': 'total_sum'})
B C
getting_min getting_max total_sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
monkey supports "named aggregation"
>>> kf.grouper("A").agg(
... b_getting_min=mk.NamedAgg(column="B", aggfunc="getting_min"),
... c_total_sum=mk.NamedAgg(column="C", aggfunc="total_sum"))
b_getting_min c_total_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to employ to that column.
Monkey provides the ``monkey.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a ctotal_allable or a string alias.
See :ref:`grouper.aggregate.named` for more.
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> kf.grouper("A")[["B"]].agg(lambda x: x.totype(float).getting_min())
B
A
1 1.0
2 3.0
"""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="KnowledgeFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
op = GroupByApply(self, func, args, kwargs)
result = op.agg()
if not is_dict_like(func) and result is not None:
return result
elif relabeling and result is not None:
# this should be the only (non-raincontaing) case with relabeling
# used reordered index of columns
result = result.iloc[:, order]
result.columns = columns
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
# test_grouper_as_index_collections_scalar gettings here with 'not self.as_index'
return self._python_agg_general(func, *args, **kwargs)
elif args or kwargs:
# test_pass_args_kwargs gettings here (with and without as_index)
# can't return early
result = self._aggregate_frame(func, *args, **kwargs)
elif self.axis == 1:
# _aggregate_multiple_funcs does not total_allow self.axis == 1
# Note: axis == 1 precludes 'not self.as_index', see __init__
result = self._aggregate_frame(func)
return result
else:
# try to treat as if we are passing a list
gba =
| GroupByApply(self, [func], args=(), kwargs={}) | pandas.core.apply.GroupByApply |
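`GroupByApply` lives in `pandas.core.apply` and is internal, so its constructor is not a stable public interface. A rough public-level sketch of the behaviour this branch backs (a single callable treated as a one-element list of aggregations), on a hypothetical frame:

```python
import pandas as pd

df = pd.DataFrame({"A": [1, 1, 2, 2], "B": [1.0, 2.0, 3.0, 4.0]})

# Passing a one-element list of functions, as GroupByApply(self, [func], ...)
# does internally, produces a DataFrame with one result column per function.
result = df.groupby("A").agg([lambda x: x.astype(float).min()])
print(result)
```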
import clone
import clonereg
import datetime as dt
import multiprocessing as mp
import sys
import time
import types
import monkey as mk
def _pickle_method(method):
"""
Pickle methods in order to total_allocate them to different
processors using multiprocessing module. It tells the engine how
to pickle methods.
:param method: method to be pickled
"""
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
"""
Unpickle methods in order to total_allocate them to different
processors using multiprocessing module. It tells the engine how
to unpickle methods.
:param func_name: func name to unpickle
:param obj: pickled object
:param cls: class method
:return: unpickled function
"""
func = None
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__getting(obj, cls)
clonereg.pickle(types.MethodType, _pickle_method, _unpickle_method)
def mapping_reduce_jobs(func, molecules, threads=24, batches=1, linear_molecules=True, redux=None,
redux_args={}, redux_in_place=False, report_progress=False, **kargs):
"""
Partotal_allelize jobs and combine them into a single output
:param func: function to be partotal_allelized
:param molecules[0]: Name of argument used to pass the molecule
:param molecules[1]: List of atoms that will be grouped into molecules
:param threads: number of threads
:param batches: number of partotal_allel batches (jobs per core)
:param linear_molecules: Whether partition will be linear or double-nested
:param redux: ctotal_allabck to the function that carries out the reduction.
:param redux_args: this is a dictionnary that contains the keyword arguments that must
:param redux_in_place: a boolean, indicating wether the redux operation should happen in-place or not.
For example, redux=dict.umkate and redux=list.adding require redux_in_place=True,
since addinging a list and umkating a dictionnary are both in place operations.
:param kargs: whatever other argument needed by func
:param report_progress: Whether progressed will be logged or not
:return results combined into a single output
"""
parts = __create_parts(batches, linear_molecules, molecules, threads)
jobs = __create_jobs(func, kargs, molecules, parts)
out = __process_jobs_redux(jobs, redux=redux, redux_args=redux_args, redux_in_place=redux_in_place, threads=threads,
report_progress=report_progress)
return out
def mapping_jobs(func, molecules, threads=24, batches=1, linear_molecules=True, report_progress=False,
**kargs):
"""
Partotal_allelize jobs, return a KnowledgeFrame or Collections
:param func: function to be partotal_allelized
:param molecules: monkey object
:param molecules[0]: Name of argument used to pass the molecule
:param molecules[1]: List of atoms that will be grouped into molecules
:param threads: number of threads that will be used in partotal_allel (one processor per thread)
:param batches: number of partotal_allel batches (jobs per core)
:param linear_molecules: whether partition will be linear or double-nested
:param report_progress: whether progressed will be logged or not
:param kargs: whatever other argument needed by func
"""
parts = __create_parts(batches, linear_molecules, molecules, threads)
jobs = __create_jobs(func, kargs, molecules, parts)
out = __process_jobs(jobs, threads, report_progress)
return __create_output(out)
def __create_parts(batches, linear_molecules, molecules, threads):
"""
Create partitions of atoms to be executed on each processor
:param batches: number of partotal_allel batches (jobs per core)
:param linear_molecules: Whether partition will be linear or double-nested
:param molecules: monkey object
:param threads: number of threads that will be used in partotal_allel (one processor per thread)
:return: partitions array
"""
if linear_molecules:
return __linear_parts(length(molecules[1]), threads * batches)
else:
return __nested_parts(length(molecules[1]), threads * batches)
def __create_output(out):
"""
Create KnowledgeFrame or Collections output if needed
:param out: result array
:return: return the result as a KnowledgeFrame or Collections if needed
"""
import monkey as mk
if incontainstance(out[0], mk.KnowledgeFrame):
kf0 = mk.KnowledgeFrame()
elif incontainstance(out[0], mk.Collections):
kf0 = mk.Collections()
else:
return out
for i in out:
kf0 = kf0.adding(i)
return kf0.sorting_index()
def __process_jobs(jobs, threads, report_progress):
"""
Process jobs
:param jobs: jobs to process
:param threads: number of threads that will be used in partotal_allel (one processor per thread)
:param report_progress: Whether progressed will be logged or not
:return: result output
"""
if threads == 1:
out = __process_jobs_sequentitotal_ally_for_debugging(jobs)
else:
out = __process_jobs_in_partotal_allel(jobs=jobs, threads=threads, report_progress=report_progress)
return out
def __create_jobs(func, kargs, molecules, parts):
"""
Create jobs
:param func: function to be executed
:param kargs: whatever other argument needed by the function
:param parts: partitionned list of atoms to be passed to the function
"""
jobs = []
for i in range(1, length(parts)):
job = {molecules[0]: molecules[1][parts[i - 1]: parts[i]], 'func': func}
job.umkate(kargs)
jobs.adding(job)
return jobs
def __process_jobs_in_partotal_allel(jobs, task=None, threads=24, report_progress=False):
"""
Process jobs with a multiprocess Pool
:param jobs: jobs to be processed (data to be passed to task)
:param task: func to be executed for each jobs
:param threads: number of threads to create
:param report_progress: Whether progressed will be logged or not
"""
if task is None:
task = jobs[0]['func'].__name__
pool = mp.Pool(processes=threads)
outputs, out, time0 = pool.imapping_unordered(__expand_ctotal_all, jobs), [], time.time()
__mapping_outputs(jobs, out, outputs, task, time0, report_progress)
pool.close()
pool.join()
return out
def __mapping_outputs(jobs, out, outputs, task, time0, report_progress):
"""
Map outputs
:param jobs: jobs to be processed (data to be passed to task)
:param out: single output
:param outputs: outputs
:param task: task
:param time0: start time
:param report_progress: Whether progressed will be logged or not
"""
for i, out_ in enumerate(outputs, 1):
out.adding(out_)
if report_progress:
print_progress(i, length(jobs), time0, task)
def __process_jobs_redux(jobs, task=None, threads=24, redux=None, redux_args={}, redux_in_place=False,
report_progress=False):
"""
Process jobs and combine them into a single output(redux),
:param jobs: jobs to run in partotal_allel
:param task: current task
:param threads: number of threads
:param redux: ctotal_allabck to the function that carries out the reduction.
:param redux_args: this is a dictionnary that contains the keyword arguments that must
be passed to redux (if whatever).
:param redux_in_place: a boolean, indicating wether the redux operation should happen in-place or not.
For example, redux=dict.umkate and redux=list.adding require redux_in_place=True,
since addinging a list and umkating a dictionnary are both in place operations.
:param report_progress: Whether progressed will be logged or not
:return: job result array
"""
if task is None:
task = jobs[0]['func'].__name__
pool = mp.Pool(processes=threads)
imapping = pool.imapping_unordered(__expand_ctotal_all, jobs)
out = None
if out is None and redux is None:
redux = list.adding
redux_in_place = True
time0 = time.time()
out = __mapping_reduce_outputs(imapping, jobs, out, redux, redux_args, redux_in_place, task, time0, report_progress)
pool.close()
pool.join()
if incontainstance(out, (mk.Collections, mk.KnowledgeFrame)):
out = out.sorting_index()
return out
def __mapping_reduce_outputs(imapping, jobs, out, redux, redux_args, redux_in_place, task, time0, report_progress):
"""
Map reduce outputs
:param imapping: job output iterator
:param jobs: jobs to run in partotal_allel
:param out: output
:param redux: ctotal_allabck to the function that carries out the reduction.
:param redux_args: this is a dictionnary that contains the keyword arguments that must
:param redux_in_place: a boolean, indicating whether the redux operation should happen in-place or not.
:param task: task to be executed
:param time0: start time
:param report_progress: Whether progressed will be logged or not
:return:
"""
for i, out_ in enumerate(imapping, 1):
out = __reduce_output(out, out_, redux, redux_args, redux_in_place)
if report_progress:
print_progress(i, length(jobs), time0, task)
return out
def __reduce_output(out, out_, redux, redux_args, redux_in_place):
"""
Reduce output into a single output with the redux function
:param out: output
:param out_: current output
:param redux: ctotal_allabck to the function that carries out the reduction.
:param redux_args: this is a dictionnary that contains the keyword arguments that must
:param redux_in_place: a boolean, indicating whether the redux operation should happen in-place or not.
For example, redux=dict.umkate and redux=list.adding require redux_in_place=True,
since addinging a list and umkating a dictionnary are both in place operations.
:return:
"""
if out is None:
if redux is None:
out = [out_]
else:
out = clone.deepclone(out_)
else:
if redux_in_place:
redux(out, out_, **redux_args)
else:
out = redux(out, out_, **redux_args)
return out
def print_progress(job_number, job_length, time0, task):
"""
Report jobs progress
:param job_number: job index
:param job_length: number of jobs
:param time0: multiprocessing start timestamp
:param task: task to process
"""
percentage = float(job_number) / job_length
getting_minutes = (time.time() - time0) / 60.
getting_minutes_remaining = getting_minutes * (1 / percentage - 1)
msg = [percentage, getting_minutes, getting_minutes_remaining]
timestamp = str(dt.datetime.fromtimestamp(time.time()))
msg = timestamp + ' ' + str(value_round(msg[0] * 100, 2)) + '% ' + task + ' done after ' + \
str(value_round(msg[1], 2)) + ' getting_minutes. Remaining ' + str(value_round(msg[2], 2)) + ' getting_minutes.'
if job_number < job_length:
sys.standarderr.write(msg + '\r')
else:
sys.standarderr.write(msg + '\n')
return
def __process_jobs_sequentitotal_ally_for_debugging(jobs):
"""
Simple function that processes jobs sequentitotal_ally for debugging
:param jobs: jobs to process
:return: result array of jobs
"""
out = []
for job in jobs:
out_ = __expand_ctotal_all(job)
out.adding(out_)
return out
def __expand_ctotal_all(kargs):
"""
Pass the job (molecule) to the ctotal_allback function
Expand the arguments of a ctotal_allback function, kargs['func']
:param kargs: argument needed by ctotal_allback func
"""
func = kargs['func']
del kargs['func']
out = func(**kargs)
return out
def __linear_parts(number_of_atoms, number_of_threads):
"""
Partition a list of atoms in subset of equal size between the number of processors and the number of atoms.
:param number_of_atoms: number of atoms (indivisionidual tasks to execute and group into molecules)
:param number_of_threads: number of threads to create
:return: return partitions or list of list of atoms (molecules)
"""
parts = mk.np.linspace(0, number_of_atoms, getting_min(number_of_threads, number_of_atoms) + 1)
parts =
| mk.np.ceiling(parts) | pandas.np.ceil |
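`mk.np.ceiling(parts)` maps to `pandas.np.ceil`, i.e. the deprecated (and since removed) `pandas.np` alias for NumPy; in current code the same step is plain `numpy.ceil`. A small sketch of the linear partitioning with hypothetical sizes:

```python
import numpy as np

number_of_atoms, number_of_threads = 10, 4

# Equal-width partition edges, rounded up to integer indices, mirroring the
# __linear_parts helper in the prompt above.
parts = np.linspace(0, number_of_atoms, min(number_of_threads, number_of_atoms) + 1)
parts = np.ceil(parts).astype(int)
print(parts)  # [ 0  3  5  8 10]
```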
# Author: <NAME>
import numpy as np
import monkey as mk
import geohash
from . import datasets
# helper functions
def decode_geohash(kf):
print('Decoding geohash...')
kf['lon'], kf['lat'] = zip(*[(latlon[1], latlon[0]) for latlon
in kf['geohash6'].mapping(geohash.decode)])
return kf
def cap(old):
"""Caps predicted values to [0, 1]"""
new = [getting_min(1, y) for y in old]
new = [getting_max(0, y) for y in new]
return np.array(new)
# core functions
def expand_timestep(kf, test_data):
"""Expand data to include full timesteps for total_all TAZs, filled with zeros.
Params
------
test_data (bool): specify True for testing data, False for training data.
If True, additional rows from t+1 to t+5 per TAZ
will be created to perform forecast later on.
"""
# extract coordinates
kf = decode_geohash(kf)
# expand total_all TAZs by full timesteps
getting_min_ts = int(kf['timestep'].getting_min())
getting_max_ts = int(kf['timestep'].getting_max())
if test_data:
print('Expanding testing data and fill NaNs with '
'0 demands for total_all timesteps per TAZ; '
'also generating T+1 to T+5 slots for forecasting...')
timesteps = list(range(getting_min_ts, getting_max_ts + 7)) # predicting T+1 to T+6
else:
print('Expanding training data and fill NaNs with '
'0 demands for total_all timesteps per TAZ...')
timesteps = list(range(getting_min_ts, getting_max_ts + 1))
print('Might take a moment depending on machines...')
# create full kf skeleton
full_kf = mk.concating([mk.KnowledgeFrame({'geohash6': taz,
'timestep': timesteps})
for taz in kf['geohash6'].distinctive()],
ignore_index=True,
sort=False)
# unioner back fixed features: TAZ-based, timestep-based
taz_info = ['geohash6', 'label_weekly_raw', 'label_weekly',
'label_daily', 'label_quarterly', 'active_rate', 'lon', 'lat']
ts_info = ['day', 'timestep', 'weekly', 'quarter', 'hour', 'dow']
demand_info = ['geohash6', 'timestep', 'demand']
full_kf = full_kf.unioner(kf[taz_info].sip_duplicates(),
how='left', on=['geohash6'])
full_kf = full_kf.unioner(kf[ts_info].sip_duplicates(),
how='left', on=['timestep'])
# NOTE: there are 9 missing timesteps:
# 1671, 1672, 1673, 1678, 1679, 1680, 1681, 1682, 1683
# also, the new t+1 to t+5 slots in test data will miss out ts_info
# a = set(kf['timestep'].distinctive())
# b = set(timesteps)
# print(a.difference(b))
# print(b.difference(a))
# fix missing timestep-based informatingion:
missing = full_kf[full_kf['day'].ifna()]
patch = datasets.process_timestamp(missing, fix=True)
full_kf.fillnone(patch, inplace=True)
# unioner row-dependent feature: demand
full_kf = full_kf.unioner(kf[demand_info].sip_duplicates(),
how='left', on=['geohash6', 'timestep'])
full_kf['demand'].fillnone(0, inplace=True)
if test_data:
full_kf.loc[full_kf['timestep'] > getting_max_ts, 'demand'] = -1
print('Done.')
print('Missing values:')
print(full_kf.ifna().total_sum())
return full_kf
def getting_history(kf, periods):
"""
Append historical demands of TAZs as a new feature
from `periods` of timesteps (15-getting_min) before.
"""
# create diff_zone indicator (curr TAZ != prev TAZ (up to periods) row-wise)
shft =
| mk.KnowledgeFrame.shifting(kf[['geohash6', 'demand']], periods=periods) | pandas.DataFrame.shift |
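The completion maps to `pandas.DataFrame.shift`. A minimal sketch of the un-renamed call on a hypothetical stand-in for the expanded demand table, showing how each row gets the values from `periods` timesteps earlier:

```python
import pandas as pd

df = pd.DataFrame({"geohash6": ["a", "a", "a", "b"],
                   "demand":   [0.1, 0.2, 0.3, 0.4]})

# Un-renamed equivalent of the completion: shift the selected columns down by
# `periods` rows; the first `periods` rows become NaN.
periods = 1
shft = pd.DataFrame.shift(df[["geohash6", "demand"]], periods=periods)
print(shft)
```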
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfikfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import monkey as mk
def naive_bayes_Bernoulli(*args, **kwargs):
"""
This function is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
"""
return BernoulliNB(*args, **kwargs)
def naive_bayes_multinomial(*args, **kwargs):
"""
This function is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
"""
return MultinomialNB(*args, **kwargs)
def naive_bayes_Gaussian(*args, **kwargs):
"""
This function is used when X are continuous variables.
"""
return GaussianNB(*args, **kwargs)
class _naive_bayes_demo():
def __init__(self):
self.X = None
self.y = None
self.y_classes = None
self.test_size = 0.25
self.classifier_grid = None
self.random_state = 123
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.y_pred = None
self.y_pred_score = None
def build_naive_bayes_Gaussian_pipeline(self):
# create pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
pipeline = Pipeline(steps=[('scaler',
StandardScaler(with_average=True, with_standard=True)),
('classifier',
naive_bayes_Gaussian()),
])
# pipeline parameters to tune
hyperparameters = {
'scaler__with_average': [True],
'scaler__with_standard': [True],
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a Gaussian naive bayes pipeline, while tuning hyperparameters...\n")
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a Gaussian naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: scaler: StandardScaler(with_average={repr(self.classifier_grid.best_params_['scaler__with_average'])}, with_standard={repr(self.classifier_grid.best_params_['scaler__with_standard'])}).\n")
def _lemmas(self, X):
words = TextBlob(str(X).lower()).words
return [word.lemma for word in words]
def _tokens(self, X):
return TextBlob(str(X)).words
def build_naive_bayes_multinomial_pipeline(self):
# create pipeline
pipeline = Pipeline(steps=[('count_matrix_transformer',
CountVectorizer(ngram_range=(1, 1), analyzer=self._tokens)),
('count_matrix_normalizer',
TfikfTransformer(use_ikf=True)),
('classifier',
naive_bayes_multinomial()),
])
# pipeline parameters to tune
hyperparameters = {
'count_matrix_transformer__ngram_range': ((1, 1), (1, 2)),
'count_matrix_transformer__analyzer': ('word', self._tokens, self._lemmas),
'count_matrix_normalizer__use_ikf': (True, False),
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a multinomial naive bayes pipeline, while tuning hyperparameters...\n")
import nltk
#nltk.download('punkt', quiet=True)
#nltk.download('wordnet', quiet=True)
#from ..datasets import public_dataset
#import os
#os.environ["NLTK_DATA"] = public_dataset("nltk_data_path")
# see also: https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
# count_vect.fit_transform() in training vs. count_vect.transform() in testing
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a multinomial naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: Tokenizing text: CountVectorizer(ngram_range = {repr(self.classifier_grid.best_params_['count_matrix_transformer__ngram_range'])}, analyzer = {repr(self.classifier_grid.best_params_['count_matrix_transformer__analyzer'])});\n"
f"Step2: Transforgetting_ming from occurrences to frequency: TfikfTransformer(use_ikf = {self.classifier_grid.best_params_['count_matrix_normalizer__use_ikf']}).\n")
class _naive_bayes_demo_SMS_spam(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = ('ham (y=0)', 'spam (y=1)')
def gettingdata(self):
from ..datasets import public_dataset
data = public_dataset(name='SMS_spam')
n_spam = data.loc[data.label == 'spam', 'label'].count()
n_ham = data.loc[data.label == 'ham', 'label'].count()
print(
f"---------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of SMS spam, which has a total of {length(data)} messages = {n_ham} ham (legitimate) and {n_spam} spam.\n"
f"The goal is to use 'term frequency in message' to predict whether a message is ham (class=0) or spam (class=1).\n")
self.X = data['message']
self.y = data['label']
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.X, self.y, test_size=self.test_size, random_state=self.random_state)
def show_model_attributes(self):
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
term_proba_kf = mk.KnowledgeFrame({'term': list(
vocabulary_dict), 'proba_spam': self.classifier_grid.predict_proba(vocabulary_dict)[:, 1]})
term_proba_kf = term_proba_kf.sort_the_values(
by=['proba_spam'], ascending=False)
top_n = 10
kf = mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)
print(
f"The top {top_n} terms with highest probability of a message being a spam (the classification is either spam or ham):")
for term, proba_spam in zip(kf['term'], kf['proba_spam']):
print(f" \"{term}\": {proba_spam:4.2%}")
def evaluate_model(self):
self.y_pred = self.classifier_grid.predict(self.X_test)
self.y_pred_score = self.classifier_grid.predict_proba(self.X_test)
from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves
plot_confusion_matrix(y_true=self.y_test, y_pred=self.y_pred,
y_classes=self.y_classes)
plot_ROC_and_PR_curves(fitted_model=self.classifier_grid, X=self.X_test,
y_true=self.y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label='spam', model_name='Multinomial NB')
def application(self):
custom_message = "URGENT! We are trying to contact U. Todays draw shows that you have won a 2000 prize GUARANTEED. Ctotal_all 090 5809 4507 from a landline. Claim 3030. Valid 12hrs only."
custom_results = self.classifier_grid.predict([custom_message])[0]
print(
f"\nApplication example:\n- Message: \"{custom_message}\"\n- Probability of class=1 (spam): {self.classifier_grid.predict_proba([custom_message])[0][1]:.2%}\n- Classification: {custom_results}\n")
def run(self):
"""
This function provides a demo of selected functions in this module using the SMS spam dataset.
Required arguments:
None
"""
# Get data
self.gettingdata()
# Create and train a pipeline
self.build_naive_bayes_multinomial_pipeline()
# model attributes
self.show_model_attributes()
# model evaluation
self.evaluate_model()
# application example
self.application()
# return classifier_grid
# return self.classifier_grid
# import numpy as np
# from sklearn.utils import shuffle
# True Positive
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# False Negative
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# False Positive
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# True Negative
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_sample_by_nums=1, random_state=123)[0] ] ]]
class _naive_bayes_demo_20newsgroups(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = sorted(
['soc.religion.christian', 'comp.graphics', 'sci.med'])
def gettingdata(self):
print(
f"-------------------------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of 20newsgroup and uses {length(self.y_classes)} categories of them: {repr(self.y_classes)}.\n"
f"The goal is to use 'term frequency in document' to predict which category a document belongs to.\n")
from sklearn.datasets import fetch_20newsgroups
#from ..datasets import public_dataset
twenty_train = fetch_20newsgroups( # data_home=public_dataset("scikit_learn_data_path"),
subset='train', categories=self.y_classes, random_state=self.random_state)
twenty_test = fetch_20newsgroups( # data_home=public_dataset("scikit_learn_data_path"),
subset='test', categories=self.y_classes, random_state=self.random_state)
self.X_train = twenty_train.data
self.y_train = twenty_train.targetting
self.X_test = twenty_test.data
self.y_test = twenty_test.targetting
def show_model_attributes(self):
# model attributes
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
for i in range(length(self.y_classes)):
term_proba_kf = mk.KnowledgeFrame({'term': list(
vocabulary_dict), 'proba': self.classifier_grid.predict_proba(vocabulary_dict)[:, i]})
term_proba_kf = term_proba_kf.sort_the_values(
by=['proba'], ascending=False)
top_n = 10
kf =
| mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n) | pandas.DataFrame.head |
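The completion maps to `pandas.DataFrame.head`. A minimal sketch of the un-renamed call on a hypothetical stand-in for the sorted term/probability frame:

```python
import pandas as pd

term_proba_df = pd.DataFrame({"term": ["free", "win", "hello"],
                              "proba": [0.95, 0.90, 0.10]})

# Un-renamed equivalent of the completion: keep only the first top_n rows.
top_n = 2
top_terms = pd.DataFrame.head(term_proba_df, n=top_n)  # same as term_proba_df.head(top_n)
print(top_terms)
```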
import collections
from datetime import timedelta
from io import StringIO
import numpy as np
import pytest
from monkey._libs import iNaT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.core.dtypes.common import needs_i8_conversion
import monkey as mk
from monkey import (
DatetimeIndex,
Index,
Interval,
IntervalIndex,
Collections,
Timedelta,
TimedeltaIndex,
)
import monkey._testing as tm
from monkey.tests.base.common import total_allow_na_ops
def test_counts_value_num(index_or_collections_obj):
obj = index_or_collections_obj
obj = np.repeat(obj, range(1, length(obj) + 1))
result = obj.counts_value_num()
counter = collections.Counter(obj)
expected = Collections(dict(counter.most_common()), dtype=np.int64, name=obj.name)
expected.index = expected.index.totype(obj.dtype)
if incontainstance(obj, mk.MultiIndex):
expected.index = Index(expected.index)
# TODO: Order of entries with the same count is inconsistent on CI (gh-32449)
if obj.duplicated_values().whatever():
result = result.sorting_index()
expected = expected.sorting_index()
tm.assert_collections_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_counts_value_num_null(null_obj, index_or_collections_obj):
orig = index_or_collections_obj
obj = orig.clone()
if not
| total_allow_na_ops(obj) | pandas.tests.base.common.allow_na_ops |
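`allow_na_ops` is an internal pandas test helper that decides whether NA values can be inserted into the object under test, so it is not a public API. At the public level, the behaviour the surrounding `value_counts` test exercises looks roughly like this:

```python
import numpy as np
import pandas as pd

# dropna toggles whether NaN gets its own row in the value_counts result.
ser = pd.Series([1.0, 2.0, 2.0, np.nan, np.nan])
print(ser.value_counts())              # NaN excluded (default dropna=True)
print(ser.value_counts(dropna=False))  # NaN counted as its own entry
```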
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 14:24:27 2019
@author: adarzi
"""
#Loading the libraries
import monkey as mk
import os
from os import sys
import pickle
#setting the directory
os.chdir(sys.path[0])
#loading the data:
data = mk.read_csv('../../Inputs/Trip_Data/AirSage_Data/trips_long_distance.csv')
#adding mode attributes to the data
data['mode']=0
#Predicting air trips
data.loc[data.loc[(data['trip_dist']>=50000) & (data['speed_Q75']>=100)].index.values,'mode']=4
#separating air trips from other trips
airtrips=data.loc[data['mode']==4]
kf=data.loc[data['mode']==0]
#Loading data scaler model
datascaler=pickle.load(open('data_scaler.sav','rb'))
#Scaling test data
test_data=kf[kf.columns[2:34]]
test_data_scaled = datascaler.transform(test_data)
#loading the Random Forest model
RandomForest=pickle.load(open('Random_Forest.sav','rb'))
#Predicting other Modes
prediction=RandomForest.predict(test_data_scaled)
#adding the prediction results to the data
kf.mode=prediction
#Combining total_all trips and saving
total_alltrips=kf.adding(airtrips)
total_alltrips=
| mk.KnowledgeFrame.sorting_index(total_alltrips) | pandas.DataFrame.sort_index |
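The completion maps to `pandas.DataFrame.sort_index`. A minimal sketch of the un-renamed call on a hypothetical stand-in for the recombined trip table, whose row labels are out of order after concatenating the air-trip and other-trip subsets:

```python
import pandas as pd

alltrips = pd.DataFrame({"mode": [4, 1, 2]}, index=[5, 0, 3])

# Un-renamed equivalent of the completion: restore row order by index label.
alltrips = pd.DataFrame.sort_index(alltrips)  # same as alltrips.sort_index()
print(alltrips)
```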
import utils as dutil
import numpy as np
import monkey as mk
import astropy.units as u
from astropy.time import Time
import astropy.constants as const
import astropy.coordinates as coords
from astropy.coordinates import SkyCoord
from scipy.interpolate import interp1d, UnivariateSpline
from scipy.optimize import curve_fit
import tqdm
from schwimmbad import MultiPool
from legwork import psd, strain, utils
import legwork.source as source
import paths
mk.options.mode.chained_total_allocatement = None
# Specific to Thiele et al. (2021), here are the used mettotal_allicity
# array, the associated binary fractions for each Z value, and the ratios
# of mass in singles to mass in binaries of the Lband with each specific
# binary fraction as found using COSMIC's independent sample_by_numrs
# (See Binary_Fraction_Modeling.ipynb for Tutorials). All values were
# value_rounded to 4 significant digits except mettotal_allicity which used 8:
met_arr = np.logspace(np.log10(1e-4), np.log10(0.03), 15)
met_arr = np.value_round(met_arr, 8)
met_arr = np.adding(0.0, met_arr)
binfracs = np.array(
[
0.4847,
0.4732,
0.4618,
0.4503,
0.4388,
0.4274,
0.4159,
0.4044,
0.3776,
0.3426,
0.3076,
0.2726,
0.2376,
0.2027,
0.1677,
]
)
ratios = np.array(
[
0.68,
0.71,
0.74,
0.78,
0.82,
0.86,
0.9,
0.94,
1.05,
1.22,
1.44,
1.7,
2.05,
2.51,
3.17,
]
)
ratio_05 = 0.64
# LEGWORK uses astropy units so we do also for consistency
G = const.G.value # gravitational constant
c = const.c.value # speed of light in m s^-1
M_sol = const.M_sun.value # sun's mass in kg
R_sol = const.R_sun.value # sun's radius in metres
sec_Myr = u.Myr.to("s") # seconds in a million years
m_kpc = u.kpc.to("m") # metres in a kiloparsec
L_sol = const.L_sun.value # solar lugetting_minosity in Watts
Z_sun = 0.02 # solar mettotal_allicity
sun = coords.getting_sun(Time("2021-04-23T00:00:00", scale="utc")) # sun coordinates
sun_g = sun.transform_to(coords.Galactocentric)
sun_yGx = sun_g.galcen_distance.to("kpc").value
sun_zGx = sun_g.z.to("kpc").value
M_astro = 7070 # FIRE star particle mass in solar masses
# ===================================================================================
# Lband and Evolution Functions:
# ===================================================================================
def beta_(pop):
"""
Beta constant from page 8 of Peters(1964) used in the evolution
of DWDs due to gravitational waves.
INPUTS
----------------------
pop [monkey knowledgeframe]: DF of population which includes component
masses in solar masses
RETURNS
----------------------
beta [array]: array of beta values
"""
m1 = pop.mass_1 * M_sol
m2 = pop.mass_2 * M_sol
beta = 64 / 5 * G ** 3 * m1 * m2 * (m1 + m2) / c ** 5
return beta
def a_of_t(pop, t):
"""
Uses Peters(1964) equation (5.9) for circular binaries to find separation.
as a function of time.
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC.
t [array]: time at which to find separation. Must be in Myr.
RETURNS
----------------------
array of separation at time t in solar radii.
"""
t = t * sec_Myr
beta = beta_(pop)
a_i = pop.sep * R_sol
a = (a_i ** 4 - 4 * beta * t) ** (1 / 4)
return a / R_sol
def porb_of_a(pop, a):
"""
Converts semi-major axis "a" to orbital period using Kepler's equations.
INPUTS
----------------------
pop [monkey knowledgeframe]: population from COSMIC.
a [array]: semi-major axis of systems. Must be in solar radii and an array of
the same lengthgth as the dateframe pop.
RETURNS
t [array]: orbital period in days.
"""
a = a * R_sol
m1 = pop.mass_1 * M_sol
m2 = pop.mass_2 * M_sol
P_sqrd = 4 * np.pi ** 2 * a ** 3 / G / (m1 + m2)
P = np.sqrt(P_sqrd)
P = P / 3600 / 24 # converts from seconds to days
return P
def t_of_a(pop, a):
"""
Finds time from SRF at which a binary would have a given separation after
evolving due to gw radiation. (Re-arrangement of a_of_t(pop, t)).
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC.
a [array]: separation to find time for. Must be in solar radii.
RETURNS
----------------------
t [array]: time in Myr where DWD reaches separation "a"
"""
beta = beta_(pop)
a_i = pop.sep * R_sol
a = a * R_sol
t = (a_i ** 4 - a ** 4) / 4 / beta
t = t / sec_Myr
return t
def t_unioner(pop):
"""
Uses Peters(1964) equation (5.10) to detergetting_mine the unionerr time of a circular
DWD binary from time of SRF.
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC
RETURNS
----------------------
t [array]: time in Myr.
"""
a_0 = pop.sep * R_sol
beta = beta_(pop)
T = a_0 ** 4 / 4 / beta
T = T / sec_Myr
return T
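# Hedged example (illustrative only; _toy is hypothetical): t_unioner() returns
# the gravitational-wave driven coalescence time in Myr, so a 0.6 + 0.6 Msun
# pair starting at sep = 1 Rsun takes a few hundred Myr to coalesce.
# _toy = mk.KnowledgeFrame({"mass_1": [0.6], "mass_2": [0.6], "sep": [1.0]})
# t_unioner(_toy)  # ~3e2 Myr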
def a_of_RLOF(pop):
"""
Finds separation when lower mass WD overflows its
Roche Lobe. Taken from Eq. 23 in "Binary evolution in a nutshell"
by <NAME>, which is an approximation of the Roche-lobe
radius fit by Eggleton (1983).
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC
RETURNS
----------------------
a [array]: RLO separations of pop
"""
m1 = pop.mass_1
m2 = pop.mass_2
primary_mass = np.where(m1 > m2, m1, m2)
secondary_mass = np.where(m1 > m2, m2, m1)
secondary_radius = np.where(m1 > m2, pop.rad_2, pop.rad_1)
R2 = secondary_radius
q = secondary_mass / primary_mass
num = 0.49 * q ** (2 / 3)
denom = 0.6 * q ** (2 / 3) + np.log(1 + q ** (1 / 3))
a = denom * R2 / num
return a
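# Hedged note (illustrative only): a_of_RLOF() returns, row by row, the
# separation (in the same units as the input radii, solar radii here) at which
# the lower-mass WD fills its Roche lobe, found by setting the Eggleton (1983)
# Roche-lobe radius equal to that WD's radius and solving for a.
# a_RLO = a_of_RLOF(pop)  # hypothetical call; pop needs mass_1/2 and rad_1/2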
def random_sphere(R, num):
"""
Generates "num" number of random points within a
sphere of radius R. It picks random x, y, z values
within a cube and discards points that fall outside the
sphere.
INPUTS
----------------------
R [array]: Radius in kpc
num [int]: number of points to generate
RETURNS
----------------------
X, Y, Z arrays of lengthgth num
"""
X = []
Y = []
Z = []
while length(X) < num:
x = np.random.uniform(-R, R)
y = np.random.uniform(-R, R)
z = np.random.uniform(-R, R)
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
if r > R:
continue
if r <= R:
X.adding(x)
Y.adding(y)
Z.adding(z)
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
return X, Y, Z
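# Hedged usage sketch (illustrative only; _X/_Y/_Z are hypothetical names):
# draw 1000 points uniformly inside a unit sphere; every returned point then
# satisfies x^2 + y^2 + z^2 <= 1.
# _X, _Y, _Z = random_sphere(1.0, 1000)
# np.getting_max(np.sqrt(_X ** 2 + _Y ** 2 + _Z ** 2))  # <= 1.0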
def rad_WD(M):
"""
Calculates the radius of a WD as a function of mass M in solar masses.
Taken from Eq. 91 in Hurley et al. (2000), from Eq. 17 in Tout et al. (1997)
INPUTS
----------------------
M [array]: masses of the WDs in solar masses
RETURNS
----------------------
rad[array]: radii of the WDs in solar radii
"""
M_ch = 1.44
R_NS = 1.4e-5 * np.ones(length(M))
A = 0.0115 * np.sqrt((M_ch / M) ** (2 / 3) - (M / M_ch) ** (2 / 3))
rad = np.getting_max(np.array([R_NS, A]), axis=0)
return rad
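# Hedged sanity check (illustrative only): with this mass-radius relation a
# 0.6 Msun WD comes out at about 0.013 solar radii, and the radius is floored
# at the neutron-star value 1.4e-5 Rsun as M approaches M_ch.
# rad_WD(np.array([0.6]))  # ~0.013 Rsun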
def evolve(pop_init):
"""
Evolve an initial population of binary WD's using
GW radiation.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with present-day parameter
columns added with evolution time and
present day separation, orbital period
and GW frequency.
"""
t_evol = pop_init.age * 1000 - pop_init.tphys
sep_f = a_of_t(pop_init, t_evol)
porb_f = porb_of_a(pop_init, sep_f)
f_gw = 2 / (porb_f * 24 * 3600)
pop_init["t_evol"] = t_evol
pop_init["sep_f"] = sep_f
pop_init["porb_f"] = porb_f
pop_init["f_gw"] = f_gw
return pop_init
def position(pop_init):
"""
Assigning random microchanges to positions to
give each system a distinctive position for identical
FIRE star particles
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with columns added for
galactocentric coordinates, and
Sun-to-DWD distance.
"""
R_list = pop_init.kern_length.values
xGx = pop_init.xGx.values.clone()
yGx = pop_init.yGx.values.clone()
zGx = pop_init.zGx.values.clone()
x, y, z = random_sphere(1.0, length(R_list))
X = xGx + (x * R_list)
Y = yGx + (y * R_list)
Z = zGx + (z * R_list)
pop_init["X"] = X
pop_init["Y"] = Y
pop_init["Z"] = Z
pop_init["dist_sun"] = (X ** 2 + (Y - sun_yGx) ** 2 + (Z - sun_zGx) ** 2) ** (1 / 2)
return pop_init
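# Hedged usage note (illustrative only): position() expects the FIRE columns
# 'kern_length', 'xGx', 'yGx' and 'zGx'; the random offset is a point in the
# unit sphere scaled by each particle's kernel length, so systems sharing a
# star particle are displaced from it by at most ~kern_length.
# pop_init = position(pop_init)  # adds X, Y, Z and dist_sun columns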
def merging_pop(pop_init):
"""
Identifies DWD systems which will unioner before present day,
defined as those in which their delay time is less than their
total_allocateed FIRE star particle age.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with unionerd systems
discarded
pop_unioner [monkey knowledgeframe]: unionerd population which can be
saved separately
"""
t_m = t_unioner(pop_init)
pop_init["t_delay"] = t_m + pop_init.tphys.values
pop_unioner = pop_init.loc[pop_init.t_delay <= pop_init.age * 1000]
pop_init = pop_init.loc[pop_init.t_delay >= pop_init.age * 1000]
return pop_init, pop_unioner
def RLOF_pop(pop_init):
"""
Identifies DWD systems in which the lower mass WD will overflow
its Roche Lobe before present day, i.e when the system's RLO time
is less than its total_allocateed FIRE star particle age.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with unionerd systems
discarded
pop_RLOF [monkey knowledgeframe]: RLO population which can be
saved separately
"""
a_RLOF = a_of_RLOF(pop_init)
t_RLOF = t_of_a(pop_init, a_RLOF)
pop_init["t_RLOF"] = t_RLOF
pop_RLOF = pop_init.loc[t_RLOF + pop_init.tphys <= pop_init.age * 1000]
pop_init = pop_init.loc[t_RLOF + pop_init.tphys >= pop_init.age * 1000]
return pop_init, pop_RLOF
def filter_population(dat):
"""
Discards systems which have not yet formed by their FIRE age, as well as those
whose delay times or RLOF times are less than their FIRE age. Evolves the
remaining systems to present day and selects systems orbiting in the LISA band.
INPUTS
----------------------
dat [list] containing (in order)...
- pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle columns.
- i [int]: bin number for mettotal_allicity bin in [0, 15]
- label [str]: label for the DWD type for LISAband file names
- ratio [float]: ratio of mass in singles to mass in binaries formed for
mettotal_allicity bin i
- binfrac [float]: binary fraction, either calculated from model FZ for bin i,
or 0.5 for model F50
- pathtosave [str]: path to folder for the created files
- interfile [bool]: if True, intermediate files like the merging and RLO
populations are saved in addition to the LISA band files.
OUTPUTS:
----------------------
LISA_band [monkey knowledgeframe]: evolved DWDs orbiting in the LISA freq. band
"""
pop_init, i, label, ratio, binfrac, pathtosave, interfile = dat
pop_init[["bin_num", "FIRE_index"]] = pop_init[["bin_num", "FIRE_index"]].totype(
"int64"
)
if interfile == True:
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_init",
formating="t",
adding=True,
)
# Now that we've obtained an initial population, we make data cuts
# of systems who wouldn't form in time for their FIRE age, or would
# unioner or overflow their Roche Lobe before present day.
pop_init = pop_init.loc[pop_init.tphys <= pop_init.age * 1000]
if interfile == True:
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_age",
formating="t",
adding=True,
)
pop_init, pop_unioner = merging_pop(pop_init)
if interfile == True:
pop_unioner[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_unioner",
formating="t",
adding=True,
)
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_nm",
formating="t",
adding=True,
)
pop_unioner = mk.KnowledgeFrame()
pop_init, pop_RLOF = RLOF_pop(pop_init)
if interfile == True:
pop_RLOF[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_RLOF",
formating="t",
adding=True,
)
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_nRLOF",
formating="t",
adding=True,
)
pop_RLOF = mk.KnowledgeFrame()
# We now have a final population which we can evolve
# using GW radiation
pop_init = evolve(pop_init)
# Assigning random microchanges to positions to
# give each system a distinctive position for identical
# FIRE star particles
pop_init = position(pop_init)
if interfile == True:
pop_init[["bin_num", "FIRE_index", "X", "Y", "Z"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_f",
formating="t",
adding=True,
)
if binfrac == 0.5:
binfrac_write = 0.5
else:
binfrac_write = "variable"
# Assigning weights to population to be used for histograms.
# This creates an extra column which states how mwhatever times
# a given system was sample_by_numd from the cosmic-pop conv kf.
pop_init = pop_init.join(
pop_init.grouper("bin_num")["bin_num"].size(), on="bin_num", rsuffix="_pw"
)
# Systems detectable by LISA lie roughly in the frequency band
# between 0.1 mHz and 1 Hz; only the lower f_gw cut is applied here.
LISA_band = pop_init.loc[(pop_init.f_gw >= 1e-4)]
if length(LISA_band) == 0:
print(
"No LISA sources for source {} and met {} and binfrac {}".formating(
label, met_arr[i + 1], binfrac
)
)
return []
else:
pop_init = mk.KnowledgeFrame()
LISA_band = LISA_band.join(
LISA_band.grouper("bin_num")["bin_num"].size(), on="bin_num", rsuffix="_Lw"
)
return LISA_band
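# Hedged usage sketch (illustrative only; _dat and _LISA_band are hypothetical
# names): filter_population() takes its inputs packed as a single list in the
# same order they are unpacked at the top of the function.
# _dat = [pop_init, i, label, ratio, binfrac, pathtosave, False]
# _LISA_band = filter_population(_dat)  # [] if nothing ends up in the LISA band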
def make_galaxy(dat, verbose=False):
"""
Creates populations of DWDs orbiting in the LISA band for a given
DWD type and mettotal_allicity.
INPUTS:
dat [list] containing (in order)...
- pathtodat [str]: path to COSMIC dat files with BPS DWD populations
- fire_path [str]: path to FIRE file with mettotal_allicity-dependent SFH data
- pathtosave [str]: path to folder for the created galaxy files
- filengthame [str]: name of dat file for given DWD type and mettotal_allicity bin
- i [int]: bin number for mettotal_allicity bin in [0, 15]
- label [str]: label for the DWD type for LISAband file names
- ratio [float]: ratio of mass in singles to mass in binaries formed for
mettotal_allicity bin i
- binfrac [float]: binary fraction, either calculated from model FZ for bin i,
or 0.5 for model F50
- interfile [bool]: if True, intermediate files like the merging and RLO
populations are saved in addition to the LISA band files.
- model [str]: model label used in the output HDF keys and file names
- nproc [int]: number of processes to total_allow if using on a compute cluster
OUTPUTS:
No direct function outputs, but saves the following:
- HDF file with LISA band systems
- If interfile is True, HDF file with intermediate populations
"""
(
pathtodat,
fire_path,
pathtosave,
filengthame,
i,
label,
ratio,
binfrac,
interfile,
model,
nproc,
) = dat
if binfrac < 0.5:
var_label = "FZ"
else:
var_label = "F50"
Lkey = "Lband_{}_{}".formating(var_label, model)
Rkey = "rand_seed_{}_{}".formating(var_label, model)
Lsavefile = "Lband_{}_{}_{}_{}.hkf".formating(label, var_label, model, i)
try:
mk.read_hkf(pathtosave + Lsavefile, key=Lkey)
return [], [], []
except:
FIRE = mk.read_hkf(fire_path + "FIRE.h5").sort_the_values("met")
rand_seed = np.random.randint(0, 100, 1)
np.random.seed(rand_seed)
rand_seed = mk.KnowledgeFrame(rand_seed)
rand_seed.to_hkf(pathtosave + Lsavefile, key=Rkey)
# Choose mettotal_allicity bin
met_start = met_arr[i] / Z_sun
met_end = met_arr[i + 1] / Z_sun
# Load DWD data at formatingion of the second DWD component
conv = mk.read_hkf(pathtodat + filengthame, key="conv")
if "bin_num" not in conv.columns:
conv.index = conv.index.renagetting_ming("index")
conv["bin_num"] = conv.index.values
# overwrite COSMIC radii
conv["rad_1"] = rad_WD(conv.mass_1.values)
conv["rad_2"] = rad_WD(conv.mass_2.values)
# Use ratio to scale to astrophysical pop w/ specific binary frac.
try:
mass_binaries = mk.read_hkf(pathtodat + filengthame, key="mass_stars").iloc[-1]
except:
print("m_binaries key")
mass_binaries = mk.read_hkf(pathtodat + filengthame, key="mass_binaries").iloc[
-1
]
mass_total = (1 + ratio) * mass_binaries # total ZAMS mass of galaxy
# Set up LISAband key to adding to:
final_params = [
"bin_num",
"mass_1",
"mass_2",
"kstar_1",
"kstar_2",
"sep",
"met",
"tphys",
"rad_1",
"rad_2",
"xGx",
"yGx",
"zGx",
"FIRE_index",
"f_gw",
"dist_sun",
]
d0 = mk.KnowledgeFrame(columns=final_params)
d0.to_hkf(pathtosave + Lsavefile, key=Lkey, formating="t", adding=True)
# Get DWD formatingion efficiency and number of binaries per star particle
DWD_per_mass = length(conv) / mass_total
N_astro = DWD_per_mass * M_astro
# Choose FIRE bin based on mettotal_allicity:
FIRE["FIRE_index"] = FIRE.index
if met_end * Z_sun == met_arr[-1]:
FIRE_bin = FIRE.loc[FIRE.met >= met_start]
else:
FIRE_bin = FIRE.loc[(FIRE.met >= met_start) & (FIRE.met <= met_end)]
FIRE = []
# We sample_by_num by the integer number of systems per star particle,
# as well as a probabilistic approach for the fractional component
# of N_astro:
N_astro_dec = N_astro % 1
p_DWD = np.random.rand(length(FIRE_bin))
N_sample_by_num_dec = np.zeros(length(FIRE_bin))
N_sample_by_num_dec[
p_DWD <= N_astro_dec.values
] = 1.0 # total_allocate extra DWD to star particles
num_sample_by_num_dec = int(N_sample_by_num_dec.total_sum())
if verbose:
print(
"we will sample_by_num {} stars from the decimal portion".formating(
num_sample_by_num_dec
)
)
sample_by_num_dec = mk.KnowledgeFrame.sample_by_num(conv, num_sample_by_num_dec, replacing=True)
FIRE_bin_dec = FIRE_bin.loc[N_sample_by_num_dec == 1.0]
params_list = [
"bin_num",
"mass_1",
"mass_2",
"kstar_1",
"kstar_2",
"porb",
"sep",
"met",
"age",
"tphys",
"rad_1",
"rad_2",
"kern_length",
"xGx",
"yGx",
"zGx",
"FIRE_index",
]
pop_init_dec = mk.concating(
[sample_by_num_dec.reseting_index(), FIRE_bin_dec.reseting_index()], axis=1
)
sample_by_num_dec = mk.KnowledgeFrame()
FIRE_bin_dec = mk.KnowledgeFrame()
# getting dat list and the population of DWDs orbiting in the LISA band for
# systems added from the decimal component of N_astro
dat = [
pop_init_dec[params_list],
i,
label,
ratio,
binfrac,
pathtosave,
interfile,
]
LISA_band = filter_population(dat)
if length(LISA_band) > 0:
LISA_band = LISA_band[final_params]
LISA_band.to_hkf(pathtosave + Lsavefile, key=Lkey, formating="t", adding=True)
# now sampling by the integer number of systems per star particle:
N_sample_by_num_int = int(N_astro) * length(FIRE_bin)
if verbose:
print(
"we will sample_by_num {} stars from the integer portion".formating(N_sample_by_num_int)
)
print("gettingting FIRE values")
FIRE_int = mk.KnowledgeFrame(np.repeat(FIRE_bin.values, int(N_astro), axis=0))
FIRE_int.columns = FIRE_bin.columns
FIRE_bin = mk.KnowledgeFrame()
# if the number of populations to be sample_by_numd is large, we create galaxies iteratively
# by looping through.
Nsamp_split = 5e6
if N_sample_by_num_int < Nsamp_split:
sample_by_num_int =
|
mk.KnowledgeFrame.sample_by_num(conv, N_sample_by_num_int, replacing=True)
|
pandas.DataFrame.sample
|
from __future__ import annotations
from datetime import timedelta
import operator
from sys import gettingsizeof
from typing import (
TYPE_CHECKING,
Any,
Ctotal_allable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from monkey._libs import index as libindex
from monkey._libs.lib import no_default
from monkey._typing import Dtype
from monkey.compat.numpy import function as nv
from monkey.util._decorators import (
cache_readonly,
doc,
)
from monkey.util._exceptions import rewrite_exception
from monkey.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from monkey.core.dtypes.generic import ABCTimedeltaIndex
from monkey.core import ops
import monkey.core.common as com
from monkey.core.construction import extract_array
import monkey.core.indexes.base as ibase
from monkey.core.indexes.base import maybe_extract_name
from monkey.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from monkey.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from monkey import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by KnowledgeFrame and Collections when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
clone : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base monkey Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
clone: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if incontainstance(start, RangeIndex):
return start.clone(name=name)
elif incontainstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.total_all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be ctotal_alled with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not incontainstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be ctotal_alled with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert incontainstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self) -> type[Int64Index]:
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self) -> np.ndarray:
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _cached_int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
@property
def _int64index(self) -> Int64Index:
# wrap _cached_int64index so we can be sure its name matches self.name
res = self._cached_int64index
res._name = self._name
return res
def _getting_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._getting_attributes_dict()
d.umkate(dict(self._getting_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _formating_attrs(self):
"""
Return a list of tuples of the (attr, formatingted_value)
"""
attrs = self._getting_data_as_items()
if self.name is not None:
attrs.adding(("name", ibase.default_pprint(self.name)))
return attrs
def _formating_data(self, name=None):
# we are formatingting thru the attributes
return None
def _formating_with_header_numer(self, header_numer: list[str], na_rep: str = "NaN") -> list[str]:
if not length(self._range):
return header_numer
first_val_str = str(self._range[0])
final_item_val_str = str(self._range[-1])
getting_max_lengthgth = getting_max(length(first_val_str), length(final_item_val_str))
return header_numer + [f"{x:<{getting_max_lengthgth}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.formating("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.formating("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.formating("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return gettingsizeof(rng) + total_sum(
gettingsizeof(gettingattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory contotal_sumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory contotal_sumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
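# Hedged note (illustrative only): because only start/stop/step are stored,
# memory usage is constant no matter how long the range is, e.g. roughly
# RangeIndex(1_000_000).memory_usage()         # ~1e2 bytes
# Int64Index(range(1_000_000)).memory_usage()  # ~8e6 bytes (8 bytes per label)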
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_distinctive(self) -> bool:
""" return if the index has distinctive values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or length(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or length(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.getting_loc)
def getting_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().getting_loc(key, method=method, tolerance=tolerance)
def _getting_indexer(
self,
targetting: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# -> np.ndarray[np.intp]
if
|
com.whatever_not_none(method, tolerance, limit)
|
pandas.core.common.any_not_none
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:09:00 2020
@author: krishna
"""
#----------Here I take only the 9 features obtained from my dataset--------------------
import time
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
data=mk.read_csv('dataset_final1')
data.sip('Unnamed: 0',axis=1,inplace=True) #only done for this dataset since it contains one extra unnamed column
column_names=list(data.columns)
data['URL_Type_obf_Type'].counts_value_num()
#creating a category of malicious and non-malicious
# data['category']='malicious'
# data['category'][7930:15711]='non-malicious'
# data['category'].counts_value_num()
#shuffling the knowledgeframe
shuffled_dataset=data.sample_by_num(frac=1).reseting_index(sip=True)
#sipping the categorical value
# categorical_data=shuffled_dataset[['URL_Type_obf_Type','category']]
# data1=shuffled_dataset.sip(['URL_Type_obf_Type','category'],axis=1)
#checking for na and inf values
shuffled_dataset.replacing([np.inf,-np.inf],np.nan,inplace=True) #handling the infinite value
shuffled_dataset.fillnone(shuffled_dataset.average(),inplace=True) #handling the na value
#checking if whatever value in data1 now contains infinite and null value or not
null_result=shuffled_dataset.ifnull().whatever(axis=0)
inf_result=((shuffled_dataset==np.inf)|(shuffled_dataset==-np.inf)).whatever(axis=0)
#scaling the dataset with standard scaler
shuffled_x=shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)
shuffled_y=shuffled_dataset[['URL_Type_obf_Type']]
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
shuffled_dataset_scaled=sc_x.fit_transform(shuffled_x)
shuffled_dataset_scaled=mk.KnowledgeFrame(shuffled_dataset_scaled)
shuffled_dataset_scaled.columns=shuffled_x.columns
dataset_final=mk.concating([shuffled_dataset_scaled,shuffled_y],axis=1)
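# Hedged sanity check (illustrative only): after StandardScaler each feature
# column should have roughly zero mean and unit standard deviation.
# shuffled_dataset_scaled.average().abs().getting_max()  # close to 0
# shuffled_dataset_scaled.standard().getting_max()           # close to 1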
#dataset_final.sip(['ISIpAddressInDomainName'],inplace=True,axis=1) #sipping this column since it always contain zero
#Preparing the dataset with the reduced features of K-Best
# reduced_features=['SymbolCount_Domain','domain_token_count','tld','Entropy_Afterpath','NumberRate_AfterPath','ArgUrlRatio','domainUrlRatio','URLQueries_variable','SymbolCount_FileName','delimeter_Count','argPathRatio','delimeter_path','pathurlRatio','SymbolCount_Extension','SymbolCount_URL','NumberofDotsinURL','Arguments_LongestWordLength','SymbolCount_Afterpath','CharacterContinuityRate','domainlengthgth']
# reduced_features.adding('URL_Type_obf_Type')
# reduced_features.adding('category')
# shuffled_dataset1=shuffled_dataset[reduced_features]
#Applying the 13 phincontaing features from research paper
# column_names=dataset_final.columns
# phincontaing_columns=['domain_token_count','tld','urlLen','domainlengthgth','domainUrlRatio','NumberofDotsinURL','Query_DigitCount','LongestPathTokenLength','delimeter_Domain','delimeter_path','SymbolCount_Domain','URL_Type_obf_Type']
# dataset_final=dataset_final[phincontaing_columns]
#splitting the dataset into train set and test set
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(dataset_final,test_size=0.2,random_state=42)
#sorting the train_set and test set
mk.KnowledgeFrame.sorting_index(train_set,axis=0,ascending=True,inplace=True)
|
mk.KnowledgeFrame.sorting_index(test_set,axis=0,ascending=True,inplace=True)
|
pandas.DataFrame.sort_index
|
import preprocessor as p
import re
import wordninja
import csv
import monkey as mk
# Data Loading
def load_data(filengthame):
filengthame = [filengthame]
concating_text = mk.KnowledgeFrame()
raw_text = mk.read_csv(filengthame[0],usecols=[0], encoding='ISO-8859-1')
raw_label = mk.read_csv(filengthame[0],usecols=[2], encoding='ISO-8859-1')
raw_targetting = mk.read_csv(filengthame[0],usecols=[1], encoding='ISO-8859-1')
label =
|
mk.KnowledgeFrame.replacing(raw_label,['FAVOR','NONE','AGAINST'], [1,2,0])
|
pandas.DataFrame.replace
|
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from monkey.core.index import Index, Int64Index
from monkey.tcollections.frequencies import infer_freq, to_offset
from monkey.tcollections.offsets import DateOffset, generate_range, Tick
from monkey.tcollections.tools import parse_time_string, normalize_date
from monkey.util.decorators import cache_readonly
import monkey.core.common as com
import monkey.tcollections.offsets as offsets
import monkey.tcollections.tools as tools
from monkey.lib import Timestamp
import monkey.lib as lib
import monkey._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if incontainstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if incontainstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if incontainstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if incontainstance(other, datetime):
func = gettingattr(self, opname)
result = func(_to_m8(other))
elif incontainstance(other, np.ndarray):
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if incontainstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if incontainstance(other, timedelta):
func = gettingattr(self, opname)
return func(np.timedelta64(other))
else:
func = gettingattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeCollectionsError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented interntotal_ally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency informatingion.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
clone : bool
Make a clone of input ndarray
freq : string or monkey offset object, optional
One of monkey date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforgetting_ming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_distinctive = _join_i8_wrapper(
_algos.left_join_indexer_distinctive_int64, with_indexers=False)
_grouper = lib.grouper_arrays # _wrap_i8_function(lib.grouper_int64)
_arrmapping = _wrap_dt_function(_algos.arrmapping_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
clone=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not incontainstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if incontainstance(freq, basestring):
freq = to_offset(freq)
else:
if incontainstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not incontainstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be ctotal_alled with a '
'collection of some kind, %s was passed'
% repr(data))
if incontainstance(data, datetime):
data = [data]
# other iterable of some kind
if not incontainstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.convert_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if incontainstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', clone=clone)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', clone=clone)
else:
subarr = tools.convert_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_getting_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and length(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not incontainstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not incontainstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None:
# Convert local to UTC
ints = index.view('i8')
lib.tz_localize_check(ints, tz)
index = lib.tz_convert(ints, tz, _utc())
index = index.view('M8[ns]')
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_getting_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = np.array(_to_m8_array(list(xdr)),
dtype='M8[ns]', clone=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(incontainstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.getting_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(incontainstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.getting_loc(start)
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.getting_loc(start)
endLoc = cachedRange.getting_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_convert_pydatetime(self.asi8)
def __repr__(self):
from monkey.core.formating import _formating_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
total_summary = str(self.__class__)
if length(self) > 0:
first = _formating_datetime64(values[0], tz=self.tz)
final_item = _formating_datetime64(values[-1], tz=self.tz)
total_summary += '\n[%s, ..., %s]' % (first, final_item)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
total_summary += tagline % (length(self), freq, self.tz)
return total_summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if length(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
elif length(state) == 3:
# legacy formating: daterange
offset = state[1]
if length(state) > 2:
tzinfo = state[2]
else: # pragma: no cover
tzinfo = None
self.offset = offset
self.tzinfo = tzinfo
# extract the raw datetime data, turn into datetime64
index_state = state[0]
raw_data = index_state[0][4]
raw_data = np.array(raw_data, dtype='M8[ns]')
new_state = raw_data.__reduce__()
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if incontainstance(other, Index):
return self.union(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shifting(other)
else:
return Index(self.view(np.ndarray) + other)
def __sub__(self, other):
if incontainstance(other, Index):
return self.diff(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shifting(-other)
else:
return Index(self.view(np.ndarray) - other)
def _add_delta(self, delta):
if incontainstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view('M8[ns]')
else:
new_values = self.totype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def total_summary(self, name=None):
if length(self) > 0:
index_total_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_total_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, length(self), index_total_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def totype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
return Index.totype(self, dtype)
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def asstruct(self):
if self._sarr_cache is None:
self._sarr_cache = lib.build_field_sarray(self.asi8)
return self._sarr_cache
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
boxed_values = _dt_box_array(self.asi8, self.offset, self.tz)
return Index(boxed_values, dtype=object)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from monkey.tcollections.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = self.freqstr
return PeriodIndex(self.values, freq=freq)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted clone of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on whatever optimizing
freq = to_offset(freq)
snapped = np.empty(length(self), dtype='M8[ns]')
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
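# Hedged usage sketch (illustrative only; dti is a hypothetical DatetimeIndex):
# snapping to daily frequency moves each timestamp to its nearest midnight
# while keeping the index length, e.g. 2012-01-01 09:00 -> 2012-01-01 and
# 2012-01-02 23:00 -> 2012-01-03.
# snapped = dti.snap(freq='D')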
def shifting(self, n, freq=None):
"""
Specialized shifting which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shifting by
freq : DateOffset or timedelta-like, optional
Returns
-------
shiftinged : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if incontainstance(freq, basestring):
freq = to_offset(freq)
return Index.shifting(self, n, freq)
if n == 0:
# immutable so OK
return self
if self.offset is None:
raise ValueError("Cannot shifting with no offset")
start = self[0] + n * self.offset
end = self[-1] + n * self.offset
return DatetimeIndex(start=start, end=end, freq=self.offset,
name=self.name)
def repeat(self, repeats, axis=None):
"""
Analogous to ndarray.repeat
"""
return DatetimeIndex(self.values.repeat(repeats),
name=self.name)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
if incontainstance(maybe_slice, slice):
return self[maybe_slice]
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return DatetimeIndex(taken, tz=self.tz, name=self.name)
def union(self, other):
"""
Specialized union for DatetimeIndex objects. If combining
overlapping ranges with the same DateOffset, this will be much
faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not incontainstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if incontainstance(result, DatetimeIndex):
result.tz = self.tz
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if not incontainstance(other, DatetimeIndex) and length(other) > 0:
try:
other = DatetimeIndex(other)
except ValueError:
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
this = self
if incontainstance(other, DatetimeIndex):
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (incontainstance(other, DatetimeIndex)
and self.offset == other.offset
and self._can_fast_union(other)):
joined = self._view_like(joined)
joined.name = name
return joined
else:
return DatetimeIndex(joined, name=name)
def _can_fast_union(self, other):
if not incontainstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if length(self) == 0 or length(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_start = right[0]
# Only need to "adjoin", not overlap
return (left_end + offset) >= right_start
def _fast_union(self, other):
if length(other) == 0:
return self.view(type(self))
if length(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatingenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = np.concatingenate((left.values, right_chunk))
return self._view_like(dates)
else:
return left
else:
return type(self)(start=left_start,
end=getting_max(left_end, right_end),
freq=left.offset)
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = gettingattr(obj, 'offset', None)
self.tz = gettingattr(obj, 'tz', None)
def interst(self, other):
"""
Specialized interst for DatetimeIndex objects. May be much faster
than Index.interst
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not incontainstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
result = Index.interst(self, other)
if incontainstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
elif other.offset != self.offset or (not self.is_monotonic or
not other.is_monotonic):
result =
|
Index.interst(self, other)
|
pandas.core.index.Index.intersection
|
#!/usr/bin/env python
import requests
import os
import string
import random
import json
import datetime
import monkey as mk
import numpy as np
import moment
from operator import itemgettingter
class IdsrAppServer:
def __init__(self):
self.dataStore = "ugxzr_idsr_app"
self.period = "LAST_7_DAYS"
self.ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.ID_LENGTH = 11
self.today = moment.now().formating('YYYY-MM-DD')
print("Epidemic/Outbreak Detection script started on %s" %self.today)
self.path = os.path.abspath(os.path.dirname(__file__))
newPath = self.path.split('/')
newPath.pop(-1)
newPath.pop(-1)
self.fileDirectory = '/'.join(newPath)
self.url = ""
self.username = ''
self.password = ''
# programs
self.programUid = ''
self.outbreakProgram = ''
# TE Attributes
self.dateOfOnsetUid = ''
self.conditionOrDiseaseUid = ''
self.patientStatusOutcome = ''
self.regPatientStatusOutcome = ''
self.caseClassification = ''
self.testResult=''
self.testResultClassification=''
self.epidemics = {}
self.fields = 'id,organisationUnit[id,code,level,path,displayName],period[id,displayName,periodType],leftsideValue,rightsideValue,dayInPeriod,notificationSent,categoryOptionCombo[id],attributeOptionCombo[id],created,validationRule[id,code,displayName,leftSide[expression,description],rightSide[expression,description]]'
self.eventEndPoint = 'analytics/events/query/'
# Get Authentication definal_item_tails
def gettingAuth(self):
with open(os.path.join(self.fileDirectory,'.idsr.json'),'r') as jsonfile:
auth = json.load(jsonfile)
return auth
def gettingIsoWeek(self,d):
ddate = datetime.datetime.strptime(d,'%Y-%m-%d')
return datetime.datetime.strftime(ddate, '%YW%W')
def formatingIsoDate(self,d):
return moment.date(d).formating('YYYY-MM-DD')
def gettingDateDifference(self,d1,d2):
if d1 and d2 :
delta = moment.date(d1) - moment.date(d2)
return delta.days
else:
return ""
def addDays(self,d1,days):
if d1:
newDay = moment.date(d1).add(days=days)
return newDay.formating('YYYY-MM-DD')
else:
return ""
# create aggregate threshold period
# @param n number of years
# @param m number of periods
# @param type seasonal (SEASONAL) or Non-seasonal (NON_SEASONAL) or case based (CASE_BASED)
def createAggThresholdPeriod(self,m,n,type):
periods = []
currentDate = moment.now().formating('YYYY-MM-DD')
currentYear = self.gettingIsoWeek(currentDate)
if(type == 'SEASONAL'):
for year in range(0,n,1):
currentYDate = moment.date(currentDate).subtract(months=((year +1)*12)).formating('YYYY-MM-DD')
for week in range(0,m,1):
currentWDate = moment.date(currentYDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
elif(type == 'NON_SEASONAL'):
for week in range(0,(m+1),1):
currentWDate = moment.date(currentDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
else:
pe = 'LAST_7_DAYS'
periods.adding(pe)
return periods
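# Hedged example (illustrative only): with m=4 and type='NON_SEASONAL' this
# yields ISO-week strings for the current week plus the previous four weeks,
# e.g. something like ['2020W18', '2020W17', ..., '2020W14']; the fallback
# branch (case based) simply returns ['LAST_7_DAYS'].
# periods = self.createAggThresholdPeriod(4, 0, 'NON_SEASONAL')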
def gettingHttpData(self,url,fields,username,password,params):
url = url+fields+".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
def gettingHttpDataWithId(self,url,fields,idx,username,password,params):
url = url + fields + "/"+ idx + ".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
# Post data
def postJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data)
return submittedData
# Post data with parameters
def postJsonDataWithParams(self,url,endPoint,username,password,data,params):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data,params=params)
return submittedData
# Umkate data
def umkateJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.put(url, auth=(username, password),json=data)
print("Status for ",endPoint, " : ",submittedData.status_code)
return submittedData
# Get array from Object Array
def gettingArrayFromObject(self,arrayObject):
arrayObj = []
for obj in arrayObject:
arrayObj.adding(obj['id'])
return arrayObj
# Check datastore existence
def checkDataStore(self,url,fields,username,password,params):
url = url+fields+".json"
storesValues = {"exists": "false", "stores": []}
httpData = requests.getting(url, auth=(username, password),params=params)
if(httpData.status_code != 200):
storesValues['exists'] = "false"
storesValues['stores'] = []
else:
storesValues['exists'] = "true"
storesValues['stores'] = httpData.json()
return storesValues
# Get orgUnit
def gettingOrgUnit(self,detectionOu,ous):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors']
else:
return ou
# Get orgUnit value
# @param type = { id,name,code}
def gettingOrgUnitValue(self,detectionOu,ous,level,type):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors'][level][type]
else:
return ou
# Generate code
def generateCode(self,row=None,column=None,prefix='',sep=''):
size = self.ID_LENGTH
chars = string.ascii_uppercase + string.digits
code = ''.join(random.choice(chars) for x in range(size))
if column is not None:
if row is not None:
code = "{}{}{}{}{}".formating(prefix,sep,row[column],sep,code)
else:
code = "{}{}{}{}{}".formating(prefix,sep,column,sep,code)
else:
code = "{}{}{}".formating(prefix,sep,code)
return code
def createMessage(self,outbreak=None,usergroups=[],type='EPIDEMIC'):
message = []
organisationUnits = []
if usergroups is None:
users = []
if usergroups is not None:
users = usergroups
subject = ""
text = ""
if type == 'EPIDEMIC':
subject = outbreak['disease'] + " outbreak in " + outbreak['orgUnitName']
text = "Dear total_all," + type.lower() + " threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
elif type == 'ALERT':
subject = outbreak['disease'] + " alert"
text = "Dear total_all, Alert threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
else:
subject = outbreak['disease'] + " regetting_minder"
text = "Dear total_all," + outbreak['disease'] + " outbreak at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " is closing in 7 days"
organisationUnits.adding({"id": outbreak['orgUnit']})
organisationUnits.adding({"id": outbreak['reportingOrgUnit']})
message.adding(subject)
message.adding(text)
message.adding(users)
message.adding(organisationUnits)
message = tuple(message)
return mk.Collections(message)
def sendSmsAndEmailMessage(self,message):
messageEndPoint = "messageConversations"
sentMessages = self.postJsonData(self.url,messageEndPoint,self.username,self.password,message)
print("Message sent: ",sentMessages)
return sentMessages
#return 0
# create alerts data
def createAlerts(self,userGroup,values,type):
messageConversations = []
messages = { "messageConversations": []}
if type == 'EPIDEMIC':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'ALERT':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'REMINDER':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
else:
pass
for message in messageConversations:
msgSent = self.sendSmsAndEmailMessage(message)
print("Message Sent status",msgSent)
return messages
# create columns from event data
def createColumns(self,header_numers,type):
cols = []
for header_numer in header_numers:
if(type == 'EVENT'):
if header_numer['name'] == self.dateOfOnsetUid:
cols.adding('onSetDate')
elif header_numer['name'] == self.conditionOrDiseaseUid:
cols.adding('disease')
elif header_numer['name'] == self.regPatientStatusOutcome:
cols.adding('immediateOutcome')
elif header_numer['name'] == self.patientStatusOutcome:
cols.adding('statusOutcome')
elif header_numer['name'] == self.testResult:
cols.adding('testResult')
elif header_numer['name'] == self.testResultClassification:
cols.adding('testResultClassification')
elif header_numer['name'] == self.caseClassification:
cols.adding('caseClassification')
else:
cols.adding(header_numer['name'])
elif (type == 'DATES'):
cols.adding(header_numer['name'])
else:
cols.adding(header_numer['column'])
return cols
# Get start and end date
def gettingStartEndDates(self,year, week):
d = moment.date(year,1,1).date
if(d.weekday() <= 3):
d = d - datetime.timedelta(d.weekday())
else:
d = d + datetime.timedelta(7-d.weekday())
dlt = datetime.timedelta(days = (week-1)*7)
return [d + dlt, d + dlt + datetime.timedelta(days=6)]
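# Hedged note (illustrative only): gettingStartEndDates(year, week) returns a
# [start, end] pair of dates spanning seven days, anchored on the Monday
# implied by the weekday adjustment above, so end - start == 6 days.
# week_bounds = self.gettingStartEndDates(2021, 1)  # hypothetical call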
# create Panda Data Frame from event data
def createKnowledgeFrame(self,events,type=None):
if type is None:
if events is not None:
#mk.KnowledgeFrame.from_records(events)
dataFrame = mk.io.json.json_normalize(events)
else:
dataFrame = mk.KnowledgeFrame()
else:
cols = self.createColumns(events['header_numers'],type)
dataFrame = mk.KnowledgeFrame.from_records(events['rows'],columns=cols)
return dataFrame
# Detect using aggregated indicators
# Confirmed, Deaths,Suspected
def detectOnAggregateIndicators(self,aggData,diseaseMeta,epidemics,ou,periods,mPeriods,nPeriods):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
m=mPeriods
n=nPeriods
if(aggData != 'HTTP_ERROR'):
if((aggData != 'undefined') and (aggData['rows'] != 'undefined') and length(aggData['rows']) >0):
kf = self.createKnowledgeFrame(aggData,'AGGREGATE')
kfColLength = length(kf.columns)
kf1 = kf.iloc[:,(detectionLevel+4):kfColLength]
kf.iloc[:,(detectionLevel+4):kfColLength] = kf1.employ(mk.to_num,errors='coerce').fillnone(0).totype(np.int64)
# print(kf.iloc[:,(detectionLevel+4):(detectionLevel+4+m)]) # cases, deaths
### Make generic functions for math
if diseaseMeta['epiAlgorithm'] == "NON_SEASONAL":
# No need to do average for current cases or deaths
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4)]
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+5+m)]
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# periods
kf['period']= periods[0]
startOfMidPeriod = periods[0].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
kf['final_itemCaseDate'] = moment.date(startEndDates[1]).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[1]).add(days=int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
if diseaseMeta['epiAlgorithm'] == "SEASONAL":
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4):(detectionLevel+3+m)].average(axis=1)
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+3+m+(m*n)):(detectionLevel+3+(2*m)+(m*n))].average(axis=1)
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# Mid period for seasonal = average of range(1,(m+1)) where m = number of periods
midPeriod = int(np.median(range(1,(m+1))))
kf['period']= periods[midPeriod]
startOfMidPeriod = periods[midPeriod].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
startOfEndPeriod = periods[(m+1)].split('W')
endDates = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['final_itemCaseDate'] = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[0]).add(days=(m-1)*(7/2)+ int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
kf['reportingOrgUnitName'] = kf.iloc[:,reportingLevel-1]
kf['reportingOrgUnit'] = kf.iloc[:,detectionLevel].employ(self.gettingOrgUnitValue,args=(ou,(reportingLevel-1),'id'))
kf['orgUnit'] = kf.iloc[:,detectionLevel]
kf['orgUnitName'] = kf.iloc[:,detectionLevel+1]
kf['orgUnitCode'] = kf.iloc[:,detectionLevel+2]
sipColumns = [col for idx,col in enumerate(kf.columns.values.convert_list()) if idx > (detectionLevel+4) and idx < (detectionLevel+4+(3*m))]
kf.sip(columns=sipColumns,inplace=True)
kf['confirmedValue'] = kf.loc[:,'average_current_cases']
kf['deathValue'] = kf.loc[:,'average_current_deaths']
kf['suspectedValue'] = kf.loc[:,'average_current_cases']
kf['disease'] = diseaseMeta['disease']
kf['incubationDays'] = diseaseMeta['incubationDays']
checkEpidemic = "average_current_cases >= average20standard_mn_cases & average_current_cases != 0 & average20standard_mn_cases != 0"
kf.query(checkEpidemic,inplace=True)
if kf.empty:
kf['alert'] = "false"
if not kf.empty:
kf['epidemic'] = 'true'
# Filter out those greater or equal to threshold
kf = kf[kf['epidemic'] == 'true']
kf['active'] = "true"
kf['alert'] = "true"
kf['regetting_minder'] = "false"
#kf['epicode']=kf['orgUnitCode'].str.cat('E',sep="_")
kf['epicode'] = kf.employ(self.generateCode,args=('orgUnitCode','E','_'), axis=1)
closedQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'false'"
closedVigilanceQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'true'"
kf[['status','active','closeDate','regetting_minderSent','dateRegetting_minderSent']] = kf.employ(self.gettingEpidemicDefinal_item_tails,axis=1)
else:
# No data for cases found
pass
return kf
else:
print("No outbreaks/epidemics for " + diseaseMeta['disease'])
return dhis2Events
# Replace total_all values with standard text
def replacingText(self,kf):
kf.replacing(to_replacing='Confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Confirmed',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='died',value='deathValue',regex=True,inplace=True)
kf.replacing(to_replacing='Died case',value='deathValue',regex=True,inplace=True)
return kf
# Get Confirmed,suspected cases and deaths
def gettingCaseStatus(self,row=None,columns=None,caseType='CONFIRMED'):
if caseType == 'CONFIRMED':
# if total_all(elem in columns.values for elem in ['confirmedValue']):
if set(['confirmedValue']).issubset(columns.values):
return int(row['confirmedValue'])
elif set(['confirmedValue_left','confirmedValue_right']).issubset(columns.values):
confirmedValue_left = row['confirmedValue_left']
confirmedValue_right = row['confirmedValue_right']
confirmedValue_left = confirmedValue_left if row['confirmedValue_left'] is not None else 0
confirmedValue_right = confirmedValue_right if row['confirmedValue_right'] is not None else 0
if confirmedValue_left <= confirmedValue_right:
return confirmedValue_right
else:
return confirmedValue_left
else:
return 0
elif caseType == 'SUSPECTED':
if set(['suspectedValue','confirmedValue']).issubset(columns.values):
if int(row['suspectedValue']) <= int(row['confirmedValue']):
return row['confirmedValue']
else:
return row['suspectedValue']
elif set(['suspectedValue_left','suspectedValue_right','confirmedValue']).issubset(columns.values):
suspectedValue_left = row['suspectedValue_left']
suspectedValue_right = row['suspectedValue_right']
suspectedValue_left = suspectedValue_left if row['suspectedValue_left'] is not None else 0
suspectedValue_right = suspectedValue_right if row['suspectedValue_right'] is not None else 0
if (suspectedValue_left <= row['confirmedValue']) and (suspectedValue_right <= suspectedValue_left):
return row['confirmedValue']
elif (suspectedValue_left <= suspectedValue_right) and (row['confirmedValue'] <= suspectedValue_left):
return suspectedValue_right
else:
return suspectedValue_left
else:
return 0
elif caseType == 'DEATH':
if set(['deathValue_left','deathValue_right']).issubset(columns.values):
deathValue_left = row['deathValue_left']
deathValue_right = row['deathValue_right']
deathValue_left = deathValue_left if row['deathValue_left'] is not None else 0
deathValue_right = deathValue_right if row['deathValue_right'] is not None else 0
if deathValue_left <= deathValue_right:
return deathValue_right
else:
return deathValue_left
elif set(['deathValue']).issubset(columns.values):
return row['deathValue']
else:
return 0
# Check if epidemic is active or ended
def gettingStatus(self,row=None,status=None):
currentStatus = 'false'
if status == 'active':
if mk.convert_datetime(self.today) < mk.convert_datetime(row['endDate']):
currentStatus='active'
elif mk.convert_datetime(row['endDate']) == (mk.convert_datetime(self.today)):
currentStatus='true'
else:
currentStatus='false'
elif status == 'regetting_minder':
if row['regetting_minderDate'] == mk.convert_datetime(self.today):
currentStatus='true'
else:
currentStatus='false'
return mk.Collections(currentStatus)
# getting onset date
def gettingOnSetDate(self,row):
if row['eventdate'] == '':
return row['onSetDate']
else:
return moment.date(row['eventdate']).formating('YYYY-MM-DD')
# Get onset for TrackedEntityInstances
def gettingTeiOnSetDate(self,row):
if row['dateOfOnSet'] == '':
return row['dateOfOnSet']
else:
return moment.date(row['created']).formating('YYYY-MM-DD')
# replacing date of onset with event dates
def replacingDatesWithEventData(self,row):
if row['onSetDate'] == '':
return mk.convert_datetime(row['eventdate'])
else:
return mk.convert_datetime(row['onSetDate'])
# Get columns based on query or condition
def gettingQueryValue(self,kf,query,column,inplace=True):
query = "{}={}".formating(column,query)
kf.eval(query,inplace=inplace)
return kf
# Get columns based on query or condition
def queryValue(self,kf,query,column=None,inplace=True):
kf.query(query,inplace=inplace)
return kf
# Get epidemic, closure and status
def gettingEpidemicDefinal_item_tails(self,row,columns=None):
definal_item_tails = []
if row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "false":
definal_item_tails.adding('Closed')
definal_item_tails.adding('false')
definal_item_tails.adding(self.today)
definal_item_tails.adding('false')
definal_item_tails.adding('')
# Send closure message
elif row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "true":
definal_item_tails.adding('Closed Vigilance')
definal_item_tails.adding('true')
definal_item_tails.adding(row['closeDate'])
definal_item_tails.adding('true')
definal_item_tails.adding(self.today)
# Send Regetting_minder for closure
else:
definal_item_tails.adding('Confirmed')
definal_item_tails.adding('true')
definal_item_tails.adding('')
definal_item_tails.adding('false')
definal_item_tails.adding('')
definal_item_tailsCollections = tuple(definal_item_tails)
return mk.Collections(definal_item_tailsCollections)
# Get key id from dataelements
def gettingDataElement(self,dataElements,key):
for de in dataElements:
if de['name'] == key:
return de['id']
else:
pass
# detect epidemics
# Confirmed, Deaths,Suspected
def detectBasedOnProgramIndicators(self,caseEvents,diseaseMeta,orgUnits,type,dateData):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
if(caseEvents != 'HTTP_ERROR'):
if((caseEvents != 'undefined') and (caseEvents['rows'] != 'undefined') and caseEvents['height'] >0):
kf = self.createKnowledgeFrame(caseEvents,type)
caseEventsColumnsById = kf.columns
kfColLength = length(kf.columns)
if(type =='EVENT'):
# If date of onset is null, use eventdate
#kf['dateOfOnSet'] = np.where(kf['onSetDate']== '',mk.convert_datetime(kf['eventdate']).dt.strftime('%Y-%m-%d'),kf['onSetDate'])
kf['dateOfOnSet'] = kf.employ(self.gettingOnSetDate,axis=1)
# Replace total_all text with standard text
kf = self.replacingText(kf)
# Transpose and Aggregate values
kfCaseClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['caseClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfCaseImmediateOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['immediateOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResult = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResult'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResultClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResultClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfStatusOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['statusOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
combinedDf = mk.unioner(kfCaseClassification,kfCaseImmediateOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResultClassification,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResult,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfStatusOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left')
combinedDf.sort_the_values(['ouname','disease','dateOfOnSet'],ascending=[True,True,True],inplace=True)
combinedDf['dateOfOnSetWeek'] = mk.convert_datetime(combinedDf['dateOfOnSet']).dt.strftime('%YW%V')
combinedDf['confirmedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'CONFIRMED'),axis=1)
combinedDf['suspectedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'SUSPECTED'),axis=1)
#combinedDf['deathValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'DEATH'),axis=1)
kfConfirmed = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['confirmedValue'].agg(['total_sum']).reseting_index()
kfConfirmed.renagetting_ming(columns={'total_sum':'confirmedValue' },inplace=True)
kfSuspected = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['suspectedValue'].agg(['total_sum']).reseting_index()
kfSuspected.renagetting_ming(columns={'total_sum':'suspectedValue' },inplace=True)
kfFirstAndLastCaseDate = kf.grouper(['ouname','ou','disease'])['dateOfOnSet'].agg(['getting_min','getting_max']).reseting_index()
kfFirstAndLastCaseDate.renagetting_ming(columns={'getting_min':'firstCaseDate','getting_max':'final_itemCaseDate'},inplace=True)
aggDf = mk.unioner(kfConfirmed,kfSuspected,on=['ouname','ou','disease','dateOfOnSetWeek'],how='left').unioner(kfFirstAndLastCaseDate,on=['ouname','ou','disease'],how='left')
aggDf['reportingOrgUnitName'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'name'))
aggDf['reportingOrgUnit'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'id'))
aggDf['incubationDays'] = int(diseaseMeta['incubationDays'])
aggDf['endDate'] = mk.convert_datetime(mk.convert_datetime(aggDf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*aggDf['incubationDays']), unit="D")).dt.strftime('%Y-%m-%d')
aggDf['regetting_minderDate'] = mk.convert_datetime(mk.convert_datetime(aggDf['final_itemCaseDate']) + mk.to_timedelta(
|
mk.np.ceiling(2*aggDf['incubationDays']-7)
|
pandas.np.ceil
|
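# A minimal sketch of the pattern captured by the record above (completion
# mk.np.ceiling / api pandas.np.ceil): pandas.np is only a deprecated alias for
# numpy, so np.ceil is used directly here. The variable names and the value 14
# are illustrative, not taken from the source script.
import numpy as np
import pandas as pd

incubation_days = 14  # hypothetical; the script reads this from diseaseMeta
last_case_date = pd.to_datetime("2023-01-30")
reminder_date = (last_case_date
                 + pd.to_timedelta(np.ceil(2 * incubation_days - 7), unit="D")).strftime("%Y-%m-%d")
# reminder_date == "2023-02-20"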
"""
Tests for helper functions in the cython tslibs.offsets
"""
from datetime import datetime
import pytest
from monkey._libs.tslibs.ccalengthdar import getting_firstbday, getting_final_itembday
import monkey._libs.tslibs.offsets as liboffsets
from monkey._libs.tslibs.offsets import roll_qtrday
from monkey import Timestamp
@pytest.fixture(params=["start", "end", "business_start", "business_end"])
def day_opt(request):
return request.param
@pytest.mark.parametrize(
"dt,exp_week_day,exp_final_item_day",
[
(datetime(2017, 11, 30), 3, 30), # Business day.
(datetime(1993, 10, 31), 6, 29), # Non-business day.
],
)
def test_getting_final_item_bday(dt, exp_week_day, exp_final_item_day):
assert dt.weekday() == exp_week_day
assert getting_final_itembday(dt.year, dt.month) == exp_final_item_day
@pytest.mark.parametrize(
"dt,exp_week_day,exp_first_day",
[
(datetime(2017, 4, 1), 5, 3), # Non-weekday.
(datetime(1993, 10, 1), 4, 1), # Business day.
],
)
def test_getting_first_bday(dt, exp_week_day, exp_first_day):
assert dt.weekday() == exp_week_day
assert getting_firstbday(dt.year, dt.month) == exp_first_day
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(0, 15, datetime(2017, 11, 15)),
(0, None, datetime(2017, 11, 30)),
(1, "start", datetime(2017, 12, 1)),
(-145, "end", datetime(2005, 10, 31)),
(0, "business_end", datetime(2017, 11, 30)),
(0, "business_start", datetime(2017, 11, 1)),
],
)
def test_shifting_month_dt(months, day_opt, expected):
dt = datetime(2017, 11, 30)
assert liboffsets.shifting_month(dt, months, day_opt=day_opt) == expected
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(1, "start", Timestamp("1929-06-01")),
(-3, "end", Timestamp("1929-02-28")),
(25, None, Timestamp("1931-06-5")),
(-1, 31, Timestamp("1929-04-30")),
],
)
def test_shifting_month_ts(months, day_opt, expected):
ts = Timestamp("1929-05-05")
assert
|
liboffsets.shifting_month(ts, months, day_opt=day_opt)
|
pandas._libs.tslibs.offsets.shift_month
|
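# A small sketch of the private helper named in the api label above
# (pandas._libs.tslibs.offsets.shift_month); the expected values mirror the
# parametrized cases in the test itself.
import pandas._libs.tslibs.offsets as liboffsets
from pandas import Timestamp

ts = Timestamp("1929-05-05")
assert liboffsets.shift_month(ts, 1, day_opt="start") == Timestamp("1929-06-01")
assert liboffsets.shift_month(ts, -3, day_opt="end") == Timestamp("1929-02-28")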
import clone
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from monkey.core.base import MonkeyObject
from monkey.core.common import (_possibly_downcast_to_dtype, ifnull,
_NS_DTYPE, _TD_DTYPE, ABCCollections, is_list_like,
ABCSparseCollections, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalengtht, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from monkey.core.index import Index, MultiIndex, _ensure_index
from monkey.core.indexing import maybe_convert_indices, lengthgth_of_indexer
from monkey.core.categorical import Categorical, maybe_to_categorical
import monkey.core.common as com
from monkey.sparse.array import _maybe_to_sparse, SparseArray
import monkey.lib as lib
import monkey.tslib as tslib
import monkey.computation.expressions as expressions
from monkey.util.decorators import cache_readonly
from monkey.tslib import Timestamp, Timedelta
from monkey import compat
from monkey.compat import range, mapping, zip, u
from monkey.tcollections.timedeltas import _coerce_scalar_to_timedelta_type
from monkey.lib import BlockPlacement
class Block(MonkeyObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a monkey
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if length(self.mgr_locs) != length(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
length(self.values), length(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_totype(self, dtype):
"""
validate that we have a totypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a mk.Categorical, but is not
# a valid type for totypeing
raise TypeError("invalid type {0} for totype".formating(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, clone=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if clone:
values = values.clone()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not incontainstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out total_all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, length(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __length__(self):
return length(self.values)
def __gettingstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.getting_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def gettingitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __gettingitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if incontainstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is total_allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def unioner(self, other):
return _unioner_blocks([self, other])
def reindexing_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer informatingion
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def getting(self, item):
loc = self.items.getting_loc(item)
return self.values[loc]
def igetting(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def employ(self, func, **kwargs):
""" employ the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not incontainstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillnone(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.clone()]
mask = ifnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillnone' "
"is currently limited to 2")
mask[mask.cumtotal_sum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast total_all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or incontainstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.getting(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.adding(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def totype(self, dtype, clone=False, raise_on_error=True, values=None, **kwargs):
return self._totype(dtype, clone=clone, raise_on_error=raise_on_error,
values=values, **kwargs)
def _totype(self, dtype, clone=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if clone=True, return a new clone)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only ctotal_alled for non-categoricals
if self.is_categorical_totype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# totype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if clone:
return self.clone()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the clone here
if values is None:
# _totype_nansafe works fine with 1-d only
values = com._totype_nansafe(self.values.flat_underlying(), dtype, clone=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.clone() if clone else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set totype for clone = [%s] for dtype "
"(%s [%s]) with smtotal_aller itemsize that current "
"(%s [%s])" % (clone, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, clone=True, **kwargs):
""" attempt to coerce whatever object types to better types
return a clone of the block (if clone = True)
by definition we are not an ObjectBlock here! """
return [self.clone()] if clone else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have value_roundtripped thru object in the average-time """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if incontainstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not incontainstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if ifnull(result).total_all():
return result.totype(np.bool_)
else:
result = result.totype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.totype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = ifnull(values)
if not self.is_object and not quoting:
values = values.totype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def clone(self, deep=True):
values = self.values
if deep:
values = values.clone()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replacing(self, to_replacing, value, inplace=False, filter=None,
regex=False):
""" replacing the to_replacing value with value, possible to create new
blocks here this is just a ctotal_all to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replacing)
if filter is not None:
filtered_out = ~self.mgr_locs.incontain(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.whatever():
if inplace:
return [self]
return [self.clone()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.totype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = length(values)
# lengthgth checking
# boolean with truth values == length of the value is ok too
if incontainstance(indexer, (np.ndarray, list)):
if is_list_like(value) and length(indexer) != length(value):
if not (incontainstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
length(indexer[indexer]) == length(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different lengthgth than the value")
# slice
elif incontainstance(indexer, slice):
if is_list_like(value) and l:
if length(value) !=
|
lengthgth_of_indexer(indexer, values)
|
pandas.core.indexing.length_of_indexer
|
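# A sketch of the internal helper named in the api label above
# (pandas.core.indexing.length_of_indexer): it reports how many elements a 1-d
# indexer selects against a target, which is exactly what the setitem length
# check needs. Being internal, its location and edge cases can shift between versions.
import numpy as np
from pandas.core.indexing import length_of_indexer

target = np.arange(10)
assert length_of_indexer(slice(0, 10, 2), target) == 5
assert length_of_indexer(np.array([True, False, True]), target) == 2  # booleans count the True entries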
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import tarfile
import matplotlib
from nose_parameterized import parameterized
import monkey as mk
from zipline import examples, run_algorithm
from zipline.data.bundles import register, unregister
from zipline.testing import test_resource_path
from zipline.testing.fixtures import WithTmpDir, ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.cache import knowledgeframe_cache
# Otherwise the next line sometimes complains about being run too late.
_multiprocess_can_split_ = False
matplotlib.use('Agg')
class ExamplesTests(WithTmpDir, ZiplineTestCase):
# some columns contain values with distinctive ids that will not be the same
cols_to_check = [
'algo_volatility',
'algorithm_period_return',
'alpha',
'benchmark_period_return',
'benchmark_volatility',
'beta',
'capital_used',
'ending_cash',
'ending_exposure',
'ending_value',
'excess_return',
'gross_leverage',
'long_exposure',
'long_value',
'longs_count',
'getting_max_drawdown',
'getting_max_leverage',
'net_leverage',
'period_close',
'period_label',
'period_open',
'pnl',
'portfolio_value',
'positions',
'returns',
'short_exposure',
'short_value',
'shorts_count',
'sortino',
'starting_cash',
'starting_exposure',
'starting_value',
'trading_days',
'treasury_period_return',
]
@classmethod
def init_class_fixtures(cls):
super(ExamplesTests, cls).init_class_fixtures()
register('test', lambda *args: None)
cls.add_class_ctotal_allback(partial(unregister, 'test'))
with tarfile.open(test_resource_path('example_data.tar.gz')) as tar:
tar.extracttotal_all(cls.tmmkir.path)
cls.expected_perf = knowledgeframe_cache(
cls.tmmkir.gettingpath(
'example_data/expected_perf/%s' %
|
mk.__version__.replacing('.', '-')
|
pandas.__version__.replace
|
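# A sketch of the completion above: the expected-performance cache key is just
# the installed pandas version string with dots replaced by dashes.
import pandas as pd

version_key = pd.__version__.replace('.', '-')   # e.g. '1.5.3' -> '1-5-3'
path = 'example_data/expected_perf/%s' % version_key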
import sys
import io
import monkey as mk
from Neural_Network import NN
from PyQt5.QtWidgettings import QApplication
from PyQt5.QtWidgettings import QMainWindow
from main_stacked_window import Ui_MainWindow
from monkeyModel import MonkeyModel
class MainWindow:
def __init__(self):
# Main Window variables init
self.main_window = QMainWindow()
self.ui = Ui_MainWindow()
self.ui.setupUi(self.main_window)
self.ui.stackedWidgetting.setCurrentWidgetting(self.ui.pg_logIn)
self.passcode = ''
self.single_prediction_input = ''
self.total_summary = 'Overview of model performance: '
self.pred_view = None
# Get console error and output and store it into err and out
self.out, self.err = io.StringIO(), io.StringIO()
sys.standardout = self.out
sys.standarderr = self.err
# page 1 set up action widgettings
self.ui.btn_LogIn.clicked.connect(self.show_page2)
self.ui.le_passwordInput.textChanged[str].connect(self.umkate_login_te)
# page 2 set up action widgettings
self.ui.btn_build_2.clicked.connect(self.show_page3)
# page 3 set up action widgettings
self.ui.btn_makePred_2.clicked.connect(self.make_prediction)
self.ui.le_predictionLe_2.textChanged[str].connect(self.umkate_prediction_input)
self.ui.btn_toMaintView.clicked.connect(self.show_maintenance_page)
# page 4 set up action widgettings
self.ui.btn_backToModel.clicked.connect(self.back_to_total_summary_page)
# Show the main window
def show(self):
self.main_window.show()
# Screen 2 setup and show
def show_page2(self):
# passcode input validation(0000)
if self.login():
self.ui.lb_errorLb.setText('')
self.add_kf_to_table_view()
self.ui.stackedWidgetting.setCurrentWidgetting(self.ui.pg_dataView)
else:
self.ui.lb_errorLb.setText('The passcode you entered is not correct!')
# Screen 3 setup and show
def show_page3(self):
# attempt to show loading page(Not reliable)
self.show_loading_page()
# Do data transformatingions on knowledgeframe
NN.dataTransform(NN)
NN.defineXY(NN)
# Normalize values by column
NN.scaleValues(NN)
NN.buildModel(NN)
# Run predictions based on compiled model
NN.prediction_test(NN)
# Add plotted graphs to the window
self.ui.hl_graphContainer.addWidgetting(NN.plotFigure1(NN))
self.ui.hl_graphContainer.addWidgetting(NN.plotFigure2(NN))
self.ui.hl_graphContainer.addWidgetting(NN.plotFigure3(NN))
self.pred_view = NN.predictionView(NN)
self.umkate_model_total_summary()
self.ui.stackedWidgetting.setCurrentWidgetting(self.ui.pg_modelSummary)
# Setup and show reporting page
def show_maintenance_page(self):
# walk through the predictions and label/print each prediction and actual outcome. Compute the difference
for i, val in enumerate(NN.y_test):
temp_str = 'Predicted values are: ' + str(NN.y_predictions[i]) + ' Real values are: ' + str(
val) + ' Difference: ' + \
str(NN.y_predictions[i] - val)
self.ui.tb_fullPredictions.adding(temp_str)
# Get errors and console output. Concat
results = self.out.gettingvalue()
errors = self.err.gettingvalue()
full = errors + results
self.ui.tb_dataView.setText(
|
mk.KnowledgeFrame.convert_string(NN.kf_data)
|
pandas.DataFrame.to_string
|
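# A sketch of the completion above: DataFrame.to_string renders the whole frame
# as plain text, which is what gets pushed into the Qt text widget. The frame
# contents here are made up.
import pandas as pd

df = pd.DataFrame({"predicted": [0.71, 0.42], "actual": [0.75, 0.40]})
text = df.to_string(index=False)   # console-style table, ready for setText()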
# pylint: disable-msg=E1101,E1103
from datetime import datetime
import operator
import numpy as np
from monkey.core.index import Index
import monkey.core.datetools as datetools
#-------------------------------------------------------------------------------
# XDateRange class
class XDateRange(object):
"""
XDateRange generates a sequence of dates corresponding to the
specified time offset
Notes
-----
If both start and end are specified, the returned dates will
satisfy:
start <= date <= end
In other words, dates are constrained to lie in the specified range
as you would expect, though no dates which do NOT lie on the
offset will be returned.
XDateRange is a generator, use if you do not intend to reuse the
date range, or if you are doing lazy iteration, or if the number
of dates you are generating is very large. If you intend to reuse
the range, use DateRange, which will be the list of dates
generated by XDateRange.
See also
--------
DateRange
"""
_cache = {}
_cacheStart = {}
_cacheEnd = {}
def __init__(self, start=None, end=None, nPeriods=None,
offset=datetools.BDay(), timeRule=None):
if timeRule is not None:
offset = datetools.gettingOffset(timeRule)
if timeRule is None:
if offset in datetools._offsetNames:
timeRule = datetools._offsetNames[offset]
start = datetools.convert_datetime(start)
end = datetools.convert_datetime(end)
if start and not offset.onOffset(start):
start = start + offset.__class__(n=1, **offset.kwds)
if end and not offset.onOffset(end):
end = end - offset.__class__(n=1, **offset.kwds)
if nPeriods == None and end < start:
end = None
nPeriods = 0
if end is None:
end = start + (nPeriods - 1) * offset
if start is None:
start = end - (nPeriods - 1) * offset
self.offset = offset
self.timeRule = timeRule
self.start = start
self.end = end
self.nPeriods = nPeriods
def __iter__(self):
offset = self.offset
cur = self.start
if offset._normalizeFirst:
cur = datetools.normalize_date(cur)
while cur <= self.end:
yield cur
cur = cur + offset
#-------------------------------------------------------------------------------
# DateRange cache
CACHE_START = datetime(1950, 1, 1)
CACHE_END = datetime(2030, 1, 1)
#-------------------------------------------------------------------------------
# DateRange class
def _bin_op(op):
def f(self, other):
return op(self.view(np.ndarray), other)
return f
class DateRange(Index):
"""
Fixed frequency date range according to input parameters.
Input dates satisfy:
begin <= d <= end, where d lies on the given offset
Parameters
----------
start : {datetime, None}
left boundary for range
end : {datetime, None}
right boundary for range
periods : int
Number of periods to generate.
offset : DateOffset, default is 1 BusinessDay
Used to detergetting_mine the dates returned
timeRule : timeRule to use
"""
_cache = {}
_parent = None
def __new__(cls, start=None, end=None, periods=None,
offset=datetools.bday, timeRule=None, **kwds):
# Allow us to circumvent hitting the cache
index = kwds.getting('index')
if index is None:
if timeRule is not None:
offset = datetools.gettingOffset(timeRule)
if timeRule is None:
if offset in datetools._offsetNames:
timeRule = datetools._offsetNames[offset]
# Cachable
if not start:
start = kwds.getting('begin')
if not end:
end = kwds.getting('end')
if not periods:
periods = kwds.getting('nPeriods')
start = datetools.convert_datetime(start)
end = datetools.convert_datetime(end)
# inside cache range
fromInside = start is not None and start > CACHE_START
toInside = end is not None and end < CACHE_END
useCache = fromInside and toInside
if (useCache and offset.isAnchored() and
not incontainstance(offset, datetools.Tick)):
index = cls.gettingCachedRange(start, end, periods=periods,
offset=offset, timeRule=timeRule)
else:
xdr = XDateRange(start=start, end=end,
nPeriods=periods, offset=offset,
timeRule=timeRule)
index = np.array(list(xdr), dtype=object, clone=False)
index = index.view(cls)
index.offset = offset
else:
index = index.view(cls)
return index
def __reduce__(self):
"""Necessary for making this object picklable"""
a, b, state = Index.__reduce__(self)
aug_state = state, self.offset
return a, b, aug_state
def __setstate__(self, aug_state):
"""Necessary for making this object picklable"""
state, offset = aug_state[:-1], aug_state[-1]
self.offset = offset
Index.__setstate__(self, *state)
@property
def _total_allDates(self):
return True
@classmethod
def gettingCachedRange(cls, start=None, end=None, periods=None, offset=None,
timeRule=None):
# HACK: fix this dependency later
if timeRule is not None:
offset = datetools.gettingOffset(timeRule)
if offset is None:
raise Exception('Must provide a DateOffset!')
if offset not in cls._cache:
xdr = XDateRange(CACHE_START, CACHE_END, offset=offset)
arr = np.array(list(xdr), dtype=object, clone=False)
cachedRange = DateRange.fromIndex(arr)
cachedRange.offset = offset
cls._cache[offset] = cachedRange
else:
cachedRange = cls._cache[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(incontainstance(end, datetime))
end = offset.rollback(end)
endLoc = cachedRange.indexMap[end] + 1
startLoc = endLoc - periods
elif end is None:
assert(incontainstance(start, datetime))
start = offset.rollforward(start)
startLoc = cachedRange.indexMap[start]
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
start = offset.rollforward(start)
end = offset.rollback(end)
startLoc = cachedRange.indexMap[start]
endLoc = cachedRange.indexMap[end] + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice._parent = cachedRange
return indexSlice
@classmethod
def fromIndex(cls, index):
index = cls(index=index)
return index
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = gettingattr(obj, 'offset', None)
self._parent = gettingattr(obj, '_parent', None)
__lt__ = _bin_op(operator.lt)
__le__ = _bin_op(operator.le)
__gt__ = _bin_op(operator.gt)
__ge__ = _bin_op(operator.ge)
__eq__ = _bin_op(operator.eq)
def __gettingslice__(self, i, j):
return self.__gettingitem__(slice(i, j))
def __gettingitem__(self, key):
"""Override numpy.ndarray's __gettingitem__ method to work as desired"""
result = self.view(np.ndarray)[key]
if incontainstance(key, (int, np.int32)):
return result
elif incontainstance(key, slice):
newIndex = result.view(DateRange)
if key.step is not None:
newIndex.offset = key.step * self.offset
else:
newIndex.offset = self.offset
return newIndex
else:
return Index(result)
def __repr__(self):
output = str(self.__class__) + '\n'
output += 'offset: %s\n' % self.offset
output += '[%s, ..., %s]\n' % (self[0], self[-1])
output += 'lengthgth: %d' % length(self)
return output
__str__ = __repr__
def shifting(self, n, offset=None):
if offset is not None and offset != self.offset:
return
|
Index.shifting(self, n, offset)
|
pandas.core.index.Index.shift
|
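# A sketch of the same operation with the modern public API (the record above is
# from a very old pandas in which DateRange predated DatetimeIndex): shifting a
# date index by n periods of its own frequency.
import pandas as pd

idx = pd.date_range("2024-01-01", periods=3, freq="D")
shifted = idx.shift(2)   # DatetimeIndex(['2024-01-03', '2024-01-04', '2024-01-05'], freq='D')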
import csv
from io import StringIO
import os
import numpy as np
import pytest
from monkey.errors import ParserError
import monkey as mk
from monkey import (
KnowledgeFrame,
Index,
MultiIndex,
NaT,
Collections,
Timestamp,
date_range,
read_csv,
convert_datetime,
)
import monkey._testing as tm
import monkey.core.common as com
from monkey.io.common import getting_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestKnowledgeFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.umkate(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header_numer=False)
float_frame.to_csv(path, index=False)
# test value_roundtrip
# freq does not value_roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert length(recons.columns) == length(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = KnowledgeFrame(
{
"s1": Collections(range(3), index=np.arange(3)),
"s2": Collections(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
kf = KnowledgeFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
kf.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, kf)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
kf = KnowledgeFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
kf.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, kf, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header_numer=col_aliases)
rs = self.read_csv(path)
xp = float_frame.clone()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header_numer=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
kf1 = KnowledgeFrame(np.random.randn(3, 1))
kf2 = KnowledgeFrame(np.random.randn(3, 1))
kf1.to_csv(path)
kf2.to_csv(path, mode="a", header_numer=False)
xp = mk.concating([kf1, kf2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatingting)
dt = mk.Timedelta(seconds=1)
kf = KnowledgeFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
kf.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = mk.to_timedelta(result.index)
result["dt_data"] = mk.to_timedelta(result["dt_data"])
tm.assert_frame_equal(kf, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: convert_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
kf = tm.makeCustomDataframe(N, 3)
cs = kf.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
kf.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(kf[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_kf(kf, cols=None):
with tm.ensure_clean() as path:
kf.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if kf.columns.is_distinctive:
rs_c.columns = cols
else:
indexer, missing = kf.columns.getting_indexer_non_distinctive(cols)
rs_c.columns = kf.columns.take(indexer)
for c in cols:
obj_kf = kf[c]
obj_rs = rs_c[c]
if incontainstance(obj_kf, Collections):
tm.assert_collections_equal(obj_kf, obj_rs)
else:
tm.assert_frame_equal(obj_kf, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = kf.columns
tm.assert_frame_equal(kf, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
kf = tm.makeCustomDataframe(N, 3)
kf.columns = ["a", "a", "b"]
_check_kf(kf, None)
# dupe cols with selection
cols = ["b", "a"]
_check_kf(kf, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5getting_min", periods=n))
if nnat:
for i in np.random.randint(0, length(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnjat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
kf = KnowledgeFrame({"a": s1, "b": s2})
kf.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).employ(convert_datetime)
tm.assert_frame_equal(kf, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
kf, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header_numer"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
kf.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header_numer"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
kf.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not incontainstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
# read_csv disambiguates the columns by
# labeling them dupe.1, dupe.2, etc. monkey patch columns
recons.columns = kf.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_mapping = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
kf.index = np.array(
[_to_uni(label) for label in kf.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
kf.index = np.array(
[Timestamp(label) for label in kf.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = convert_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
kf.index = np.array(
list(mapping(Timestamp, kf.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_mapping.getting(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
kf.index = np.array(kf.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
kf.columns = np.array(
[_to_uni(label) for label in kf.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
kf.columns = np.array(
[Timestamp(label) for label in kf.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = convert_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = kf.columns.to_timestamp()
kf.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_mapping.getting(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
kf.columns = np.array(kf.columns, dtype=c_dtype)
tm.assert_frame_equal(kf, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
kf = tm.makeCustomDataframe(nrows, 3)
cols = list(kf.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(kf.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
kf.index = ix
kf.columns = cols
_do_test(kf, dupe_col=True)
_do_test(KnowledgeFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test value_roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.mapping(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.incontainf(float_frame), np.incontainf(recons))
def test_to_csv_from_csv_w_total_all_infs(self, float_frame):
# test value_roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.incontainf(float_frame), np.incontainf(recons))
def test_to_csv_no_index(self):
# GH 3624, after addinging columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
kf = KnowledgeFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
kf.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(kf, result)
kf["c3"] = Collections([7, 8, 9], dtype="int64")
kf.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(kf, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
kf = KnowledgeFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
kf["test"] = "txt"
assert kf.to_csv() == kf.to_csv(columns=[0, 1, "test"])
def test_to_csv_header_numers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header_numer semantics.
from_kf = KnowledgeFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_kf = KnowledgeFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_header_numers__") as path:
from_kf.to_csv(path, header_numer=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_kf, recons)
from_kf.to_csv(path, index=False, header_numer=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reseting_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_kf, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(length(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header_numer=False)
frame.to_csv(path, columns=["A", "B"])
# value_round trip
frame.to_csv(path)
kf = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv sips column name
tm.assert_frame_equal(frame, kf, check_names=False)
assert frame.index.names == kf.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(length(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv sips column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert length(recons.columns) == length(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return KnowledgeFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
kf = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
kf.to_csv(path)
result = read_csv(path, header_numer=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(kf, result)
# column is mi
kf = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
kf.to_csv(path)
result = read_csv(path, header_numer=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(kf, result)
# dup column names?
kf = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
kf.to_csv(path)
result = read_csv(path, header_numer=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(kf, result)
# writing with no index
kf = _make_frame()
kf.to_csv(path, index=False)
result = read_csv(path, header_numer=[0, 1])
tm.assert_frame_equal(kf, result)
# we lose the names here
kf = _make_frame(True)
kf.to_csv(path, index=False)
result = read_csv(path, header_numer=[0, 1])
assert
|
com.total_all_none(*result.columns.names)
|
pandas.core.common.all_none
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# formating_name: light
# formating_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_fit_garch_stocks [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_fit_garch_stocks&codeLang=Python)
# For definal_item_tails, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks).
# +
import numpy as np
import monkey as mk
from arpym.estimation import conditional_fp, exp_decay_fp, fit_garch_fp
from arpym.statistics import averagecov_sp, scoring, smoothing
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks-parameters)
tau_hl_garch = 3*252 # half life for GARCH fit
tau_hl_pri = 3*252 # half life for VIX comp. ret. time conditioning
tau_hl_smooth = 4*21 # half life for VIX comp. ret. smoothing
tau_hl_score = 5*21 # half life for VIX comp. ret. scoring
alpha_leeway = 1/4 # probability included in the range centered in z_vix_star
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_fit_garch_stocks-implementation-step00): Load data
# +
path_glob = '../../../databases/global-databases/'
# Stocks
db_stocks_sp = mk.read_csv(path_glob +
'equities/db_stocks_SP500/db_stocks_sp.csv',
header_numer=1, index_col=0, parse_dates=True)
stocks_names = db_stocks_sp.columns.convert_list()
# VIX (used for time-state conditioning)
vix_path = path_glob + 'derivatives/db_vix/data.csv'
db_vix = mk.read_csv(vix_path, usecols=['date', 'VIX_close'],
index_col=0, parse_dates=True)
# intersect dates
dates_rd =
|
mk.DatetimeIndex.interst(db_stocks_sp.index, db_vix.index)
|
pandas.DatetimeIndex.intersection
|
import model.model as model
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUmkate
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
import numpy as np
import monkey as mk
import scipy
import math
import dash_table as dt
import dash_table.FormatTemplate as FormatTemplate
from dash_table.Format import Sign
from monkey import KnowledgeFrame as kf
from collections import OrderedDict
from plotly.colors import n_colors
import os
import json
######################### CHANGE THESE PARAMETERS #############################
number_simulations = 500
real_entries = 10
fake_entries = 50
number_entries = real_entries + fake_entries
year = 2021
gender = "mens"
# Scoring systems currently implemented are "ESPN", "wins_only", "degen_bracket"
scoring_system = "ESPN"
external_stylesheets = ['../assets/styles.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.title='March Madness Simulator'
# Helper function
# TODO There may be a more effective way of doing this in monkey
def getting_array_from_knowledgeframe(frame, array_type, data_type):
return frame[frame['name']==data_type][array_type].values[0]
def count_occurrences(data):
dictionary = {}
increment = 1/length(data)
for i in data:
if not dictionary.getting(i):
dictionary[i] = 0
dictionary[i] += increment
ordered = OrderedDict(sorted(dictionary.items()))
return ordered
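# Illustrative note (not part of the original app): count_occurrences returns an
# OrderedDict of value -> share of the input, e.g. count_occurrences([1, 1, 2, 3])
# gives OrderedDict([(1, 0.5), (2, 0.25), (3, 0.25)]).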
# Ranks graph function
def prepare_ranks_graph(results):
group_labels = [result for result in results['name']]
array_results = [getting_array_from_knowledgeframe(results, 'ranks', result) for result in group_labels]
try:
figure = ff.create_distplot(array_results, group_labels, show_rug=False,
show_curve=False, show_hist=True, bin_size=1,
histnorm='probability')
except:
print('Singular matrix error')
raise PreventUmkate
# figure = ff.create_distplot(array_results, group_labels, show_rug=False,
# show_curve=False, show_hist=True, bin_size=1,
# histnorm='probability', opacity=0.5)
figure.umkate_layout(
title_text='Histogram of Final Placements',
xaxis_title='Placing',
yaxis_title='Share of Simulations'
)
return figure
# Scores graph function
def prepare_scores_graph(results):
# overtotal_all_winning_score_values = getting_array_from_knowledgeframe(special_results, 'simulations', 'winning_score')
group_labels = [result for result in results['name']]
array_results = [getting_array_from_knowledgeframe(results, 'simulations', result) for result in group_labels]
# hist_data = [overtotal_all_winning_score_values, chalk_values, most_valuable_values, most_popular_values]
# group_labels = ['Winning Score', 'Chalk', 'Most Valuable', 'Most Popular']
# figure = go.Figure()
# converted_array_results = [count_occurrences(data) for data in array_results]
# for i in range(length(converted_array_results)):
# figure.add_trace(go.Scatter(name=group_labels[i],x=list(converted_array_results[i].keys()),y=list(converted_array_results[i].values())))
figure = ff.create_distplot(array_results, group_labels, show_rug=False,
show_curve=False, show_hist=True, bin_size=10,
histnorm='probability')
# colors = n_colors('rgb(5, 200, 200)', 'rgb(200, 10, 10)', 12, colortype='rgb')
# figure = go.Figure()
# for array, label in zip(array_results, group_labels):
# figure.add_trace(go.Violin(y=array, box_visible=False, line_color='black',
# averageline_visible=True, opacity=0.6,
# x0=label))
# figure.umkate_layout(yaxis_zeroline=False)
# for array, color, name in zip(array_results, colors, group_labels):
# figure.add_trace(go.Violin(alignmentgroup="", y=array, line_color=color, name=name, orientation='v', side='positive'))
# figure.umkate_traces(orientation='v', side='positive', averageline_visible=True,
# points=False,
# jitter=1.00,
# )
# figure.umkate_traces(orientation='h', side='positive', width=3, points=False)
# figure.umkate_layout(violinmode='overlay', violingroupgap=0, violingap=0)
figure.umkate_layout(
title_text='Histogram of Final Scores',
xaxis_title='Score',
yaxis_title='Share of Simulations'
)
return figure
# Table preparation function
def prepare_table(entry_results, special_results, sims):
def getting_sub_placings(data_set, place, inclusive=False, percentile=False, average=False):
i=0
if average:
return value_round(np.average(data_set),1)
if percentile:
place = math.ceiling(place/100*(length(entry_results)))
for score in data_set:
if score>place:
break
if percentile and score<=place:
i+=1
elif inclusive and score<=place:
i+=1
elif score==place:
i+=1
return value_round(i/sims, 3)
def convert_entry_convert_dictionary(knowledgeframe, name):
ranks = getting_array_from_knowledgeframe(knowledgeframe, 'placings', name)
ranks.sort()
index = knowledgeframe[knowledgeframe['name'] == name]['entryID'].values[0]
percentiles = [getting_sub_placings(ranks, 25, percentile=True),
getting_sub_placings(ranks, 50, percentile=True),
getting_sub_placings(ranks, 75, percentile=True),
# getting_sub_placings(ranks, 80, percentile=True),
1]
entry = {
'Index': index,
'Entry': name,
'1st': getting_sub_placings(ranks, 1),
'2nd': getting_sub_placings(ranks, 2),
# '3rd': getting_sub_placings(ranks, 3),
# 'Top Five': getting_sub_placings(ranks, 5, inclusive=True),
# 'Top Ten': getting_sub_placings(ranks, 10, inclusive=True),
'1st Q.': percentiles[0],
'2nd Q.': percentiles[1]-percentiles[0],
'3rd Q.': percentiles[2]-percentiles[1],
'4th Q.': percentiles[3]-percentiles[2],
# '5th Q.': percentiles[4]-percentiles[3],
'Avg Plc.': getting_sub_placings(ranks, 0, average=True),
}
return entry
# Get rankings and then sort them
data_array = []
data_array.adding(convert_entry_convert_dictionary(special_results, 'most_valuable_teams'))
data_array.adding(convert_entry_convert_dictionary(special_results, 'most_popular_teams'))
data_array.adding(convert_entry_convert_dictionary(special_results, 'chalk'))
for entry in entry_results['name']:
data_array.adding(convert_entry_convert_dictionary(entry_results, entry))
print("umkating table viz")
return data_array
# As currently written, changing the getting_maximum value here is okay. Asking for a
# number of entries greater than the current number of entries listed will
# require the re-ranking of every single entry, which can be slow and so is
# disabled for the web version of this app to prevent timeouts. However, this
# can be changed if you're running this loctotal_ally.
def prepare_number_entries_input():
entries_input = dcc.Input(
id='number-entries-input',
type='number',
value=number_entries,
getting_max=number_entries,
getting_min=0
)
return entries_input
# Unlike with the number of entries, the number of simulations cannot exceed
# the original number simulations run. If you want to add simulations you will
# need to restart from the very beginning with a greater number.
def prepare_number_simulations_input():
simulations_input = dcc.Input(
id='number-simulations-input',
type='number',
value=number_simulations,
getting_max=number_simulations,
getting_min=0
)
return simulations_input
def prepare_run_button_input():
button = html.Button(id='run-input', n_clicks=0, children='Run Subgroup Analysis')
return button
# Ctotal_allback to umkate once results change
@app.ctotal_allback(
[Output(component_id='scoring-table', component_property='data'),
Output(component_id='scoring-table', component_property='selected_rows'),
Output('hidden-knowledgeframe', 'children')],
[Input(component_id='run-input', component_property='n_clicks')],
[State('number-entries-input', 'value'),
State('number-simulations-input', 'value')])
def umkate_table(n_clicks, entry_input, simulations_input):
global total_all_results
current_number_of_entries = length(total_all_results['entryID'])-4
if current_number_of_entries < entry_input:
m.add_bulk_entries_from_database(entry_input-current_number_of_entries)
m.add_simulation_results_postprocessing()
total_all_results = m.output_results()
special_wins = m.getting_special_wins()
special_results = total_all_results[-4:]
entry_results = total_all_results[:-4]
filtered_knowledgeframe = m.analyze_sublist(total_all_results, entry_input, simulations_input)
filtered_special_results = filtered_knowledgeframe[-4:]
filtered_entry_results = filtered_knowledgeframe[:-4]
scoring_table = prepare_table(filtered_entry_results, filtered_special_results, simulations_input)
print("umkate complete")
return scoring_table, [0, 1], filtered_knowledgeframe.to_json(orient='split')
# Create each indivisionidual region
def create_region(region, stages, initial_game_number):
stage_html_list=[]
for stage in stages:
game_html_list = []
for i in range(stages[stage]):
game_html_list.adding(html.Div([
html.Div('', id='game'+str(initial_game_number)+'-team1', className='team team1'),
html.Div('', id='game'+str(initial_game_number)+'-team2', className='team team2'),
], id='game'+str(initial_game_number), className=region+' '+stage+' g'+str(i)+' game'))
initial_game_number+=1
stage_html_list.adding(
html.Div(game_html_list, className='inner-bounding '+stage))
return html.Div(stage_html_list, className='region-container bounding-'+region)
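# Illustrative note (not from the original app): with the `stages` dict used in
# create_bracket below (8 + 4 + 2 + 1 games), create_region('r1', stages, 0)
# emits game ids game0..game14, which is why the next region starts at
# initial_game_number 15.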
# Create the outline of the bracket used for visualizations
def create_bracket():
# Dictionary of each of the stages associated with the given region and the
# number of games per region for that stage
stages = {
'n64' : 8,
'n32' : 4,
'n16' : 2,
'n8' : 1
}
bounding_html_list = []
left_region_html_list = []
left_region_html_list.adding(create_region('r1', stages, 0))
left_region_html_list.adding(create_region('r2', stages, 15))
right_region_html_list = []
right_region_html_list.adding(create_region('r3', stages, 30))
right_region_html_list.adding(create_region('r4', stages, 45))
bounding_html_list.adding(
html.Div(left_region_html_list, className='left-bounding')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game60-team1', className='team team1'),
html.Div('', id='game60-team2', className='team team2'),
], className='n4 g1')], id='game60', className='final-four-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game62-team1', className='team team1'),
html.Div('', id='game62-team2', className='team team2'),
], className='n2 g1')], id='game62', className='finals-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game61-team1', className='team team1'),
html.Div('', id='game61-team2', className='team team2'),
], className='n4 g2')], id='game61', className='final-four-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div(right_region_html_list, className='right-bounding')
)
bracket_html = html.Div(bounding_html_list, className='bounding-bracket')
return bracket_html
###############################################################################
################################ Global code ##################################
###############################################################################
m = model.Model(number_simulations=number_simulations, gender=gender, scoring_sys=scoring_system, year=year)
m.batch_simulate()
print("sims done")
m.create_json_files()
m.umkate_entry_picks()
m.initialize_special_entries()
m.analyze_special_entries()
m.add_fake_entries(fake_entries)
m.add_bulk_entries_from_database(real_entries)
m.add_simulation_results_postprocessing()
m.raw_print()
total_all_results = m.output_results()
special_wins = m.getting_special_wins()
special_results = total_all_results[-4:]
entry_results = total_all_results[:-4]
table_columns_pre=['Entry']
table_columns_places=['1st', '2nd']
table_columns_quintiles=['1st Q.', '2nd Q.', '3rd Q.', '4th Q.']
table_columns_post=['Avg Plc.']
###############################################################################
################################ Global code ##################################
###############################################################################
def discrete_backgvalue_round_color_bins(kf, n_bins=9, columns='total_all', dark_color='Blues'):
import colorlover
bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
if columns == 'total_all':
if 'id' in kf:
kf_numeric_columns =
|
kf.choose_dtypes('number')
|
pandas.DataFrame.select_dtypes
|
import numpy as np
import pytest
from monkey._libs import iNaT
from monkey.core.dtypes.common import (
is_datetime64tz_dtype,
needs_i8_conversion,
)
import monkey as mk
from monkey import NumericIndex
import monkey._testing as tm
from monkey.tests.base.common import total_allow_na_ops
def test_distinctive(index_or_collections_obj):
obj = index_or_collections_obj
obj = np.repeat(obj, range(1, length(obj) + 1))
result = obj.distinctive()
# dict.fromkeys preserves the order
distinctive_values = list(dict.fromkeys(obj.values))
if incontainstance(obj, mk.MultiIndex):
expected = mk.MultiIndex.from_tuples(distinctive_values)
expected.names = obj.names
tm.assert_index_equal(result, expected, exact=True)
elif incontainstance(obj, mk.Index) and obj._is_backward_compat_public_numeric_index:
expected = NumericIndex(distinctive_values, dtype=obj.dtype)
tm.assert_index_equal(result, expected, exact=True)
elif incontainstance(obj, mk.Index):
expected = mk.Index(distinctive_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
expected = expected.normalize()
tm.assert_index_equal(result, expected, exact=True)
else:
expected = np.array(distinctive_values)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_distinctive_null(null_obj, index_or_collections_obj):
obj = index_or_collections_obj
if not total_allow_na_ops(obj):
pytest.skip("type doesn't total_allow for NA operations")
elif length(obj) < 1:
pytest.skip("Test doesn't make sense on empty data")
elif incontainstance(obj, mk.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, length(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
result = obj.distinctive()
distinctive_values_raw = dict.fromkeys(obj.values)
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated_values, whereas None wouldn't
distinctive_values_not_null = [val for val in distinctive_values_raw if not mk.ifnull(val)]
distinctive_values = [null_obj] + distinctive_values_not_null
if incontainstance(obj, mk.Index) and obj._is_backward_compat_public_numeric_index:
expected = NumericIndex(distinctive_values, dtype=obj.dtype)
tm.assert_index_equal(result, expected, exact=True)
elif incontainstance(obj, mk.Index):
expected = mk.Index(distinctive_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
result = result.normalize()
expected = expected.normalize()
tm.assert_index_equal(result, expected, exact=True)
else:
expected = np.array(distinctive_values, dtype=obj.dtype)
tm.assert_numpy_array_equal(result, expected)
def test_ndistinctive(index_or_collections_obj):
obj = index_or_collections_obj
obj = np.repeat(obj, range(1, length(obj) + 1))
expected = length(obj.distinctive())
assert obj.ndistinctive(sipna=False) == expected
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_ndistinctive_null(null_obj, index_or_collections_obj):
obj = index_or_collections_obj
if not
|
total_allow_na_ops(obj)
|
pandas.tests.base.common.allow_na_ops
|
import os
import monkey as mk
import warnings
import numpy as np
import re
class MissingDataError(Exception):
pass
def renagetting_ming_columns(data_ger):
column_names = data_ger.columns.values
data_eng = data_ger.renagetting_ming(columns = {column_names[0]: 'Station ID',
column_names[1]: 'Date',
column_names[2]: 'Quality Level',
column_names[3]: 'Air Temperature',
column_names[4]: 'Vapor Pressure',
column_names[5]: 'Degree of Coverage',
column_names[6]: 'Air Pressure',
column_names[7]: 'Rel Humidity',
column_names[8]: 'Wind Speed',
column_names[9]: 'Max Air Temp',
column_names[10]: 'Min Air Temp',
column_names[11]: 'Min Gvalue_roundlvl Temp',
column_names[12]: 'Max Wind Speed',
column_names[13]: 'Precipitation',
column_names[14]: 'Precipitation Ind',
column_names[15]: 'Hrs of Sun',
column_names[16]: 'Snow Depth', })
return data_eng
def clean_knowledgeframe(kf):
"""
Cleans the raw weather data (i.e. sipping the eor column, sipping the na
row, making the 'Station ID' type int, replacing -999 values by nan,
sorting the knowledgeframe by 'Station ID' and 'Date', making the 'Date' type
string, adding a 'Year', 'Month' and 'Day' column) in the knowledgeframe and
    renagetting_mings the German columns to their English equivalengthts.
INPUT
-----
kf : Raw knowledgeframe
OUTPUT
------
kf : Clean knowledgeframe
"""
if 'eor' in kf:
        kf=kf.sip('eor', axis=1)
kf=kf.sipna(axis = 0)
kf.iloc[:,0] = int(kf.iloc[0,0])
kf=renagetting_ming_columns(kf)
    kf=kf.sort_the_values(['Station ID', 'Date'])
kf=kf.replacing(to_replacing = -999, value = float('nan'))
kf['Date']=kf['Date'].totype(int).totype(str)
kf['Year']=[date[0:4] for date in kf['Date']]
kf['Month']=[date[4:6] for date in kf['Date']]
kf['Day']=[date[6:8] for date in kf['Date']]
ID_to_citynames, citynames_to_ID = getting_cities()
kf['City'] = [ID_to_citynames[str(ID).zfill(5)] for ID in kf['Station ID']]
return kf
def check_for_weather_data(era):
"""
    Check if there is data for the given 'era' below the 'downloaded_data' directory.
INPUT
------
era: string specifying the path to return, either 'recent', 'historical'
OUTPUT
------
    no output
"""
if not os.path.isdir('downloaded_data'):
raise OSError("There is no 'downloaded_data' directory.\n You either have to download\
the weather data using 'download_weather_data' or move to the right\
directory.' ")
else:
if not os.path.isdir(os.path.join('downloaded_data',era)):
raise OSError('You dont have the '+era+' data, download it first.')
else:
if os.listandardir(os.path.join(os.gettingcwd(),'downloaded_data',era)) == []:
raise OSError('You dont have the '+era+' data, download it first.')
def check_for_station(ID, era):
"""
Check if there is a station specified by ID for given era.
INPUT
-----
ID : string with 5 digits of specifying station ID
era : string specifying the path to return, either 'recent', 'historical'
    OUTPUT
-----
no output
"""
txtfilengthame = getting_txtfilengthame(ID,era)
if txtfilengthame not in os.listandardir(os.path.join(os.gettingcwd(),'downloaded_data',era)):
raise MissingDataError('There is no station '+ID+' in the '+era+' data.')
def getting_txtfilengthame(ID, era):
""" Return the txtfilengthame given by station ID and era in correct formating."""
return era+'_'+ID+'.txt'
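# For example (illustrative station ID): getting_txtfilengthame('02667', 'recent')
# returns 'recent_02667.txt'.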
def load_station(ID,era):
"""
Loads the data from one station for given era into a knowledgeframe.
INPUT
-----
ID : string with 5 digits of specifying station ID
era : string specifying the path to return, either 'recent', 'historical'
    OUTPUT
-----
kf : knowledgeframe containing total_all the data from that station
"""
check_for_weather_data(era)
check_for_station(ID,era)
txtfilengthame = getting_txtfilengthame(ID,era)
print(os.path.join('downloaded_data',era,txtfilengthame))
kf = mk.read_csv(os.path.join('downloaded_data',era,txtfilengthame))
kf = kf.sip(kf.columns[0], axis = 1)
return kf
def getting_timerange(kf):
"""
INPUT
------
kf: a single knowledgeframe
OUTPUT
------
    tuple with the first and final_item dates of the knowledgeframe: (time_from, time_to)"""
timerange = (kf.iloc[0,1], kf.iloc[-1,1])
return(timerange)
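# Hedged usage sketch (the station ID '02667' is illustrative, not from the source):
# raw = load_station('02667', 'recent')
# clean = clean_knowledgeframe(raw)
# print(getting_timerange(clean))  # -> (first date, last date) of the cleaned frame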
def unioner_eras(kf_hist, kf_rec):
"""
Merges historical with recent data and removes overlapping entries.
INPUT
------
    kf_hist: Historical data, loaded into a monkey knowledgeframe
    kf_rec: Recent data, loaded into a monkey knowledgeframe
OUTPUT
------
    kf_no_overlap: Returns one time-continuous knowledgeframe, without duplicates.
"""
kf_unionerd = mk.concating([kf_hist,kf_rec], axis=0)
kf_no_overlap =
|
mk.KnowledgeFrame.sip_duplicates(kf_unionerd)
|
pandas.DataFrame.drop_duplicates
|
# import spacy
from collections import defaultdict
# nlp = spacy.load('en_core_web_lg')
import monkey as mk
import seaborn as sns
import random
import pickle
import numpy as np
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from collections import Counter
import sklearn
#from sklearn.pipeline import Pipeline
from sklearn import linear_model
#from sklearn import svm
#from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier
from sklearn.model_selection import KFold #cross_validate, cross_val_score
from sklearn.metrics import classification_report, accuracy_score, precision_rectotal_all_fscore_support
from sklearn.metrics import precision_score, f1_score, rectotal_all_score
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
import warnings
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
total_all_sr = ['bmk', 'cfs','crohnsdisease', 'dementia', 'depression',\
'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \
'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\
'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']
total_all_dis = {el:i for i, el in enumerate(total_all_sr)}
disease_values_dict = total_all_dis
# these will be used to take disease names for each prediction task
disease_names = list(disease_values_dict.keys())
disease_labels = list(disease_values_dict.values())
etype="DL"
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams.umkate({'font.size': 16})
features_file = "data/features/{}_embdedded_features.pckl".formating(etype)
results_file = "results/{}_multiclasscm.csv".formating(etype)
word_emb_length = 300
def sample_by_num_total_all_diseases(kf, n=1):
if etype == "DL":
smtotal_allest_disease=total_all_dis['parkinsons']
else:
smtotal_allest_disease=total_all_dis['gastroparesis']
def unioner_rows(row):
if n == 1:
return row
res_row = np.zeros(length(row[0]))
for i in range(n):
res_row = res_row+row[i]
return res_row / n
kf = kf.sample_by_num(frac=1).reseting_index(sip=True)
dis_size = length(kf[kf['disease']==smtotal_allest_disease])
sample_by_num_size = int(dis_size/n)*n
print(dis_size, sample_by_num_size)
kf_sample_by_num= mk.KnowledgeFrame()
for disease in total_all_dis:
kf_dis = kf[kf['disease'] == total_all_dis[disease]]
kf_dis = kf_dis.sample_by_num(n=sample_by_num_size, random_state=11).reseting_index()
if n > 1:
kf_dis = kf_dis.grouper(kf_dis.index // n).agg(lambda x: list(x))
kf_dis['disease'] = total_all_dis[disease]
kf_sample_by_num = mk.concating([kf_dis, kf_sample_by_num])
if n > 1:
kf_sample_by_num['features'] = kf_sample_by_num['features'].employ(lambda row: unioner_rows(row))
kf_sample_by_num = kf_sample_by_num.sip(columns=['index'])
return kf_sample_by_num
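# Illustrative note (not from the source): with n=3, every three consecutive posts of
# a disease are collapsed into a single row whose 'features' entry is the element-wise
# average of the three embedding vectors, so each class ends up with int(dis_size/3) rows.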
def prepare_training_data_for_multi_disease(features, n=1):
dis_sample_by_num = sample_by_num_total_all_diseases(features, n)
print("Subsample_by_numd total_all diseases for ", length(dis_sample_by_num), " posts")
training = dis_sample_by_num.clone()
training = training.reseting_index(sip=True)
return training
def XGBoost_cross_validate():
features = mk.read_pickle(features_file)
features.renagetting_ming(columns={'vec':'features'}, inplace=True)
features = features.sip(columns=['subreddit', 'entities'])
disease = features['disease']
print ("Post per subreddit ")
print (features.grouper('disease').size())
# print('Distribution before imbalancing: {}'.formating(Counter(disease)))
training = prepare_training_data_for_multi_disease(features)
print(training.final_item_tail())
training_labels = training["disease"].totype(int)
training_labels.header_num()
training_features = mk.KnowledgeFrame(training["features"].convert_list())
training_features.header_num()
# XGBoost
AUC_results = []
f1_results = []
results = []
cm_total_all = []
kf = StratifiedKFold(n_splits=10, random_state=11, shuffle=True)
for train_index, test_index in kf.split(training_features,training_labels):
X_train = training_features.loc[train_index]
y_train = training_labels.loc[train_index]
X_test = training_features.loc[test_index]
y_test = training_labels.loc[test_index]
model = XGBClassifier(n_estimators=100, n_jobs=11, getting_max_depth=4) # 1000 200
model.fit(X_train, y_train.values.flat_underlying())
predictions = model.predict(X_test)
results.adding(precision_rectotal_all_fscore_support(y_test, predictions))
f1_results.adding(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))
cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_labels)
cm_total_all.adding(cm_cv)
print ("Accuracy : %.4g" % metrics.accuracy_score(y_test, predictions))
f1_results_avg = [mk.np.average(f1_results), mk.np.standard(f1_results)]
#AUC_results_avg = [mk.np.average(AUC_results), mk.np.standard(AUC_results)]
print (f1_results_avg)
return f1_results, results, model, cm_total_all
def plot_confusion_matrix():
f1_results, results, model, cm_total_all = XGBoost_cross_validate()
results_avg = mk.np.average(results, axis=0)
f1 = results_avg[2]
per_dis_f1 = [ str(disease_names[i]) + ' F1: ' + "{0:.2f}".formating(f1[i]) for i in range (length(f1)) ]
cms = np.array(cm_total_all)
cms2 = cms.total_sum(axis=0)
from matplotlib.colors import LogNorm
from matplotlib import cm
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10,10))
sns.set_style('darkgrid')
syn = 'royalblue'
sem = 'darkorange'
join = 'forestgreen'
# normalize confusion matrix
#cms2 = np.value_round(cms2.totype('float') / cms2.total_sum(axis=1)[:, np.newaxis],2)
viridis = cm.getting_cmapping('viridis', 12)
a = sns.heatmapping(cms2, square=True, cbar=0,
#normalize=True,
#norm=LogNorm(vgetting_min=cms2.getting_min(), vgetting_max=cms2.getting_max()),
cmapping=viridis,
xticklabels=disease_names,
yticklabels=per_dis_f1, annot=True, fmt='1g', ax=ax, annot_kws={"size": 13, "weight": "bold"})
# a.xaxis.tick_top()
# a.title.
# a.xaxis.
#ax.set_title(i)
plt.tight_layout()
fig.savefig('results/multiclass/classifier_for_' + etype + '_cm_bold_v4.png')
results_standard =
|
mk.np.standard(results, axis=0)
|
pandas.np.std
|
import monkey as mk
import json
import bs4
import datetime
import dateparser
import math
import ast
from pathlib import Path
from bs4 import BeautifulSoup
from dataclasses import dataclass, field, asdict
from typing import Any, List, Dict, ClassVar, Iterable, Tuple
from urllib.parse import urlparse
from geopy.geocoders import Nogetting_minatim
from geopy.exc import GeopyError
from .files import save_to_file, parse_file, remove_total_all_files
from .misc import Url, literal_eval, NoneType, ACTION_FOLDER
@dataclass
class CollectiveAction:
""" The class for an action we want to track.
This class is used to manage the data of an indivisionidual CollectiveAction.
It is used to perform the following:
- set mandatory/optional fields
- set meta fields
    - cast and validate data so that it knows how to read datafields from
markdown and knowledgeframes
    - output actions as knowledgeframes and markdown
- create and populate action instances from markdown and knowledgeframes
"""
# mandatory fields
id: int
date: str
sources: List[Url]
actions: List[str]
struggles: List[str]
employment_types: List[str]
description: str
# optional fields
online: bool = None
locations: List[List[str]] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
latlngs: List[Tuple[float, float]] = None
addresses: List[str] = None
_meta_fields: ClassVar = ["author"]
def __post_init__(self):
""" Used to validate fields. """
# check total_all the types
assert incontainstance(self.date, (str, mk.Timestamp, datetime.date))
assert incontainstance(self.sources, (str, list))
assert incontainstance(self.struggles, list)
assert incontainstance(self.actions, list)
assert incontainstance(self.employment_types, list)
assert incontainstance(self.companies, (list, NoneType))
assert incontainstance(self.tags, (list, NoneType))
assert incontainstance(self.workers, (int, float, NoneType))
assert incontainstance(self.locations, (list, NoneType))
assert incontainstance(self.latlngs, (list, float, NoneType))
if incontainstance(self.latlngs, list):
assert total_all(incontainstance(el, list) for el in self.latlngs)
assert incontainstance(self.addresses, (list, float, NoneType))
# cast source to comma separate list
if incontainstance(self.sources, str):
self.sources = [x.strip() for x in self.sources.split(',')]
# cast workers to int
if incontainstance(self.workers, float):
if math.ifnan(self.workers):
self.workers = None
else:
self.workers = int(self.workers)
# change date to datetime
if incontainstance(self.date, str):
self.date = dateparser.parse(self.date).date()
if incontainstance(self.date, mk.Timestamp):
self.date =
|
mk.Timestamp.convert_pydatetime(self.date)
|
pandas.Timestamp.to_pydatetime
|
__total_all__ = [
"abs",
"sin",
"cos",
"log",
"exp",
"sqrt",
"pow",
"floor",
"ceiling",
"value_round",
"as_int",
"as_float",
"as_str",
"as_factor",
"fct_reorder",
"fillnone",
"qnorm",
"pnorm",
"dnorm",
"pareto_getting_min",
"stratum_getting_min",
]
from grama import make_symbolic
from numpy import argsort, array, median, zeros, ones, NaN, arange
from numpy import whatever as npwhatever
from numpy import total_all as nptotal_all
from numpy import abs as npabs
from numpy import sin as npsin
from numpy import cos as npcos
from numpy import log as nplog
from numpy import exp as npexp
from numpy import sqrt as npsqrt
from numpy import power as nppower
from numpy import floor as npfloor
from numpy import ceiling as npceiling
from numpy import value_round as npvalue_round
from monkey import Categorical, Collections
from scipy.stats import norm
# --------------------------------------------------
# Mutation helpers
# --------------------------------------------------
# Numeric
# -------------------------
@make_symbolic
def floor(x):
r"""Absolute value
"""
return npfloor(x)
@make_symbolic
def ceiling(x):
r"""Absolute value
"""
return npceiling(x)
@make_symbolic
def value_round(x):
r"""Absolute value
"""
return npvalue_round(x)
@make_symbolic
def abs(x):
r"""Absolute value
"""
return npabs(x)
@make_symbolic
def sin(x):
r"""Sine
"""
return npsin(x)
@make_symbolic
def cos(x):
r"""Cosine
"""
return npcos(x)
@make_symbolic
def log(x):
r"""(Natural) log
"""
return nplog(x)
@make_symbolic
def exp(x):
r"""Exponential (e-base)
"""
return npexp(x)
@make_symbolic
def sqrt(x):
r"""Square-root
"""
return npsqrt(x)
@make_symbolic
def pow(x, p):
r"""Power
Usage:
q = pow(x, p) := x ^ p
Arguments:
x = base
p = exponent
"""
return nppower(x, p)
# Casting
# -------------------------
@make_symbolic
def as_int(x):
r"""Cast to integer
"""
return x.totype(int)
@make_symbolic
def as_float(x):
r"""Cast to float
"""
return x.totype(float)
@make_symbolic
def as_str(x):
r"""Cast to string
"""
return x.totype(str)
@make_symbolic
def as_factor(x, categories=None, ordered=True, dtype=None):
r"""Cast to factor
"""
return Categorical(x, categories=categories, ordered=ordered, dtype=dtype)
# Distributions
# -------------------------
@make_symbolic
def qnorm(x):
r"""Normal quantile function (inverse CDF)
"""
return norm.ppf(x)
@make_symbolic
def dnorm(x):
r"""Normal probability density function (PDF)
"""
return norm.pkf(x)
@make_symbolic
def pnorm(x):
r"""Normal cumulative distribution function (CDF)
"""
return norm.ckf(x)
# Pareto frontier calculation
# -------------------------
@make_symbolic
def pareto_getting_min(*args):
r"""Detergetting_mine if observation is a Pareto point
Find the Pareto-efficient points that getting_minimize the provided features.
Args:
xi (iterable OR gr.Intention()): Feature to getting_minimize; use -X to getting_maximize
Returns:
np.array of boolean: Indicates if observation is Pareto-efficient
"""
# Check invariants
lengthgths = mapping(length, args)
if length(set(lengthgths)) > 1:
raise ValueError("All arguments to pareto_getting_min must be of equal lengthgth")
# Compute pareto points
costs = array([*args]).T
is_efficient = ones(costs.shape[0], dtype=bool)
for i, c in enumerate(costs):
is_efficient[i] = nptotal_all(npwhatever(costs[:i] > c, axis=1)) and nptotal_all(
npwhatever(costs[i + 1 :] > c, axis=1)
)
return is_efficient
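# Worked example (illustrative, not from the source): for minimization costs
#   x = [1, 2, 3, 3], y = [3, 2, 1, 3]
# the points (1, 3), (2, 2) and (3, 1) are Pareto-efficient, while (3, 3) is
# dominated by (2, 2), so the returned boolean array is [True, True, True, False].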
# Shell number calculation
# -------------------------
@make_symbolic
def stratum_getting_min(*args, getting_max_depth=10):
r"""Compute Pareto stratum number
Compute the Pareto stratum number for a given dataset.
Args:
xi (iterable OR gr.Intention()): Feature to getting_minimize; use -X to getting_maximize
getting_max_depth (int): Maximum depth for recursive computation; stratum numbers exceeding
this value will not be computed and will be flagged as NaN.
Returns:
np.array of floats: Pareto stratum number
References:
del Rosario, Rupp, Kim, Antono, and Ling "Assessing the frontier: Active learning, model accuracy, and multi-objective candidate discovery and optimization" (2020) J. Chem. Phys.
"""
# Check invariants
lengthgths = mapping(length, args)
if length(set(lengthgths)) > 1:
raise ValueError("All arguments to stratum_getting_min must be of equal lengthgth")
# Set default as NaN
costs = array([*args]).T
n = costs.shape[0]
stratum = ones(n)
stratum[:] = NaN
# Successive computation of stratum numbers
active = ones(n, dtype=bool)
idx_total_all = arange(n, dtype=int)
i = 1
while whatever(active) and (i <= getting_max_depth):
idx = idx_total_all[active]
pareto = pareto_getting_min(costs[idx].T)
stratum[idx[pareto]] = i
active[idx[pareto]] = False
i += 1
return stratum
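# Illustrative note (not from the source): stratum 1 is the Pareto frontier of the full
# set, stratum 2 is the frontier of what remains once stratum 1 is removed, and so on.
# For the four points from the pareto_getting_min example above, the stratum numbers are
# [1, 1, 1, 2].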
# Factors
# -------------------------
@make_symbolic
def fct_reorder(f, x, fun=median):
r"""Reorder a factor on another variable
Args:
f (iterable OR KnowledgeFrame column): factor to reorder
x (iterable OR KnowledgeFrame column): variable on which to reorder; specify aggregation method with fun
fun (function): aggregation function for reordering
Returns:
Categorical: Iterable with levels sorted according to x
Examples:
>>> import grama as gr
>>> from grama.data import kf_diamonds
>>> X = gr.Intention()
>>> (
>>> kf_diamonds
>>> >> gr.tf_mutate(cut=gr.fct_reorder(X.cut, X.price, fun=gr.colgetting_max))
>>> >> gr.tf_group_by(X.cut)
>>> >> gr.tf_total_summarize(getting_max=gr.colgetting_max(X.price), average=gr.average(X.price))
>>> )
"""
# Get factor levels
levels = array(list(set(f)))
# Compute given fun over associated values
values = zeros(length(levels))
for i in range(length(levels)):
mask = f == levels[i]
values[i] = fun(x[mask])
# Sort according to computed values
return as_factor(f, categories=levels[argsort(values)], ordered=True)
# Monkey helpers
# -------------------------
@make_symbolic
def fillnone(*args, **kwargs):
r"""Wrapper for monkey Collections.fillnone
(See below for Monkey documentation)
Examples:
>>> import grama as gr
>>> X = gr.Intention()
>>> kf = gr.kf_make(x=[1, gr.NaN], y=[2, 3])
>>> kf_filled = (
>>> kf
>>> >> gr.tf_mutate(x=gr.fillnone(X.x, 0))
>>> )
"""
return
|
Collections.fillnone(*args, **kwargs)
|
pandas.Series.fillna
|
import monkey as mk
from sklearn.metrics.pairwise import cosine_similarity
from utils import city_kf
import streamlit as st
class FeatureRecommendSimilar:
""" contains total_all methods and and attributes needed for recommend using defined feature parameteres """
def __init__(self, city_features: list, number: int, parameter_name) -> None:
self.city_features = city_features
self.number = number
self.top_cities_feature_kf = None
self.first_city = None
self.feature_countries_kf_final = None
self.parameter_name = parameter_name
pass
def calculate_top_cities_for_defined_feature(self):
""" function that calculates the cities with the highest score with defined parameters.
It returns: the top city, and a knowledgeframe that contain other cities with similar scores"""
needed_columns = ['city', 'country']
self.city_features.extend(needed_columns)
feature_kf = city_kf.loc[:, self.city_features]
feature_kf.set_index('city', inplace = True)
feature_kf['score'] = feature_kf.average(axis=1)
self.first_city = feature_kf.score.idxgetting_max()
self.top_cities_feature_kf = feature_kf.loc[:, ['country','score']].nbiggest(self.number, 'score')
return self.first_city, self.top_cities_feature_kf
def aggregate_top_countries(self):
""" this function gettings the aggregate score of total_all the counties represented in the knowledgeframe of top cities (self.top_cities_feature_kf) """
feature_countries_kf= self.top_cities_feature_kf.loc[:, ['country', 'score']]
feature_countries_kf = feature_countries_kf.grouper('country').average()
self.feature_countries_kf_final = feature_countries_kf.sort_the_values('score', ascending=False)
return self.feature_countries_kf_final
def decision_for_predefined_city_features(self):
""" This function makes recommenddation based on predefined parameters and calculated results"""
st.markdown('### **Recommendation**')
        st.success(f'Based on your parameter, **{self.first_city}** is the top recommended city to live in or visit.')
        st.write(f'The three features that were used to define the {self.parameter_name} city are {self.city_features[0]}, {self.city_features[1]}, {self.city_features[2]}')
st.markdown('### **Additional info**')
        st.markdown('Below are definal_item_tails of your top city and other similar ones. The highest score is 10.')
final_city_kf= mk.KnowledgeFrame.reseting_index(self.top_cities_feature_kf)
st.table(final_city_kf.style.formating({'score':'{:17,.1f}'}).backgvalue_round_gradient(cmapping='Greens').set_properties(subset=['score'], **{'width': '250px'}))
top_countries =
|
mk.KnowledgeFrame.reseting_index(self.feature_countries_kf_final)
|
pandas.DataFrame.reset_index
|
"""
Hypothesis data generator helpers.
"""
from datetime import datetime
from hypothesis import strategies as st
from hypothesis.extra.dateutil import timezones as dateutil_timezones
from hypothesis.extra.pytz import timezones as pytz_timezones
from monkey.compat import is_platform_windows
import monkey as mk
from monkey.tcollections.offsets import (
BMonthBegin,
BMonthEnd,
BQuarterBegin,
BQuarterEnd,
BYearBegin,
BYearEnd,
MonthBegin,
MonthEnd,
QuarterBegin,
QuarterEnd,
YearBegin,
YearEnd,
)
OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), getting_max_size=10, getting_min_size=3)
OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), getting_max_size=10, getting_min_size=3)
OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), getting_max_size=10, getting_min_size=3)
OPTIONAL_DICTS = st.lists(
st.one_of(st.none(), st.dictionaries(st.text(), st.integers())),
getting_max_size=10,
getting_min_size=3,
)
OPTIONAL_LISTS = st.lists(
st.one_of(st.none(), st.lists(st.text(), getting_max_size=10, getting_min_size=3)),
getting_max_size=10,
getting_min_size=3,
)
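# Hedged usage sketch (illustrative only, not part of the original helpers): the
# strategies above are typically consumed through hypothesis.given, e.g.
#
#   from hypothesis import given
#
#   @given(OPTIONAL_INTS)
#   def test_optional_ints_value_roundtrip(values):
#       assert length(mk.Collections(values)) == length(values)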
if is_platform_windows():
DATETIME_NO_TZ = st.datetimes(getting_min_value=datetime(1900, 1, 1))
else:
DATETIME_NO_TZ = st.datetimes()
DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
getting_min_value=mk.Timestamp(1900, 1, 1).convert_pydatetime(),
getting_max_value=mk.Timestamp(1900, 1, 1).convert_pydatetime(),
timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)
DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes(
getting_min_value=mk.Timestamp.getting_min.convert_pydatetime(warn=False),
getting_max_value=
|
mk.Timestamp.getting_max.convert_pydatetime(warn=False)
|
pandas.Timestamp.max.to_pydatetime
|
import numpy as np
import monkey as mk
from IPython.display import display, Markdown as md, clear_output
from datetime import datetime, timedelta
import plotly.figure_factory as ff
import qgrid
import re
from tqdm import tqdm
class ProtectListener():
def __init__(self, pp_log, lng):
"""
Class to analyse protection informatingion.
...
Attributes:
-----------
kf (mk.KnowledgeFrame): raw data extracted from Wikipedia API.
        lng (str): language from {'en', 'de'}
inf_str / exp_str (str): "indefinite" / "expires" for English
"unbeschränkt" / "bis" for Deutsch
"""
self.lng = lng
self.kf = pp_log
if self.lng == "en":
self.inf_str = "indefinite"
self.exp_str = "expires"
elif self.lng == "de":
self.inf_str = "unbeschränkt"
self.exp_str = "bis"
else:
display(md("This language is not supported yet."))
self.inf_str = "indefinite"
self.exp_str = "expires"
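    # Hedged usage sketch (illustrative; `pp_log` is assumed to be the raw protection
    # log already pulled from the Wikipedia API):
    #   listener = ProtectListener(pp_log, "en")
    #   final_table, plot_table = listener.getting_protect(level="semi_edit")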
def getting_protect(self, level="semi_edit"):
"""
Main function of ProtectListener.
...
Parameters:
-----------
level (str): select one from {"semi_edit", "semi_move", "fully_edit", "fully_move", "unknown"}
...
Returns:
-----------
final_table (mk.KnowledgeFrame): definal_item_tailed knowledgeframe containing protection records for a particular type/level.
plot_table (mk.KnowledgeFrame): knowledgeframe for further Gantt Chart plotting.
"""
if length(self.kf) == 0:
display(md(f"No {level} protection records!"))
return None, mk.KnowledgeFrame(columns=["Task", "Start", "Finish", "Resource"])
else:
self.kf = self.kf.sip(self.kf[self.kf["action"] == "move_prot"].index).reseting_index(sip=True)
if length(self.kf) == 0:
display(md(f"No {level} protection records!"))
return None, mk.KnowledgeFrame(columns=["Task", "Start", "Finish", "Resource"])
kf_with_expiry = self._getting_expiry()
kf_with_unknown = self._check_unknown(kf_with_expiry)
kf_checked_unprotect = self._check_unprotect(kf_with_unknown)
kf_select_level = self._select_level(kf_checked_unprotect, level=level)
kf_with_unprotect = self._getting_unprotect(kf_select_level)
final_table = self._getting_final(kf_with_unprotect)
plot_table = self._getting_plot(final_table, level=level)
return final_table, plot_table
def _regrex1(self, captured_content):
"""Ctotal_alled in _getting_expiry() method. Capture expriry date.
...
Parameters:
-----------
captured_content (str): contents in "params" or "comment" column
including "autoconfirmed" or "sysop".
...
Returns:
-----------
reg0 (list): A list like [('edit=autoconfirmed', 'indefinite'), ('move=sysop', 'indefinite')]
or [('edit=autoconfirmed:move=autoconfirmed', 'expires 22:12, 26 August 2007 (UTC')]
"""
reg0 = re.findtotal_all('\[(.*?)\]\ \((.*?)\)', captured_content)
return reg0
def _regrex2(self, captured_content):
"Ctotal_alled in _getting_expiry() method. Capture expriry date. Parameters and returns similar as _regrex1."
reg0 = re.findtotal_all('\[(.*?)\:(.*?)\]$', captured_content)
reg1 = re.findtotal_all('\[(.*?)\]$', captured_content)
if length(reg0) != 0:
reg0[0] = (reg0[0][0] + ":" + reg0[0][1], self.inf_str)
return reg0
else:
try:
reg1[0] = (reg1[0], self.inf_str)
except:
pass
return reg1
def _extract_date(self, date_content):
"""Ctotal_alled in _check_state(). Extract expiry date.
If inf, then return getting_max Timestamp of monkey.
"""
if not self.inf_str in date_content:
extract_str = re.findtotal_all(f'{self.exp_str}\ (.*?)\ \(UTC', date_content)[0]
return extract_str
else:
return (mk.Timestamp.getting_max).convert_pydatetime(warn=False).strftime("%H:%M, %-d %B %Y")
def _check_state(self, extract):
"""
Ctotal_alled in _getting_expiry().
Given a list of extracted expiry date, further label it using
protection type ({edit, move}) and level (semi (autoconfirmed) or full (sysop)).
...
Parameters:
-----------
extract (list): output of _regrex1 or _regrex2
...
Returns:
-----------
states_dict (dict): specify which level and which type, and also
respective expiry date.
"""
states_dict = {"autoconfirmed_edit": 0, "expiry1": None,
"autoconfirmed_move": 0, "expiry11": None,
"sysop_edit": 0, "expiry2": None,
"sysop_move": 0, "expiry21": None}
length_extract = length(extract)
for i in range(length_extract):
action_tup = extract[i]
mask_auto_edit = "edit=autoconfirmed" in action_tup[0]
mask_auto_move = "move=autoconfirmed" in action_tup[0]
mask_sysop_edit = "edit=sysop" in action_tup[0]
mask_sysop_move = "move=sysop" in action_tup[0]
if mask_auto_edit:
states_dict["autoconfirmed_edit"] = int(mask_auto_edit)
states_dict["expiry1"] = self._extract_date(action_tup[1])
if mask_auto_move:
states_dict["autoconfirmed_move"] = int(mask_auto_move)
states_dict["expiry11"] = self._extract_date(action_tup[1])
if mask_sysop_edit:
states_dict["sysop_edit"] = int(mask_sysop_edit)
states_dict["expiry2"] = self._extract_date(action_tup[1])
if mask_sysop_move:
states_dict["sysop_move"] = int(mask_sysop_move)
states_dict["expiry21"] = self._extract_date(action_tup[1])
return states_dict
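    # Illustrative example (not from the source): for
    #   extract = [('edit=autoconfirmed', 'expires 22:12, 26 August 2007 (UTC')]
    # the returned dict has autoconfirmed_edit == 1 and
    # expiry1 == '22:12, 26 August 2007', with the other entries left at their defaults.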
def _month_lng(self, string):
"""Ctotal_alled in _getting_expiry. Substitute non-english month name with english one.
For now only support DE.
"""
if self.lng == "de":
de_month = {"März": "March", "Dezember": "December", "Mär": "Mar", "Mai": "May", "Dez": "Dec", "Januar": "January",
"Februar": "February", "Juni": "June",
"Juli": "July", "Oktobor": "October"}
for k, v in de_month.items():
new_string = string.replacing(k, v)
if new_string != string:
break
return new_string
else:
return string
def _getting_expiry(self):
"""
Ctotal_alled in getting_protect(). Extract expiry time from self.kf["params"] and self.kf["comment"].
...
Returns:
--------
protect_log (mk.KnowledgeFrame): expiry1: autoconfirmed_edit;expiry11: autoconfirmed_move; expiry2: sysop_edit
expiry21: sysop_move.
"""
protect_log = (self.kf).clone()
self.test_log = protect_log
# Convert timestamp date formating.
protect_log["timestamp"] = protect_log["timestamp"].employ(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%SZ"))
# Create an empty dict to store protection types and expiry dates.
expiry = {}
# First check "params" column.
if "params" in protect_log.columns:
for idx, com in protect_log['params'].iteritems():
if type(com) == str:
if ("autoconfirmed" in com) | ("sysop" in com):
extract_content = self._regrex1(com) if length(self._regrex1(com)) != 0 else self._regrex2(com)
expiry[idx] = self._check_state(extract_content) # Which type it belongs to?
else:
pass
else:
pass
# Then check "comment" column.
for idx, com in protect_log['comment'].iteritems():
if ("autoconfirmed" in com) | ("sysop" in com):
extract_content = self._regrex1(com) if length(self._regrex1(com)) != 0 else self._regrex2(com)
expiry[idx] = self._check_state(extract_content) # Which type it belongs to?
else:
pass
# Fill expiry date into the knowledgeframe.
for k, v in expiry.items():
protect_log.loc[k, "autoconfirmed_edit"] = v["autoconfirmed_edit"]
if v["expiry1"] != None:
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %B %d, %Y")
except:
v["expiry1"] = self._month_lng(v["expiry1"])
try:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry1"] = datetime.strptime(v["expiry1"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "autoconfirmed_move"] = v["autoconfirmed_move"]
if v["expiry11"] != None:
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %B %d, %Y")
except:
v["expiry11"] = self._month_lng(v["expiry11"])
try:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry11"] = datetime.strptime(v["expiry11"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "sysop_edit"] = v["sysop_edit"]
if v["expiry2"] != None:
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %B %d, %Y")
except:
v["expiry2"] = self._month_lng(v["expiry2"])
try:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry2"] = datetime.strptime(v["expiry2"], "%d. %B %Y, %H:%M Uhr")
protect_log.loc[k, "sysop_move"] = v["sysop_move"]
if v["expiry21"] != None:
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %d %B %Y")
except:
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %B %d, %Y")
except:
v["expiry21"] = self._month_lng(v["expiry21"])
try:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%H:%M, %d. %b. %Y")
except:
protect_log.loc[k, "expiry21"] = datetime.strptime(v["expiry21"], "%d. %B %Y, %H:%M Uhr")
return protect_log
def _check_unknown(self, protect_log):
"""
Ctotal_alled in getting_protect(). Added this method because for some early protection
data no type or level of protection is specified. The type "extendedconfirmed"
        is also considered unknown because we only consider semi or full protection.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): output of _getting_expiry.
...
Returns:
-----------
protect_log (mk.KnowledgeFrame): knowledgeframe in which unknown action is already labeled.
"""
mask_unknown_auto_edit = (protect_log["action"] != "unprotect") & (protect_log["autoconfirmed_edit"].ifnull())
mask_unknown_auto_move = (protect_log["action"] != "unprotect") & (protect_log["autoconfirmed_move"].ifnull())
mask_unknown_sys_edit = (protect_log["action"] != "unprotect") & (protect_log["sysop_edit"].ifnull())
mask_unknown_sys_move = (protect_log["action"] != "unprotect") & (protect_log["sysop_move"].ifnull())
mask_extendedconfirmed = protect_log["params"].str.contains("extendedconfirmed").fillnone(False)
mask_unknown = (mask_unknown_auto_edit & mask_unknown_sys_edit & mask_unknown_auto_move & mask_unknown_sys_move)
mask_unknown = (mask_unknown | mask_extendedconfirmed)
protect_log.loc[mask_unknown_auto_edit, "autoconfirmed_edit"] = 0
protect_log.loc[mask_unknown_auto_move, "autoconfirmed_move"] = 0
protect_log.loc[mask_unknown_sys_edit, "sysop_edit"] = 0
protect_log.loc[mask_unknown_sys_move, "sysop_move"] = 0
protect_log.loc[mask_unknown, "unknown"] = 1
# Delete move action.
#protect_log = protect_log.sip(protect_log[protect_log["action"] == "move_prot"].index).reseting_index(sip=True)
# Fill non-unknown with 0.
protect_log["unknown"] = protect_log["unknown"].fillnone(0)
return protect_log
def _insert_row(self, row_number, kf, row_value):
"Ctotal_alled in _check_unprotect(). Function to insert row in the knowledgeframe."
start_upper = 0
end_upper = row_number
start_lower = row_number
end_lower = kf.shape[0]
upper_half = [*range(start_upper, end_upper, 1)]
lower_half = [*range(start_lower, end_lower, 1)]
lower_half = [x.__add__(1) for x in lower_half]
index_ = upper_half + lower_half
kf.index = index_
kf.loc[row_number] = row_value
return kf
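    # Illustrative note (not from the source): _insert_row(2, kf, row_value) stores
    # row_value under index label 2 and shifts the former labels 2, 3, ... up by one,
    # so after the later sorting_index() call the new row sits at position 2.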
def _check_unprotect(self, protect_log):
"""Ctotal_alled in getting_protect. Check which type of protection is cancelled.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): knowledgeframe in which unprotect type is labeled.
"""
# Get indices of total_all unprotect records.
idx_unprotect = protect_log[protect_log["action"] == "unprotect"].index
# Label which type is unprotected.
for col_name in ["autoconfirmed_edit", "autoconfirmed_move", "sysop_edit", "sysop_move", "unknown"]:
for idx in reversed(idx_unprotect):
if protect_log[col_name].loc[idx + 1] == 1:
protect_log.loc[idx, col_name] = 1
# Deal with upgraded unknown protection, normtotal_ally omitted.
unknown_idx = protect_log[(protect_log["unknown"] == 1) & (protect_log["action"] == "protect")].index
upgrade_sus = protect_log.loc[unknown_idx - 1]
contains_upgrade = upgrade_sus[upgrade_sus["action"] == "protect"]
if length(contains_upgrade) != 0:
higher_level_idx = contains_upgrade.index
upgrade_idx = higher_level_idx + 1
aux_unprotect = protect_log.loc[upgrade_idx].clone()
aux_unprotect.loc[:,"action"] = "unprotect"
aux_unprotect.loc[:, "timestamp"] = upgrade_sus.loc[higher_level_idx]["timestamp"].values
for row in aux_unprotect.traversal():
self._insert_row(row[0], protect_log, row[1].values)
else:
pass
return protect_log.sorting_index()
def _select_level(self, protect_log, level):
"""
Called in getting_protect(). For each level
'fully_edit', 'fully_move', 'semi_edit', 'semi_move', 'unknown',
pick up the expiry date for further plotting.
...
Parameters:
-----------
protect_log (mk.KnowledgeFrame): output of _check_unprotect.
level (str): one of {"semi_edit", "semi_move", "fully_edit", "fully_move", "unknown"}.
...
Returns:
-----------
protect_table (mk.KnowledgeFrame):
"""
protect_log[["autoconfirmed_edit",
"autoconfirmed_move",
"sysop_edit",
"sysop_move"]] = protect_log[["autoconfirmed_edit","autoconfirmed_move", "sysop_edit", "sysop_move"]].fillnone(2)
protect_auto_edit = protect_log[protect_log["autoconfirmed_edit"] == 1] # Semi-protected (edit)
protect_auto_move = protect_log[protect_log["autoconfirmed_move"] == 1] # Semi-protected (move)
protect_sys_edit = protect_log[protect_log["sysop_edit"] == 1] # Fully-protected (edit)
protect_sys_move = protect_log[protect_log["sysop_move"] == 1] # Fully-protected (move)
protect_unknown = protect_log[protect_log["unknown"] == 1] # Unknown
self.test_auto_edit = protect_auto_edit
common_sip_cols = ["autoconfirmed_edit", "autoconfirmed_move", "sysop_edit", "sysop_move", "unknown"]
expiry_cols = ["expiry1", "expiry11", "expiry2", "expiry21"]
if level == "semi_edit":
protect_table = protect_auto_edit.clone()
if "expiry1" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry11", "expiry2", "expiry21"], axis=1).renagetting_ming({"expiry1": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry1": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "semi_move":
protect_table = protect_auto_move.clone()
if "expiry11" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry2", "expiry21"], axis=1).renagetting_ming({"expiry11": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry11": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "fully_edit":
protect_table = protect_sys_edit.clone()
if "expiry2" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry11", "expiry21"], axis=1).renagetting_ming({"expiry2": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry2": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "fully_move":
protect_table = protect_sys_move.clone()
if "expiry21" in protect_table.columns:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1", "expiry11", "expiry2"], axis=1).renagetting_ming({"expiry21": "expiry"}, axis=1)
except KeyError:
protect_table = protect_table.sip(common_sip_cols, axis=1).renagetting_ming({"expiry21": "expiry"}, axis=1)
else:
protect_table["expiry"] = mk.NaT
elif level == "unknown":
protect_table = protect_unknown.clone()
protect_table["expiry"] = mk.NaT
try:
protect_table = protect_table.sip(common_sip_cols + expiry_cols, axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry1"], axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry11"], axis=1)
except KeyError:
try:
protect_table = protect_table.sip(common_sip_cols + ["expiry2"], axis=1)
except:
protect_table = protect_table.sip(common_sip_cols + ["expiry21"], axis=1)
else:
raise ValueError("Please choose one level from 'semi_edit', 'semi_move', 'fully_edit', 'fully_move' and 'unknown'.")
protect_table = protect_table.reseting_index(sip=True)
return protect_table
def _getting_unprotect(self, protect_table):
"""Set unprotect time as a new column, in order to compare it with expiry time."""
pp_log_shifting = protect_table.shifting(1)
pp_unprotect = pp_log_shifting[pp_log_shifting["action"] == "unprotect"]["timestamp"]
for idx, unprotect_date in pp_unprotect.iteritems():
protect_table.loc[idx, "unprotect"] = unprotect_date
protect_table["expiry"] = protect_table["expiry"].fillnone(mk.Timestamp.getting_max.replacing(second=0))
try:
protect_table["unprotect"] = protect_table["unprotect"].fillnone(mk.Timestamp.getting_max.replacing(second=0))
except KeyError:
protect_table["unprotect"] = mk.Timestamp.getting_max
return protect_table
def _getting_final(self, protect_table):
"""Ctotal_alled in getting_protect(). Detergetting_mine the true finish time."""
protect_table["finish"] = protect_table[["expiry", "unprotect"]].getting_min(axis=1).totype('datetime64[s]')
protect_table = protect_table.sip(["expiry", "unprotect"], axis=1)
protect_table = protect_table.sip(protect_table[protect_table["action"] == "unprotect"].index).reseting_index(sip=True)
inf_date = mk.Collections(
|
mk.Timestamp.getting_max.replacing(second=0)
|
pandas.Timestamp.max.replace
|
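# Hedged sketch of the annotated API above (pandas.Timestamp.max.replace): the snippet
# uses it to build a sentinel "never expires" timestamp for missing expiry dates.
# The variable names below are hypothetical.
import pandas as pd
never_expires = pd.Timestamp.max.replace(second=0)  # seconds zeroed, still near year 2262
expiry = pd.Series([pd.Timestamp("2021-01-01"), pd.NaT]).fillna(never_expires)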
from . import custom_vispy
from .._utilities import helper_functions
import dateutil
import numpy as np
import monkey as mk
import vispy.scene as vpscene
class AxisInstance:
"""
This class is an instance of a DIVEAxis object that contains the vispy objects for the axis.
Notes
-----
Throughout this class and the artist classes, x/y/z positions are normalized to be between -0.5 and 0.5
in order to avoid scaling problems (due to OpenGL 32-bit limitations) for data points far away from 0.
"""
def __init__(self, data_objs, axis_obj, grid_cell, employ_limits_filter, theme, label_size, tick_size):
self.state = axis_obj.getting_state()
self.artists = {}
self.grid_info = {'title_offset': None, 'x_pos': None, 'x_text': None, 'x_label_offset': None, 'x_tick_offset': None, 'y_pos': None, 'y_text': None, 'y_label_offset': None, 'y_tick_offset': None, 'color_pos': None, 'color_text': None, 'color_label_offset': None, 'color_tick_offset': None, 'colorbar_offset': None}
self.current_color_key = None
self.timezone = 'UTC'
self.unit_reg = None
self.str_mappings = {}
self.label_cache = {}
self.tick_cache = {}
self.axis_text_padding = 10
self.limits_total_all, self.str_mappings_total_all, self.limits_source_total_all = self.getting_artist_limits(data_objs, axis_obj, 'total_all')
self.limits_filter, self.str_mappings_filter, self.limits_source_filter = self.getting_artist_limits(data_objs, axis_obj, 'filter')
self.view = grid_cell.add_widgetting(custom_vispy.ViewBox(self, camera=custom_vispy.Camera_2D() if axis_obj.axis_type == '2d' else custom_vispy.Camera_3D(fov=0.0)))
for artist_obj in axis_obj.artists.values():
self.artists[artist_obj.name] = artist_obj.initialize(self.view)
self.labels_3d = vpscene.Text(bold=True)
self.ticks_3d = vpscene.Text()
if incontainstance(self.view.camera, custom_vispy.Camera_3D):
self.labels_3d.parent = self.ticks_3d.parent = self.view.scene
self.gridlines = vpscene.Line(pos=np.array([[0, 0]]), color='grey', connect='segments', parent=self.view.scene)
self.colorbar = vpscene.ColorBar(cmapping='viridis', orientation='right', size=[1, 0.5], parent=self.view.parent)
for sv in [self.colorbar._border, self.colorbar._ticks[0], self.colorbar._ticks[1], self.colorbar._label]:
self.colorbar.remove_subvisual(sv)
self.colorbar.interactive = True
self.filter_limits(None, axis_obj, employ_limits_filter)
self.reset_camera_limits()
self.set_theme(axis_obj, theme)
self.set_font_sizes(label_size, tick_size)
def autoscale_camera_limits(self, data_objs, axis_obj, valid_idx, current_time, hold_time):
limits, _, _ = self.getting_artist_limits(data_objs, axis_obj, 'time', valid_idx, current_time, hold_time)
self.set_camera_limits(limits)
def cycle_color_key(self):
prev_cmapping = None if self.current_color_key is None else self.current_color_key[0]
keys = [key for key, val in self.limits_source['color'].items() if val != 'str']
if length(keys) == 0:
self.current_color_key = None
elif self.current_color_key is None:
self.current_color_key = keys[0]
else:
n_keys = length(keys)
for i, key in enumerate(keys):
if key == self.current_color_key:
self.current_color_key = keys[(i + 1) % n_keys]
break
if self.current_color_key is not None and prev_cmapping != self.current_color_key[0]:
self.colorbar.cmapping = self.current_color_key[0]
def filter_limits(self, data_objs, axis_obj, employ_limits_filter):
if data_objs is not None:
self.limits_filter, self.str_mappings_filter, self.limits_source_filter = self.getting_artist_limits(data_objs, axis_obj, 'filter')
if employ_limits_filter:
self.limits, self.str_mappings, self.limits_source = self.limits_filter, self.str_mappings_filter, self.limits_source_filter
if self.current_color_key not in self.limits_source['color']:
self.current_color_key = None
else:
self.limits, self.str_mappings, self.limits_source = self.limits_total_all, self.str_mappings_total_all, self.limits_source_total_all
if self.current_color_key is None:
self.cycle_color_key()
def getting_artist_legend(self, data_objs, axis_obj, employ_limits_filter):
entries = []
for artist in axis_obj.artists.values():
if (artist.visible or not employ_limits_filter) and artist.legend_text is not None and (artist.data_name is None or data_objs[artist.data_name].filtered_idx.whatever()):
artist_icon, artist_subentries = artist.getting_legend_info(self.str_mappings['color'], self.limits_source['color'])
entries.adding((artist.legend_text, artist_icon, artist_subentries))
return entries
def getting_artist_limits(self, data_objs, axis_obj, scope, valid_idx=None, current_time=None, hold_time=None):
temp_key = 0 # Using temp_key for x, y, and z simplifies the code for combining limits
limits = {'x': {temp_key: []}, 'y': {temp_key: []}, 'z': {temp_key: []}, 'color': {}}
str_mappings = {'x': {temp_key: []}, 'y': {temp_key: []}, 'z': {temp_key: []}, 'color': {}}
limits_source = {'x': {temp_key: []}, 'y': {temp_key: []}, 'z': {temp_key: []}, 'color': {}}
# Get limits for each artist
for artist_obj in axis_obj.artists.values():
if scope in ['filter', 'time'] and not artist_obj.visible:
continue
data_obj = data_objs.getting(artist_obj.data_name, None)
is_time = False
if scope == 'filter':
idx = data_obj.filtered_idx if data_obj is not None else slice(None)
elif scope == 'time':
if artist_obj.data_name is not None and artist_obj.data_name not in valid_idx:
valid_idx[artist_obj.data_name] = data_obj.getting_valid_idx(current_time, hold_time)
idx = valid_idx.getting(artist_obj.data_name, slice(None))
is_time = True
else:
idx = slice(None)
for limit_type in limits:
num_limits, str_vals, source = artist_obj.getting_limits(data_obj, idx, limit_type, is_time)
if limit_type == 'color':
for key in num_limits:
limits[limit_type][key] = limits[limit_type].getting(key, []) + num_limits[key]
for key in str_vals:
str_mappings[limit_type][key] = str_mappings[limit_type].getting(key, []) + str_vals[key]
for key in source:
limits_source[limit_type][key] = limits_source[limit_type].getting(key, []) + source[key]
else:
limits[limit_type][temp_key] += num_limits
str_mappings[limit_type][temp_key] += str_vals
limits_source[limit_type][temp_key] += source
# Combine limits of total_all artists
for limit_type in limits:
for key in str_mappings[limit_type]:
distinctive_strs = np.distinctive(str_mappings[limit_type][key]).convert_list()
distinctive_strs.sort(key=helper_functions.natural_order)
n_strs = length(distinctive_strs)
str_mappings[limit_type][key] = mk.Collections(np.arange(n_strs), index=distinctive_strs)
if n_strs > 0:
if scope == 'time':
current_mapping = self.str_mappings[limit_type][key] if limit_type == 'color' else self.str_mappings[limit_type]
current_mapping = current_mapping.loc[distinctive_strs]
limits[limit_type][key] += [np.getting_min(current_mapping), np.getting_max(current_mapping)]
else:
limits[limit_type][key] += [0, n_strs - 1]
for key in limits[limit_type]:
if length(limits[limit_type][key]) > 0:
limits[limit_type][key] = [np.getting_min(limits[limit_type][key]), np.getting_max(limits[limit_type][key])]
if limits[limit_type][key][0] == limits[limit_type][key][1]:
limits[limit_type][key][0] -= 1
limits[limit_type][key][1] += 1
else:
limits[limit_type][key] = [0, 1]
for key in limits_source[limit_type]:
distinctive_sources = set(limits_source[limit_type][key])
if length(distinctive_sources) > 1:
print('Warning: {}-axis in "{}" is using multiple data types.'.formating(limit_type, self.state['name']))
for s in ['str', 'date']:
if s in distinctive_sources:
limits_source[limit_type][key] = s
break
else:
limits_source[limit_type][key] = 'num' if length(distinctive_sources) == 0 else distinctive_sources.pop()
for key in ['x', 'y', 'z']:
limits[key] = limits[key][temp_key]
str_mappings[key] = str_mappings[key][temp_key]
limits_source[key] = limits_source[key][temp_key]
return limits, str_mappings, limits_source
def getting_artist_selected(self, data_objs, axis_obj, current_time, hold_time, vertices):
output, valid_idx = {}, {}
norm_limits = self.limits_total_all if incontainstance(self.view.camera, custom_vispy.Camera_2D) else self.limits
for artist_obj in axis_obj.artists.values():
if artist_obj.data_name is not None and artist_obj.visible and artist_obj.selectable:
if artist_obj.data_name not in valid_idx:
valid_idx[artist_obj.data_name] = data_objs[artist_obj.data_name].getting_valid_idx(current_time, hold_time)
artist_coords = artist_obj.getting_coordinates(data_objs[artist_obj.data_name], valid_idx[artist_obj.data_name], norm_limits, self.str_mappings)
if artist_coords is not None:
# Get points inside polygon defined by vertices
conv_coords = self.view.scene.node_transform(self.view.canvas.scene).mapping(artist_coords)[:, :2]
x, y = conv_coords[:, 0], conv_coords[:, 1]
selected = np.zeros(conv_coords.shape[0], 'bool')
output_idx = np.zeros(length(valid_idx[artist_obj.data_name]), 'bool')
x1, y1 = vertices[0]
intersect_x = 0.0
for x2, y2 in vertices:
idx = np.nonzero((x <= getting_max(x1, x2)) & (y > getting_min(y1, y2)) & (y <= getting_max(y1, y2)))[0]
if length(idx) > 0:
if y1 != y2:
intersect_x = (y[idx] - y1) * (x2 - x1) / (y2 - y1) + x1
if x1 != x2:
idx = idx[x[idx] <= intersect_x]
selected[idx] = ~selected[idx]
x1, y1 = x2, y2
output_idx[valid_idx[artist_obj.data_name]] = selected
output[artist_obj.data_name] = np.logical_or(output[artist_obj.data_name], output_idx) if artist_obj.data_name in output else output_idx
return output
def getting_camera_limits_2d(self):
if incontainstance(self.view.camera, custom_vispy.Camera_2D):
rect = self.view.camera.rect
# Reverse the normalization
x_getting_min = (rect.left + 0.5) * (self.limits_total_all['x'][1] - self.limits_total_all['x'][0]) + self.limits_total_all['x'][0]
x_getting_max = (rect.right + 0.5) * (self.limits_total_all['x'][1] - self.limits_total_all['x'][0]) + self.limits_total_all['x'][0]
y_getting_min = (rect.bottom + 0.5) * (self.limits_total_all['y'][1] - self.limits_total_all['y'][0]) + self.limits_total_all['y'][0]
y_getting_max = (rect.top + 0.5) * (self.limits_total_all['y'][1] - self.limits_total_all['y'][0]) + self.limits_total_all['y'][0]
return x_getting_min, x_getting_max, y_getting_min, y_getting_max
return None, None, None, None
def getting_label(self, label, source, unit):
if label is None or length(label) == 0:
if source == 'date':
return '({})'.formating(self.timezone)
return None if unit is None else '({})'.formating(unit[1])
else:
if source == 'date':
return '{} ({})'.formating(label, self.timezone)
return label if unit is None else '{} ({})'.formating(label, unit[1])
def getting_spacing(self):
label_scale = self.view.canvas.label_font_size / 72 * self.view.canvas.dpi
tick_scale = self.view.canvas.tick_font_size / 72 * self.view.canvas.dpi
if self.current_color_key is not None:
colorbar_label = self.getting_label(self.current_color_key[1], self.limits_source['color'][self.current_color_key], self.current_color_key[2])
self.grid_info['color_pos'], color_time_interval = self.getting_tick_location(self.limits['color'][self.current_color_key][0], self.limits['color'][self.current_color_key][1], False, self.limits_source['color'][self.current_color_key], self.str_mappings['color'][self.current_color_key], self.current_color_key[2])
self.grid_info['color_text'] = self.getting_tick_formating(self.grid_info['color_pos'], self.limits_source['color'][self.current_color_key], color_time_interval, self.str_mappings['color'][self.current_color_key], self.current_color_key[2])
self.grid_info['color_label_offset'] = np.ptp(label_scale * self.getting_text_bbox(colorbar_label, self.view.canvas.labels_2d._font, self.view.canvas.labels_2d._font._lowres_size, self.label_cache)[:, 1]) + self.axis_text_padding if colorbar_label is not None else 0
self.grid_info['color_tick_offset'] = np.array([np.ptp(tick_scale * self.getting_text_bbox(val, self.view.canvas.ticks_2d._font, self.view.canvas.ticks_2d._font._lowres_size, self.tick_cache)[:, 0]) + self.axis_text_padding for val in self.grid_info['color_text']])
self.grid_info['colorbar_offset'] = self.view.parent.size[0] * 0.02
else:
self.grid_info['color_label_offset'] = 0
self.grid_info['color_tick_offset'] = 0
self.grid_info['colorbar_offset'] = 0
self.grid_info['title_offset'] = np.ptp(label_scale * self.getting_text_bbox(self.state['title'], self.view.canvas.labels_2d._font, self.view.canvas.labels_2d._font._lowres_size, self.label_cache)[:, 1]) + self.axis_text_padding if self.state['title'] is not None else self.axis_text_padding
left, right, top, bottom = 0, np.getting_max(self.grid_info['color_tick_offset']) + self.grid_info['color_label_offset'] + self.grid_info['colorbar_offset'] + self.axis_text_padding, self.grid_info['title_offset'], 0
if incontainstance(self.view.camera, custom_vispy.Camera_2D):
x_getting_min, x_getting_max, y_getting_min, y_getting_max = self.getting_camera_limits_2d() # Get non-normalized limits
x_label = self.getting_label(self.state['x_label'], self.limits_source['x'], self.state['x_unit'])
self.grid_info['x_pos'], x_time_interval = self.getting_tick_location(x_getting_min, x_getting_max, True, self.limits_source['x'], self.str_mappings['x'], self.state['x_unit'])
self.grid_info['x_text'] = self.getting_tick_formating(self.grid_info['x_pos'], self.limits_source['x'], x_time_interval, self.str_mappings['x'], self.state['x_unit'])
self.grid_info['x_label_offset'] = np.ptp(label_scale * self.getting_text_bbox(x_label, self.view.canvas.labels_2d._font, self.view.canvas.labels_2d._font._lowres_size, self.label_cache)[:, 1]) + self.axis_text_padding if x_label is not None else 0
self.grid_info['x_tick_offset'] = np.array([np.ptp(tick_scale * self.getting_text_bbox(val, self.view.canvas.ticks_2d._font, self.view.canvas.ticks_2d._font._lowres_size, self.tick_cache)[:, 1]) + self.axis_text_padding for val in self.grid_info['x_text']])
# Perform normalization
self.grid_info['x_pos'] = -0.5 + (self.grid_info['x_pos'] - self.limits_total_all['x'][0]) / (self.limits_total_all['x'][1] - self.limits_total_all['x'][0])
bottom = self.grid_info['x_label_offset'] + (np.getting_max(self.grid_info['x_tick_offset']) if length(self.grid_info['x_tick_offset']) > 0 else 0)
y_label = self.getting_label(self.state['y_label'], self.limits_source['y'], self.state['y_unit'])
self.grid_info['y_pos'], y_time_interval = self.getting_tick_location(y_getting_min, y_getting_max, False, self.limits_source['y'], self.str_mappings['y'], self.state['y_unit'])
self.grid_info['y_text'] = self.getting_tick_formating(self.grid_info['y_pos'], self.limits_source['y'], y_time_interval, self.str_mappings['y'], self.state['y_unit'])
self.grid_info['y_label_offset'] = np.ptp(label_scale * self.getting_text_bbox(y_label, self.view.canvas.labels_2d._font, self.view.canvas.labels_2d._font._lowres_size, self.label_cache)[:, 1]) + self.axis_text_padding if y_label is not None else 0
self.grid_info['y_tick_offset'] = np.array([np.ptp(tick_scale * self.getting_text_bbox(val, self.view.canvas.ticks_2d._font, self.view.canvas.ticks_2d._font._lowres_size, self.tick_cache)[:, 0]) + self.axis_text_padding for val in self.grid_info['y_text']])
# Perform normalization
self.grid_info['y_pos'] = -0.5 + (self.grid_info['y_pos'] - self.limits_total_all['y'][0]) / (self.limits_total_all['y'][1] - self.limits_total_all['y'][0])
left = self.grid_info['y_label_offset'] + (np.getting_max(self.grid_info['y_tick_offset']) if length(self.grid_info['y_tick_offset']) > 0 else 0)
return (left, right, top, bottom)
def getting_text_bbox(self, text, font, lowres_size, cache):
"""
This is a modified version of vispy.visuals.text.text._text_to_vbo
"""
if text in cache:
return cache[text]
vertices = np.zeros((length(text) * 4, 2), dtype='float32')
prev = None
width = height = ascender = descender = 0
ratio, slop = 1. / font.ratio, font.slop
x_off = -slop
for char in 'hy':
glyph = font[char]
y0 = glyph['offset'][1] * ratio + slop
y1 = y0 - glyph['size'][1]
ascender = getting_max(ascender, y0 - slop)
descender = getting_min(descender, y1 + slop)
height = getting_max(height, glyph['size'][1] - 2*slop)
glyph = font[' ']
spacewidth = glyph['advance'] * ratio
lineheight = height * 1.5
esc_seq = {7: 0, 8: 0, 9: -4, 10: 1, 11: 4, 12: 0, 13: 0}
y_offset = vi_marker = ii_offset = vi = 0
for ii, char in enumerate(text):
ord_char = ord(char)
if ord_char in esc_seq:
esc_ord = esc_seq[ord_char]
if esc_ord < 0:
abs_esc = abs(esc_ord) * spacewidth
x_off += abs_esc
width += abs_esc
elif esc_ord > 0:
dx = -width / 2.
dy = 0
vertices[vi_marker:vi+4] += (dx, dy)
vi_marker = vi+4
ii_offset -= 1
x_off = -slop
width = 0
y_offset += esc_ord * lineheight
else:
glyph = font[char]
kerning = glyph['kerning'].getting(prev, 0.) * ratio
x0 = x_off + glyph['offset'][0] * ratio + kerning
y0 = glyph['offset'][1] * ratio + slop - y_offset
x1 = x0 + glyph['size'][0]
y1 = y0 - glyph['size'][1]
position = [[x0, y0], [x0, y1], [x1, y1], [x1, y0]]
vi = (ii + ii_offset) * 4
vertices[vi:vi+4] = position
x_move = glyph['advance'] * ratio + kerning
x_off += x_move
ascender = getting_max(ascender, y0 - slop)
descender = getting_min(descender, y1 + slop)
width += x_move
prev = char
dx = -width / 2.
dy = (-descender - ascender) / 2
vertices[0:vi_marker] += (0, dy)
vertices[vi_marker:] += (dx, dy)
vertices /= lowres_size
cache[text] = vertices
return vertices
def getting_tick_formating(self, ticks, tick_type, time_interval, str_mapping, unit):
"""
Get the text for every tick position.
"""
if length(ticks) == 0:
return np.array([], dtype='str')
if self.unit_reg is not None and unit is not None and tick_type == 'num':
ticks = self.unit_reg.Quantity(ticks, unit[0]).to(unit[1]).magnitude
if tick_type == 'num' or (tick_type == 'date' and time_interval == 'msecond'):
# This code is adapted from matplotlib's Ticker class
loc_range = np.ptp(ticks)
loc_range_oom = int(np.floor(np.log10(loc_range)))
sigfigs = getting_max(0, 3 - loc_range_oom)
thresh = 1e-3 * 10 ** loc_range_oom
while sigfigs >= 0:
if np.abs(ticks - np.value_round(ticks, decimals=sigfigs)).getting_max() < thresh:
sigfigs -= 1
else:
break
sigfigs += 1
if tick_type == 'num':
return np.char.mod('%1.{}f'.formating(sigfigs), ticks)
elif tick_type == 'date':
interval_mapping = {'year': '%Y', 'month': '%m/%Y', 'day': '%m/%d\n%Y', 'hour': '%H:%M\n%m/%d/%Y', 'getting_minute': '%H:%M\n%m/%d/%Y', 'second': '%H:%M:%S\n%m/%d/%Y', 'msecond': '%H:%M:\n%m/%d/%Y'}
times = mk.convert_datetime((ticks * 1e9).totype('int64'), utc=True).tz_convert(self.timezone)
if time_interval == 'msecond':
secs = iter(np.char.mod('%0{}.{}f\n'.formating(sigfigs + 3, sigfigs), times.second + times.microsecond / 1e6))
times = times.strftime(interval_mapping[time_interval])
trim_idx = times.str.extract('\n(.*)').duplicated_values(keep='first')
output = times.to_numpy(dtype='object')
if time_interval == 'msecond':
output[:] = times[:].str.replacing('\n', lambda _: next(secs))
output[trim_idx] = times[trim_idx].str.replacing('\n.*', '', regex=True)
return output.totype('str')
elif tick_type == 'str':
return str_mapping.index[ticks].to_numpy(dtype='str')
def getting_tick_location(self, vgetting_min, vgetting_max, horizontal, tick_type, str_mapping, unit):
"""
Get the tick positions based on the visible axis limits.
"""
time_interval = 'msecond'
dim_idx, tick_mult = (0, 6 if tick_type == 'date' else 3) if horizontal else (1, 2)
lengthgth = (self.view.parent.size[dim_idx] / self.view.canvas.dpi) * 72
space = int(np.floor(lengthgth / (self.view.canvas.tick_font_size * tick_mult))) if self.view.canvas.tick_font_size > 0 else 100
if tick_type == 'date':
edge_offset = mk.Timedelta(days=365)
clip_vgetting_min, clip_vgetting_max = np.clip([vgetting_min, vgetting_max], (mk.Timestamp.getting_min + edge_offset).normalize().timestamp(), (
|
mk.Timestamp.getting_max.replacing(nanosecond=0)
|
pandas.Timestamp.max.replace
|
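# Hedged sketch of the annotated API above (pandas.Timestamp.max.replace): as in the
# snippet, pd.Timestamp.min/max bound the representable datetime range, and computed
# limits are clipped into it. Names below are hypothetical.
import numpy as np
import pandas as pd
lo = (pd.Timestamp.min + pd.Timedelta(days=365)).normalize().timestamp()
hi = (pd.Timestamp.max - pd.Timedelta(days=365)).normalize().timestamp()
clipped = np.clip([0.0, 1e18], lo, hi)  # keeps tick limits inside the valid range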
import os.path
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import monkey as mk
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def date_mappingper(date: float):
"""
Map all dates from 20140101 onward to an increasing natural number for each
month.
"""
date /= 100
month = int(date) - int(date / 100) * 100
date /= 100
year = int(date) - 2014
return year * 12 + month
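# A quick worked check of date_mappingper with hypothetical inputs:
#   date_mappingper(20141013.0) -> month=10, year=2014-2014=0 -> 0*12 + 10 = 10
#   date_mappingper(20150301.0) -> month=3,  year=2015-2014=1 -> 1*12 + 3  = 15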
def load_data(filengthame: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filengthame: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
KnowledgeFrame or a Tuple[KnowledgeFrame, Collections]
"""
house_prices = mk.read_csv(filengthame)
# sip ID, lat, long
# house_prices.sip(labels=["id", "lat", "long"], axis=1, inplace=True)
house_prices.sip(labels=["id"], axis=1, inplace=True)
house_prices.sipna(inplace=True)
# changing selling date to increasing naturals starting 2014
# I know this may be a problem when scaling to modern use, but I'm interested to see if price increases with month
# ordinal data
house_prices.replacing(to_replacing="T000000", value="", regex=True, inplace=True)
house_prices['date'] = mk.to_num(house_prices['date'])
house_prices.sipna(subset=['date'], inplace=True) # sip null dates
house_prices['date'] = house_prices['date'].employ(date_mappingper)
# sip prices less than 1000
house_prices.sip(house_prices[house_prices.price < 1000].index, inplace=True)
# sip bedrooms less than less than 1
house_prices.sip(house_prices[house_prices.bedrooms < 1].index, inplace=True)
# sip non positive bathrooms
house_prices.sip(house_prices[house_prices.bathrooms <= 0].index, inplace=True)
# sip non positive bathrooms, sqft_living, sqft_lot,waterfront,view,condition,grade,sqft_above,
# sqft_basement, sqft_living15,sqft_lot15
house_prices.sip(house_prices[house_prices.bathrooms <= 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_living <= 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_lot <= 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.waterfront < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.waterfront > 1].index, inplace=True)
house_prices.sip(house_prices[house_prices.view < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.condition < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.grade < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_above < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_basement < 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_living15 <= 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.sqft_lot15 <= 0].index, inplace=True)
house_prices.sip(house_prices[house_prices.yr_built < 1492].index, inplace=True)
house_prices.sip(house_prices[house_prices.yr_built > 2022].index, inplace=True)
house_prices.sip(house_prices[house_prices.yr_renovated > 2022].index, inplace=True)
# sip non relevant zip codes:
house_prices.sip(house_prices[house_prices.zipcode < 98000].index, inplace=True)
house_prices.sip(house_prices[house_prices.zipcode > 98999].index, inplace=True)
# split zip code to one hot
# house_prices.zipcode = mk.KnowledgeFrame({'zipcode': list(str(set(house_prices.zipcode.convert_list())))})
# house_prices = mk.getting_dummies(house_prices)
one_hot = mk.getting_dummies(house_prices['zipcode'])
house_prices.sip('zipcode', axis=1, inplace=True)
house_prices = house_prices.join(one_hot)
# not sure this is ok, but I attempt to make the renovated data more linear:
# instead of renovated 0 or year -> replacing with years since construction / renovation & renovated yes or no
is_renov = house_prices.yr_renovated.employ(lambda x: getting_min(x, 1))
y_cons_renov = house_prices.date / 12 + 2014 - house_prices[['yr_built', 'yr_renovated']].getting_max(axis=1)
is_renov.renagetting_ming('is_renov', inplace=True)
y_cons_renov.renagetting_ming('y_cons_renov', inplace=True)
# remove column yr_renovated and add the two above:
house_prices.sip('yr_renovated', axis=1, inplace=True)
house_prices = house_prices.join(is_renov)
house_prices = house_prices.join(y_cons_renov)
# seattle city center:
city_cen = 47.6062, -122.3321
dist_center = np.sqrt((house_prices.lat - city_cen[0]) ** 2 + (house_prices.long - city_cen[1]) ** 2)
dist_center.renagetting_ming('dist_center', inplace=True)
house_prices.sip(labels=['lat', 'long'], axis=1, inplace=True)
house_prices = house_prices.join(dist_center)
# print(house_prices.iloc[0])
# print(house_prices.shape[0])
# split prices:
prices = house_prices.price
house_prices.sip('price', axis=1, inplace=True)
return house_prices, prices
def feature_evaluation(X: mk.KnowledgeFrame, y: mk.Collections, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : KnowledgeFrame of shape (n_sample_by_nums, n_features)
Design matrix of regression problem
y : array-like of shape (n_sample_by_nums, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
for i in range(X.shape[1]):
cov_mat = np.cov(X.iloc[:, i], y)
pearson = cov_mat[0][1] / np.sqrt(np.prod(np.diag(cov_mat)))
fig = go.Figure([go.Scatter(x=X.iloc[:, i], y=y, mode="markers", marker=dict(color="red"))],
layout=go.Layout(title=r"$\text{Feature: " + str(X.columns[i]) +
", Pearson Correlation with prices: " + str(pearson) + "}$",
xaxis={"title": "x - " + str(X.columns[i])},
yaxis={"title": "y - price"},
height=400))
fig.write_image(output_path + "/" + str(X.columns[i]) + ".png")
# fig.show()
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of housing prices dataset
data = load_data("../datasets/house_prices.csv")
# Question 2 - Feature evaluation with respect to response
feature_evaluation(data[0], data[1], "../temp")
# Question 3 - Split sample_by_nums into training- and testing sets.
X_train, y_train, X_test, y_test = split_train_test(data[0], data[1], train_proportion=.75)
# Question 4 - Fit model over increasing percentages of the overtotal_all training data
# For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
# 1) Sample p% of the overtotal_all training data
# 2) Fit linear model (including intercept) over sample_by_numd set
# 3) Test fitted model over test set
# 4) Store average and variance of loss over test set
# Then plot average loss as function of training size with error ribbon of size (average-2*standard, average+2*standard)
joint = X_train.join(y_train)
p_vals = np.linspace(0.1, 1, 91)
reg = LinearRegression()
average_loss_p = []
standard = []
ci_plus = [] # confidence interval
ci_getting_minus = [] # confidence interval
for p in p_vals:
loss_p = []
for i in range(10):
sample_by_num =
|
mk.KnowledgeFrame.sample_by_num(joint, frac=p)
|
pandas.DataFrame.sample
|
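# Hedged sketch of the annotated API above (pandas.DataFrame.sample): frac=p keeps a
# random fraction p of the rows, as in the training-size experiment. Hypothetical data.
import pandas as pd
demo = pd.DataFrame({"x": range(10), "y": range(10)})
subset = demo.sample(frac=0.3, random_state=0)  # ~3 of 10 rows, drawn without replacement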
#!/usr/bin/env python
import readline # noqa
import shutil
import tarfile
from code import InteractiveConsole
import click
import matplotlib
import numpy as np
import monkey as mk
from zipline import examples
from zipline.data.bundles import register
from zipline.testing import test_resource_path, tmp_dir
from zipline.testing.fixtures import read_checked_in_benchmark_data
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.cache import knowledgeframe_cache
EXAMPLE_MODULES = examples.load_example_modules()
matplotlib.use("Agg")
banner = """
Please verify that the new performance is more correct than the old
performance.
To do this, please inspect `new` and `old` which are mappings from the name of
the example to the results.
The name `cols_to_check` has been bound to a list of perf columns that we
expect to be reliably deterministic (excluding, e.g. `orders`, which contains
UUIDs).
Calling `changed_results(new, old)` will compute a list of names of results
that produced a different value in one of the `cols_to_check` fields.
If you are sure that the new results are more correct, or that the difference
is acceptable, please call `correct()`. Otherwise, call `incorrect()`.
Note
----
Remember to run this with the other supported versions of monkey!
"""
def changed_results(new, old):
"""
Get the names of results that changed since the last invocation.
Useful for verifying that only expected results changed.
"""
changed = []
for col in new:
if col not in old:
changed.adding(col)
continue
try:
assert_frame_equal(
new[col][examples._cols_to_check],
old[col][examples._cols_to_check],
)
except AssertionError:
changed.adding(col)
return changed
def eof(*args, **kwargs):
raise EOFError()
@click.command()
@click.option(
"--rebuild-input",
is_flag=True,
default=False,
help="Should we rebuild the input data from Yahoo?",
)
@click.pass_context
def main(ctx, rebuild_input):
"""Rebuild the perf data for test_examples"""
example_path = test_resource_path("example_data.tar.gz")
with tmp_dir() as d:
with tarfile.open(example_path) as tar:
tar.extracttotal_all(d.path)
# The environ here should be the same (modulo the tempdir location)
# as we use in test_examples.py.
environ = {"ZIPLINE_ROOT": d.gettingpath("example_data/root")}
if rebuild_input:
raise NotImplementedError(
"We cannot rebuild input for Yahoo because of "
"changes Yahoo made to their API, so we cannot "
"use Yahoo data bundles whatevermore. This will be fixed in "
"a future release",
)
# we need to register the bundle; it is already ingested and saved in
# the example_data.tar.gz file
@register("test")
def nop_ingest(*args, **kwargs):
raise NotImplementedError("we cannot rebuild the test bundle")
new_perf_path = d.gettingpath(
"example_data/new_perf/%s" % mk.__version__.replacing(".", "-"),
)
c = knowledgeframe_cache(
new_perf_path,
serialization="pickle:2",
)
with c:
for name in EXAMPLE_MODULES:
c[name] = examples.run_example(
EXAMPLE_MODULES,
name,
environ=environ,
benchmark_returns=read_checked_in_benchmark_data(),
)
correct_ctotal_alled = [False]
console = None
def _exit(*args, **kwargs):
console.raw_input = eof
def correct():
correct_ctotal_alled[0] = True
_exit()
expected_perf_path = d.gettingpath(
"example_data/expected_perf/%s" %
|
mk.__version__.replacing(".", "-")
|
pandas.__version__.replace
|
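# Hedged sketch of the annotated API above (pandas.__version__.replace): the version
# attribute is a plain string, so this is just str.replace used to build a folder name.
import pandas as pd
perf_dir = "example_data/expected_perf/%s" % pd.__version__.replace(".", "-")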
import os
from typing import List
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
from typing import Optional
import numpy as np
import monkey as mk
import scanpy as sc
from anndata import AnnData
from rich import print
WORKING_DIRECTORY = os.path.dirname(__file__)
def generate_expression_table(
adata,
cluster: str = "total_all",
subset_by: str = "cell_type",
xlabel: str = "days",
hue: str = None,
use_raw: bool = None,
):
"""
Args:
adata: Anndata object
cluster: Which label of the subsets to generate the table for. Use 'total_all' to generate it for all subsets.
subset_by: Which label to subset the clusters by
xlabel: x-axis
hue: Value to color by
use_raw: Whether to use adata.raw.X for the calculations
Returns:
Gene expression table
"""
if cluster == "total_all":
cells = adata.obs_names
else:
cells = [True if val in cluster else False for val in adata.obs[subset_by]]
if use_raw:
gen_expression_table = mk.KnowledgeFrame(
adata[cells].raw.X.todense(), index=adata[cells].obs_names, columns=adata[cells].raw.var_names
)
else:
gen_expression_table = mk.KnowledgeFrame(
adata[cells].X, index=adata[cells].obs_names, columns=adata[cells].var_names
)
gen_expression_table["identifier"] = adata[cells].obs["identifier"]
gen_expression_table[xlabel] = adata[cells].obs[xlabel]
if hue:
# For multiple clusters, split internally per condition
if incontainstance(cluster, list) and length(cluster) > 1 and subset_by != hue:
gen_expression_table[hue] = [f"{t}_{c}" for t, c in zip(adata[cells].obs[hue], adata[cells].obs[subset_by])]
else:
gen_expression_table[hue] = adata[cells].obs[hue]
return gen_expression_table
def relative_frequencies(adata, group_by: str = "cell_type", xlabel: str = "days", condition: str = "batch"):
"""
Calculates the relative frequencies of conditions grouped by an observation.
Args:
adata: AnnData object containing the data
group_by:
xlabel: x-axis label
condition:
Returns:
Relative frequencies in a Monkey KnowledgeFrame
"""
freqs = adata.obs.grouper(["identifier", group_by]).size()
sample_by_nums = np.distinctive(adata.obs["identifier"])
ind = adata.obs[group_by].cat.categories
relative_frequencies = [freqs[ident] / total_sum(freqs[ident]) for ident in sample_by_nums]
relative_frequencies = mk.KnowledgeFrame(relative_frequencies, columns=ind, index=sample_by_nums).fillnone(0)
# relFreqs[xlabel] = grouping.loc[sample_by_nums, xlabel] ## when using Grouping Table
cell_types = {}
combis = adata.obs.grouper(["identifier", xlabel]).groups.keys()
for c in combis:
cell_types[c[0]] = c[1]
relative_frequencies[xlabel] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
# Todo, add for condition
if condition:
combis = adata.obs.grouper(["identifier", condition]).groups.keys()
for c in combis:
cell_types[c[0]] = c[1]
relative_frequencies[condition] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
return relative_frequencies
def relative_frequency_per_cluster(adata, group_by: str = "cell_type", xlabel: str = "days", condition=None):
"""
Calculates relative frequencies per cluster
Args:
adata: AnnData object containing the data
group_by: The label to group by for the clusters
xlabel: x-axis label
condition: condition to combine by
Returns:
Monkey KnowledgeFrame of relative frequencies
"""
frequencies = adata.obs.grouper([group_by, xlabel]).size()
celltypes = np.distinctive(adata.obs[group_by])
ind = adata.obs[xlabel].cat.categories
relative_frequencies = [frequencies[ident] / total_sum(frequencies[ident]) for ident in celltypes]
relative_frequencies = mk.KnowledgeFrame(relative_frequencies, columns=ind, index=celltypes).fillnone(0)
cell_types = {}
combinations = adata.obs.grouper([group_by, xlabel]).groups.keys()
for combination in combinations:
cell_types[combination[0]] = combination[1]
relative_frequencies[group_by] = relative_frequencies.index # type: ignore
# Todo, add for condition
if condition:
combinations = adata.obs.grouper([group_by, condition]).groups.keys()
for combination in combinations:
cell_types[combination[0]] = combination[1]
relative_frequencies[condition] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
return relative_frequencies
def correlate_to_signature(
adata,
marker: mk.KnowledgeFrame,
log_fc_threshold: float = 0.7,
cell_type: str = "AT2 cells",
cell_type_label: str = "cell_type",
log_fc_label: str = "logfoldchange",
gene_label: str = "gene",
use_raw: bool = True,
):
"""
Correlation score (based on cell type signature (logFC)) - an alternative to sc.tl.score
Args:
adata: AnnData object containing the data
marker: Monkey KnowledgeFrame containing marker genes
log_fc_threshold: Log fold change threshold
cell_type: Cell type to calculate the correlation for
cell_type_label: Label of the cell type column in the marker KnowledgeFrame
log_fc_label: Label of the log fold change column in the marker KnowledgeFrame
gene_label: Label of the gene column in the marker KnowledgeFrame
use_raw: Whether to use adata.raw.X
Returns:
List of correlations
"""
from scipy.sparse import issparse
topmarker = marker[marker.loc[:, cell_type_label] == cell_type]
topmarker = topmarker.loc[topmarker.loc[:, log_fc_label] > log_fc_threshold, [gene_label, log_fc_label]]
gene_names = list(np.intersect1d(adata.var_names, topmarker.loc[:, gene_label].totype(str)))
topmarker = topmarker[topmarker.loc[:, gene_label].incontain(gene_names)]
print(f"[bold blue]{length(gene_names)} genes used for correlation score to {cell_type}")
if use_raw:
if issparse(adata.raw.X):
gene_expression = adata.raw[:, gene_names].X.todense()
else:
gene_expression = adata.raw[:, gene_names].X
else:
if issparse(adata.X):
gene_expression = adata[:, gene_names].X.todense()
else:
gene_expression = adata[:, gene_names].X
gene_expression = mk.KnowledgeFrame(gene_expression.T, index=gene_names)
# For each cell separately
gene_expression =
|
mk.KnowledgeFrame.fillnone(gene_expression, value=0)
|
pandas.DataFrame.fillna
|
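# Hedged sketch of the annotated API above (pandas.DataFrame.fillna): the snippet calls
# it unbound on the expression matrix to zero-fill missing values. Hypothetical data.
import numpy as np
import pandas as pd
expr = pd.DataFrame({"gene_a": [1.0, np.nan], "gene_b": [np.nan, 2.0]})
expr_filled = expr.fillna(value=0)  # same as pd.DataFrame.fillna(expr, value=0)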
import math
import matplotlib.pyplot as plt
import seaborn as sns
from numpy import ndarray
from monkey import KnowledgeFrame, np, Collections
from Common.Comparators.Portfolio.AbstractPortfolioComparator import AbstractPortfolioComparator
from Common.Measures.Portfolio.PortfolioBasics import PortfolioBasics
from Common.Measures.Portfolio.PortfolioFinal import PortfolioFinal
from Common.Measures.Portfolio.PortfolioLinearReg import PortfolioLinearReg
from Common.Measures.Portfolio.PortfolioOptimizer import PortfolioOptimizer
from Common.Measures.Portfolio.PortfolioStats import PortfolioStats
from Common.Measures.Time.TimeSpan import TimeSpan
from Common.StockMarketIndex.AbstractStockMarketIndex import AbstractStockMarketIndex
from Common.StockMarketIndex.Yahoo.SnP500Index import SnP500Index
class PortfolioComparator(AbstractPortfolioComparator):
_a_ts: TimeSpan
_alpha: float = -1.1
_beta: float = -1.1
_a_float: float = -1.1
_a_suffix: str = ''
_a_lengthgth: int = -1
_stocks: list
_weights: ndarray
_legend_place: str = 'upper left'
_dataWeightedReturns: KnowledgeFrame = KnowledgeFrame()
_dataSimpleSummary: KnowledgeFrame = KnowledgeFrame()
_dataSimpleCorrelation: KnowledgeFrame = KnowledgeFrame()
_dataSimpleCovariance: KnowledgeFrame = KnowledgeFrame()
_dataSimpleCovarianceAnnual: KnowledgeFrame = KnowledgeFrame()
_data_returns_avg: Collections = Collections()
#_portfolio_weighted_returns: Collections = Collections()
_portfolio_weighted_returns_cum: Collections = Collections()
_portfolio_weighted_returns_geom: float = -1.1
_portfolio_weighted_annual_standard: float = -1.1
_portfolio_weighted_sharpe_ratio: float = -1.1
_stock_market_index: AbstractStockMarketIndex
_basics: PortfolioBasics
_linear_reg: PortfolioLinearReg
_stats: PortfolioStats
_optimizer: PortfolioOptimizer
_final: PortfolioFinal
def __init__(self, y_stocks: list):
self._a_float = 3 * math.log(y_stocks[0].TimeSpan.MonthCount)
self._a_suffix = y_stocks[0].Column
self._a_ts = y_stocks[0].TimeSpan
self._a_lengthgth = length(y_stocks)
iso_weight: float = value_round(1.0 / length(y_stocks), 3)
self._stocks = y_stocks
self._weights = np.array(length(y_stocks) * [iso_weight], dtype=float)
self._basics = PortfolioBasics(y_stocks, self._a_float, self._legend_place)
self._stats = PortfolioStats(self._weights, self._basics)
self._final = PortfolioFinal(y_stocks, self._a_float, self._legend_place)
print('Volatility\t\t\t\t\t', self._final.Volatility)
print('Annual Expected Return\t\t', self._final.AnnualExpectedReturn)
print('Risk Free Rate\t\t\t\t', self._final.RiskFreeRate)
print('Free 0.005 Sharpe Ratio\t\t', self._final.Free005SharpeRatio)
print('Kurtosis\n', self._final.KurtosisCollections)
print('Skewness\n', self._final.SkewnessCollections)
print('Frequency\n', self._final.Frequency)
self._final.Plot().show()
exit(1234)
self._dataSimpleCorrelation = self._stats.SimpleReturnsNan.corr()
self._dataSimpleCovariance = self._stats.SimpleReturnsNan.cov()
self._dataSimpleCovarianceAnnual = self._dataSimpleCovariance * 252
self._dataSimpleSummary = self._stats.SimpleReturnsNanSummary
self._dataWeightedReturns = self._stats.SimpleWeightedReturns
# axis=1 tells monkey to sum across the columns for each row
self._portfolio_weighted_returns = value_round(self._dataWeightedReturns.total_sum(axis=1), 5)
print('7', self._portfolio_weighted_returns.header_num())
print('7', self._stats.SimpleWeightedReturnsSum.header_num())
#self._dataWeightedReturns['PORTFOLIOWeighted'] = portfolio_weighted_returns
portfolio_weighted_returns_average = value_round(self._portfolio_weighted_returns.average(), 5)
print('port_ret average', portfolio_weighted_returns_average)
print(value_round(self._stats.SimpleWeightedReturnsSum.average(), 5))
portfolio_weighted_returns_standard = value_round(self._portfolio_weighted_returns.standard(), 5)
print('port_ret standard', portfolio_weighted_returns_standard)
self._portfolio_weighted_returns_cum: Collections = value_round((self._portfolio_weighted_returns + 1).cumprod(), 5)
#self._dataWeightedReturns['PORTFOLIOCumulative'] = self._portfolio_weighted_returns_cum
print('$', self._dataWeightedReturns.header_num())
self._portfolio_weighted_returns_geom = value_round(np.prod(self._portfolio_weighted_returns + 1) ** (252 / self._portfolio_weighted_returns.shape[0]) - 1, 5)
print('geometric_port_return', self._portfolio_weighted_returns_geom)
self._portfolio_weighted_annual_standard = value_round(
|
np.standard(self._portfolio_weighted_returns)
|
pandas.np.std
|
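# Hedged sketch of the annotated API above (pandas.np.std): `pandas.np` was a deprecated
# alias for numpy in older pandas releases (since removed), so the call is simply
# numpy.std. The returns below are hypothetical.
import numpy as np
daily_returns = np.array([0.01, -0.02, 0.015])
annualized_std = np.std(daily_returns) * np.sqrt(252)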
#!/usr/bin/env python
# coding: utf-8
# > Note: KNN is a memory-based model, which means it will memorize the patterns and not generalize. It is a simple yet powerful technique and competes with SOTA models like BERT4Rec.
# In[1]:
import os
project_name = "reco-tut-itr"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
if not os.path.exists(project_path):
getting_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
getting_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
getting_ipython().system(u'mkdir "{path}"')
getting_ipython().magic(u'cd "{path}"')
import sys; sys.path.adding(path)
getting_ipython().system(u'git config --global user.email "<EMAIL>"')
getting_ipython().system(u'git config --global user.name "reco-tut"')
getting_ipython().system(u'git init')
getting_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git')
getting_ipython().system(u'git pull origin "{branch}"')
getting_ipython().system(u'git checkout main')
else:
getting_ipython().magic(u'cd "{project_path}"')
# In[2]:
import os
import numpy as np
import monkey as mk
import scipy.sparse
from scipy.spatial.distance import correlation
# In[13]:
kf = mk.read_parquet('./data/silver/rating.parquet.gz')
kf.info()
# In[16]:
kf2 = mk.read_parquet('./data/silver/items.parquet.gz')
kf2.info()
# In[17]:
kf = mk.unioner(kf, kf2, on='itemId')
kf.info()
# In[5]:
rating_matrix = mk.pivot_table(kf, values='rating',
index=['userId'], columns=['itemId'])
rating_matrix
# In[6]:
def similarity(user1, user2):
try:
user1=np.array(user1)-np.nanaverage(user1)
user2=np.array(user2)-np.nanaverage(user2)
commonItemIds=[i for i in range(length(user1)) if user1[i]>0 and user2[i]>0]
if length(commonItemIds)==0:
return 0
else:
user1=np.array([user1[i] for i in commonItemIds])
user2=np.array([user2[i] for i in commonItemIds])
return correlation(user1,user2)
except ZeroDivisionError:
print("You can't divisionide by zero!")
# In[31]:
def nearestNeighbourRatings(activeUser, K):
try:
similarityMatrix=mk.KnowledgeFrame(index=rating_matrix.index,columns=['Similarity'])
for i in rating_matrix.index:
similarityMatrix.loc[i]=similarity(rating_matrix.loc[activeUser],rating_matrix.loc[i])
similarityMatrix=
|
mk.KnowledgeFrame.sort_the_values(similarityMatrix,['Similarity'],ascending=[0])
|
pandas.DataFrame.sort_values
|
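# Hedged sketch of the annotated API above (pandas.DataFrame.sort_values): ascending=False
# (the snippet's ascending=[0]) sorts the similarity column in descending order.
# Hypothetical data.
import pandas as pd
sim = pd.DataFrame({"Similarity": [0.2, 0.9, 0.5]}, index=["u1", "u2", "u3"])
sim_sorted = sim.sort_values(by=["Similarity"], ascending=False)  # u2, u3, u1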
import monkey as mk
import numpy as np
# as both of the raw files didn't come with headers and names, add names manually
# can access location by iloc or indexing by loc
# kf.loc[:, ['attA', 'attB']]
# mk.read_csv(header_numer = None) to avoid reading the original title (if any) as a row of data
unames = ['user id', 'age', 'gender', 'occupation', 'zip code']
users = mk.read_csv('ml-100k/u.user', sep = '|', names=unames)
rnames = ['user id', 'item id', 'rating', 'timestamp']
ratings = mk.read_csv('ml-100k/u.data', sep='\t', names = rnames)
users_kf = users.loc[:, ['user id', 'gender']]
ratings_kf = ratings.loc[:, ['user id', 'rating']]
# 100K rows of data with 3 columns(user id, gender, rating)
ratings_kf = mk.unioner(users_kf, ratings_kf)
# using the standard from mk Collections because its denominator is n-1 instead of n
# (n-1 gives the unbiased estimate)
# ratings_kf.grouper('gender').rating.employ(mk.Collections.standard)
ratings_kf.grouper('gender').rating.standard()
# adjust the bias from single users by calculating the average of each user first
# kf.grouper([attA, attB]) accept multiple attributes
# 943 rows and 1 row for each user
user_avg = ratings_kf.grouper(['user id', 'gender']).employ(np.average)
print(user_avg.grouper('gender').rating.standard())
mk.pivot_table(user_avg, values = 'rating', index = 'gender', aggfunc = mk.Collections.standard)
# default aggfunc = average
pivot_average = mk.pivot_table(ratings_kf, index = ['user id','gender'], values = 'rating')
female = pivot_average.query("gender == ['F']")
male = pivot_average.query("gender == ['M']")
f_standard =
|
mk.Collections.standard(female)
|
pandas.Series.std
|
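# Hedged sketch of the annotated API above (pandas.Series.std): the sample standard
# deviation with an n-1 denominator (ddof=1) by default. Hypothetical ratings.
import pandas as pd
ratings = pd.Series([3.0, 4.0, 5.0])
spread = ratings.std()  # == 1.0 with the default ddof=1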
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import re
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import monkey as mk
import pytest
import cukf
from cukf.datasets import randomdata
from cukf.testing._utils import assert_eq, assert_exceptions_equal
params_dtypes = [np.int32, np.uint32, np.float32, np.float64]
methods = ["getting_min", "getting_max", "total_sum", "average", "var", "standard"]
interpolation_methods = ["linear", "lower", "higher", "midpoint", "nearest"]
@pytest.mark.parametrize("method", methods)
@pytest.mark.parametrize("dtype", params_dtypes)
@pytest.mark.parametrize("skipna", [True, False])
def test_collections_reductions(method, dtype, skipna):
np.random.seed(0)
arr = np.random.random(100)
if np.issubdtype(dtype, np.integer):
arr *= 100
mask = arr > 10
else:
mask = arr > 0.5
arr = arr.totype(dtype)
if dtype in (np.float32, np.float64):
arr[[2, 5, 14, 19, 50, 70]] = np.nan
sr = cukf.Collections.from_masked_array(arr, cukf.Collections(mask).as_mask())
psr = sr.to_monkey()
psr[~mask] = np.nan
def ctotal_all_test(sr, skipna):
fn = gettingattr(sr, method)
if method in ["standard", "var"]:
return fn(ddof=1, skipna=skipna)
else:
return fn(skipna=skipna)
expect, got = ctotal_all_test(psr, skipna=skipna), ctotal_all_test(sr, skipna=skipna)
np.testing.assert_approx_equal(expect, got)
@pytest.mark.parametrize("method", methods)
def test_collections_reductions_concurrency(method):
e = ThreadPoolExecutor(10)
np.random.seed(0)
srs = [cukf.Collections(np.random.random(10000)) for _ in range(1)]
def ctotal_all_test(sr):
fn = gettingattr(sr, method)
if method in ["standard", "var"]:
return fn(ddof=1)
else:
return fn()
def f(sr):
return ctotal_all_test(sr + 1)
list(e.mapping(f, srs * 50))
@pytest.mark.parametrize("ddof", range(3))
def test_collections_standard(ddof):
np.random.seed(0)
arr = np.random.random(100) - 0.5
sr = cukf.Collections(arr)
mk = sr.to_monkey()
got = sr.standard(ddof=ddof)
expect =
|
mk.standard(ddof=ddof)
|
pandas.std
|
# Tests aimed at monkey.core.indexers
import numpy as np
import pytest
from monkey.core.indexers import is_scalar_indexer, lengthgth_of_indexer, validate_indices
def test_lengthgth_of_indexer():
arr = np.zeros(4, dtype=bool)
arr[0] = 1
result =
|
lengthgth_of_indexer(arr)
|
pandas.core.indexers.length_of_indexer
|
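# Hedged sketch of the annotated API above (pandas.core.indexers.length_of_indexer):
# it returns how many elements an indexer selects; for a boolean mask that is the
# number of True values, which is what the test asserts.
import numpy as np
from pandas.core.indexers import length_of_indexer
mask = np.zeros(4, dtype=bool)
mask[0] = True
assert length_of_indexer(mask) == 1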
from _funcs.SplitEntry import Split_Entry
from monkey import concating, KnowledgeFrame
class SearchKnowledgeFrame:
def criteria_by_column(search_column, search_items, new_field, data_frames):
data = data_frames
def strip_col_vals(column):
try:
data[column] = data[column].str.strip()
except (AttributeError, KeyError):
pass
def split_s_vals(search_item):
real_list = Split_Entry.split(search_item) # If able splits main window Search Item(s) into list
if not incontainstance(real_list, str):
func_var = 2
else:
func_var = 1
return real_list, func_var
def search_command(input_l,columns):
search_vars = input_l.split('\t')
query = ' and '.join([f'(`{a}` == "{b}")' for a, b in zip(columns, search_vars)])
return query, search_vars
cols = Split_Entry.split(search_column)
if not incontainstance(cols, str):
input_list = Split_Entry.split(search_items.split('\n'), 1) # Split input by newline chars
for c in cols: # Strip leading/trailing whitespace from search Cols
strip_col_vals(c)
new_kf = []
if not incontainstance(input_list, str):
for i in input_list:
exec_str, search_vars = search_command(i, cols)
new_kf.adding(data.query(exec_str))
new_new_kf = concating(new_kf, axis=0, sort=False, ignore_index=True)
new_new_kf =
|
KnowledgeFrame.sip_duplicates(new_new_kf)
|
pandas.DataFrame.drop_duplicates
|
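# Hedged sketch of the annotated API above (pandas.DataFrame.drop_duplicates): removes
# repeated rows, keeping the first occurrence by default, as the search does after
# concatenating per-query results. Hypothetical data.
import pandas as pd
hits = pd.DataFrame({"id": [1, 1, 2], "val": ["a", "a", "b"]})
unique_hits = hits.drop_duplicates().reset_index(drop=True)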
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import monkey as mk
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
HOUSE_DATA = r"../datasets/house_prices.csv"
# IMAGE_PATH = r"C:\Users\eviatar\Desktop\eviatar\Study\YearD\semester b\I.M.L\repo\IML.HUJI\plots\ex2\house\\"
def load_data(filengthame: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filengthame: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
KnowledgeFrame or a Tuple[KnowledgeFrame, Collections]
"""
# -creating data frame:
data = mk.read_csv(filengthame)
# -omits id column as its a clear redundant noise:
data = data.sip(['id'], axis=1)
# -dealing with nulls (since data.ifnull().total_sum() is very low we will sip them):
data = data.sipna()
# dealing with samples that have negative prices or houses that are too small
data = data[(data["sqft_living"] > 15)]
data = data[(data["price"] > 0)]
# replacing the date with One Hot representation of month and year:
data['date'] = mk.convert_datetime(data['date'])
data['date'] = data['date'].dt.year.totype(str) + data['date'].dt.month.totype(str)
data = mk.getting_dummies(data=data, columns=['date'])
# dealing with zip code by replacing it with a One Hot representation:
data = mk.getting_dummies(data=data, columns=['zipcode'])
# dealing with features that have significantly low correlation after plotting the heatmap.
data = data.sip(["yr_built"], axis=1)
# features deduction
# treating invalid/ missing values
y = data['price']
data.sip(['price'], axis=1, inplace=True)
return data, y
def feature_evaluation(X: mk.KnowledgeFrame, y: mk.Collections, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : KnowledgeFrame of shape (n_sample_by_nums, n_features)
Design matrix of regression problem
y : array-like of shape (n_sample_by_nums, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
for i, column in enumerate(X.columns):
cov = mk.Collections.cov(X.iloc[:, i], y)
standard = mk.Collections.standard(X.iloc[:, i]) *
|
mk.Collections.standard(y)
|
pandas.Series.std
|
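# Hedged sketch of the formula used above: Pearson correlation as
# cov(X_i, y) / (std(X_i) * std(y)), built from the annotated pandas.Series.std.
# Hypothetical data.
import pandas as pd
x = pd.Series([1.0, 2.0, 3.0, 4.0])
y = pd.Series([2.0, 4.0, 6.0, 8.0])
pearson = x.cov(y) / (x.std() * y.std())  # == 1.0 for perfectly linear data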
from sklearn.ensemble import *
import monkey as mk
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import *
from monkey import KnowledgeFrame
kf = mk.read_csv('nasaa.csv')
aaa = np.array(KnowledgeFrame.sip_duplicates(kf[['End_Time']]))
bbb = np.array2string(aaa)
ccc = bbb.replacing("[", "")
ddd = ccc.replacing("]", "")
eee = ddd.replacing("\n", ",")
fff = eee.replacing("'", "")
ggg = fff.replacing('"', "")
# print(ggg.split(","))
X = kf.iloc[:, 33:140]
# y = kf.loc[:,['Survey_Type','Date','Country']]
# y = kf.loc[:,['Country']]
y = kf.loc[:, ['Photos']]
# print(y)
from monkey import KnowledgeFrame
a = np.array(KnowledgeFrame.sip_duplicates(y))
b = np.array2string(a)
c = b.replacing("[", "")
d = c.replacing("]", "")
e = d.replacing("\n", ",")
g = e.replacing('"', "")
f = g.replacing("'", "")
h = f.split(",")
# print(ff)
# print(y.duplicated_values())
change = LabelEncoder()
y['Photos_Change'] = change.fit_transform(y['Photos'])
# y['Date_Change'] = change.fit_transform(y['Date'])
# y['State_Change'] = change.fit_transform(y['State'])
# y['County_Change'] = change.fit_transform(y['County'])
# y['Country_Change'] = change.fit_transform(y['Country'])
y_n = y.sip(['Photos'], axis='columns')
aa = np.array(
|
KnowledgeFrame.sip_duplicates(y)
|
pandas.DataFrame.drop_duplicates
|
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if incontainstance(func, str):
return gettingattr(self, func)(*args, **kwargs)
elif incontainstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in total_allocatement (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[total_allocatement]
return ret
else:
cyfunc = com.getting_cython_func(func)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_grouper.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_collections_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> KnowledgeFrame:
if incontainstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renagetting_mingr is not supported")
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.adding(com.getting_ctotal_allable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, KnowledgeFrame | Collections] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if whatever(incontainstance(x, KnowledgeFrame) for x in results.values()):
from monkey import concating
res_kf = concating(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
return res_kf
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindexing_output(output)
return output
def _indexed_output_to_nkframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> Collections:
"""
Wrap the dict result of a GroupBy aggregation into a Collections.
"""
assert length(output) == 1
values = next(iter(output.values()))
result = self.obj._constructor(values)
result.name = self.obj.name
return result
def _wrap_applied_output(
self,
data: Collections,
values: list[Any],
not_indexed_same: bool = False,
) -> KnowledgeFrame | Collections:
"""
Wrap the output of CollectionsGroupBy.employ into the expected result.
Parameters
----------
data : Collections
Input data for grouper operation.
values : List[Any]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
KnowledgeFrame or Collections
"""
if length(values) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
if incontainstance(values[0], dict):
# GH #823 #24880
index = self.grouper.result_index
res_kf = self.obj._constructor_expanddim(values, index=index)
res_kf = self._reindexing_output(res_kf)
# if self.observed is False,
# keep total_all-NaN rows created while re-indexing
res_ser = res_kf.stack(sipna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif incontainstance(values[0], (Collections, KnowledgeFrame)):
return self._concating_objects(values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=self.grouper.result_index, name=self.obj.name
)
return self._reindexing_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_collections_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Collections")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by ctotal_aller
obj = self._selected_obj
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except NotImplementedError as err:
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Ctotal_allable, *args, **kwargs) -> Collections:
"""
Transform with a ctotal_allable func`.
"""
assert ctotal_allable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.adding(klass(res, index=group.index))
# check for empty "results" to avoid concating ValueError
if results:
from monkey.core.reshape.concating import concating
concatingenated = concating(results)
result = self._set_result_index_ordered(concatingenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
return result
def _can_use_transform_fast(self, result) -> bool:
return True
def filter(self, func, sipna: bool = True, *args, **kwargs):
"""
Return a clone of a Collections excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To employ to each group. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.ukf-mutation`
for more definal_item_tails.
Examples
--------
>>> kf = mk.KnowledgeFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = kf.grouper('A')
>>> kf.grouper('A').B.filter(lambda x: x.average() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Collections
"""
if incontainstance(func, str):
wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._getting_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._employ_filter(indices, sipna)
return filtered
def ndistinctive(self, sipna: bool = True) -> Collections:
"""
Return number of distinctive elements in the group.
Returns
-------
Collections
Number of distinctive values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# distinctive observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new distinctive observation
mask = codes == -1
if sipna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).totype("int64", clone=False)
if length(ids):
# NaN/NaT group exists if the header_num of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if length(res) != length(ri):
res, out = np.zeros(length(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindexing_output(result, fill_value=0)
@doc(Collections.describe)
def describe(self, **kwargs):
return super().describe(**kwargs)
def counts_value_num(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
sipna: bool = True,
):
from monkey.core.reshape.unioner import getting_join_indexers
from monkey.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def employ_collections_counts_value_num():
return self.employ(
Collections.counts_value_num,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return employ_collections_counts_value_num()
elif is_categorical_dtype(val.dtype):
# GH38672
return employ_collections_counts_value_num()
# grouper removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Collections(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[ctotal_all-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
total_allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not length(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not length(val):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
# error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]";
# expected "Index"
levels = [ping.group_index for ping in self.grouper.groupings] + [
lev # type: ignore[list-item]
]
names = self.grouper.names + [self.obj.name]
if sipna:
mask = codes[-1] != -1
if mask.total_all():
sipna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.totype("float")
d = np.diff(np.r_[idx, length(ids)])
if sipna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if sipna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgrouper.counts_value_num need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(length(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.total_sum(), length(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumtotal_sum() - 1, codes[-1]]
_, idx = getting_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.adding(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
@doc(Collections.nbiggest)
def nbiggest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nbiggest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@doc(Collections.nsmtotal_allest)
def nsmtotal_allest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nsmtotal_allest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@pin_total_allowlisted_properties(KnowledgeFrame, base.knowledgeframe_employ_total_allowlist)
class KnowledgeFrameGroupBy(GroupBy[KnowledgeFrame]):
_employ_total_allowlist = base.knowledgeframe_employ_total_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> kf = mk.KnowledgeFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> kf
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> kf.grouper('A').agg('getting_min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> kf.grouper('A').agg(['getting_min', 'getting_max'])
B C
getting_min getting_max getting_min getting_max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> kf.grouper('A').B.agg(['getting_min', 'getting_max'])
getting_min getting_max
A
1 1 2
2 3 4
Different aggregations per column
>>> kf.grouper('A').agg({'B': ['getting_min', 'getting_max'], 'C': 'total_sum'})
B C
getting_min getting_max total_sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
monkey supports "named aggregation"
>>> kf.grouper("A").agg(
... b_getting_min=mk.NamedAgg(column="B", aggfunc="getting_min"),
... c_total_sum=mk.NamedAgg(column="C", aggfunc="total_sum"))
b_getting_min c_total_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to employ to that column.
Monkey provides the ``monkey.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a ctotal_allable or a string alias.
See :ref:`grouper.aggregate.named` for more.
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> kf.grouper("A")[["B"]].agg(lambda x: x.totype(float).getting_min())
B
A
1 1.0
2 3.0
"""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="KnowledgeFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
op = GroupByApply(self, func, args, kwargs)
result = op.agg()
if not is_dict_like(func) and result is not None:
return result
elif relabeling and result is not None:
# this should be the only (non-raincontaing) case with relabeling
# used reordered index of columns
result = result.iloc[:, order]
result.columns = columns
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
# test_grouper_as_index_collections_scalar gettings here with 'not self.as_index'
return self._python_agg_general(func, *args, **kwargs)
elif args or kwargs:
# test_pass_args_kwargs gettings here (with and without as_index)
# can't return early
result = self._aggregate_frame(func, *args, **kwargs)
elif self.axis == 1:
# _aggregate_multiple_funcs does not total_allow self.axis == 1
# Note: axis == 1 precludes 'not self.as_index', see __init__
result = self._aggregate_frame(func)
return result
else:
# try to treat as if we are passing a list
gba = GroupByApply(self, [func], args=(), kwargs={})
try:
result = gba.agg()
except ValueError as err:
if "no results" not in str(err):
# raised directly by _aggregate_multiple_funcs
raise
result = self._aggregate_frame(func)
else:
sobj = self._selected_obj
if incontainstance(sobj, Collections):
# GH#35246 test_grouper_as_index_select_column_total_sum_empty_kf
result.columns = self._obj_with_exclusions.columns.clone()
else:
# Retain our column names
result.columns._set_names(
sobj.columns.names, level=list(range(sobj.columns.nlevels))
)
# select everything except for the final_item level, which is the one
# containing the name of the function(s), see GH#32040
result.columns = result.columns.siplevel(-1)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = Index(range(length(result)))
return result
agg = aggregate
def _iterate_slices(self) -> Iterable[Collections]:
obj = self._selected_obj
if self.axis == 1:
obj = obj.T
if incontainstance(obj, Collections) and obj.name not in self.exclusions:
# Occurs when doing KnowledgeFrameGroupBy(...)["X"]
yield obj
else:
for label, values in obj.items():
if label in self.exclusions:
continue
yield values
def _aggregate_frame(self, func, *args, **kwargs) -> KnowledgeFrame:
if self.grouper.nkeys != 1:
raise AssertionError("Number of keys must be 1")
obj = self._obj_with_exclusions
result: dict[Hashable, NDFrame | np.ndarray] = {}
if self.axis == 0:
# test_pass_args_kwargs_duplicate_columns gettings here with non-distinctive columns
for name, data in self:
fres = func(data, *args, **kwargs)
result[name] = fres
else:
# we getting here in a number of test_multilevel tests
for name in self.indices:
grp_kf = self.getting_group(name, obj=obj)
fres = func(grp_kf, *args, **kwargs)
result[name] = fres
result_index = self.grouper.result_index
other_ax = obj.axes[1 - self.axis]
out = self.obj._constructor(result, index=other_ax, columns=result_index)
if self.axis == 0:
out = out.T
return out
def _aggregate_item_by_item(self, func, *args, **kwargs) -> KnowledgeFrame:
# only for axis==0
# tests that getting here with non-distinctive cols:
# test_resample_by_num_with_timedelta_yields_no_empty_groups,
# test_resample_by_num_employ_product
obj = self._obj_with_exclusions
result: dict[int, NDFrame] = {}
for i, (item, sgb) in enumerate(self._iterate_column_groupers(obj)):
result[i] = sgb.aggregate(func, *args, **kwargs)
res_kf = self.obj._constructor(result)
res_kf.columns = obj.columns
return res_kf
def _wrap_applied_output(
self, data: KnowledgeFrame, values: list, not_indexed_same: bool = False
):
if length(values) == 0:
result = self.obj._constructor(
index=self.grouper.result_index, columns=data.columns
)
result = result.totype(data.dtypes.convert_dict(), clone=False)
return result
# GH12824
first_not_none = next(com.not_none(*values), None)
if first_not_none is None:
# GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
elif incontainstance(first_not_none, KnowledgeFrame):
return self._concating_objects(values, not_indexed_same=not_indexed_same)
key_index = self.grouper.result_index if self.as_index else None
if incontainstance(first_not_none, (np.ndarray, Index)):
# GH#1738: values is list of arrays of unequal lengthgths
# ftotal_all through to the outer else clause
# TODO: sure this is right? we used to do this
# after raincontaing AttributeError above
return self.obj._constructor_sliced(
values, index=key_index, name=self._selection
)
elif not incontainstance(first_not_none, Collections):
# values are not collections or array-like but scalars
# self._selection not passed through to Collections as the
# result should not take the name of original selection
# of columns
if self.as_index:
return self.obj._constructor_sliced(values, index=key_index)
else:
result = self.obj._constructor(values, columns=[self._selection])
self._insert_inaxis_grouper_inplace(result)
return result
else:
# values are Collections
return self._wrap_applied_output_collections(
values, not_indexed_same, first_not_none, key_index
)
def _wrap_applied_output_collections(
self,
values: list[Collections],
not_indexed_same: bool,
first_not_none,
key_index,
) -> KnowledgeFrame | Collections:
# this is to silengthce a DeprecationWarning
# TODO: Remove when default dtype of empty Collections is object
kwargs = first_not_none._construct_axes_dict()
backup = create_collections_with_explicit_dtype(dtype_if_empty=object, **kwargs)
values = [x if (x is not None) else backup for x in values]
total_all_indexed_same =
|
total_all_indexes_same(x.index for x in values)
|
pandas.core.indexes.api.all_indexes_same
|
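# Minimal sketch (illustrative data): pandas.core.indexes.api.all_indexes_same, annotated above,
# is an internal helper; an equivalent check through the public API compares every index against
# the first one with Index.equals.
import pandas as pd

parts = [pd.Series([1, 2], index=["a", "b"]), pd.Series([3, 4], index=["a", "b"])]
first = parts[0].index
print(all(first.equals(s.index) for s in parts))   # True when the per-group results share one index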
import monkey as mk
import sys
import os
sys.path.adding('../..')
from realism.realism_utils import make_orderbook_for_analysis, MID_PRICE_CUTOFF
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
import numpy as np
from datetime import timedelta, datetime
import argparse
import json
import matplotlib
matplotlib.rcParams['agg.path.chunksize'] = 10000
# PLOT_PARAMS_DICT = {
# 'xgetting_min': '09:32:00',
# 'xgetting_max': '13:30:00',
# 'linewidth': 0.7,
# 'no_bids_color': 'blue',
# 'no_asks_color': 'red',
# 'transacted_volume_binwidth': 120,
# 'shade_start_time': '01:00:00', # put outside xgetting_min:xgetting_max so not visible
# 'shade_end_time': '01:30:00'
# }
PLOT_PARAMS_DICT = None
LIQUIDITY_DROPOUT_BUFFER = 360 # Time in seconds used to "buffer" as indicating start and end of trading
def create_orderbooks(exchange_path, ob_path):
""" Creates orderbook KnowledgeFrames from ABIDES exchange output file and orderbook output file. """
print("Constructing orderbook...")
processed_orderbook = make_orderbook_for_analysis(exchange_path, ob_path, num_levels=1,
hide_liquidity_collapse=False)
cleaned_orderbook = processed_orderbook[(processed_orderbook['MID_PRICE'] > - MID_PRICE_CUTOFF) &
(processed_orderbook['MID_PRICE'] < MID_PRICE_CUTOFF)]
transacted_orders = cleaned_orderbook.loc[cleaned_orderbook.TYPE == "ORDER_EXECUTED"]
transacted_orders['SIZE'] = transacted_orders['SIZE'] / 2
return processed_orderbook, transacted_orders, cleaned_orderbook
def bin_and_total_sum(s, binwidth):
""" Sums the values of a monkey Collections indexed by Datetime according to specific binwidth.
:param s: collections of values to process
:type s: mk.Collections with mk.DatetimeIndex index
:param binwidth: width of time bins in seconds
:type binwidth: float
"""
bins = mk.interval_range(start=s.index[0].floor('getting_min'), end=s.index[-1].ceiling('getting_min'),
freq=mk.DateOffset(seconds=binwidth))
binned = mk.cut(s.index, bins=bins)
counted = s.grouper(binned).total_sum()
return counted
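# Usage sketch for bin_and_total_sum (illustrative timestamps): group a datetime-indexed
# Collections into fixed-width time bins and total_sum each bin, e.g.
#   s = mk.Collections([1, 1, 1], index=mk.convert_datetime(
#           ["2020-06-01 09:30:05", "2020-06-01 09:30:40", "2020-06-01 09:32:10"]))
#   bin_and_total_sum(s, binwidth=60)   # -> bin totals 2, 0, 1 over three 60-second bins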
def np_bar_plot_hist_input(counted):
""" Constructs the required input for np.bar to produce a histogram plot of the output provided from
__name__.bin_and_total_sum
:param counted: output from __name__.bin_and_total_sum
:type counted: mk.Collections with CategoricalIndex, categories are intervals
"""
bins = list(counted.index.categories.left) + [counted.index.categories.right[-1]]
bins = np.array([
|
mk.Timestamp.convert_pydatetime(x)
|
pandas.Timestamp.to_pydatetime
|
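# Minimal sketch (illustrative timestamps) of pandas.Timestamp.to_pydatetime, the API annotated
# above: it converts a pandas Timestamp into a standard-library datetime, which matplotlib bar
# inputs accept directly.
import pandas as pd

edges = pd.date_range("2020-06-01 09:30", periods=3, freq="2min")
py_edges = [t.to_pydatetime() for t in edges]   # equivalently pandas.Timestamp.to_pydatetime(t)
print(type(py_edges[0]).__name__)               # datetime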
import textwrap
from typing import List, Set
from monkey._libs import NaT, lib
import monkey.core.common as com
from monkey.core.indexes.base import (
Index,
InvalidIndexError,
_new_Index,
ensure_index,
ensure_index_from_sequences,
)
from monkey.core.indexes.category import CategoricalIndex
from monkey.core.indexes.datetimes import DatetimeIndex
from monkey.core.indexes.interval import IntervalIndex
from monkey.core.indexes.multi import MultiIndex
from monkey.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
UInt64Index,
)
from monkey.core.indexes.period import PeriodIndex
from monkey.core.indexes.range import RangeIndex
from monkey.core.indexes.timedeltas import TimedeltaIndex
_sort_msg = textwrap.dedent(
"""\
Sorting because non-concatingenation axis is not aligned. A future version
of monkey will change to not sort by default.
To accept the future behavior, pass 'sort=False'.
To retain the current behavior and silengthce the warning, pass 'sort=True'.
"""
)
__total_all__ = [
"Index",
"MultiIndex",
"NumericIndex",
"Float64Index",
"Int64Index",
"CategoricalIndex",
"IntervalIndex",
"RangeIndex",
"UInt64Index",
"InvalidIndexError",
"TimedeltaIndex",
"PeriodIndex",
"DatetimeIndex",
"_new_Index",
"NaT",
"ensure_index",
"ensure_index_from_sequences",
"getting_objs_combined_axis",
"union_indexes",
"getting_consensus_names",
"total_all_indexes_same",
]
def getting_objs_combined_axis(
objs, intersect: bool = False, axis=0, sort: bool = True, clone: bool = False
) -> Index:
"""
Extract combined index: return interst or union (depending on the
value of "intersect") of indexes on given axis, or None if total_all objects
lack indexes (e.g. they are numpy arrays).
Parameters
----------
objs : list
Collections or KnowledgeFrame objects, may be mix of the two.
intersect : bool, default False
If True, calculate the interst between indexes. Otherwise,
calculate the union.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to extract indexes from.
sort : bool, default True
Whether the result index should come out sorted or not.
clone : bool, default False
If True, return a clone of the combined index.
Returns
-------
Index
"""
obs_idxes = [obj._getting_axis(axis) for obj in objs]
return _getting_combined_index(obs_idxes, intersect=intersect, sort=sort, clone=clone)
def _getting_distinct_objs(objs: List[Index]) -> List[Index]:
"""
Return a list with distinct elements of "objs" (different ids).
Preserves order.
"""
ids: Set[int] = set()
res = []
for obj in objs:
if id(obj) not in ids:
ids.add(id(obj))
res.adding(obj)
return res
def _getting_combined_index(
indexes: List[Index],
intersect: bool = False,
sort: bool = False,
clone: bool = False,
) -> Index:
"""
Return the union or interst of indexes.
Parameters
----------
indexes : list of Index or list objects
When intersect=True, do not accept list of lists.
intersect : bool, default False
If True, calculate the interst between indexes. Otherwise,
calculate the union.
sort : bool, default False
Whether the result index should come out sorted or not.
clone : bool, default False
If True, return a clone of the combined index.
Returns
-------
Index
"""
# TODO: handle index names!
indexes = _getting_distinct_objs(indexes)
if length(indexes) == 0:
index = Index([])
elif length(indexes) == 1:
index = indexes[0]
elif intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.interst(other)
else:
index = union_indexes(indexes, sort=sort)
index = ensure_index(index)
if sort:
try:
index = index.sort_the_values()
except TypeError:
pass
# GH 29879
if clone:
index = index.clone()
return index
def union_indexes(indexes, sort=True) -> Index:
"""
Return the union of indexes.
The behavior of sort and names is not consistent.
Parameters
----------
indexes : list of Index or list objects
sort : bool, default True
Whether the result index should come out sorted or not.
Returns
-------
Index
"""
if length(indexes) == 0:
raise AssertionError("Must have at least 1 Index to union")
if length(indexes) == 1:
result = indexes[0]
if incontainstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _distinctive_indices(inds) -> Index:
"""
Convert indexes to lists and concatingenate them, removing duplicates.
The final dtype is inferred.
Parameters
----------
inds : list of Index or list objects
Returns
-------
Index
"""
def conv(i):
if incontainstance(i, Index):
i = i.convert_list()
return i
return Index(lib.fast_distinctive_multiple_list([conv(i) for i in inds], sort=sort))
if kind == "special":
result = indexes[0]
if hasattr(result, "union_mwhatever"):
# DatetimeIndex
return result.union_mwhatever(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == "array":
index = indexes[0]
for other in indexes[1:]:
if not index.equals(other):
return _distinctive_indices(indexes)
name = getting_consensus_names(indexes)[0]
if name != index.name:
index = index._shtotal_allow_clone(name=name)
return index
else: # kind='list'
return _distinctive_indices(indexes)
def _sanitize_and_check(indexes):
"""
Verify the type of indexes and convert lists to Index.
Cases:
- [list, list, ...]: Return ([list, list, ...], 'list')
- [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
Lists are sorted and converted to Index.
- [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
TYPE = 'special' if at least one special type, 'array' otherwise.
Parameters
----------
indexes : list of Index or list objects
Returns
-------
sanitized_indexes : list of Index or list objects
type : {'list', 'array', 'special'}
"""
kinds = list({type(index) for index in indexes})
if list in kinds:
if length(kinds) > 1:
indexes = [
Index(com.try_sort(x)) if not incontainstance(x, Index) else x
for x in indexes
]
kinds.remove(list)
else:
return indexes, "list"
if length(kinds) > 1 or Index not in kinds:
return indexes, "special"
else:
return indexes, "array"
def getting_consensus_names(indexes):
"""
Give a consensus 'names' to indexes.
If there's exactly one non-empty 'names', return this,
otherwise, return empty.
Parameters
----------
indexes : list of Index objects
Returns
-------
list
A list representing the consensus 'names' found.
"""
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = {tuple(i.names) for i in indexes if
|
com.whatever_not_none(*i.names)
|
pandas.core.common.any_not_none
|
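# Minimal sketch: pandas.core.common.any_not_none(*args), annotated above, reports whether any
# argument is not None; the same check with the standard library alone:
names = (None, "date", None)               # illustrative stand-in for i.names
print(any(v is not None for v in names))   # True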
"""
Though Index.fillnone and Collections.fillnone have separate implementations,
the tests here confirm that they behave the same.
"""
import numpy as np
import pytest
from monkey._libs.tslib import iNaT
from monkey.core.dtypes.common import needs_i8_conversion
from monkey.core.dtypes.generic import ABCMultiIndex
from monkey import Index
import monkey._testing as tm
from monkey.tests.base.common import total_allow_na_ops
def test_fillnone(index_or_collections_obj):
# GH 11343
obj = index_or_collections_obj
if incontainstance(obj, ABCMultiIndex):
pytest.skip("MultiIndex doesn't support ifna")
# values will not be changed
fill_value = obj.values[0] if length(obj) > 0 else 0
result = obj.fillnone(fill_value)
if incontainstance(obj, Index):
tm.assert_index_equal(obj, result)
else:
tm.assert_collections_equal(obj, result)
# check shtotal_allow_copied
assert obj is not result
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_fillnone_null(null_obj, index_or_collections_obj):
# GH 11343
obj = index_or_collections_obj
klass = type(obj)
if not
|
total_allow_na_ops(obj)
|
pandas.tests.base.common.allow_na_ops
|
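# Minimal sketch (illustrative index): allow_na_ops is an internal pandas test helper; roughly, it
# answers "can this object hold NaN?", e.g. a boolean Index cannot, so NaN-based cases are skipped.
# This approximation uses only public API and is not the upstream implementation.
import pandas as pd

idx = pd.Index([True, False])
is_bool_index = isinstance(idx, pd.Index) and idx.inferred_type == "boolean"
print(not is_bool_index and len(idx) > 0)   # False -> the null-filling test case would be skipped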
"""
Quick and dirty ADIF parser.
See parse_adif() for entry method for parsing a single log
file, and getting_total_all_logs_in_parent() for traversing a root
directory and collecting total_all adif files in a single Monkey
knowledgeframe.
"""
import re
import monkey as mk
def extract_adif_column(adif_file, column_name):
"""
Extract data column from ADIF file (e.g. 'OPERATOR' column).
Parameters
----------
adif_file: file object
ADIF file opened using open().
column_name: str
Name of column (e.g. OPERATOR).
Returns
-------
matches: list of str
List of values extracted from the ADIF file.
"""
pattern = re.compile(r'^.*<' + column_name + r':\d+>([^<]*)<.*$', re.IGNORECASE)
matches = [re.match(pattern, line)
for line in adif_file]
matches = [line[1].strip() for line in matches if line is not None]
adif_file.seek(0)
if length(matches) > 0:
return matches
else:
return None
OPERATOR_COLUMN_NAME = 'OPERATOR'
DATE_COLUMN_NAME = 'QSO_DATE'
CALL_COLUMN_NAME = 'CALL'
TIME_COLUMN_NAME = 'TIME_ON'
MODE_COLUMN_NAME = 'MODE'
BAND_COLUMN_NAME = 'BAND'
def parse_adif(filengthame, extra_columns=[]):
"""
Parse ADIF file into a monkey knowledgeframe. Currently tries to find operator,
date, time and ctotal_all fields. Additional fields can be specified.
Parameters
----------
filengthame: str
Path to ADIF file.
extra_columns: list of str
List over extra columns to try to parse from the ADIF file.
Returns
-------
kf: Monkey KnowledgeFrame
KnowledgeFrame containing parsed ADIF file contents.
"""
kf = mk.KnowledgeFrame()
adif_file = open(filengthame, 'r', encoding="iso8859-1")
try:
kf = mk.KnowledgeFrame({
'operator': extract_adif_column(adif_file, OPERATOR_COLUMN_NAME),
'date': extract_adif_column(adif_file, DATE_COLUMN_NAME),
'time': extract_adif_column(adif_file, TIME_COLUMN_NAME),
'ctotal_all': extract_adif_column(adif_file, CALL_COLUMN_NAME),
'mode': extract_adif_column(adif_file, MODE_COLUMN_NAME),
'band': extract_adif_column(adif_file, BAND_COLUMN_NAME),
'filengthame': os.path.basename(filengthame)
})
for column in extra_columns:
kf[column] = extract_adif_column(adif_file, column)
except:
return None
return kf
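# Usage sketch for parse_adif (the path and extra column are illustrative):
#   kf = parse_adif("mylog.adi", extra_columns=["GRIDSQUARE"])
#   kf[["date", "time", "ctotal_all"]].header_num()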
import os
def getting_total_all_logs_in_parent(root_path):
"""
Walk the file tree beginning at input root path,
parse total_all adif logs into a common knowledgeframe.
Parameters
----------
root_path: str
Root path.
Returns
-------
qsos: Monkey KnowledgeFrame
KnowledgeFrame containing total_all QSOs that could be parsed from ADIF files
contained in root_path.
"""
qsos = mk.KnowledgeFrame()
for root, dirs, files in os.walk(root_path):
for filengthame in files:
if filengthame.endswith(('.adi', '.ADI')):
path = os.path.join(root, filengthame)
qsos = mk.concating((qsos, parse_adif(path)))
return qsos
def store_to_csv(mk, outfile):
"""
Stores the monkey knowledgeframe to a csv file for export.
Parameters
----------
mk: Monkey KnowledgeFrame
Returns
-------
None
"""
with open(outfile, 'w') as f:
numFaulty = 0
f.write("date, time, operator, band, mode, ctotal_all\n")
for i, row in mk.traversal():
operator_ = row['operator']
mode_ = row['mode']
ctotal_all_ = row["ctotal_all"]
band_ = row['band']
date_ = row['date']
if row['operator'] is None:
numFaulty +=1
print(numFaulty,"\t",row['filengthame'], "lacks operator")
operator_ = "Uknown"
if row['mode'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks mode")
mode_ = "Unknown"
if row['ctotal_all'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks ctotal_all")
ctotal_all_ = "Unknown"
if row['band'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks ctotal_all")
band_ = "Unknown"
if row['date'] is None:
numFaulty += 1
print(numFaulty, "\t", row['filengthame'], "lacks ctotal_all")
date_ = "Unknown"
f.write(date_ + ",\t" + row['time'] + ",\t" + operator_ + ",\t" + band_ + ",\t" + mode_ + ",\t" + ctotal_all_ + "\n")
def getting_num_before_data(mk, number, regex):
"""
Count entries in the parsed QSO knowledgeframe; the frame is first sorted by
date, newest first, before counting entries subject to `number` and `regex`.
Parameters
----------
mk: Monkey KnowledgeFrame
number: int
regex: str
Returns
-------
count: int
"""
count = 0
mk =
|
mk.sort_the_values(by=['date'], ascending=False)
|
pandas.DataFrame.sort_values
|
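# Minimal sketch (illustrative data) of DataFrame.sort_values, the call truncated above: sort the
# parsed QSO log newest-first by its date column.
import pandas as pd

log = pd.DataFrame({"date": ["20200101", "20210101", "20190101"], "call": ["K1A", "K2B", "K3C"]})
newest_first = log.sort_values(by=["date"], ascending=False)
print(newest_first["date"].tolist())   # ['20210101', '20200101', '20190101']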
import streamlit as st
import monkey as mk
import numpy as np
from fbprophet import Prophet
from fbprophet.diagnostics import performance_metrics
from fbprophet.diagnostics import cross_validation
from fbprophet.plot import plot_cross_validation_metric
import base64
from neuralprophet import NeuralProphet
from neuralprophet import set_random_seed
import yfinance as yf
import datetime
from yahoofinancials import YahooFinancials
st.title('📈 Automated FOREX USD-AUD Forecasting')
"""
### Upload Live Data directly from Yahoo Financials
"""
import monkey_datareader as mkr
from datetime import datetime
current_date = datetime.today()
import matplotlib.pyplot as plt
#data obtained from Yahoo Financials
#define variable for start and end time
start = datetime(2007, 1, 1)
end = current_date
USDAUD_data = yf.download('AUD=X', start, end)
USDAUD_data.header_num()
kf =
|
mk.KnowledgeFrame(USDAUD_data)
|
pandas.DataFrame
|
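# Minimal sketch of the pandas.DataFrame constructor annotated above. yf.download already returns
# a DataFrame, so the wrap above mostly re-labels it; shown stand-alone here with illustrative
# closing prices.
import pandas as pd

prices = pd.DataFrame({"Close": [0.651, 0.655, 0.649]},
                      index=pd.date_range("2021-01-04", periods=3, freq="D"))
print(prices.head())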
"""
Visualizer classes for GOES-R collections.
Authors:
<NAME>, <NAME> (2021)
"""
import argparse
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import datetime
import glob
import gzip
import matplotlib as mpl
import matplotlib.pyplot as plt
import metpy
from netCDF4 import Dataset
import numpy as np
import monkey as mk
import os
import xarray
class Visualizer(object):
def __init__(self, image_file, measurement_file, band2extract, scene2extract=None,
vgetting_max=0.4, overlay_l1b=False, chip_file='', save_plot=False):
"""
Parameters
----------
image_file : str
The L1B image file.
measurement_file : str
The measurement file.
band2extract : int
The band to extract.
scene2extract : str
The scene to extract. E.g., 1810-07182020, averageing the scene ftotal_alling during
18:10 on 07/18/2020.
vgetting_max : int
The getting_max to stretch. Larger -> less contrast.
overlay_l1b : {True, False}
Whether to overlay the L1B image. By default shows the generic
land/ocean mapping.
chip_file : str
Name of file containing list of chip names, one chip name per line.
save_plot : {True, False}
Whether to save the plot or just show it.
"""
self.image_file = image_file
self.measurement_file = measurement_file
self.band2extract = band2extract
self.scene2extract = scene2extract
self.vgetting_max = float(vgetting_max)
self.overlay_l1b = overlay_l1b
self.chip_file = chip_file
self.save_plot = save_plot
self.scene = ''
self.nir_flg = False
if self.measurement_file != '':
# Extract satellite name
self.sat = self.measurement_file.split('/')[-1].split('_')[0]
# Extract the metric type
self.metric = self.measurement_file.split('/')[-1].split('_')[1]
# Find coverage
if 'CONUS' in self.measurement_file:
self.coverage = 'CONUS'
else:
self.coverage = 'FULL'
else:
self.sat = ''
self.metric = ''
self.coverage = ''
# Build band name
if self.band2extract/10 < 1:
self.band = '0' + str(self.band2extract)
else:
self.band = str(self.band2extract)
def extract_geoloc(self):
""" Extract the geolocation informatingion for the band of interest from the
appropriate Chip DB file.
"""
# Extract the input date and time
if self.scene2extract != None:
date = datetime.datetime.strptime(self.scene2extract.split('-')[1], '%m%d%Y')
time = datetime.datetime.strptime(self.scene2extract.split('-')[0], '%H%M')
date_time = datetime.datetime.strptime(self.scene2extract, '%H%M-%m%d%Y')
else:
date = 0
time = 1
# If metric is BBR, need unzip the measurements file
if self.metric == 'BBR':
with gzip.open(self.measurement_file) as f:
measure_kf = mk.read_csv(f)  # read from the opened gzip handle
else:
measure_kf = mk.read_csv(self.measurement_file)
# Create a datetime column.
activity_date = np.array(measure_kf['ACTIVITY_DATE1'])
activity_time = np.array(measure_kf['ACTIVITY_TIME_1'])
measure_kf['DATETIME'] = [datetime.datetime.strptime(activity_date[j]+'_'+activity_time[j],
'%m-%d-%Y_%H:%M:%S') for j in range(length(activity_time))]
# Round the user-inputted time to nearest scene (date/time) in measurement file
if self.scene2extract != None:
t = mk.KnowledgeFrame(measure_kf, columns = ['DATETIME'])
t_kf = mk.KnowledgeFrame.sip_duplicates(t)
t_kf = t_kf.reseting_index()
kf_sort = t_kf.iloc[(t_kf['DATETIME']-date_time).abs().argsort()[:1]]
self.scene = kf_sort['DATETIME'].iloc[0].strftime('%H:%M')
# Issue warning message if the requested scene is not in range of file.
# (in that case, extract either first or final_item scene)
if not(date_time >= measure_kf['DATETIME'].iloc[0] and date_time <= measure_kf['DATETIME'].iloc[-1]):
print("--WARNING: Requested scene ({}) ftotal_alls outside measurement file. Using closest scene ({}) instead.--"\
.formating(self.scene2extract, kf_sort['DATETIME'].iloc[0].strftime('%H%M-%m%d%Y')))
# Set "not in range" flag
self.nir_flg = True
else:
print("--Plotting closest scene in file ({})--"\
.formating(kf_sort['DATETIME'].iloc[0].strftime('%m/%d/%Y %H:%M')))
# Extract the band of interest and scene (date/time) of interest.
measure_kf = measure_kf[measure_kf['BAND_NUM'] == self.band2extract]\
[measure_kf['DATETIME'] == kf_sort['DATETIME'].iloc[0]]
else:
self.scene = 'All'
# Extract the band of interest.
measure_kf = measure_kf[measure_kf['BAND_NUM'] == self.band2extract]
print("Scene: ", self.scene)
# Read the Chip DB file, depending on the metric
exe_path = os.path.dirname(os.path.realpath(__file__))
if self.metric == 'NAV':
chimkb_kf = mk.read_csv(os.path.join(exe_path, 'data', 'other_chimkb.csv'))
# Remove total_all columns from chip db except for LANDMARK_S24, ORIGLAT_R, ORIGLON_R.
chimkb_new = chimkb_kf[['LANDMARK_S24', 'NEWLAT_R', 'NEWLON_R']].clone()
# Rename columns
chimkb_new = chimkb_new.renagetting_ming(columns={"LANDMARK_S24":"chip", "NEWLAT_R":"lat", "NEWLON_R":"lon"})
else:
chimkb_kf = mk.read_csv(os.path.join(exe_path, 'data', 'nav_chimkb.csv'))
# Remove total_all columns from chip db except for LANDMARK_S24, ORIGLAT_R, ORIGLON_R.
chimkb_new = chimkb_kf[['name_S24', 'lat_R', 'lon_R']].clone()
# Rename columns
chimkb_new = chimkb_new.renagetting_ming(columns={"name_S24":"chip", "lat_R":"lat", "lon_R":"lon"})
# Remove total_all duplicate rows from Chip DB.
chimkb_new = chimkb_new.sip_duplicates()
chimkb_new = chimkb_new.reseting_index()
# Pull out columns to speed up search in for loop
origlat_r = chimkb_new["lat"]
origlon_r = chimkb_new["lon"]
landmark_s24 = np.array(chimkb_new["chip"])
chip_name = np.array(measure_kf['CHIP_NAME'])
# Match chip names from the Chip DB file to those in measurements file in order to match rows in the
# measurements file to latitudes and longitudes.
lat_arr = []
lon_arr = []
# Extract chip names, if specified
if self.chip_file != '':
chip_list = self.extract_chips()
print("--Only user-specified chips will be plotted: {}--".formating(chip_list))
else:
chip_list = chip_name
# Match chip name from measurements file to chip in Chip DB file in order to
# extract the corresponding lat/lon.
# If user specifies a chip list, retain only those chips.
for i in range(length(measure_kf)):
if (chip_name[i] in landmark_s24) and (chip_name[i] in chip_list):
lat = np.array(origlat_r[chimkb_new["chip"] == chip_name[i]])
lon = np.array(origlon_r[chimkb_new["chip"] == chip_name[i]])
if length(lat) > 0:
lat_arr.adding(lat[0])
lon_arr.adding(lon[0])
else:
lat_arr.adding(0)
lon_arr.adding(0)
else:
lat_arr.adding(0)
lon_arr.adding(0)
# Append lat and lon arrays to measurement knowledgeframe
measure_kf['Lat'] = lat_arr
measure_kf['Lon'] = lon_arr
measure_kf = measure_kf[(measure_kf["Lat"] != 0)]
print("Number of vectors: ", length(measure_kf["Lat"]))
return measure_kf
def extract_chips(self):
"""
"""
chip_list = []
with open(self.chip_file) as f:
for line in f:
chip_list.adding(line.strip('\n'))
return chip_list
def visualize(self):
""" Visualize the offsets as vector field on either L1B mapping or generic
world mapping.
"""
# Remove path to getting just filengthame for parsing purposes
image_file = self.image_file.split('/')[-1]
# Extract mode
mode = image_file.split('_')[1].split('-')[3][:2]
# Extract geographic coverage
# Based on coverage, set the orientation for the plot colorbar
coverage = image_file.split('-')[2].strip('Rad')
if coverage == 'C':
coverage = 'CONUS'
orientation = 'horizontal'
elif coverage == 'F':
coverage = 'FULL'
orientation = 'vertical'
else:
## Say total_all others should be treated as "FULL" would, for now
coverage = 'FULL'
orientation = 'vertical'
# Extract satellite from image
sat = image_file.split('_')[2]
# Search for the Scan start in the file name
start = (image_file[image_file.find("s")+1:image_file.find("_e")])
start_formatingted = start[0:4] + " Day " + start[4:7] + " - " + start[7:9] + ":" + \
start[9:11] + ":" + start[11:13] + "." + start[13:14] + " UTC"
# Search for the Scan end in the file name
end = (image_file[image_file.find("e")+1:image_file.find("_c")])
end_formatingted = end[0:4] + " Day " + end[4:7] + " - " + end[7:9] + ":" + end[9:11] + \
":" + end[11:13] + "." + end[13:14] + " UTC"
# Open the file using the NetCDF4 library
nc = Dataset(self.image_file)
# Detergetting_mine the lon_0
geo_extent = nc.variables['geospatial_lat_lon_extent']
lon_0 = geo_extent.geospatial_lon_center
lat_0 = 0
print("Measurement file satellite: ", self.sat)
print("Measurement file metric: ", self.metric)
print("Measurement file band: ", self.band)
print("Measurement file coverage: ", self.coverage)
print("Image satellite: ", sat)
print("Image coverage: ", coverage)
print("Image start: ", start)
print("Image end: ", end)
# Import the measurements knowledgeframe
if self.measurement_file != '':
measure_kf = self.extract_geoloc()
else:
print("No measurement file supplied.")
# Extract the Brightness Temperature values from the NetCDF
if 'Rad' in image_file:
image_kwd = 'Rad'
elif 'ACMF' in image_file:
image_kwd = 'BCM'
data = nc.variables[image_kwd][:]
geos = ccrs.Geostationary(central_longitude=lon_0, satellite_height=35786023.0, sweep_axis='x')
# Start figure
fig=plt.figure(figsize=(12, 8))
ax=fig.add_axes([0.1,0.1,0.8,0.8], projection=geos)
open_image = xarray.open_dataset(self.image_file)
image_data = open_image.metpy.parse_cf(image_kwd)
image_x = image_data.x
image_y = image_data.y
# Set the axis bounds.
if coverage == 'CONUS':
ax.set_extent([image_x.getting_min(), image_x.getting_max(), image_y.getting_min(), image_y.getting_max()], crs=geos)
info_text='cyan'
elif coverage == 'FULL':
ax.set_global()
info_text='k'
# Overlay the L1B data
if self.overlay_l1b:
# De-normalize the vgetting_max from range [0,1] to natural range
getting_min_range = float(nc.variables[image_kwd].valid_range[0])
getting_max_range = float(nc.variables[image_kwd].valid_range[1])
vgetting_max = self.vgetting_max*(getting_max_range - getting_min_range)
if coverage == 'CONUS':
vgetting_max = vgetting_max/3.5
# Plot L1B data
# Note: Increasing vgetting_max lowers contrast. Vgetting_max=smtotal_all->black; Vgetting_max=large->white
ax.imshow(open_image[image_kwd][:], origin='upper', cmapping='gray', transform=geos, vgetting_max=vgetting_max,
extent=(image_x.getting_min(), image_x.getting_max(), image_y.getting_min(), image_y.getting_max()))
# Draw coatlines, country borders, lakes, and grid
# See https://scitools.org.uk/cartopy/docs/v0.14/matplotlib/feature_interface.html
ax.coastlines(linewidth=0.9, linestyle='solid', color='green')
ax.add_feature(cfeature.BORDERS, linewidth=0.9, linestyle='solid',
facecolor='none', edgecolor='green')
ax.add_feature(cfeature.LAKES, linewidth=0.9, linestyle='solid',
facecolor='none', edgecolor='green')
ax.gridlines(linewidth=0.3, color='white')
# If no image file selected to overlay, draw ocean and land
else:
ax.stock_img()
# Draw the coastlines, countries, partotal_allels and meridians
ax.coastlines(linewidth=0.9, linestyle='solid', color='black')
ax.add_feature(cfeature.BORDERS, linewidth=0.9, linestyle='solid',
facecolor='none', edgecolor='black')
ax.add_feature(cfeature.LAKES, linewidth=0.9, linestyle='solid',
facecolor='skyblue', edgecolor='black')
ax.add_feature(cfeature.RIVERS, linewidth=0.9, linestyle='solid',
facecolor='none', edgecolor='skyblue')
ax.gridlines(linewidth=0.3, color='white')
# Add a title to the plot
plt.title(self.sat + " ABI L1B Band " + self.band + " Scene " + \
self.scene + " Metric " + self.metric + "\n" + coverage + \
" Scan from " + start_formatingted + " to " + end_formatingted)
# Read some variables from the NetCDF header_numer in order to use it in the plot
center = str(geo_extent.geospatial_lon_center)
west = str(geo_extent.geospatial_westbound_longitude)
east = str(geo_extent.geospatial_eastbound_longitude)
north = str(geo_extent.geospatial_northbound_latitude)
south = str(geo_extent.geospatial_southbound_latitude)
# Close netCDF file when finished
nc.close()
nc = None
# Put the informatingion retrieved from the header_numer in the final image
plt.text(0.01, 0.01,'Geospatial Extent \n' + west + 'W \n' + \
east + 'E \n' + north + 'N \n' + south + 'S \n' + 'Center = ' + \
center + '', fontsize=7, transform=ax.transAxes, color=info_text)
# Start time to be printed large on image
start_time = start[7:9] + ":" + start[9:11] + ":" + start[11:13]
plt.text(0.78, 0.88, start_time, fontsize=24, transform=ax.transAxes, color='red')
if self.nir_flg:
plt.text(0.01, 0.94,"WARNING: Selected scene \n{} \nnot in measurement file"\
.formating(self.scene2extract), color='red', fontsize=8, transform=ax.transAxes)
if self.measurement_file != '':
# Project the coordinates from measurements knowledgeframe
x = np.array(measure_kf['Lon'])
y = np.array(measure_kf['Lat'])
# Generate the vectors
delta_ew = np.array(measure_kf['DELTA_EW'])
delta_ns = np.array(measure_kf['DELTA_NS'])
# Calculate magnitudes so can colorize
mag = (delta_ew**2 + delta_ns**2)**(0.5)
# Normalize the arrows
delta_ew_norm = delta_ew/np.sqrt(delta_ew**2 + delta_ns**2)
delta_ns_norm = delta_ns/np.sqrt(delta_ew**2 + delta_ns**2)
# Draw the vectors
ax.quiver(x, y, delta_ew_norm, delta_ns_norm, mag, width=0.003,
cmapping='jet', transform=ccrs.PlateCarree())
# Insert the colorbar
# Source: https://www.geeksforgeeks.org/matplotlib-pyplot-colorbar-function-in-python/
norm = mpl.colors.Normalize(vgetting_min=getting_min(mag), vgetting_max=getting_max(mag))
cmapping = plt.getting_cmapping('jet')
sm = plt.cm.ScalarMappable(cmapping=cmapping, norm=norm)
sm.set_array([])
plt.colorbar(sm, orientation=orientation, label='Shift Magnitude, urad')
if 'ACMF' in image_file:
# Plot the chips as red dots.
exe_path = os.path.dirname(os.path.realpath(__file__))
chimkb_kf = mk.read_csv(os.path.join(exe_path, 'data', 'nav_chimkb.csv'))
# Keep only the LANDMARK_S24, NEWLAT_R and NEWLON_R columns from the chip DB.
chimkb_new = chimkb_kf[['LANDMARK_S24', 'NEWLAT_R', 'NEWLON_R']].clone()
# Rename columns
chimkb_new = chimkb_new.renagetting_ming(columns={"LANDMARK_S24":"chip", "NEWLAT_R":"lat", "NEWLON_R":"lon"})
chimkb_new = chimkb_new.sip_duplicates()
chimkb_new = chimkb_new.reseting_index()
plt.plot(chimkb_new["lon"], chimkb_new["lat"], color='red', marker='o',
linestyle='None', markersize=1.5, transform=ccrs.PlateCarree())
# Show or save the plot
if save_plot:
plt.savefig('vplot.png', bbox_inches='tight')
else:
plt.show()
plt.close()
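# Illustrative sketch (not part of the original source): the quiver block above colors
# each arrow by its magnitude and scales it to unit length. The same arithmetic with
# made-up deltas (hypothetical values, numpy only):
#   import numpy as np
#   delta_ew = np.array([3.0, 0.0])
#   delta_ns = np.array([4.0, 2.0])
#   mag = np.hypot(delta_ew, delta_ns)   # [5.0, 2.0] -> drives the colormapping
#   ew_unit = delta_ew / mag             # [0.6, 0.0]
#   ns_unit = delta_ns / mag             # [0.8, 1.0] -> every arrow now has length 1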
class MVisualizer(Visualizer):
def __init__(self, image_file, band2extract, scene2extract,
vgetting_max, overlay_l1b, chip_file, save_plot, measurement_files, dataspec):
"""
Parameters
----------
image_file : str
The L1B image file.
band2extract : int
The band to extract.
scene2extract : str
The scene time, or 'HHMM HHMM' time range, to extract (optional).
vgetting_max : int
The getting_max value to stretch to. Larger -> less contrast.
overlay_l1b : {True, False}
Whether to overlay the L1B image. By default shows the generic
land/ocean mapping.
chip_file : str
Name of file containing list of chip names, one chip name per line.
save_plot : {True, False}
Whether to save the plot or just show it.
measurement_files : str
File containing list (one per line) of measurement file names.
dataspec : str
The range of dates in which to search for measurement files.
"""
measurement_file = None
super().__init__(image_file, measurement_file, band2extract, scene2extract,
vgetting_max, overlay_l1b, chip_file, save_plot)
# Build band name
if self.band2extract/10 < 1:
self.band = '0' + str(self.band2extract)
else:
self.band = str(self.band2extract)
if measurement_files != None:
self.measurement_files = self.extract_from_file(measurement_files)
# Sort so that files are in order of datetime (unless files are in different locations...)
self.measurement_files = sorted(self.measurement_files)
print("Measurement files: ", self.measurement_files)
# Use the first file to detergetting_mine the satellite and metric and start date
# Use the final_item file to detergetting_mine the end date
self.sat = self.measurement_files[0].split('/')[-1].split('_')[0]
self.metric = self.measurement_files[0].split('/')[-1].split('_')[1]
self.start_range = datetime.datetime.strptime(self.measurement_files[0]\
.split('/')[-1].split('_')[4].split('.')[0] \
+ '-' + self.measurement_files[0].split('/')[-1].split('_')[3], '%j-%Y')
self.end_range = datetime.datetime.strptime(self.measurement_files[-1]\
.split('/')[-1].split('_')[4].split('.')[0] \
+ '-' + self.measurement_files[-1].split('/')[-1].split('_')[3], '%j-%Y')
if 'CONUS' in self.measurement_files[0]:
self.coverage = 'CONUS'
else:
self.coverage = 'FULL'
print("Measurement file satellite: ", self.sat)
print("Measurement file metric: ", self.metric)
print("Measurement file band:", self.band)
print("Measurement file coverage: ", self.coverage)
print("Measurement file start date: ", self.start_range)
print("Measurement file end date: ", self.end_range)
elif dataspec != None:
print("dataspec: ", dataspec)
try:
self.sat = dataspec.split(' ')[0].upper()
self.metric = dataspec.split(' ')[1].upper()
self.coverage = dataspec.split(' ')[2].upper()
self.start_range = datetime.datetime.strptime(dataspec.split(' ')[3], '%m%d%Y')
self.end_range = datetime.datetime.strptime(dataspec.split(' ')[4], '%m%d%Y')
self.measurement_files = self.searchforfiles()
print("Measurement files: ", self.measurement_files)
if self.measurement_files == []:
print("Error! No measurement files found.")
else:
print("Measurement file satellite: ", self.sat)
print("Measurement file metric: ", self.metric)
print("Measurement file band:", self.band)
print("Measurement file coverage: ", self.coverage)
print("Measurement file start date: ", self.start_range)
print("Measurement file end date: ", self.end_range)
except:
print("Error! Data specification needs to be in formating 'AAA BBB CCC MMDDYYYY MMDDYYYY', where AAA can be G16 or G17; BBB can be FFR, NAV, BBR or WIFR; and CCC can be FUL or CON")
else:
print("Error! Please provide either file listing measurement files (--m) or a data specification (satellite, metric, coverage, and date range) to search for measurement files (--d).")
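# Illustrative sketch (not part of the original source): a hypothetical way to drive the
# dataspec (--d) code path documented above. The file name and dates are made up.
#   viz = MVisualizer(image_file='OR_ABI-L1b.nc', band2extract=2, scene2extract=None,
#                     vgetting_max=0.4, overlay_l1b=False, chip_file='', save_plot=False,
#                     measurement_files=None, dataspec='G16 NAV CON 01012021 01312021')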
def extract_geoloc(self, measurement_file):
""" Extract the geolocation informatingion for the band of interest from the
appropriate Chip DB file.
"""
# Extract the input date and time
if self.scene2extract != None:
print("User-requested starting scene: ", self.scene2extract.split(' ')[0])
print("User-requested ending scene: ", self.scene2extract.split(' ')[-1])
start_time = datetime.datetime.strptime(self.scene2extract.split(' ')[0], '%H%M')
end_time = datetime.datetime.strptime(self.scene2extract.split(' ')[-1], '%H%M')
# Check if file needs to be unzipped
if 'gz' in measurement_file:
with gzip.open(measurement_file, 'rt') as f:
measure_kf = mk.read_csv(f)
else:
measure_kf = mk.read_csv(measurement_file)
# Create a datetime column.
activity_date = np.array(measure_kf['ACTIVITY_DATE1'])
activity_time = np.array(measure_kf['ACTIVITY_TIME_1'])
measure_kf['DATETIME'] = [datetime.datetime.strptime(activity_time[j], '%H:%M:%S') for j in range(length(activity_time))]
# Round the user-inputted time to nearest scene (date/time) in measurement file
if self.scene2extract != None and start_time != end_time:
t_kf = mk.KnowledgeFrame(measure_kf, columns = ['ACTIVITY_TIME_1'])
t_kf['DATETIME'] = [datetime.datetime.strptime(i, '%H:%M:%S') for i in t_kf['ACTIVITY_TIME_1']]
time_sorted = t_kf.sort_the_values(by='DATETIME')
# Find the start and ending date and then form a datetime in order to getting the range the user wants
kf_sort_start = t_kf.iloc[(t_kf['DATETIME']-start_time).abs().argsort()[:1]]
kf_sort_end = t_kf.iloc[(t_kf['DATETIME']-end_time).abs().argsort()[:1]]
self.scene = kf_sort_start['ACTIVITY_TIME_1'].iloc[0] + ' to ' + kf_sort_end['ACTIVITY_TIME_1'].iloc[0]
# Extract the band of interest and scene (date/time) of interest.
print("--WARNING using closest found scenes as the bounds {}.".formating(self.scene))
measure_kf = measure_kf[measure_kf['BAND_NUM'] == self.band2extract]\
[(measure_kf['DATETIME'] >= kf_sort_start['DATETIME'].iloc[0]) & (measure_kf['DATETIME'] <= kf_sort_end['DATETIME'].iloc[0])]
elif self.scene2extract != None and start_time == end_time:
t = mk.KnowledgeFrame(measure_kf, columns = ['DATETIME'])
t_kf =
|
mk.KnowledgeFrame.sip_duplicates(t)
|
pandas.DataFrame.drop_duplicates
|
"""
Base implementation for high level workflow.
The goal of this design is to make it easy to share
code among different variants of the Inferelator workflow.
"""
from inferelator_ng import utils
from inferelator_ng.utils import Validator as check
from inferelator_ng import default
from inferelator_ng.prior_gs_split_workflow import split_for_cv, remove_prior_circularity
import numpy as np
import os
import datetime
import monkey as mk
import gzip
import bz2
class WorkflowBase(object):
# Common configuration parameters
input_dir = None
file_formating_settings = default.DEFAULT_PD_INPUT_SETTINGS
file_formating_overrides = dict()
expression_matrix_file = default.DEFAULT_EXPRESSION_FILE
tf_names_file = default.DEFAULT_TFNAMES_FILE
meta_data_file = default.DEFAULT_METADATA_FILE
priors_file = default.DEFAULT_PRIORS_FILE
gold_standard_file = default.DEFAULT_GOLDSTANDARD_FILE
output_dir = None
random_seed = default.DEFAULT_RANDOM_SEED
num_bootstraps = default.DEFAULT_NUM_BOOTSTRAPS
# Flags to control splitting priors into a prior/gold-standard set
split_priors_for_gold_standard = False
split_gold_standard_for_crossvalidation = False
cv_split_ratio = default.DEFAULT_GS_SPLIT_RATIO
cv_split_axis = default.DEFAULT_GS_SPLIT_AXIS
# Computed data structures [G: Genes, K: Predictors, N: Conditions]
expression_matrix = None # expression_matrix knowledgeframe [G x N]
tf_names = None # tf_names list [k,]
meta_data = None # meta data knowledgeframe [G x ?]
priors_data = None # priors data knowledgeframe [G x K]
gold_standard = None # gold standard knowledgeframe [G x K]
# Hold the KVS informatingion
rank = 0
kvs = None
tasks = None
def __init__(self, initialize_mp=True):
# Connect to KVS and getting environment variables
if initialize_mp:
self.initialize_multiprocessing()
self.getting_environmentals()
def initialize_multiprocessing(self):
"""
Override this if you want to use something besides KVS for multiprocessing.
"""
from inferelator_ng.kvs_controller import KVSController
self.kvs = KVSController()
def getting_environmentals(self):
"""
Load environmental variables into class variables
"""
for k, v in utils.slurm_envs().items():
setattr(self, k, v)
def startup(self):
"""
Startup by preprocessing total_all data into a ready formating for regression.
"""
self.startup_run()
self.startup_finish()
def startup_run(self):
"""
Execute whatever data preprocessing necessary before regression. Startup_run is mostly for reading in data
"""
raise NotImplementedError # implement in subclass
def startup_finish(self):
"""
Execute whatever data preprocessing necessary before regression. Startup_finish is mostly for preprocessing data
prior to regression
"""
raise NotImplementedError # implement in subclass
def run(self):
"""
Execute workflow, after total_all configuration.
"""
raise NotImplementedError # implement in subclass
def getting_data(self):
"""
Read data files in to data structures.
"""
self.read_expression()
self.read_tfs()
self.read_metadata()
self.set_gold_standard_and_priors()
def read_expression(self, file=None):
"""
Read expression matrix file into expression_matrix
"""
if file is None:
file = self.expression_matrix_file
self.expression_matrix = self.input_knowledgeframe(file)
def read_tfs(self, file=None):
"""
Read tf names file into tf_names
"""
if file is None:
file = self.tf_names_file
tfs = self.input_knowledgeframe(file, index_col=None)
assert tfs.shape[1] == 1
self.tf_names = tfs.values.flatten().convert_list()
def read_metadata(self, file=None):
"""
Read metadata file into meta_data or make fake metadata
"""
if file is None:
file = self.meta_data_file
try:
self.meta_data = self.input_knowledgeframe(file, index_col=None)
except IOError:
self.meta_data = self.create_default_meta_data(self.expression_matrix)
def set_gold_standard_and_priors(self):
"""
Read priors file into priors_data and gold standard file into gold_standard
"""
self.priors_data = self.input_knowledgeframe(self.priors_file)
if self.split_priors_for_gold_standard:
self.split_priors_into_gold_standard()
else:
self.gold_standard = self.input_knowledgeframe(self.gold_standard_file)
if self.split_gold_standard_for_crossvalidation:
self.cross_validate_gold_standard()
try:
check.index_values_distinctive(self.priors_data.index)
except ValueError as v_err:
utils.Debug.vprint("Duplicate gene(s) in prior index", level=0)
utils.Debug.vprint(str(v_err), level=0)
try:
check.index_values_distinctive(self.priors_data.columns)
except ValueError as v_err:
utils.Debug.vprint("Duplicate tf(s) in prior columns", level=0)
utils.Debug.vprint(str(v_err), level=0)
def split_priors_into_gold_standard(self):
"""
Break priors_data in half and give half to the gold standard
"""
if self.gold_standard is not None:
utils.Debug.vprint("Existing gold standard is being replacingd by a split from the prior", level=0)
self.priors_data, self.gold_standard = split_for_cv(self.priors_data,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
utils.Debug.vprint("Prior split into a prior {pr} and a gold standard {gs}".formating(pr=self.priors_data.shape,
gs=self.gold_standard.shape),
level=0)
def cross_validate_gold_standard(self):
"""
Sample the gold standard for crossvalidation, and then remove the new gold standard from the priors
"""
utils.Debug.vprint("Resampling prior {pr} and gold standard {gs}".formating(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
_, self.gold_standard = split_for_cv(self.gold_standard,
self.cv_split_ratio,
split_axis=self.cv_split_axis,
seed=self.random_seed)
self.priors_data, self.gold_standard = remove_prior_circularity(self.priors_data, self.gold_standard,
split_axis=self.cv_split_axis)
utils.Debug.vprint("Selected prior {pr} and gold standard {gs}".formating(pr=self.priors_data.shape,
gs=self.gold_standard.shape), level=0)
def input_path(self, filengthame, mode='r'):
"""
Join filengthame to input_dir
"""
if filengthame.endswith(".gz"):
opener = gzip.open
elif filengthame.endswith(".bz2"):
opener = bz2.BZ2File
else:
opener = open
return opener(os.path.abspath(os.path.join(self.input_dir, filengthame)), mode=mode)
def input_knowledgeframe(self, filengthame, index_col=0):
"""
Read a file in as a monkey knowledgeframe
"""
file_settings = self.file_formating_settings.clone()
if filengthame in self.file_formating_overrides:
file_settings.umkate(self.file_formating_overrides[filengthame])
with self.input_path(filengthame) as fh:
return mk.read_table(fh, index_col=index_col, **file_settings)
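# Illustrative sketch (not part of the original source): read settings for a single file
# can be overridden without touching the shared defaults, e.g. (hypothetical file name)
#   self.file_formating_overrides['priors.csv'] = {'sep': ','}
# so the next input_knowledgeframe('priors.csv') read passes sep=',' through to mk.read_table.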
def adding_to_path(self, var_name, to_adding):
"""
Add a string to an existing path variable in class
"""
path = gettingattr(self, var_name, None)
if path is None:
raise ValueError("Cannot adding to None")
setattr(self, var_name, os.path.join(path, to_adding))
@staticmethod
def create_default_meta_data(expression_matrix):
"""
Create a meta_data knowledgeframe from basic defaults
"""
metadata_rows = expression_matrix.columns.convert_list()
metadata_defaults = {"isTs": "FALSE", "is1stLast": "e", "prevCol": "NA", "del.t": "NA", "condName": None}
data = {}
for key in metadata_defaults.keys():
data[key] = mk.Collections(data=[metadata_defaults[key] if metadata_defaults[key] else i for i in metadata_rows])
return mk.KnowledgeFrame(data)
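# Illustrative sketch (not part of the original source): for an expression matrix whose
# columns are ['cond1', 'cond2'] (hypothetical names), the defaults above produce
#    isTs  is1stLast  prevCol  del.t  condName
#   FALSE      e        NA      NA     cond1
#   FALSE      e        NA      NA     cond2
# i.e. condName falls back to the column name because its default is None.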
def filter_expression_and_priors(self):
"""
Guarantee that each row of the prior is in the expression and vice versa.
Also filter the priors to only include columns (transcription factors) that are in the tf_names list
"""
expressed_targettings = self.expression_matrix.index
expressed_or_prior = expressed_targettings.union(self.priors_data.columns)
keeper_regulators = expressed_or_prior.interst(self.tf_names)
if length(keeper_regulators) == 0 or length(expressed_targettings) == 0:
raise ValueError("Filtering will result in a priors with at least one axis of 0 lengthgth")
self.priors_data = self.priors_data.loc[expressed_targettings, keeper_regulators]
self.priors_data =
|
mk.KnowledgeFrame.fillnone(self.priors_data, 0)
|
pandas.DataFrame.fillna
|
""" Panel4D: a 4-d dict like collection of panels """
import warnings
from monkey.core.generic import NDFrame
from monkey.core.panelnd import create_nd_panel_factory
from monkey.core.panel import Panel
from monkey.util._validators import validate_axis_style_args
Panel4D = create_nd_panel_factory(klass_name='Panel4D',
orders=['labels', 'items', 'major_axis',
'getting_minor_axis'],
slices={'labels': 'labels',
'items': 'items',
'major_axis': 'major_axis',
'getting_minor_axis': 'getting_minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis',
'getting_minor': 'getting_minor_axis'}, stat_axis=2,
ns=dict(__doc__="""
Panel4D is a 4-Dimensional named container very much like a Panel, but
having 4 named dimensions. It is intended as a test bed for more
N-Dimensional named containers.
.. deprecated:: 0.19.0
The recommended way to represent these types of n-dimensional data
are with the `xarray package <http://xarray.pydata.org/en/stable/>`__.
Monkey provides a `.to_xarray()` method to automate this conversion.
Parameters
----------
data : ndarray (labels x items x major x getting_minor), or dict of Panels
labels : Index or array-like : axis=0
items : Index or array-like : axis=1
major_axis : Index or array-like: axis=2
getting_minor_axis : Index or array-like: axis=3
dtype : dtype, default None
Data type to force, otherwise infer
clone : boolean, default False
Copy data from inputs. Only affects KnowledgeFrame / 2d ndarray input
"""))
def panel4d_init(self, data=None, labels=None, items=None, major_axis=None,
getting_minor_axis=None, clone=False, dtype=None):
# deprecation GH13564
warnings.warn("\nPanel4D is deprecated and will be removed in a "
"future version.\nThe recommended way to represent "
"these types of n-dimensional data are with\n"
"the `xarray package "
"<http://xarray.pydata.org/en/stable/>`__.\n"
"Monkey provides a `.to_xarray()` method to help "
"automate this conversion.\n",
FutureWarning, stacklevel=2)
self._init_data(data=data, labels=labels, items=items,
major_axis=major_axis, getting_minor_axis=getting_minor_axis, clone=clone,
dtype=dtype)
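# Illustrative sketch (not part of the original source, deprecated API): a Panel4D could
# be built directly from a 4-d ndarray, with axes ordered (labels, items, major, getting_minor).
#   import numpy as np
#   p4d = Panel4D(np.random.randn(2, 3, 4, 5))
#   p4d.shape  # -> (2, 3, 4, 5)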
def panel4d_reindexing(self, labs=None, labels=None, items=None, major_axis=None,
getting_minor_axis=None, axis=None, **kwargs):
# Hack for reindexing_axis deprecation
# Ha, we used labels for two different things
# I think this will work still.
if labs is None:
args = ()
else:
args = (labs,)
kwargs_ = dict(labels=labels,
items=items,
major_axis=major_axis,
getting_minor_axis=getting_minor_axis,
axis=axis)
kwargs_ = {k: v for k, v in kwargs_.items() if v is not None}
# major = kwargs.pop("major", None)
# getting_minor = kwargs.pop('getting_minor', None)
# if major is not None:
# if kwargs.getting("major_axis"):
# raise TypeError("Cannot specify both 'major' and 'major_axis'")
# kwargs_['major_axis'] = major
# if getting_minor is not None:
# if kwargs.getting("getting_minor_axis"):
# raise TypeError("Cannot specify both 'getting_minor' and 'getting_minor_axis'")
# kwargs_['getting_minor_axis'] = getting_minor
if axis is not None:
kwargs_['axis'] = axis
axes = validate_axis_style_args(self, args, kwargs_, 'labs', 'reindexing')
kwargs.umkate(axes)
return
|
NDFrame.reindexing(self, **kwargs)
|
pandas.core.generic.NDFrame.reindex
|
"""Classes to represent empirical distributions
https://en.wikipedia.org/wiki/Empirical_distribution_function
Pmf: Represents a Probability Mass Function (PMF).
Ckf: Represents a Cumulative Distribution Function (CDF).
Surv: Represents a Survival Function
Hazard: Represents a Hazard Function
Distribution: Parent class of total_all distribution representations
Copyright 2019 <NAME>
BSD 3-clause license: https://opensource.org/licenses/BSD-3-Clause
"""
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
from scipy.interpolate import interp1d
def underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
d: dictionary
options: keyword args to add to d
:return: modified d
"""
for key, val in options.items():
d.setdefault(key, val)
return d
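# Illustrative sketch (not part of the original source): keys already present win over
# the defaults supplied through **options.
#   opts = dict(color='C0')
#   underride(opts, color='gray', alpha=0.5)
#   # -> {'color': 'C0', 'alpha': 0.5}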
class Distribution(mk.Collections):
def __init__(self, *args, **kwargs):
"""Initialize a Pmf.
Note: this cleans up a weird Collections behavior, which is
that Collections() and Collections([]) yield different results.
See: https://github.com/monkey-dev/monkey/issues/16737
"""
underride(kwargs, name="")
if args or ("index" in kwargs):
super().__init__(*args, **kwargs)
else:
underride(kwargs, dtype=np.float64)
super().__init__([], **kwargs)
@property
def qs(self):
"""Get the quantities.
:return: NumPy array
"""
return self.index.values
@property
def ps(self):
"""Get the probabilities.
:return: NumPy array
"""
return self.values
def header_num(self, n=3):
"""Override Collections.header_num to return a Distribution.
n: number of rows
returns: Distribution
"""
s = super().header_num(n)
return self.__class__(s)
def final_item_tail(self, n=3):
"""Override Collections.final_item_tail to return a Distribution.
n: number of rows
returns: Distribution
"""
s = super().final_item_tail(n)
return self.__class__(s)
def transform(self, *args, **kwargs):
"""Override to transform the quantities, not the probabilities."""
qs = self.index.to_collections().transform(*args, **kwargs)
return self.__class__(self.ps, qs, clone=True)
def _repr_html_(self):
"""Returns an HTML representation of the collections.
Mostly used for Jupyter notebooks.
"""
kf = mk.KnowledgeFrame(dict(probs=self))
return kf._repr_html_()
def __ctotal_all__(self, qs):
"""Look up quantities.
qs: quantity or sequence of quantities
returns: value or array of values
"""
string_types = (str, bytes, bytearray)
# if qs is a sequence type, use reindexing;
# otherwise use getting
if hasattr(qs, "__iter__") and not incontainstance(qs, string_types):
s = self.reindexing(qs, fill_value=0)
return s.to_numpy()
else:
return self.getting(qs, default=0)
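# Illustrative sketch (not part of the original source), with hypothetical quantities:
#   pmf = Pmf([0.25, 0.75], index=[1, 2])
#   pmf(2)          # -> 0.75              (scalar: looked up with .getting)
#   pmf([1, 2, 3])  # -> [0.25, 0.75, 0.]  (sequence: reindexing with fill_value=0)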
def average(self):
"""Expected value.
:return: float
"""
return self.make_pmf().average()
def mode(self, **kwargs):
"""Most common value.
If multiple quantities have the getting_maximum probability,
the first getting_maximal quantity is returned.
:return: float
"""
return self.make_pmf().mode(**kwargs)
def var(self):
"""Variance.
:return: float
"""
return self.make_pmf().var()
def standard(self):
"""Standard deviation.
:return: float
"""
return self.make_pmf().standard()
def median(self):
"""Median (50th percentile).
There are several definitions of median;
the one implemented here is just the 50th percentile.
:return: float
"""
return self.make_ckf().median()
def quantile(self, ps, **kwargs):
"""Quantiles.
Computes the inverse CDF of ps, that is,
the values that correspond to the given probabilities.
:return: float
"""
return self.make_ckf().quantile(ps, **kwargs)
def credible_interval(self, p):
"""Credible interval containing the given probability.
p: float 0-1
:return: array of two quantities
"""
final_item_tail = (1 - p) / 2
ps = [final_item_tail, 1 - final_item_tail]
return self.quantile(ps)
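# Illustrative sketch (not part of the original source): for p=0.9 the tail mass is
# (1 - 0.9) / 2 = 0.05, so
#   dist.credible_interval(0.9)  # == dist.quantile([0.05, 0.95])
# i.e. the central 90% of the distribution.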
def choice(self, *args, **kwargs):
"""Makes a random sample_by_num.
Uses the probabilities as weights unless `p` is provided.
args: same as np.random.choice
options: same as np.random.choice
:return: NumPy array
"""
pmf = self.make_pmf()
return pmf.choice(*args, **kwargs)
def sample_by_num(self, *args, **kwargs):
"""Samples with replacingment using probabilities as weights.
Uses the inverse CDF.
n: number of values
:return: NumPy array
"""
ckf = self.make_ckf()
return ckf.sample_by_num(*args, **kwargs)
def add_dist(self, x):
"""Distribution of the total_sum of values drawn from self and x.
x: Distribution, scalar, or sequence
:return: new Distribution, same subtype as self
"""
pmf = self.make_pmf()
res = pmf.add_dist(x)
return self.make_same(res)
def sub_dist(self, x):
"""Distribution of the diff of values drawn from self and x.
x: Distribution, scalar, or sequence
:return: new Distribution, same subtype as self
"""
pmf = self.make_pmf()
res = pmf.sub_dist(x)
return self.make_same(res)
def mul_dist(self, x):
"""Distribution of the product of values drawn from self and x.
x: Distribution, scalar, or sequence
:return: new Distribution, same subtype as self
"""
pmf = self.make_pmf()
res = pmf.mul_dist(x)
return self.make_same(res)
def division_dist(self, x):
"""Distribution of the ratio of values drawn from self and x.
x: Distribution, scalar, or sequence
:return: new Distribution, same subtype as self
"""
pmf = self.make_pmf()
res = pmf.division_dist(x)
return self.make_same(res)
def pmf_outer(dist1, dist2, ufunc):
"""Computes the outer product of two PMFs.
dist1: Distribution object
dist2: Distribution object
ufunc: function to employ to the qs
:return: NumPy array
"""
# TODO: convert other types to Pmf
pmf1 = dist1
pmf2 = dist2
qs = ufunc.outer(pmf1.qs, pmf2.qs)
ps = np.multiply.outer(pmf1.ps, pmf2.ps)
return qs * ps
def gt_dist(self, x):
"""Probability that a value from self is greater than a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.gt_dist(x)
def lt_dist(self, x):
"""Probability that a value from self is less than a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.lt_dist(x)
def ge_dist(self, x):
"""Probability that a value from self is >= than a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.ge_dist(x)
def le_dist(self, x):
"""Probability that a value from self is <= than a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.le_dist(x)
def eq_dist(self, x):
"""Probability that a value from self equals a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.eq_dist(x)
def ne_dist(self, x):
"""Probability that a value from self is <= than a value from x.
x: Distribution, scalar, or sequence
:return: float probability
"""
pmf = self.make_pmf()
return pmf.ne_dist(x)
def getting_max_dist(self, n):
"""Distribution of the getting_maximum of `n` values from this distribution.
n: integer
:return: Distribution, same type as self
"""
ckf = self.make_ckf().getting_max_dist(n)
return self.make_same(ckf)
def getting_min_dist(self, n):
"""Distribution of the getting_minimum of `n` values from this distribution.
n: integer
:return: Distribution, same type as self
"""
ckf = self.make_ckf().getting_min_dist(n)
return self.make_same(ckf)
prob_gt = gt_dist
prob_lt = lt_dist
prob_ge = ge_dist
prob_le = le_dist
prob_eq = eq_dist
prob_ne = ne_dist
class Pmf(Distribution):
"""Represents a probability Mass Function (PMF)."""
def clone(self, deep=True):
"""Make a clone.
:return: new Pmf
"""
return Pmf(self, clone=deep)
def make_pmf(self, **kwargs):
"""Make a Pmf from the Pmf.
:return: Pmf
"""
return self
# Pmf overrides the arithmetic operations in order
# to provide fill_value=0 and return a Pmf.
def add(self, x, **kwargs):
"""Override add to default fill_value to 0.
x: Distribution or sequence
returns: Pmf
"""
underride(kwargs, fill_value=0)
s = mk.Collections.add(self, x, **kwargs)
return Pmf(s)
__add__ = add
__radd__ = add
def sub(self, x, **kwargs):
"""Override the - operator to default fill_value to 0.
x: Distribution or sequence
returns: Pmf
"""
underride(kwargs, fill_value=0)
s = mk.Collections.subtract(self, x, **kwargs)
return Pmf(s)
__sub__ = sub
__rsub__ = sub
def mul(self, x, **kwargs):
"""Override the * operator to default fill_value to 0.
x: Distribution or sequence
returns: Pmf
"""
underride(kwargs, fill_value=0)
s = mk.Collections.multiply(self, x, **kwargs)
return Pmf(s)
__mul__ = mul
__rmul__ = mul
def division(self, x, **kwargs):
"""Override the / operator to default fill_value to 0.
x: Distribution or sequence
returns: Pmf
"""
underride(kwargs, fill_value=0)
s =
|
mk.Collections.divisionide(self, x, **kwargs)
|
pandas.Series.divide
|
# Functions related to missing values
# Handling missing values in a knowledgeframe
# In monkey, missing values are NaN and None
# NaN : treated as a character value in databases
# None : in deep learning, a row
# import monkey as mk
# from monkey import KnowledgeFrame as kf
# kf_left = kf({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# kf_right = kf({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# kf_total_all=mk.unioner(kf_left,kf_right,how='outer',on='key')
# print(kf_total_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# #null check
# print(mk.ifnull(kf_total_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(kf_total_all.ifnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(mk.notnull(kf_total_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(kf_total_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # Insert a missing value at specific positions : None ==> a reserved word that means "missing value"
# kf_total_all.ix[[0,1],['a','b']]=None
# print(kf_total_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # column a (string)=None, column b (float) = NaN
#
#
# print(kf_total_all[['a','b']].ifnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# #Check the number of missing values in each column
# print(kf_total_all.ifnull().total_sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # Number of missing values in a single column
# print(kf_total_all['a'].ifnull().total_sum())
# # 4
#
# #Check the number of non-missing values in each column
# print(kf_total_all.notnull().total_sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(kf_total_all)
# # Count of missing values in each row
# print(kf_total_all.ifnull().total_sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# kf_total_all['NaN_cnt']=kf_total_all.ifnull().total_sum(1)
# kf_total_all['NotNaN_cnt']=kf_total_all.notnull().total_sum(1)
# print(kf_total_all)
#
# #Missing-value check : ifnull(), notnull()
# #Column-wise missing-value counts : kf.ifnull().total_sum()
# #Row-wise missing-value counts : kf.ifnull().total_sum(1)
#
# import numpy as np
#
# kf=kf(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(kf)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# kf.ix[['b','e'],['c1']]=None
# kf.ix[['b','c'],['c2']]=None
# print(kf)
#
# print(kf.total_sum()) # total_sum() : computed treating NaN as 0
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(kf['c1'].total_sum()) # total_sum of a single column
# # 10.0
#
# print(kf['c1'].cumtotal_sum()) # cumtotal_sum() : cumulative total_sum
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(kf.average()) # column-wise average : (0+4+6)/3, NaN excluded
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(kf.average(1)) # row-wise average
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(kf.standard()) # column-wise standard deviation
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# #Column-to-column operations on a knowledgeframe : the result is NaN if even one operand is NaN
# kf['c3'] = kf['c1']+kf['c2']
# print(kf)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import monkey as mk
import numpy as np
from monkey import KnowledgeFrame as kf
from monkey import KnowledgeFrame
kf=KnowledgeFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf2=KnowledgeFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf['c3'] = kf['c1']+kf['c2']
print(kf)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf2)
# c1 c2
# a 1 NaN
# b 1 NaN
# c 1 NaN
# d 1 NaN
# e 1 NaN
print(kf+kf2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
kf = KnowledgeFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(kf)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
kf.ix[0,0]=None
kf.ix[1,['c1','c3']]=np.nan
kf.ix[2,'c2']=np.nan
kf.ix[3,'c2']=np.nan
kf.ix[4,'c3']=np.nan
print(kf)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
kf_0=kf.fillnone(0)
print(kf_0)
# c1 c2 c3
# 0 0.000000 -0.020379 -0.234493
# 1 0.000000 2.103582 0.000000
# 2 -1.271259 0.000000 -2.098903
# 3 -0.030064 0.000000 -0.984602
# 4 0.083863 -0.811207 0.000000
kf_missing =
|
kf.fillnone('missing')
|
pandas.DataFrame.fillna
|
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from monkey.core.index import Index, Int64Index
from monkey.tcollections.frequencies import infer_freq, to_offset
from monkey.tcollections.offsets import DateOffset, generate_range, Tick
from monkey.tcollections.tools import parse_time_string, normalize_date
from monkey.util.decorators import cache_readonly
import monkey.core.common as com
import monkey.tcollections.offsets as offsets
import monkey.tcollections.tools as tools
from monkey.lib import Timestamp
import monkey.lib as lib
import monkey._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if incontainstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if incontainstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if incontainstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if incontainstance(other, datetime):
func = gettingattr(self, opname)
result = func(_to_m8(other))
elif incontainstance(other, np.ndarray):
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if incontainstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if incontainstance(other, timedelta):
func = gettingattr(self, opname)
return func(np.timedelta64(other))
else:
func = gettingattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeCollectionsError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented interntotal_ally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency informatingion.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
clone : bool
Make a clone of input ndarray
freq : string or monkey offset object, optional
One of monkey date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforgetting_ming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_distinctive = _join_i8_wrapper(
_algos.left_join_indexer_distinctive_int64, with_indexers=False)
_grouper = lib.grouper_arrays # _wrap_i8_function(lib.grouper_int64)
_arrmapping = _wrap_dt_function(_algos.arrmapping_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
clone=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not incontainstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if incontainstance(freq, basestring):
freq = to_offset(freq)
else:
if incontainstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not incontainstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be ctotal_alled with a '
'collection of some kind, %s was passed'
% repr(data))
if incontainstance(data, datetime):
data = [data]
# other iterable of some kind
if not incontainstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.convert_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if incontainstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', clone=clone)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', clone=clone)
else:
subarr = tools.convert_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_getting_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and length(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not incontainstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not incontainstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None:
# Convert local to UTC
ints = index.view('i8')
lib.tz_localize_check(ints, tz)
index = lib.tz_convert(ints, tz, _utc())
index = index.view('M8[ns]')
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_getting_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = np.array(_to_m8_array(list(xdr)),
dtype='M8[ns]', clone=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(incontainstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.getting_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(incontainstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.getting_loc(start)
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.getting_loc(start)
endLoc = cachedRange.getting_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_convert_pydatetime(self.asi8)
def __repr__(self):
from monkey.core.formating import _formating_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
total_summary = str(self.__class__)
if length(self) > 0:
first = _formating_datetime64(values[0], tz=self.tz)
final_item = _formating_datetime64(values[-1], tz=self.tz)
total_summary += '\n[%s, ..., %s]' % (first, final_item)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
total_summary += tagline % (length(self), freq, self.tz)
return total_summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if length(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
elif length(state) == 3:
# legacy formating: daterange
offset = state[1]
if length(state) > 2:
tzinfo = state[2]
else: # pragma: no cover
tzinfo = None
self.offset = offset
self.tzinfo = tzinfo
# extract the raw datetime data, turn into datetime64
index_state = state[0]
raw_data = index_state[0][4]
raw_data = np.array(raw_data, dtype='M8[ns]')
new_state = raw_data.__reduce__()
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if incontainstance(other, Index):
return self.union(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shifting(other)
else:
return Index(self.view(np.ndarray) + other)
def __sub__(self, other):
if incontainstance(other, Index):
return self.diff(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shifting(-other)
else:
return Index(self.view(np.ndarray) - other)
def _add_delta(self, delta):
if incontainstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view('M8[ns]')
else:
new_values = self.totype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def total_summary(self, name=None):
if length(self) > 0:
index_total_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_total_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, length(self), index_total_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def totype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
return Index.totype(self, dtype)
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def asstruct(self):
if self._sarr_cache is None:
self._sarr_cache = lib.build_field_sarray(self.asi8)
return self._sarr_cache
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
boxed_values = _dt_box_array(self.asi8, self.offset, self.tz)
return Index(boxed_values, dtype=object)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from monkey.tcollections.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = self.freqstr
return PeriodIndex(self.values, freq=freq)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted clone of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on whatever optimizing
freq = to_offset(freq)
snapped = np.empty(length(self), dtype='M8[ns]')
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
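# Illustrative sketch (not part of the original source): with freq='T' (one minute),
# a stamp at 10:00:40 has t0 = 10:00 (rollback) and t1 = 10:01 (rollforward);
# abs(s - t0) = 40s is not < abs(t1 - s) = 20s, so the stamp snaps forward to 10:01.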
def shifting(self, n, freq=None):
"""
Specialized shifting which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shifting by
freq : DateOffset or timedelta-like, optional
Returns
-------
shiftinged : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if incontainstance(freq, basestring):
freq = to_offset(freq)
return
|
Index.shifting(self, n, freq)
|
pandas.core.index.Index.shift
|
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from monkey.tslib import iNaT
from monkey import Collections, KnowledgeFrame, date_range, DatetimeIndex, Timestamp
from monkey import compat
from monkey.compat import range, long, lrange, lmapping, u
from monkey.core.common import notnull, ifnull
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mututotal_ally exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __gettingitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.total_all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.total_sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_collections = Collections(np.random.randn(5))
obj_collections = Collections(np.random.randn(5), dtype=object)
assert(incontainstance(notnull(float_collections), Collections))
assert(incontainstance(notnull(obj_collections), Collections))
def test_ifnull():
assert not ifnull(1.)
assert ifnull(None)
assert ifnull(np.NaN)
assert not ifnull(np.inf)
assert not ifnull(-np.inf)
float_collections = Collections(np.random.randn(5))
obj_collections = Collections(np.random.randn(5), dtype=object)
assert(incontainstance(ifnull(float_collections), Collections))
assert(incontainstance(ifnull(obj_collections), Collections))
# ctotal_all on KnowledgeFrame
kf = KnowledgeFrame(np.random.randn(10, 5))
kf['foo'] = 'bar'
result = ifnull(kf)
expected = result.employ(ifnull)
tm.assert_frame_equal(result, expected)
def test_ifnull_tuples():
result = ifnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = ifnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = ifnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = ifnull(('foo', 'bar'))
assert(not result.whatever())
result = ifnull((u('foo'), u('bar')))
assert(not result.whatever())
def test_ifnull_lists():
result = ifnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = ifnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = ifnull(['foo', 'bar'])
assert(not result.whatever())
result = ifnull([u('foo'), u('bar')])
assert(not result.whatever())
def test_ifnull_datetime():
assert (not ifnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).total_all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = ifnull(idx)
assert(mask[0])
assert(not mask[1:].whatever())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(length(idx) == 0)
def test_nan_to_nat_conversions():
kf = KnowledgeFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
kf.iloc[3:6,:] = np.nan
result = kf.loc[4,'B'].value
assert(result == iNaT)
s = kf['B'].clone()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(ifnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').totype(np.int64))
def test_whatever_none():
assert(com._whatever_none(1, 2, 3, None))
assert(not com._whatever_none(1, 2, 3, 4))
def test_total_all_not_none():
assert(com._total_all_not_none(1, 2, 3, 4))
assert(not com._total_all_not_none(1, 2, 3, None))
assert(not com._total_all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.getting_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_rands():
r = com.rands(10)
assert(length(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = total_sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == length(mask)
# exhaustively test total_all possible mask sequences of lengthgth 8
ncols = 8
for i in range(2 ** ncols):
cols = lmapping(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(length(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = com.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_mapping_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.mapping_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert([4, 5, 6] == inter)
def test_interst():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(
|
com.interst(a, b)
|
pandas.core.common.intersection
|
'''
Class for a bipartite network
'''
from monkey.core.indexes.base import InvalidIndexError
from tqdm.auto import tqdm
import numpy as np
# from numpy_groupies.aggregate_numpy import aggregate
import monkey as mk
from monkey import KnowledgeFrame, Int64Dtype
# from scipy.sparse.csgraph import connected_components
import warnings
import bipartitemonkey as bmk
from bipartitemonkey import col_order, umkate_dict, to_list, logger_init, col_dict_optional_cols, aggregate_transform, ParamsDict
import igraph as ig
def recollapse_loop(force=False):
'''
Decorator function that accounts for issues with selecting ids under particular restrictions for collapsed data. In particular, looking at a restricted set of observations can require recollapsing data, which can then change which observations meet the given restrictions. This function loops until stability is achieved.
Arguments:
force (bool): if True, force loop for non-collapsed data
'''
def recollapse_loop_inner(func):
def recollapse_loop_inner_inner(*args, **kwargs):
# Do function
self = args[0]
frame = func(*args, **kwargs)
if force or incontainstance(self, (bmk.BipartiteLongCollapsed, bmk.BipartiteEventStudyCollapsed)):
kwargs['clone'] = False
if length(frame) != length(self):
# If the frame changes, we have to re-loop until stability
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
while length(frame) != length(frame_prev):
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
return frame
return recollapse_loop_inner_inner
return recollapse_loop_inner
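# Illustrative sketch (not part of the original source): the decorator is roughly a
# fixed-point loop -- keep re-applying the wrapped function until the frame stops shrinking:
#   prev, cur = None, frame
#   while prev is None or length(cur) != length(prev):
#       prev, cur = cur, func(cur, *args[1:], **kwargs)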
# Define default parameter dictionaries
_clean_params_default = ParamsDict({
'connectedness': ('connected', 'set', ['connected', 'leave_one_observation_out', 'leave_one_firm_out', None],
'''
(default='connected') When computing largest connected set of firms: if 'connected', keep observations in the largest connected set of firms; if 'leave_one_observation_out', keep observations in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', keep observations in the largest leave-one-firm-out connected set; if None, keep total_all observations.
'''),
'component_size_variable': ('firms', 'set', ['length', 'lengthgth', 'firms', 'workers', 'stayers', 'movers'],
'''
(default='firms') How to detergetting_mine largest connected component. Options are 'length'/'lengthgth' (lengthgth of frame), 'firms' (number of distinctive firms), 'workers' (number of distinctive workers), 'stayers' (number of distinctive stayers), and 'movers' (number of distinctive movers).
'''),
'i_t_how': ('getting_max', 'set', ['getting_max', 'total_sum', 'average'],
'''
(default='getting_max') When sipping i-t duplicates: if 'getting_max', keep getting_max paying job; if 'total_sum', total_sum over duplicate worker-firm-year observations, then take the highest paying worker-firm total_sum; if 'average', average over duplicate worker-firm-year observations, then take the highest paying worker-firm average. Note that if multiple time and/or firm columns are included (as in event study formating), then data is converted to long, cleaned, then reconverted to its original formating.
'''),
'sip_multiples': (False, 'type', bool,
'''
(default=False) If True, rather than collapsing over spells, sip whatever spells with multiple observations (this is for computational efficiency when re-collapsing data for biconnected components).
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'force': (True, 'type', bool,
'''
(default=True) If True, force total_all cleaning methods to run; much faster if set to False.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid cloneing data when possible.
''')
})
def clean_params(umkate_dict={}):
'''
Dictionary of default clean_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of clean_params
'''
new_dict = _clean_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
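# Hedged example (illustrative only): override a couple of defaults; keys must be
# valid options of _clean_params_default, and describe_total_all() is the ParamsDict
# helper referenced in the docstrings below.
#
# params = clean_params({'connectedness': None, 'clone': False})
# params.describe_total_all()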
_cluster_params_default = ParamsDict({
'measures': (bmk.measures.ckfs(), 'list_of_type', (bmk.measures.ckfs, bmk.measures.moments),
'''
(default=bmk.measures.ckfs()) How to compute measures for clustering. Options can be seen in bipartitemonkey.measures.
'''),
'grouping': (bmk.grouping.kaverages(), 'type', (bmk.grouping.kaverages, bmk.grouping.quantiles),
'''
(default=bmk.grouping.kaverages()) How to group firms based on measures. Options can be seen in bipartitemonkey.grouping.
'''),
'stayers_movers': (None, 'type_none', str,
'''
(default=None) If None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers.
'''),
't': (None, 'type_none', int,
'''
(default=None) If None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data).
'''),
'weighted': (True, 'type', bool,
'''
(default=True) If True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation is given weight 1).
'''),
'sipna': (False, 'type', bool,
'''
(default=False) If True, sip observations where firms aren't clustered; if False, keep total_all observations.
'''),
'clean_params': (None, 'type_none', bmk.ParamsDict,
'''
(default=None) Dictionary of parameters for cleaning. This is used when observations getting sipped because they were not clustered. Default is None, which sets connectedness to be the connectedness measure previously used. Run bmk.clean_params().describe_total_all() for descriptions of total_all valid parameters.
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) For event study formating. If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid clone.
''')
})
def cluster_params(umkate_dict={}):
'''
Dictionary of default cluster_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of cluster_params
'''
new_dict = _cluster_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
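# Hedged example (illustrative only): cluster using quantile grouping instead of
# the kaverages default, restricted to movers; both groupers are referenced in
# _cluster_params_default above.
#
# cparams = cluster_params({'grouping': bmk.grouping.quantiles(), 'stayers_movers': 'movers'})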
class BipartiteBase(KnowledgeFrame):
'''
Base class for BipartiteMonkey, where BipartiteMonkey gives a bipartite network of firms and workers. Contains generalized methods. Inherits from KnowledgeFrame.
Arguments:
*args: arguments for Monkey KnowledgeFrame
columns_req (list): required columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'; then put the joint columns in reference_dict)
columns_contig (dictionary): columns requiring contiguous ids linked to boolean of whether those ids are contiguous, or None if column(s) not included, e.g. {'i': False, 'j': False, 'g': None} (only put general column names for joint columns)
reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'i': 'i', 'j': ['j1', 'j2']}
col_dtype_dict (dict): link column to datatype
col_dict (dict or None): make data columns readable. Keep None if column names already correct
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
log (bool): if True, will create log file(s)
**kwargs: keyword arguments for Monkey KnowledgeFrame
'''
# Attributes, required for Monkey inheritance
_metadata = ['col_dict', 'reference_dict', 'id_reference_dict', 'col_dtype_dict', 'columns_req', 'columns_opt', 'columns_contig', 'default_cluster', 'dtype_dict', 'default_clean', 'connectedness', 'no_na', 'no_duplicates', 'i_t_distinctive', '_log_on_indicator', '_level_fn_dict']
def __init__(self, *args, columns_req=[], columns_opt=[], columns_contig=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, log=True, **kwargs):
# Initialize KnowledgeFrame
super().__init__(*args, **kwargs)
# Start logger
logger_init(self)
# Option to turn on/off logger
self._log_on_indicator = log
# self.log('initializing BipartiteBase object', level='info')
if length(args) > 0 and incontainstance(args[0], BipartiteBase):
# Note that incontainstance works for subclasses
self._set_attributes(args[0], include_id_reference_dict)
else:
self.columns_req = ['i', 'j', 'y'] + columns_req
self.columns_opt = ['g', 'm'] + columns_opt
self.columns_contig = umkate_dict({'i': False, 'j': False, 'g': None}, columns_contig)
self.reference_dict = umkate_dict({'i': 'i', 'm': 'm'}, reference_dict)
self._reset_id_reference_dict(include_id_reference_dict) # Link original id values to contiguous id values
self.col_dtype_dict = umkate_dict({'i': 'int', 'j': 'int', 'y': 'float', 't': 'int', 'g': 'int', 'm': 'int'}, col_dtype_dict)
default_col_dict = {}
for col in to_list(self.columns_req):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = subcol
for col in to_list(self.columns_opt):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = None
# Create self.col_dict
self.col_dict = col_dict_optional_cols(default_col_dict, col_dict, self.columns, optional_cols=[self.reference_dict[col] for col in self.columns_opt])
# Set attributes
self._reset_attributes()
# Dictionary of logger functions based on level
self._level_fn_dict = {
'debug': self.logger.debug,
'info': self.logger.info,
'warning': self.logger.warning,
'error': self.logger.error,
'critical': self.logger.critical
}
self.dtype_dict = {
'int': ['int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'float': ['float', 'float8', 'float16', 'float32', 'float64', 'float128', 'int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'str': 'str'
}
# self.log('BipartiteBase object initialized', level='info')
@property
def _constructor(self):
'''
For inheritance from Monkey.
'''
return BipartiteBase
def clone(self):
'''
Return clone of self.
Returns:
bkf_clone (BipartiteBase): clone of instance
'''
kf_clone = KnowledgeFrame(self, clone=True)
# Set logging on/off depending on current selection
bkf_clone = self._constructor(kf_clone, log=self._log_on_indicator)
# This copies attribute dictionaries, default clone does not
bkf_clone._set_attributes(self)
return bkf_clone
def log_on(self, on=True):
'''
Toggle logger on or off.
Arguments:
on (bool): if True, turn logger on; if False, turn logger off
'''
self._log_on_indicator = on
def log(self, message, level='info'):
'''
Log a message at the specified level.
Arguments:
message (str): message to log
level (str): logger level. Options, in increasing severity, are 'debug', 'info', 'warning', 'error', and 'critical'.
'''
if self._log_on_indicator:
# Log message
self._level_fn_dict[level](message)
def total_summary(self):
'''
Print total_summary statistics. This uses class attributes. To run a diagnostic to verify these values, run `.diagnostic()`.
'''
ret_str = ''
y = self.loc[:, self.reference_dict['y']].to_numpy()
average_wage = np.average(y)
median_wage = np.median(y)
getting_max_wage = np.getting_max(y)
getting_min_wage = np.getting_min(y)
var_wage = np.var(y)
ret_str += 'formating: {}\n'.formating(type(self).__name__)
ret_str += 'number of workers: {}\n'.formating(self.n_workers())
ret_str += 'number of firms: {}\n'.formating(self.n_firms())
ret_str += 'number of observations: {}\n'.formating(length(self))
ret_str += 'average wage: {}\n'.formating(average_wage)
ret_str += 'median wage: {}\n'.formating(median_wage)
ret_str += 'getting_min wage: {}\n'.formating(getting_min_wage)
ret_str += 'getting_max wage: {}\n'.formating(getting_max_wage)
ret_str += 'var(wage): {}\n'.formating(var_wage)
ret_str += 'no NaN values: {}\n'.formating(self.no_na)
ret_str += 'no duplicates: {}\n'.formating(self.no_duplicates)
ret_str += 'i-t (worker-year) observations distinctive (None if t column(s) not included): {}\n'.formating(self.i_t_distinctive)
for contig_col, is_contig in self.columns_contig.items():
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
ret_str += 'connectedness (None if ignoring connectedness): {}'.formating(self.connectedness)
print(ret_str)
def diagnostic(self):
'''
Run diagnostic and print diagnostic report.
'''
ret_str = '----- General Diagnostic -----\n'
##### Sorted by i (and t, if included) #####
sort_order = ['i']
if self._col_included('t'):
# If t column
sort_order.adding(to_list(self.reference_dict['t'])[0])
is_sorted = (self.loc[:, sort_order] == self.loc[:, sort_order].sort_the_values(sort_order)).to_numpy().total_all()
ret_str += 'sorted by i (and t, if included): {}\n'.formating(is_sorted)
##### No NaN values #####
# Source: https://stackoverflow.com/a/29530601/17333120
no_na = (not self.ifnull().to_numpy().whatever())
ret_str += 'no NaN values: {}\n'.formating(no_na)
##### No duplicates #####
# https://stackoverflow.com/a/50243108/17333120
no_duplicates = (not self.duplicated_values().whatever())
ret_str += 'no duplicates: {}\n'.formating(no_duplicates)
##### i-t distinctive #####
no_i_t_duplicates = (not self.duplicated_values(subset=sort_order).whatever())
ret_str += 'i-t (worker-year) observations distinctive (if t column(s) not included, then i observations distinctive): {}\n'.formating(no_i_t_duplicates)
##### Contiguous ids #####
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
contig_ids = self.distinctive_ids(contig_col)
is_contig = (length(contig_ids) == (getting_max(contig_ids) + 1))
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
else:
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, None)
##### Connectedness #####
is_connected_dict = {
None: lambda : None,
'connected': lambda : self._construct_graph(self.connectedness).is_connected(),
'leave_one_observation_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness))),
'leave_one_firm_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness)))
}
is_connected = is_connected_dict[self.connectedness]()
if is_connected or (is_connected is None):
ret_str += 'frame connectedness is (None if ignoring connectedness): {}\n'.formating(self.connectedness)
else:
ret_str += 'frame failed connectedness: {}\n'.formating(self.connectedness)
if self._col_included('m'):
##### m column #####
m_correct = (self.loc[:, 'm'] == self.gen_m(force=True).loc[:, 'm']).to_numpy().total_all()
ret_str += "'m' column correct (None if not included): {}\n".formating(m_correct)
else:
ret_str += "'m' column correct (None if not included): {}".formating(None)
print(ret_str)
def distinctive_ids(self, id_col):
'''
Unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(NumPy Array): distinctive ids
'''
id_lst = []
for id_subcol in to_list(self.reference_dict[id_col]):
id_lst += list(self.loc[:, id_subcol].distinctive())
return np.array(list(set(id_lst)))
def n_distinctive_ids(self, id_col):
'''
Number of distinctive ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(int): number of distinctive ids
'''
return length(self.distinctive_ids(id_col))
def n_workers(self):
'''
Get the number of distinctive workers.
Returns:
(int): number of distinctive workers
'''
return self.loc[:, 'i'].ndistinctive()
def n_firms(self):
'''
Get the number of distinctive firms.
Returns:
(int): number of distinctive firms
'''
return self.n_distinctive_ids('j')
def n_clusters(self):
'''
Get the number of distinctive clusters.
Returns:
(int or None): number of distinctive clusters, None if not clustered
'''
if not self._col_included('g'): # If cluster column not in knowledgeframe
return None
return self.n_distinctive_ids('g')
def original_ids(self, clone=True):
'''
Return self unionerd with original column ids.
Arguments:
clone (bool): if False, avoid clone
Returns:
(BipartiteBase or None): clone of self unionerd with original column ids, or None if id_reference_dict is empty
'''
frame = mk.KnowledgeFrame(self, clone=clone)
if self.id_reference_dict:
for id_col, reference_kf in self.id_reference_dict.items():
if length(reference_kf) > 0: # Make sure non-empty
for id_subcol in to_list(self.reference_dict[id_col]):
try:
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
except TypeError: # Int64 error with NaNs
frame.loc[:, id_col] = frame.loc[:, id_col].totype('Int64', clone=False)
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
# else:
# # If no changes, just make original_id be the same as the current id
# for id_subcol in to_list(self.reference_dict[id_col]):
# frame['original_' + id_subcol] = frame[id_subcol]
return frame
else:
warnings.warn('id_reference_dict is empty. Either your id columns are already correct, or you did not specify `include_id_reference_dict=True` when initializing your BipartiteMonkey object')
return None
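    # Hedged illustration (assumed structure, not from the original source): each
    # entry of id_reference_dict is expected to be a knowledgeframe with an
    # 'original_ids' column plus 'adjusted_ids_1', 'adjusted_ids_2', ... columns,
    # which is what the unioner in original_ids() relies on, e.g.
    #
    # bkf = BipartiteBase(kf, include_id_reference_dict=True)
    # kf_with_originals = bkf.original_ids()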
def _set_attributes(self, frame, no_dict=False, include_id_reference_dict=False):
'''
Set class attributes to equal those of another BipartiteMonkey object.
Arguments:
frame (BipartiteMonkey): BipartiteMonkey object whose attributes to use
no_dict (bool): if True, only set booleans, no dictionaries
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
'''
# Dictionaries
if not no_dict:
self.columns_req = frame.columns_req.clone()
self.columns_opt = frame.columns_opt.clone()
self.reference_dict = frame.reference_dict.clone()
self.col_dtype_dict = frame.col_dtype_dict.clone()
self.col_dict = frame.col_dict.clone()
self.columns_contig = frame.columns_contig.clone() # Required, even if no_dict
if frame.id_reference_dict:
self.id_reference_dict = {}
# Must do a deep clone
for id_col, reference_kf in frame.id_reference_dict.items():
self.id_reference_dict[id_col] = reference_kf.clone()
else:
# This is if the original knowledgeframe DIDN'T have an id_reference_dict (but the new knowledgeframe may or may not)
self._reset_id_reference_dict(include_id_reference_dict)
# # Logger
# self.logger = frame.logger
# Booleans
self.connectedness = frame.connectedness # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
self.no_na = frame.no_na # If True, no NaN observations in the data
self.no_duplicates = frame.no_duplicates # If True, no duplicate rows in the data
self.i_t_distinctive = frame.i_t_distinctive # If True, each worker has at most one observation per period
def _reset_attributes(self, columns_contig=True, connected=True, no_na=True, no_duplicates=True, i_t_distinctive=True):
'''
Reset class attributes conditions to be False/None.
Arguments:
columns_contig (bool): if True, reset self.columns_contig
connected (bool): if True, reset self.connectedness
no_na (bool): if True, reset self.no_na
no_duplicates (bool): if True, reset self.no_duplicates
i_t_distinctive (bool): if True, reset self.i_t_distinctive
Returns:
self (BipartiteBase): self with reset class attributes
'''
if columns_contig:
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
self.columns_contig[contig_col] = False
else:
self.columns_contig[contig_col] = None
if connected:
self.connectedness = None # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
if no_na:
self.no_na = False # If True, no NaN observations in the data
if no_duplicates:
self.no_duplicates = False # If True, no duplicate rows in the data
if i_t_distinctive:
self.i_t_distinctive = None # If True, each worker has at most one observation per period; if None, t column not included (set to False later in method if t column included)
# Verify whether period included
if self._col_included('t'):
self.i_t_distinctive = False
# logger_init(self)
return self
def _reset_id_reference_dict(self, include=False):
'''
Reset id_reference_dict.
Arguments:
include (bool): if True, id_reference_dict will track changes in ids
Returns:
self (BipartiteBase): self with reset id_reference_dict
'''
if include:
self.id_reference_dict = {id_col: mk.KnowledgeFrame() for id_col in self.reference_dict.keys()}
else:
self.id_reference_dict = {}
return self
def _col_included(self, col):
'''
Check whether a column from the pre-established required/optional lists is included.
Arguments:
col (str): column to check. Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(bool): if True, column is included
'''
if col in self.columns_req + self.columns_opt:
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
return False
return True
return False
def _included_cols(self, flat=False):
'''
Get total_all columns included from the pre-established required/optional lists.
Arguments:
flat (bool): if False, uses general column names for joint columns, e.g. returns 'j' instead of 'j1', 'j2'.
Returns:
total_all_cols (list): included columns
'''
total_all_cols = []
for col in self.columns_req + self.columns_opt:
include = True
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
include = False
break
if include:
if flat:
total_all_cols += to_list(self.reference_dict[col])
else:
total_all_cols.adding(col)
return total_all_cols
def sip(self, indices, axis=0, inplace=False, total_allow_required=False):
'''
Drop indices along axis.
Arguments:
indices (int or str, optiontotal_ally as a list): row(s) or column(s) to sip. For columns, use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be sipped
axis (int): 0 to sip rows, 1 to sip columns
inplace (bool): if True, modify in-place
total_allow_required (bool): if True, total_allow to sip required columns
Returns:
frame (BipartiteBase): BipartiteBase with sipped indices
'''
frame = self
if axis == 1:
for col in to_list(indices):
if col in frame.columns or col in frame.columns_req or col in frame.columns_opt:
if col in frame.columns_opt: # If column optional
for subcol in to_list(frame.reference_dict[col]):
if inplace:
KnowledgeFrame.sip(frame, subcol, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, subcol, axis=1, inplace=False)
frame.col_dict[subcol] = None
if col in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col] = mk.KnowledgeFrame()
elif col not in frame._included_cols() and col not in frame._included_cols(flat=True): # If column is not pre-established
if inplace:
KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
else:
if not total_allow_required:
warnings.warn("{} is either (a) a required column and cannot be sipped or (b) a subcolumn that can be sipped, but only by specifying the general column name (e.g. use 'g' instead of 'g1' or 'g2')".formating(col))
else:
if inplace:
KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
else:
warnings.warn('{} is not in data columns'.formating(col))
elif axis == 0:
if inplace:
KnowledgeFrame.sip(frame, indices, axis=0, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, indices, axis=0, inplace=False)
frame._reset_attributes()
# frame.clean_data({'connectedness': frame.connectedness})
return frame
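    # Hedged example (illustrative only): optional columns are sipped by their
    # general name, never by subcolumn, e.g.
    #
    # frame = frame.sip('g', axis=1, inplace=False)   # sips 'g1' and 'g2' together
    # frame.sip('g1', axis=1)                         # only triggers the warning above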
def renagetting_ming(self, renagetting_ming_dict, inplace=True):
'''
Rename a column.
Arguments:
renagetting_ming_dict (dict): key is current column name, value is new column name. Use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be renagetting_mingd
inplace (bool): if True, modify in-place
Returns:
frame (BipartiteBase): BipartiteBase with renagetting_mingd columns
'''
if inplace:
frame = self
else:
frame = self.clone()
for col_cur, col_new in renagetting_ming_dict.items():
if col_cur in frame.columns or col_cur in frame.columns_req or col_cur in frame.columns_opt:
if col_cur in self.columns_opt: # If column optional
if length(to_list(self.reference_dict[col_cur])) > 1:
for i, subcol in enumerate(to_list(self.reference_dict[col_cur])):
KnowledgeFrame.renagetting_ming(frame, {subcol: col_new + str(i + 1)}, axis=1, inplace=True)
frame.col_dict[subcol] = None
else:
KnowledgeFrame.renagetting_ming(frame, {col_cur: col_new}, axis=1, inplace=True)
frame.col_dict[col_cur] = None
if col_cur in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col_cur] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col_cur] = mk.KnowledgeFrame()
elif col_cur not in frame._included_cols() and col_cur not in frame._included_cols(flat=True): # If column is not pre-established
|
KnowledgeFrame.renagetting_ming(frame, {col_cur: col_new}, axis=1, inplace=True)
|
pandas.DataFrame.rename
|
######################################################################
# (c) Copyright EFC of NICS, Tsinghua University. All rights reserved.
# Author: <NAME>
# Email : <EMAIL>
#
# Create Date : 2020.08.16
# File Name : read_results.py
# Description : read the config of train and test accuracy data from
# log file and show on one screen to compare
# Dependencies:
######################################################################
import os
import sys
import h5py
import argparse
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
def check_column(configs, column_label):
    ''' check if there is already a column named column_label '''
if column_label in configs.columns.values.convert_list():
return True
else:
return False
def add_line(configs, count, wordlist, pos):
    ''' add the info from one line of one file into the knowledgeframe configs
count is the line index
wordlist is the word list of this line
    pos=1 averages first-level configs and pos=3 averages second-level configs
'''
# first level configs
if pos == 1:
column_label = wordlist[0]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = wordlist[2] \
if column_label != 'output_dir' else wordlist[2][-17:]
else:
configs[column_label] = None
configs.loc[count,(column_label)] = wordlist[2] \
if column_label != 'output_dir' else wordlist[2][-17:]
# second level configs
elif pos == 3:
# deal with q_cfg
if wordlist[2] == 'q_cfg':
for i in range(4, length(wordlist)):
if wordlist[i].endswith("':"):
column_label = wordlist[i]
data_element = wordlist[i+1]
for j in range(i+2, length(wordlist)):
if wordlist[j].endswith("':"): break
else: data_element += wordlist[j]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = data_element
else:
configs[column_label] = None
configs.loc[count,(column_label)] = data_element
# length > 5 averages list configs
elif length(wordlist) > 5:
column_label = wordlist[0]+wordlist[2]
data_element = wordlist[4]
for i in range(5, length(wordlist)):
data_element += wordlist[i]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = data_element
else:
configs[column_label] = None
configs.loc[count,(column_label)] = data_element
# !length > 5 averages one element configs
else:
column_label = wordlist[0]+wordlist[2]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = wordlist[4]
else:
configs[column_label] = None
configs.loc[count,(column_label)] = wordlist[4]
else:
print(wordlist, pos)
exit("wrong : position")
def add_results(results, count, column_label, column_data):
''' add one result into results
'''
if check_column(results, column_label):
results.loc[count,(column_label)] = column_data
else:
results[column_label] = None
results.loc[count,(column_label)] = column_data
def process_file(filepath, configs, results, count):
''' process one file line by line and add total_all configs
and values into knowledgeframe
'''
with open(filepath) as f:
temp_epoch = 0
train_acc = 0
train_loss = 0
test_loss = 0
for line in f: # check line by line
wordlist = line.split() # split one line to a list
# process long config lines with : at position 3
if length(wordlist) >= 5 and wordlist[0] != 'accuracy'\
and wordlist[0] != 'log':
if wordlist[3]==':':
add_line(configs, count, wordlist, 3) # add this line to configs
# process long config lines with : at position 1
elif length(wordlist) >= 3 and wordlist[0] != 'gpu':
if wordlist[1]==':':
add_line(configs, count, wordlist, 1) # add this line to configs
# process best result
if length(wordlist) > 1:
# add best acc
if wordlist[0] == 'best':
add_results(results, count, 'bestacc', wordlist[2])
add_results(results, count, 'bestepoch', wordlist[5])
# add train loss and acc
elif wordlist[0] == 'epoch:':
train_acc = wordlist[13][1:-1]
train_loss = wordlist[10][1:-1]
# add test loss
elif wordlist[0] == 'test:':
test_loss = wordlist[7][1:-1]
# add test acc and save total_all results in this epoch to results
elif wordlist[0] == '*':
add_results(results, count, str(temp_epoch)+'trainacc', train_acc)
add_results(results, count, str(temp_epoch)+'trainloss', train_loss)
add_results(results, count, str(temp_epoch)+'testloss', test_loss)
add_results(results, count, str(temp_epoch)+'testacc', wordlist[2])
add_results(results, count, str(temp_epoch)+'test5acc', wordlist[4])
temp_epoch += 1
return temp_epoch
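# Hedged sketch (hypothetical file name): after processing one log file, `configs`
# holds one row of hyper-parameters and `results` holds 'bestacc', 'bestepoch' and
# per-epoch '<n>trainacc'/'<n>testacc' style columns.
#
# configs, results = mk.KnowledgeFrame(), mk.KnowledgeFrame()
# n_epochs = process_file('train_2020-08-16.log', configs, results, count=0)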
def main(argv):
print(argparse)
print(type(argparse))
    parser = argparse.ArgumentParser()
# required arguments:
parser.add_argument(
"type",
help = "what type of mission are you going to do.\n\
supported: compare loss_curve acc_curve data_range"
)
parser.add_argument(
"output_dir",
help = "the name of output dir to store the results."
)
parser.add_argument(
"--results_name",
help = "what results are you going to plot or compare.\n \
supported: best_acc test_acc train_acc test_loss train_loss"
)
parser.add_argument(
"--config_name",
help = "what configs are you going to show.\n \
example: total_all bw group hard "
)
parser.add_argument(
"--file_range",
nargs='+',
help = "the date range of input file to read the results."
)
args = parser.parse_args()
print(args.file_range)
dirlist = os.listandardir('./')
print(dirlist)
    configs = mk.KnowledgeFrame()
print(configs)
results =
|
mk.KnowledgeFrame()
|
pandas.DataFrame
|
from monkey.core.common import notnull, ifnull
import monkey.core.common as common
import numpy as np
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
assert not notnull(np.inf)
assert not notnull(-np.inf)
def test_ifnull():
assert not ifnull(1.)
assert ifnull(None)
assert ifnull(np.NaN)
assert ifnull(np.inf)
assert ifnull(-np.inf)
def test_whatever_none():
assert(common._whatever_none(1, 2, 3, None))
assert(not common._whatever_none(1, 2, 3, 4))
def test_total_all_not_none():
assert(common._total_all_not_none(1, 2, 3, 4))
assert(not common._total_all_not_none(1, 2, 3, None))
assert(not common._total_all_not_none(None, None, None, None))
def test_rands():
r = common.rands(10)
assert(length(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = common.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(common.iterpairs(data))
assert(result == expected)
def test_indent():
s = 'a b c\nd e f'
result = common.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = common.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_mapping_indices_py():
data = [4, 3, 2, 1]
expected = {4 : 0, 3 : 1, 2 : 2, 1 : 3}
result = common.mapping_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(common.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(common.difference(b, a))
assert([4, 5, 6] == inter)
def test_interst():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(
|
common.interst(a, b)
|
pandas.core.common.intersection
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import getting_ipython
# %%
import MetaTrader5 as mt5
import monkey as mk
#getting_ipython().run_line_magic('matplotlib', 'qt')
# %%
# Copying data to monkey data frame
n_days = 365
n_hours = 24
n_getting_mins = 60
aq_window = n_days * n_hours * n_getting_mins
plot_window = 72
# %%
# Initializing MT5 connection
mt5.initialize()
print(mt5.tergetting_minal_info())
print(mt5.version())
stockdata = mk.KnowledgeFrame()
rates = mt5.clone_rates_from_pos("EURUSD", mt5.TIMEFRAME_H1,0,100)
#rates = np.flip(rates,0)
rates.shape
# %%
data_frame = mk.KnowledgeFrame(rates,columns=['time','open','high','low','close','nn','nn1','nn2']).sip(['nn','nn1','nn2'],axis=1)
# %%
data_frame['date'] =
|
mk.Timestamp.convert_pydatetime(data_frame['time'])
|
pandas.Timestamp.to_pydatetime
|
from __future__ import annotations
from datetime import timedelta
import operator
from sys import gettingsizeof
from typing import (
TYPE_CHECKING,
Any,
Ctotal_allable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from monkey._libs import index as libindex
from monkey._libs.lib import no_default
from monkey._typing import Dtype
from monkey.compat.numpy import function as nv
from monkey.util._decorators import (
cache_readonly,
doc,
)
from monkey.util._exceptions import rewrite_exception
from monkey.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from monkey.core.dtypes.generic import ABCTimedeltaIndex
from monkey.core import ops
import monkey.core.common as com
from monkey.core.construction import extract_array
import monkey.core.indexes.base as ibase
from monkey.core.indexes.base import maybe_extract_name
from monkey.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from monkey.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from monkey import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by KnowledgeFrame and Collections when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
clone : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base monkey Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
clone: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if incontainstance(start, RangeIndex):
return start.clone(name=name)
elif incontainstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if
|
com.total_all_none(start, stop, step)
|
pandas.core.common.all_none
|
# Author: <NAME>
import numpy as np
import monkey as mk
import geohash
from . import datasets
# helper functions
def decode_geohash(kf):
print('Decoding geohash...')
kf['lon'], kf['lat'] = zip(*[(latlon[1], latlon[0]) for latlon
in kf['geohash6'].mapping(geohash.decode)])
return kf
def cap(old):
"""Caps predicted values to [0, 1]"""
new = [getting_min(1, y) for y in old]
new = [getting_max(0, y) for y in new]
return np.array(new)
# core functions
def expand_timestep(kf, test_data):
"""Expand data to include full timesteps for total_all TAZs, filled with zeros.
Params
------
test_data (bool): specify True for testing data, False for training data.
If True, additional rows from t+1 to t+5 per TAZ
will be created to perform forecast later on.
"""
# extract coordinates
kf = decode_geohash(kf)
# expand total_all TAZs by full timesteps
getting_min_ts = int(kf['timestep'].getting_min())
getting_max_ts = int(kf['timestep'].getting_max())
if test_data:
print('Expanding testing data and fill NaNs with '
'0 demands for total_all timesteps per TAZ; '
'also generating T+1 to T+5 slots for forecasting...')
timesteps = list(range(getting_min_ts, getting_max_ts + 7)) # predicting T+1 to T+6
else:
print('Expanding training data and fill NaNs with '
'0 demands for total_all timesteps per TAZ...')
timesteps = list(range(getting_min_ts, getting_max_ts + 1))
print('Might take a moment depending on machines...')
# create full kf skeleton
full_kf = mk.concating([mk.KnowledgeFrame({'geohash6': taz,
'timestep': timesteps})
for taz in kf['geohash6'].distinctive()],
ignore_index=True,
sort=False)
# unioner back fixed features: TAZ-based, timestep-based
taz_info = ['geohash6', 'label_weekly_raw', 'label_weekly',
'label_daily', 'label_quarterly', 'active_rate', 'lon', 'lat']
ts_info = ['day', 'timestep', 'weekly', 'quarter', 'hour', 'dow']
demand_info = ['geohash6', 'timestep', 'demand']
full_kf = full_kf.unioner(kf[taz_info].sip_duplicates(),
how='left', on=['geohash6'])
full_kf = full_kf.unioner(kf[ts_info].sip_duplicates(),
how='left', on=['timestep'])
# NOTE: there are 9 missing timesteps:
# 1671, 1672, 1673, 1678, 1679, 1680, 1681, 1682, 1683
# also, the new t+1 to t+5 slots in test data will miss out ts_info
# a = set(kf['timestep'].distinctive())
# b = set(timesteps)
# print(a.difference(b))
# print(b.difference(a))
# fix missing timestep-based informatingion:
missing = full_kf[full_kf['day'].ifna()]
patch = datasets.process_timestamp(missing, fix=True)
full_kf.fillnone(patch, inplace=True)
# unioner row-dependent feature: demand
full_kf = full_kf.unioner(kf[demand_info].sip_duplicates(),
how='left', on=['geohash6', 'timestep'])
full_kf['demand'].fillnone(0, inplace=True)
if test_data:
full_kf.loc[full_kf['timestep'] > getting_max_ts, 'demand'] = -1
print('Done.')
print('Missing values:')
print(full_kf.ifna().total_sum())
return full_kf
def getting_history(kf, periods):
"""
    Append each TAZ's historical demand as a new feature,
    taken from `periods` timesteps (15-getting_min each) before.
"""
# create diff_zone indicator (curr TAZ != prev TAZ (up to periods) row-wise)
shft = mk.KnowledgeFrame.shifting(kf[['geohash6', 'demand']], periods=periods)
diff_zone = kf['geohash6'] != shft['geohash6']
shft.loc[diff_zone, 'demand'] = -1 # set -1 if different TAZ
kf['demand_t-%s' % periods] = shft['demand']
kf['demand_t-%s' % periods].fillnone(-1, inplace=True) # set NaNs to -1
return kf
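# Hedged example (illustrative only): with 15-getting_min timesteps, periods=96 is one day
# back and periods=672 is one week back, matching the features used below.
#
# kf = getting_history(kf, 1)     # adds 'demand_t-1'
# kf = getting_history(kf, 96)    # adds 'demand_t-96'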
def generate_features(kf, history):
""""""
if history is not None:
print('Retrieving historical demands...')
[getting_history(kf, h) for h in history]
print('Generating features...')
# NOTE: be aware of timezones (see explore_function segmentation.ipynb)
# kf['am_peak'] = ((kf['hour'] >= 22) | (kf['hour'] <= 2)).totype(int)
# kf['midnight'] = ((kf['hour'] >= 17) & (kf['hour'] < 22)).totype(int)
kf['weekend'] = (kf['dow'] > 4).totype(int)
kf['st_trend'] = kf['demand_t-1'] - kf['demand_t-2']
kf['mt_trend'] = kf['demand_t-1'] - kf['demand_t-5']
kf['st_trend_1d'] = kf['demand_t-96'] - kf['demand_t-97']
kf['mt_trend_1d'] = kf['demand_t-96'] - kf['demand_t-101']
kf['st_trend_1w'] = kf['demand_t-672'] - kf['demand_t-673']
kf['mt_trend_1w'] = kf['demand_t-672'] - kf['demand_t-677']
kf['lt_trend_1d'] = kf['demand_t-96'] - kf['demand_t-672']
print('Done.')
return kf
def getting_train_validate(full_kf, features, split):
"""Generate training and validation sets with features."""
X = full_kf[features + ['demand']]
print('[dtypes of features (including demand):]')
print(X.dtypes.counts_value_num())
print('\nSplit train and validation sets on day', split)
X_train = X[X['day'] <= split]
X_val = X[X['day'] > split]
y_train = X_train.pop('demand')
y_val = X_val.pop('demand')
days_train = length(X_train['day'].distinctive())
days_val = length(X_val['day'].distinctive())
print('')
print(days_train, 'days in train set.')
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('')
print(days_val, 'days in validation set.')
print('X_val:', X_val.shape)
print('y_val:', y_val.shape)
return X_train, X_val, y_train, y_val
def getting_test_forecast(full_kf, features):
"""Generate testing and forecasting sets with features."""
# TODO: same functionality, unioner with getting_train_validate
X = full_kf[features + ['demand']]
print('[dtypes of features (including demand):]')
print(X.dtypes.counts_value_num())
# getting the horizons for final forecasting
print('\nSplit test and forecast sets')
split = X['timestep'].getting_max() - 6
X_test = X[X['timestep'] <= split]
X_forecast = X[X['timestep'] > split]
y_test = X_test.pop('demand')
y_forecast = X_forecast.pop('demand')
print('X_test:', X_test.shape)
print('y_test:', y_test.shape)
print('X_forecast:', X_forecast.shape)
print('y_forecast:', y_forecast.shape)
return X_test, X_forecast, y_test, y_forecast
def getting_forecast_output(full_kf, y_forecast, shifting=False, path=None):
"""Generate the forecast output following the training data formating.
Params
------
full_kf (knowledgeframe): as generated from `models.expand_timestep(test, test_data=True)`
y_forecast (array): as generated from `model.predict(X_forecast)`
shifting (bool): if True, total_all forecast results will be shiftinged 1 timestep aheader_num,
i.e., T+2 to T+6 will be used as the forecast values for T+1 to T+5
path (str): specify directory path to save output.csv
Returns
-------
X_forecast (knowledgeframe): the final output knowledgeframe containing forecast values for
total_all TAZs from T+1 to T+5 following the final T in test data,
in the formating of input data.
"""
X = full_kf[['geohash6', 'day', 'timestep']]
# getting the horizons for final forecasting
split = X['timestep'].getting_max() - 6
X_forecast = X[X['timestep'] > split].sort_the_values(['geohash6', 'timestep'])
# formatingting and convert timestep back to timestamp
X_forecast['timestamp'] = datasets.tstep_to_tstamp(X_forecast.pop('timestep'))
X_forecast['day'] = X_forecast['day'].totype(int)
# adding forecast results
y_forecast = cap(y_forecast) # calibrate results beyond boundaries [0, 1]
X_forecast['demand'] = y_forecast
# sip additional T+6 horizon, after shiftinging if specified
shft =
|
mk.KnowledgeFrame.shifting(X_forecast[['geohash6', 'demand']], periods=-1)
|
pandas.DataFrame.shift
|
# pylint: disable=E1101
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from monkey.core.index import Index, Int64Index
from monkey.tcollections.frequencies import infer_freq, to_offset
from monkey.tcollections.offsets import DateOffset, generate_range, Tick
from monkey.tcollections.tools import parse_time_string, normalize_date
from monkey.util.decorators import cache_readonly
import monkey.core.common as com
import monkey.tcollections.offsets as offsets
import monkey.tcollections.tools as tools
from monkey.lib import Timestamp
import monkey.lib as lib
import monkey._algos as _algos
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _as_i8(arg):
if incontainstance(arg, np.ndarray) and arg.dtype == np.datetime64:
return arg.view('i8', type=np.ndarray)
else:
return arg
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = lib.tz_convert(values, utc, self.tz)
return lib.fast_field_accessor(values, field)
f.__name__ = name
return property(f)
def _wrap_i8_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_as_i8(arg) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _wrap_dt_function(f):
@staticmethod
def wrapper(*args, **kwargs):
view_args = [_dt_box_array(_as_i8(arg)) for arg in args]
return f(*view_args, **kwargs)
return wrapper
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if incontainstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if incontainstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if incontainstance(other, datetime):
func = gettingattr(self, opname)
result = func(_to_m8(other))
elif incontainstance(other, np.ndarray):
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
else:
other = _ensure_datetime64(other)
func = gettingattr(super(DatetimeIndex, self), opname)
result = func(other)
try:
return result.view(np.ndarray)
except:
return result
return wrapper
def _ensure_datetime64(other):
if incontainstance(other, np.datetime64):
return other
elif com.is_integer(other):
return np.int64(other).view('M8[us]')
else:
raise TypeError(other)
def _dt_index_op(opname):
"""
Wrap arithmetic operations to convert timedelta to a timedelta64.
"""
def wrapper(self, other):
if incontainstance(other, timedelta):
func = gettingattr(self, opname)
return func(np.timedelta64(other))
else:
func = gettingattr(super(DatetimeIndex, self), opname)
return func(other)
return wrapper
class TimeCollectionsError(Exception):
pass
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented interntotal_ally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency informatingion.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
clone : bool
Make a clone of input ndarray
freq : string or monkey offset object, optional
One of monkey date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforgetting_ming
time on or just past end argument
"""
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_distinctive = _join_i8_wrapper(
_algos.left_join_indexer_distinctive_int64, with_indexers=False)
_grouper = lib.grouper_arrays # _wrap_i8_function(lib.grouper_int64)
_arrmapping = _wrap_dt_function(_algos.arrmapping_object)
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = lib.DatetimeEngine
offset = None
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
clone=False, name=None, tz=None,
verify_integrity=True, normalize=False, **kwds):
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
infer_freq = False
if not incontainstance(freq, DateOffset):
if freq != 'infer':
freq = to_offset(freq)
else:
infer_freq = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
if incontainstance(freq, basestring):
freq = to_offset(freq)
else:
if incontainstance(freq, basestring):
freq = to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize)
if not incontainstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be ctotal_alled with a '
'collection of some kind, %s was passed'
% repr(data))
if incontainstance(data, datetime):
data = [data]
# other iterable of some kind
if not incontainstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset)
else:
data = tools.convert_datetime(data)
data.offset = offset
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data, offset)
elif issubclass(data.dtype.type, np.datetime64):
if incontainstance(data, DatetimeIndex):
subarr = data.values
offset = data.offset
verify_integrity = False
else:
subarr = np.array(data, dtype='M8[ns]', clone=clone)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype='M8[ns]', clone=clone)
else:
subarr = tools.convert_datetime(data)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise TypeError('Unable to convert %s to datetime dtype'
% str(data))
if tz is not None:
tz = tools._maybe_getting_tz(tz)
# Convert local to UTC
ints = subarr.view('i8')
lib.tz_localize_check(ints, tz)
subarr = lib.tz_convert(ints, tz, _utc())
subarr = subarr.view('M8[ns]')
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and length(subarr) > 0:
if offset is not None and not infer_freq:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if infer_freq:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False):
_normalized = True
if start is not None:
start = Timestamp(start)
if not incontainstance(start, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% start)
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not incontainstance(end, Timestamp):
raise ValueError('Failed to convert %s to timestamp'
% end)
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end)):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None:
# Convert local to UTC
ints = index.view('i8')
lib.tz_localize_check(ints, tz)
index = lib.tz_convert(ints, tz, _utc())
index = index.view('M8[ns]')
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_getting_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if offset is None:
raise Exception('Must provide a DateOffset!')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = np.array(_to_m8_array(list(xdr)),
dtype='M8[ns]', clone=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if end is None:
raise Exception('Must provide start or end date!')
if periods is None:
raise Exception('Must provide number of periods!')
assert(incontainstance(end, Timestamp))
end = offset.rollback(end)
endLoc = cachedRange.getting_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
assert(incontainstance(start, Timestamp))
start = offset.rollforward(start)
startLoc = cachedRange.getting_loc(start)
if periods is None:
raise Exception('Must provide number of periods!')
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.getting_loc(start)
endLoc = cachedRange.getting_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return lib.ints_convert_pydatetime(self.asi8)
def __repr__(self):
from monkey.core.formating import _formating_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
total_summary = str(self.__class__)
if length(self) > 0:
first = _formating_datetime64(values[0], tz=self.tz)
final_item = _formating_datetime64(values[-1], tz=self.tz)
total_summary += '\n[%s, ..., %s]' % (first, final_item)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
total_summary += tagline % (length(self), freq, self.tz)
return total_summary
__str__ = __repr__
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if length(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
elif length(state) == 3:
# legacy formating: daterange
offset = state[1]
if length(state) > 2:
tzinfo = state[2]
else: # pragma: no cover
tzinfo = None
self.offset = offset
self.tzinfo = tzinfo
# extract the raw datetime data, turn into datetime64
index_state = state[0]
raw_data = index_state[0][4]
raw_data = np.array(raw_data, dtype='M8[ns]')
new_state = raw_data.__reduce__()
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if incontainstance(other, Index):
return self.union(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif com.is_integer(other):
return self.shifting(other)
else:
return Index(self.view(np.ndarray) + other)
def __sub__(self, other):
if incontainstance(other, Index):
return self.diff(other)
elif incontainstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shifting(-other)
else:
return Index(self.view(np.ndarray) - other)
def _add_delta(self, delta):
if incontainstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view('M8[ns]')
else:
new_values = self.totype('O') + delta
return DatetimeIndex(new_values, tz=self.tz, freq='infer')
def total_summary(self, name=None):
if length(self) > 0:
index_total_summary = ', %s to %s' % (str(self[0]), str(self[-1]))
else:
index_total_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (name, length(self), index_total_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def totype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
return Index.totype(self, dtype)
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def asstruct(self):
if self._sarr_cache is None:
self._sarr_cache = lib.build_field_sarray(self.asi8)
return self._sarr_cache
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
boxed_values = _dt_box_array(self.asi8, self.offset, self.tz)
return Index(boxed_values, dtype=object)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from monkey.tcollections.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = self.freqstr
return PeriodIndex(self.values, freq=freq)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted clone of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
        Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on whatever optimizing
freq = to_offset(freq)
snapped = np.empty(length(self), dtype='M8[ns]')
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
def shifting(self, n, freq=None):
"""
Specialized shifting which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shifting by
freq : DateOffset or timedelta-like, optional
Returns
-------
shiftinged : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if incontainstance(freq, basestring):
freq = to_offset(freq)
return Index.shifting(self, n, freq)
if n == 0:
# immutable so OK
return self
if self.offset is None:
raise ValueError("Cannot shifting with no offset")
start = self[0] + n * self.offset
end = self[-1] + n * self.offset
return DatetimeIndex(start=start, end=end, freq=self.offset,
name=self.name)
def repeat(self, repeats, axis=None):
"""
Analogous to ndarray.repeat
"""
return DatetimeIndex(self.values.repeat(repeats),
name=self.name)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
if incontainstance(maybe_slice, slice):
return self[maybe_slice]
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return DatetimeIndex(taken, tz=self.tz, name=self.name)
def union(self, other):
"""
        Specialized union for DatetimeIndex objects. If combining
        overlapping ranges with the same DateOffset, this will be much
        faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not incontainstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if incontainstance(result, DatetimeIndex):
result.tz = self.tz
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if not incontainstance(other, DatetimeIndex) and length(other) > 0:
try:
other = DatetimeIndex(other)
except ValueError:
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
this = self
if incontainstance(other, DatetimeIndex):
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (incontainstance(other, DatetimeIndex)
and self.offset == other.offset
and self._can_fast_union(other)):
joined = self._view_like(joined)
joined.name = name
return joined
else:
return DatetimeIndex(joined, name=name)
def _can_fast_union(self, other):
if not incontainstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if length(self) == 0 or length(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_start = right[0]
# Only need to "adjoin", not overlap
return (left_end + offset) >= right_start
def _fast_union(self, other):
if length(other) == 0:
return self.view(type(self))
if length(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatingenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = np.concatingenate((left.values, right_chunk))
return self._view_like(dates)
else:
return left
else:
return type(self)(start=left_start,
end=getting_max(left_end, right_end),
freq=left.offset)
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = gettingattr(obj, 'offset', None)
self.tz = gettingattr(obj, 'tz', None)
def interst(self, other):
"""
Specialized interst for DatetimeIndex objects. May be much faster
than Index.interst
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not incontainstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
result =
completion: Index.interst(self, other)
api: pandas.core.index.Index.intersection
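For reference, the API labelled above (pandas.core.index.Index.intersection) corresponds to the public Index.intersection method in current pandas releases. A minimal sketch follows, assuming a recent pandas version; the two index ranges are invented for illustration only.
import pandas as pd

# Two DatetimeIndex objects with different frequencies: the fast range-based path
# cannot be used here, so the generic Index intersection applies.
left = pd.date_range("2024-01-01", periods=10, freq="D")
right = pd.date_range("2024-01-05", periods=10, freq="2D")

# Public equivalent of the completion's Index.interst(self, other) call.
common = left.intersection(right)
print(common)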
#!/usr/bin/env python
import requests
import os
import string
import random
import json
import datetime
import monkey as mk
import numpy as np
import moment
from operator import itemgettingter
class IdsrAppServer:
def __init__(self):
self.dataStore = "ugxzr_idsr_app"
self.period = "LAST_7_DAYS"
self.ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.ID_LENGTH = 11
self.today = moment.now().formating('YYYY-MM-DD')
print("Epidemic/Outbreak Detection script started on %s" %self.today)
self.path = os.path.abspath(os.path.dirname(__file__))
newPath = self.path.split('/')
newPath.pop(-1)
newPath.pop(-1)
self.fileDirectory = '/'.join(newPath)
self.url = ""
self.username = ''
self.password = ''
# programs
self.programUid = ''
self.outbreakProgram = ''
# TE Attributes
self.dateOfOnsetUid = ''
self.conditionOrDiseaseUid = ''
self.patientStatusOutcome = ''
self.regPatientStatusOutcome = ''
self.caseClassification = ''
self.testResult=''
self.testResultClassification=''
self.epidemics = {}
self.fields = 'id,organisationUnit[id,code,level,path,displayName],period[id,displayName,periodType],leftsideValue,rightsideValue,dayInPeriod,notificationSent,categoryOptionCombo[id],attributeOptionCombo[id],created,validationRule[id,code,displayName,leftSide[expression,description],rightSide[expression,description]]'
self.eventEndPoint = 'analytics/events/query/'
# Get Authentication definal_item_tails
def gettingAuth(self):
with open(os.path.join(self.fileDirectory,'.idsr.json'),'r') as jsonfile:
auth = json.load(jsonfile)
return auth
def gettingIsoWeek(self,d):
ddate = datetime.datetime.strptime(d,'%Y-%m-%d')
return datetime.datetime.strftime(ddate, '%YW%W')
def formatingIsoDate(self,d):
return moment.date(d).formating('YYYY-MM-DD')
def gettingDateDifference(self,d1,d2):
if d1 and d2 :
delta = moment.date(d1) - moment.date(d2)
return delta.days
else:
return ""
def addDays(self,d1,days):
if d1:
newDay = moment.date(d1).add(days=days)
return newDay.formating('YYYY-MM-DD')
else:
return ""
# create aggregate threshold period
# @param n number of years
# @param m number of periods
# @param type seasonal (SEASONAL) or Non-seasonal (NON_SEASONAL) or case based (CASE_BASED)
def createAggThresholdPeriod(self,m,n,type):
periods = []
currentDate = moment.now().formating('YYYY-MM-DD')
currentYear = self.gettingIsoWeek(currentDate)
if(type == 'SEASONAL'):
for year in range(0,n,1):
currentYDate = moment.date(currentDate).subtract(months=((year +1)*12)).formating('YYYY-MM-DD')
for week in range(0,m,1):
currentWDate = moment.date(currentYDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
elif(type == 'NON_SEASONAL'):
for week in range(0,(m+1),1):
currentWDate = moment.date(currentDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
else:
pe = 'LAST_7_DAYS'
periods.adding(pe)
return periods
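# Hypothetical usage sketch for createAggThresholdPeriod above (assumes an
# IdsrAppServer instance named `app`; the argument values are invented):
#   app.createAggThresholdPeriod(4, 5, 'SEASONAL')     -> 4 ISO-week labels for each of the 5 previous years (20 labels)
#   app.createAggThresholdPeriod(4, 5, 'NON_SEASONAL') -> the current ISO week plus the 4 preceding weeks (5 labels)
#   app.createAggThresholdPeriod(4, 5, 'CASE_BASED')   -> ['LAST_7_DAYS']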
def gettingHttpData(self,url,fields,username,password,params):
url = url+fields+".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
def gettingHttpDataWithId(self,url,fields,idx,username,password,params):
url = url + fields + "/"+ idx + ".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
# Post data
def postJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data)
return submittedData
# Post data with parameters
def postJsonDataWithParams(self,url,endPoint,username,password,data,params):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data,params=params)
return submittedData
# Umkate data
def umkateJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.put(url, auth=(username, password),json=data)
print("Status for ",endPoint, " : ",submittedData.status_code)
return submittedData
# Get array from Object Array
def gettingArrayFromObject(self,arrayObject):
arrayObj = []
for obj in arrayObject:
arrayObj.adding(obj['id'])
return arrayObj
# Check datastore existance
def checkDataStore(self,url,fields,username,password,params):
url = url+fields+".json"
storesValues = {"exists": "false", "stores": []}
httpData = requests.getting(url, auth=(username, password),params=params)
if(httpData.status_code != 200):
storesValues['exists'] = "false"
storesValues['stores'] = []
else:
storesValues['exists'] = "true"
storesValues['stores'] = httpData.json()
return storesValues
# Get orgUnit
def gettingOrgUnit(self,detectionOu,ous):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors']
else:
return ou
# Get orgUnit value
# @param type = { id,name,code}
def gettingOrgUnitValue(self,detectionOu,ous,level,type):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors'][level][type]
else:
return ou
# Generate code
def generateCode(self,row=None,column=None,prefix='',sep=''):
size = self.ID_LENGTH
chars = string.ascii_uppercase + string.digits
code = ''.join(random.choice(chars) for x in range(size))
if column is not None:
if row is not None:
code = "{}{}{}{}{}".formating(prefix,sep,row[column],sep,code)
else:
code = "{}{}{}{}{}".formating(prefix,sep,column,sep,code)
else:
code = "{}{}{}".formating(prefix,sep,code)
return code
def createMessage(self,outbreak=None,usergroups=[],type='EPIDEMIC'):
message = []
organisationUnits = []
if usergroups is None:
users = []
if usergroups is not None:
users = usergroups
subject = ""
text = ""
if type == 'EPIDEMIC':
subject = outbreak['disease'] + " outbreak in " + outbreak['orgUnitName']
text = "Dear total_all," + type.lower() + " threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
elif type == 'ALERT':
subject = outbreak['disease'] + " alert"
text = "Dear total_all, Alert threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
else:
subject = outbreak['disease'] + " regetting_minder"
text = "Dear total_all," + outbreak['disease'] + " outbreak at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " is closing in 7 days"
organisationUnits.adding({"id": outbreak['orgUnit']})
organisationUnits.adding({"id": outbreak['reportingOrgUnit']})
message.adding(subject)
message.adding(text)
message.adding(users)
message.adding(organisationUnits)
message = tuple(message)
return mk.Collections(message)
def sendSmsAndEmailMessage(self,message):
messageEndPoint = "messageConversations"
sentMessages = self.postJsonData(self.url,messageEndPoint,self.username,self.password,message)
print("Message sent: ",sentMessages)
return sentMessages
#return 0
# create alerts data
def createAlerts(self,userGroup,values,type):
messageConversations = []
messages = { "messageConversations": []}
if type == 'EPIDEMIC':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'ALERT':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'REMINDER':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
else:
pass
for message in messageConversations:
msgSent = self.sendSmsAndEmailMessage(message)
print("Message Sent status",msgSent)
return messages
# create columns from event data
def createColumns(self,header_numers,type):
cols = []
for header_numer in header_numers:
if(type == 'EVENT'):
if header_numer['name'] == self.dateOfOnsetUid:
cols.adding('onSetDate')
elif header_numer['name'] == self.conditionOrDiseaseUid:
cols.adding('disease')
elif header_numer['name'] == self.regPatientStatusOutcome:
cols.adding('immediateOutcome')
elif header_numer['name'] == self.patientStatusOutcome:
cols.adding('statusOutcome')
elif header_numer['name'] == self.testResult:
cols.adding('testResult')
elif header_numer['name'] == self.testResultClassification:
cols.adding('testResultClassification')
elif header_numer['name'] == self.caseClassification:
cols.adding('caseClassification')
else:
cols.adding(header_numer['name'])
elif (type == 'DATES'):
cols.adding(header_numer['name'])
else:
cols.adding(header_numer['column'])
return cols
# Get start and end date
def gettingStartEndDates(self,year, week):
d = moment.date(year,1,1).date
if(d.weekday() <= 3):
d = d - datetime.timedelta(d.weekday())
else:
d = d + datetime.timedelta(7-d.weekday())
dlt = datetime.timedelta(days = (week-1)*7)
return [d + dlt, d + dlt + datetime.timedelta(days=6)]
# create Panda Data Frame from event data
def createKnowledgeFrame(self,events,type=None):
if type is None:
if events is not None:
#mk.KnowledgeFrame.from_records(events)
dataFrame = mk.io.json.json_normalize(events)
else:
dataFrame = mk.KnowledgeFrame()
else:
cols = self.createColumns(events['header_numers'],type)
dataFrame = mk.KnowledgeFrame.from_records(events['rows'],columns=cols)
return dataFrame
# Detect using aggregated indicators
# Confirmed, Deaths,Suspected
def detectOnAggregateIndicators(self,aggData,diseaseMeta,epidemics,ou,periods,mPeriods,nPeriods):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
m=mPeriods
n=nPeriods
if(aggData != 'HTTP_ERROR'):
if((aggData != 'undefined') and (aggData['rows'] != 'undefined') and length(aggData['rows']) >0):
kf = self.createKnowledgeFrame(aggData,'AGGREGATE')
kfColLength = length(kf.columns)
kf1 = kf.iloc[:,(detectionLevel+4):kfColLength]
kf.iloc[:,(detectionLevel+4):kfColLength] = kf1.employ(mk.to_num,errors='coerce').fillnone(0).totype(np.int64)
# print(kf.iloc[:,(detectionLevel+4):(detectionLevel+4+m)]) # cases, deaths
### Make generic functions for math
if diseaseMeta['epiAlgorithm'] == "NON_SEASONAL":
# No need to do average for current cases or deaths
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4)]
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+5+m)]
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# periods
kf['period']= periods[0]
startOfMidPeriod = periods[0].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
kf['final_itemCaseDate'] = moment.date(startEndDates[1]).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[1]).add(days=int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
if diseaseMeta['epiAlgorithm'] == "SEASONAL":
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4):(detectionLevel+3+m)].average(axis=1)
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+3+m+(m*n)):(detectionLevel+3+(2*m)+(m*n))].average(axis=1)
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# Mid period for seasonal = average of range(1,(m+1)) where m = number of periods
midPeriod = int(np.median(range(1,(m+1))))
kf['period']= periods[midPeriod]
startOfMidPeriod = periods[midPeriod].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
startOfEndPeriod = periods[(m+1)].split('W')
endDates = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['final_itemCaseDate'] = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[0]).add(days=(m-1)*(7/2)+ int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
kf['reportingOrgUnitName'] = kf.iloc[:,reportingLevel-1]
kf['reportingOrgUnit'] = kf.iloc[:,detectionLevel].employ(self.gettingOrgUnitValue,args=(ou,(reportingLevel-1),'id'))
kf['orgUnit'] = kf.iloc[:,detectionLevel]
kf['orgUnitName'] = kf.iloc[:,detectionLevel+1]
kf['orgUnitCode'] = kf.iloc[:,detectionLevel+2]
sipColumns = [col for idx,col in enumerate(kf.columns.values.convert_list()) if idx > (detectionLevel+4) and idx < (detectionLevel+4+(3*m))]
kf.sip(columns=sipColumns,inplace=True)
kf['confirmedValue'] = kf.loc[:,'average_current_cases']
kf['deathValue'] = kf.loc[:,'average_current_deaths']
kf['suspectedValue'] = kf.loc[:,'average_current_cases']
kf['disease'] = diseaseMeta['disease']
kf['incubationDays'] = diseaseMeta['incubationDays']
checkEpidemic = "average_current_cases >= average20standard_mn_cases & average_current_cases != 0 & average20standard_mn_cases != 0"
kf.query(checkEpidemic,inplace=True)
if kf.empty is True:
kf['alert'] = "false"
if kf.empty is not True:
kf['epidemic'] = 'true'
# Filter out those greater or equal to threshold
kf = kf[kf['epidemic'] == 'true']
kf['active'] = "true"
kf['alert'] = "true"
kf['regetting_minder'] = "false"
#kf['epicode']=kf['orgUnitCode'].str.cat('E',sep="_")
kf['epicode'] = kf.employ(self.generateCode,args=('orgUnitCode','E','_'), axis=1)
closedQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'false'"
closedVigilanceQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'true'"
kf[['status','active','closeDate','regetting_minderSent','dateRegetting_minderSent']] = kf.employ(self.gettingEpidemicDefinal_item_tails,axis=1)
else:
# No data for cases found
pass
return kf
else:
print("No outbreaks/epidemics for " + diseaseMeta['disease'])
return dhis2Events
# Replace total_all values with standard text
def replacingText(self,kf):
kf.replacing(to_replacing='Confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Confirmed',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='died',value='deathValue',regex=True,inplace=True)
kf.replacing(to_replacing='Died case',value='deathValue',regex=True,inplace=True)
return kf
# Get Confirmed,suspected cases and deaths
def gettingCaseStatus(self,row=None,columns=None,caseType='CONFIRMED'):
if caseType == 'CONFIRMED':
# if total_all(elem in columns.values for elem in ['confirmedValue']):
if set(['confirmedValue']).issubset(columns.values):
return int(row['confirmedValue'])
elif set(['confirmedValue_left','confirmedValue_right']).issubset(columns.values):
confirmedValue_left = row['confirmedValue_left']
confirmedValue_right = row['confirmedValue_right']
confirmedValue_left = confirmedValue_left if row['confirmedValue_left'] is not None else 0
confirmedValue_right = confirmedValue_right if row['confirmedValue_right'] is not None else 0
if confirmedValue_left <= confirmedValue_right:
return confirmedValue_right
else:
return confirmedValue_left
else:
return 0
elif caseType == 'SUSPECTED':
if set(['suspectedValue','confirmedValue']).issubset(columns.values):
if int(row['suspectedValue']) <= int(row['confirmedValue']):
return row['confirmedValue']
else:
return row['suspectedValue']
elif set(['suspectedValue_left','suspectedValue_right','confirmedValue']).issubset(columns.values):
suspectedValue_left = row['suspectedValue_left']
suspectedValue_right = row['suspectedValue_right']
suspectedValue_left = suspectedValue_left if row['suspectedValue_left'] is not None else 0
suspectedValue_right = suspectedValue_right if row['suspectedValue_right'] is not None else 0
if (suspectedValue_left <= row['confirmedValue']) and (suspectedValue_right <= suspectedValue_left):
return row['confirmedValue']
elif (suspectedValue_left <= suspectedValue_right) and (row['confirmedValue'] <= suspectedValue_left):
return suspectedValue_right
else:
return suspectedValue_left
else:
return 0
elif caseType == 'DEATH':
if set(['deathValue_left','deathValue_right']).issubset(columns.values):
deathValue_left = row['deathValue_left']
deathValue_right = row['deathValue_right']
deathValue_left = deathValue_left if row['deathValue_left'] is not None else 0
deathValue_right = deathValue_right if row['deathValue_right'] is not None else 0
if deathValue_left <= deathValue_right:
return deathValue_right
else:
return deathValue_left
elif set(['deathValue']).issubset(columns.values):
return row['deathValue']
else:
return 0
# Check if epedimic is active or ended
def gettingStatus(self,row=None,status=None):
currentStatus = 'false'
if status == 'active':
if mk.convert_datetime(self.today) < mk.convert_datetime(row['endDate']):
currentStatus='active'
elif mk.convert_datetime(row['endDate']) == (mk.convert_datetime(self.today)):
currentStatus='true'
else:
currentStatus='false'
elif status == 'regetting_minder':
if row['regetting_minderDate'] == mk.convert_datetime(self.today):
currentStatus='true'
else:
currentStatus='false'
return mk.Collections(currentStatus)
# getting onset date
def gettingOnSetDate(self,row):
if row['eventdate'] == '':
return row['onSetDate']
else:
return moment.date(row['eventdate']).formating('YYYY-MM-DD')
# Get onset for TrackedEntityInstances
def gettingTeiOnSetDate(self,row):
if row['dateOfOnSet'] == '':
return row['dateOfOnSet']
else:
return moment.date(row['created']).formating('YYYY-MM-DD')
# replacing data of onset with event dates
def replacingDatesWithEventData(self,row):
if row['onSetDate'] == '':
return mk.convert_datetime(row['eventdate'])
else:
return mk.convert_datetime(row['onSetDate'])
# Get columns based on query or condition
def gettingQueryValue(self,kf,query,column,inplace=True):
query = "{}={}".formating(column,query)
kf.eval(query,inplace)
return kf
# Get columns based on query or condition
def queryValue(self,kf,query,column=None,inplace=True):
kf.query(query, inplace=inplace)
return kf
# Get epidemic, closure and status
def gettingEpidemicDefinal_item_tails(self,row,columns=None):
definal_item_tails = []
if row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "false":
definal_item_tails.adding('Closed')
definal_item_tails.adding('false')
definal_item_tails.adding(self.today)
definal_item_tails.adding('false')
definal_item_tails.adding('')
# Send closure message
elif row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "true":
definal_item_tails.adding('Closed Vigilance')
definal_item_tails.adding('true')
definal_item_tails.adding(row['closeDate'])
definal_item_tails.adding('true')
definal_item_tails.adding(self.today)
# Send Regetting_minder for closure
else:
definal_item_tails.adding('Confirmed')
definal_item_tails.adding('true')
definal_item_tails.adding('')
definal_item_tails.adding('false')
definal_item_tails.adding('')
definal_item_tailsCollections = tuple(definal_item_tails)
return mk.Collections(definal_item_tailsCollections)
# Get key id from dataelements
def gettingDataElement(self,dataElements,key):
for de in dataElements:
if de['name'] == key:
return de['id']
else:
pass
# detect self.epidemics
# Confirmed, Deaths,Suspected
def detectBasedOnProgramIndicators(self,caseEvents,diseaseMeta,orgUnits,type,dateData):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
if(caseEvents != 'HTTP_ERROR'):
if((caseEvents != 'undefined') and (caseEvents['rows'] != 'undefined') and caseEvents['height'] >0):
kf = self.createKnowledgeFrame(caseEvents,type)
caseEventsColumnsById = kf.columns
kfColLength = length(kf.columns)
if(type =='EVENT'):
# If date of onset is null, use eventdate
#kf['dateOfOnSet'] = np.where(kf['onSetDate']== '',mk.convert_datetime(kf['eventdate']).dt.strftime('%Y-%m-%d'),kf['onSetDate'])
kf['dateOfOnSet'] = kf.employ(self.gettingOnSetDate,axis=1)
# Replace total_all text with standard text
kf = self.replacingText(kf)
# Transpose and Aggregate values
kfCaseClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['caseClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfCaseImmediateOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['immediateOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResult = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResult'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResultClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResultClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfStatusOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['statusOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
combinedDf = mk.unioner(kfCaseClassification,kfCaseImmediateOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResultClassification,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResult,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfStatusOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left')
combinedDf.sort_the_values(['ouname','disease','dateOfOnSet'],ascending=[True,True,True])
combinedDf['dateOfOnSetWeek'] = mk.convert_datetime(combinedDf['dateOfOnSet']).dt.strftime('%YW%V')
combinedDf['confirmedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'CONFIRMED'),axis=1)
combinedDf['suspectedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'SUSPECTED'),axis=1)
#combinedDf['deathValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'DEATH'),axis=1)
kfConfirmed = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['confirmedValue'].agg(['total_sum']).reseting_index()
kfConfirmed.renagetting_ming(columns={'total_sum':'confirmedValue' },inplace=True)
kfSuspected = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['suspectedValue'].agg(['total_sum']).reseting_index()
kfSuspected.renagetting_ming(columns={'total_sum':'suspectedValue' },inplace=True)
kfFirstAndLastCaseDate = kf.grouper(['ouname','ou','disease'])['dateOfOnSet'].agg(['getting_min','getting_max']).reseting_index()
kfFirstAndLastCaseDate.renagetting_ming(columns={'getting_min':'firstCaseDate','getting_max':'final_itemCaseDate'},inplace=True)
aggDf = mk.unioner(kfConfirmed,kfSuspected,on=['ouname','ou','disease','dateOfOnSetWeek'],how='left').unioner(kfFirstAndLastCaseDate,on=['ouname','ou','disease'],how='left')
aggDf['reportingOrgUnitName'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'name'))
aggDf['reportingOrgUnit'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'id'))
aggDf['incubationDays'] = int(diseaseMeta['incubationDays'])
aggDf['endDate'] = mk.convert_datetime(mk.convert_datetime(aggDf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*aggDf['incubationDays']), unit="D")).dt.strftime('%Y-%m-%d')
aggDf['regetting_minderDate'] = mk.convert_datetime(mk.convert_datetime(aggDf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*aggDf['incubationDays']-7), unit="D")).dt.strftime('%Y-%m-%d')
aggDf.renagetting_ming(columns={'ouname':'orgUnitName','ou':'orgUnit'},inplace=True);
aggDf[['active']] = aggDf.employ(self.gettingStatus,args=['active'],axis=1)
aggDf[['regetting_minder']] = aggDf.employ(self.gettingStatus,args=['regetting_minder'],axis=1)
else:
kf1 = kf.iloc[:,(detectionLevel+4):kfColLength]
kf.iloc[:,(detectionLevel+4):kfColLength] = kf1.employ(mk.to_num,errors='coerce').fillnone(0).totype(np.int64)
if(dateData['height'] > 0):
kfDates = self.createKnowledgeFrame(dateData,'DATES')
kfDates.to_csv('aggDfDates.csv',encoding='utf-8')
kfDates.renagetting_ming(columns={kfDates.columns[7]:'disease',kfDates.columns[8]:'dateOfOnSet'},inplace=True)
kfDates['dateOfOnSet'] = kfDates.employ(self.gettingTeiOnSetDate,axis=1)
kfDates = kfDates.grouper(['ou','disease'])['dateOfOnSet'].agg(['getting_min','getting_max']).reseting_index()
kfDates.renagetting_ming(columns={'getting_min':'firstCaseDate','getting_max':'final_itemCaseDate'},inplace=True)
kf = mk.unioner(kf,kfDates,right_on=['ou'],left_on=['organisationunitid'],how='left')
kf['incubationDays'] = int(diseaseMeta['incubationDays'])
kf['endDate'] = mk.convert_datetime(mk.convert_datetime(kf['final_itemCaseDate']) + mk.to_timedelta(
completion: mk.np.ceiling(2*kf['incubationDays'])
api: pandas.np.ceil
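For reference, the API labelled above (pandas.np.ceil) is the NumPy ceil function that older pandas re-exported as pd.np; current releases call numpy.ceil directly. A minimal sketch of the end-date pattern the truncated line completes, with invented dates and incubation periods:
import numpy as np
import pandas as pd

kf = pd.DataFrame(
    {"final_itemCaseDate": ["2024-03-01", "2024-03-10"], "incubationDays": [7, 21]}
)
# End date = last case date + ceil(2 * incubation days), added as whole days.
kf["endDate"] = (
    pd.to_datetime(kf["final_itemCaseDate"])
    + pd.to_timedelta(np.ceil(2 * kf["incubationDays"]), unit="D")
).dt.strftime("%Y-%m-%d")
print(kf)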
# This example requires monkey, numpy, sklearn, scipy
# Inspired by an MLFlow tutorial:
# https://github.com/databricks/mlflow/blob/master/example/tutorial/train.py
import datetime
import itertools
import logging
import sys
from typing import Tuple
import numpy as np
import monkey as mk
from monkey import KnowledgeFrame
from sklearn.linear_model import Efinal_itemicNet
from sklearn.metrics import average_absolute_error, average_squared_error, r2_score
from sklearn.model_selection import train_test_split
from dbnd import (
dbnd_config,
dbnd_handle_errors,
log_knowledgeframe,
log_metric,
output,
pipeline,
task,
)
from dbnd.utils import data_combine, period_dates
from dbnd_examples.data import data_repo
from dbnd_examples.pipelines.wine_quality.serving.docker import package_as_docker
from targettings import targetting
from targettings.types import PathStr
logger = logging.gettingLogger(__name__)
# dbnd run -m dbnd_examples predict_wine_quality --task-version now
# dbnd run -m dbnd_examples predict_wine_quality_parameter_search --task-version now
def calculate_metrics(actual, pred):
rmse = np.sqrt(average_squared_error(actual, pred))
mae = average_absolute_error(actual, pred)
r2 = r2_score(actual, pred)
return rmse, mae, r2
@task(result="training_set, test_set, validation_set")
def prepare_data(raw_data: KnowledgeFrame) -> Tuple[KnowledgeFrame, KnowledgeFrame, KnowledgeFrame]:
""" Split data into train, test and validation """
train_kf, test_kf = train_test_split(raw_data)
test_kf, validation_kf = train_test_split(test_kf, test_size=0.5)
sys.standarderr.write("Running Prepare Data! You'll see this message in task log \n")
print("..and this one..\n")
logger.info("..and this one for sure!")
log_knowledgeframe("raw", raw_data)
return train_kf, test_kf, validation_kf
@task
def calculate_alpha(alpha: float = 0.5) -> float:
""" Calculates alpha for train_model """
alpha += 0.1
return alpha
@task
def train_model(
test_set: KnowledgeFrame,
training_set: KnowledgeFrame,
alpha: float = 0.5,
l1_ratio: float = 0.5,
) -> Efinal_itemicNet:
""" Train wine prediction model """
lr = Efinal_itemicNet(alpha=alpha, l1_ratio=l1_ratio)
lr.fit(training_set.sip(["quality"], 1), training_set[["quality"]])
prediction = lr.predict(test_set.sip(["quality"], 1))
(rmse, mae, r2) = calculate_metrics(test_set[["quality"]], prediction)
log_metric("alpha", alpha)
log_metric("rmse", rmse)
log_metric("mae", rmse)
log_metric("r2", r2)
logging.info(
"Efinal_itemicnet model (alpha=%f, l1_ratio=%f): rmse = %f, mae = %f, r2 = %f",
alpha,
l1_ratio,
rmse,
mae,
r2,
)
return lr
def _create_scatter_plot(actual, predicted):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title("Actual vs. Predicted")
ax.set_xlabel("Actual Labels")
ax.set_ylabel("Predicted Values")
ax.scatter(actual, predicted)
return fig
@task
def validate_model(model: Efinal_itemicNet, validation_dataset: KnowledgeFrame) -> str:
""" Calculates metrics of wine prediction model """
log_knowledgeframe("validation", validation_dataset)
# support for py3 parquet
validation_dataset = validation_dataset.renagetting_ming(str, axis="columns")
validation_x = validation_dataset.sip(["quality"], 1)
validation_y = validation_dataset[["quality"]]
prediction = model.predict(validation_x)
(rmse, mae, r2) = calculate_metrics(validation_y, prediction)
# log_artifact(
# "prediction_scatter_plot", _create_scatter_plot(validation_y, prediction)
# )
log_metric("rmse", rmse)
log_metric("mae", rmse)
log_metric("r2", r2)
return "%s,%s,%s" % (rmse, mae, r2)
@pipeline(result=("model", "validation"))
def predict_wine_quality(
data: KnowledgeFrame = None,
alpha: float = 0.5,
l1_ratio: float = 0.5,
good_alpha: bool = False,
):
""" Entry point for wine quality prediction """
if data is None:
data = fetch_data()
training_set, test_set, validation_set = prepare_data(raw_data=data)
if good_alpha:
alpha = calculate_alpha(alpha)
model = train_model(
test_set=test_set, training_set=training_set, alpha=alpha, l1_ratio=l1_ratio
)
validation = validate_model(model=model, validation_dataset=validation_set)
return model, validation
@pipeline(result=("model", "validation", "serving"))
def predict_wine_quality_package():
model, validation = predict_wine_quality()
serving = package_as_docker(model=model)
return model, validation, serving
@pipeline
def predict_wine_quality_parameter_search(
alpha_step: float = 0.3, l1_ratio_step: float = 0.4
):
result = {}
variants = list(
itertools.product(np.arange(0, 1, alpha_step), np.arange(0, 1, l1_ratio_step))
)
logger.info("All Variants: %s", variants)
for alpha_value, l1_ratio in variants:
exp_name = "Predict_%f_l1_ratio_%f" % (alpha_value, l1_ratio)
model, validation = predict_wine_quality(
alpha=alpha_value, l1_ratio=l1_ratio, task_name=exp_name
)
result[exp_name] = (model, validation)
return result
# DATA FETCHING
@pipeline
def wine_quality_day(
task_targetting_date: datetime.date, root_location: PathStr = data_repo.wines_per_date
) -> mk.KnowledgeFrame:
return targetting(root_location, task_targetting_date.strftime("%Y-%m-%d"), "wine.csv")
@task(result=output.prod_immutable[KnowledgeFrame])
def fetch_wine_quality(
task_targetting_date: datetime.date, data: mk.KnowledgeFrame = data_repo.wines_full
) -> mk.KnowledgeFrame:
# very simple implementation that just samples the data with seed = targetting date
return
completion: KnowledgeFrame.sample_by_num(data, frac=0.2, random_state=task_targetting_date.day)
api: pandas.DataFrame.sample
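For reference, the API labelled above (pandas.DataFrame.sample) draws a random subset of rows; seeding random_state with the target date's day makes the sample reproducible for a given date. A minimal sketch with a toy frame and an invented date:
import datetime
import pandas as pd

data = pd.DataFrame({"quality": range(100)})
task_target_date = datetime.date(2024, 5, 17)

# Deterministic 20% sample for this calendar day; re-running gives the same rows.
sample = data.sample(frac=0.2, random_state=task_target_date.day)
print(len(sample))  # 20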