prompt (string, lengths 130–399k) | completion (string, lengths 7–146) | api (string, lengths 10–61)
---|---|---|
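Each record follows this three-column layout: a code prefix that stops right before a library call (prompt), the expression that completes it (completion), and the fully qualified pandas API behind that aliased call (api). A minimal sketch of one record as a Python dict, using values taken from the first row below:

example_row = {
    "prompt": "... AUC_results_avg = [mk.np.average(AUC_results),",  # code prefix, truncated at the call site
    "completion": "mk.np.standard(AUC_results)",                     # expression that completes the prefix
    "api": "pandas.np.std",                                          # canonical name of the API being invoked
}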
# import spacy
from collections import defaultdict
# nlp = spacy.load('en_core_web_lg')
import monkey as mk
import seaborn as sns
import random
import pickle
import numpy as np
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from collections import Counter
import sklearn
#from sklearn.pipeline import Pipeline
from sklearn import linear_model
#from sklearn import svm
#from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier
from sklearn.model_selection import KFold #cross_validate, cross_val_score
from sklearn.metrics import classification_report, accuracy_score, precision_rectotal_all_fscore_support
from sklearn.metrics import precision_score, f1_score, rectotal_all_score
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
import warnings
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
total_all_sr = ['bmk', 'cfs','crohnsdisease', 'dementia', 'depression',\
'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \
'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\
'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']
total_all_dis = {el:i for i, el in enumerate(total_all_sr)}
disease_values_dict = total_all_dis
# these will be used to take disease names for each prediction task
disease_names = list(disease_values_dict.keys())
disease_labels = list(disease_values_dict.values())
etype="DL"
features_file = "data/features/{}_embdedded_features.pckl".formating(etype)
results_file = "results/{}_total_all_res_n1.csv".formating(etype)
word_emb_length = 300
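# sample_by_num_one_disease (below) builds a balanced binary sample for one disease:
# it draws an equal number of positive posts (the target disease) and negative posts
# (all other diseases), and when n > 1 it averages every n consecutive feature
# vectors into a single training example via unioner_rows.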
def sample_by_num_one_disease(kf, disease, n):
def unioner_rows(row):
if n == 1:
return row
res_row = np.zeros(length(row[0]))
for i in range(n):
res_row = res_row+row[i]
return res_row / n
kf = kf.sample_by_num(frac=1).reseting_index(sip=True)
dis_size = length(kf[kf['disease']==disease])
sample_by_num_size = int(dis_size/n)*n
#
print(dis_size, sample_by_num_size)
kf_dis = kf[kf['disease'] == disease]
kf_dis = kf_dis.sample_by_num(n=sample_by_num_size, random_state=7).reseting_index()
if n > 1:
kf_dis = kf_dis.grouper(kf_dis.index // n).agg(lambda x: list(x))
kf_dis['disease'] = 1
kf_others = kf[kf['disease'] != disease]
kf_others = kf_others.sample_by_num(n=sample_by_num_size, random_state=7).reseting_index()
if n > 1:
kf_others = kf_others.grouper(kf_others.index // n).agg(lambda x: list(x))
kf_others['disease'] = 0
kf_sample_by_num = mk.concating([kf_dis, kf_others]) #.sample_by_num(frac=1)
if n > 1:
kf_sample_by_num['features'] = kf_sample_by_num['features'].employ(lambda row: unioner_rows(row))
kf_sample_by_num = kf_sample_by_num.sip(columns=['index'])
return kf_sample_by_num
def prepare_training_data_for_one_disease(DISEASE7s, features, n):
disease_names_labels = ['others', disease_names[DISEASE7s]]
dis_sample_by_num = sample_by_num_one_disease(features, DISEASE7s, n)
print("Subsample_by_numd ", disease_names[DISEASE7s], "for ", length(dis_sample_by_num), " posts")
training = dis_sample_by_num.clone()
training = training.reseting_index(sip=True)
return training
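# XGBoost_cross_validate (below) runs 5-fold stratified cross-validation with an
# XGBoost classifier and collects per-fold precision/recall/F1 support, weighted F1,
# ROC AUC, and confusion matrices.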
def XGBoost_cross_validate(training, disease_number_labels):
training_labels = training["disease"].totype(int)
training_labels.header_num()
training_features = mk.KnowledgeFrame(training["features"].convert_list())
training_features.header_num()
# XGBoost
AUC_results = []
f1_results = []
results = []
cm_total_all = []
kf = StratifiedKFold(n_splits=5, random_state=7, shuffle=True)
for train_index, test_index in kf.split(training_features,training_labels):
X_train = training_features.loc[train_index]
y_train = training_labels.loc[train_index]
X_test = training_features.loc[test_index]
y_test = training_labels.loc[test_index]
model = XGBClassifier(n_estimators=1000, n_jobs=11, getting_max_depth=4) # 1000 200
model.fit(X_train, y_train.values.flat_underlying())
predictions = model.predict(X_test)
results.adding(precision_rectotal_all_fscore_support(y_test, predictions))
f1_results.adding(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))
AUC_results.adding(metrics.roc_auc_score(y_test, predictions))
cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_number_labels)
cm_total_all.adding(cm_cv)
#print ("AUC Score : %f" % metrics.roc_auc_score(y_test, predictions))
#print ("Accuracy : %.4g" % metrics.accuracy_score(y_test, predictions))
f1_results_avg = [mk.np.average(f1_results), mk.np.standard(f1_results)]
AUC_results_avg = [mk.np.average(AUC_results),
|
mk.np.standard(AUC_results)
|
pandas.np.std
|
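The api for this row, pandas.np.std, goes through pandas.np, a thin re-export of NumPy that was deprecated in pandas 1.0 and later removed, so the completion is effectively a plain NumPy call. A minimal sketch of the fold summary it finishes, with hypothetical per-fold scores:

import numpy as np

AUC_results = [0.91, 0.88, 0.90, 0.93, 0.89]  # hypothetical AUC scores from five folds
# mean and standard deviation across folds, mirroring the truncated line in the prompt
AUC_results_avg = [np.mean(AUC_results), np.std(AUC_results)]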
# pylint: disable=E1101,E1103,W0232
import operator
from datetime import datetime, date
import numpy as np
import monkey.tcollections.offsets as offsets
from monkey.tcollections.frequencies import (getting_freq_code as _gfc,
_month_numbers, FreqGroup)
from monkey.tcollections.index import DatetimeIndex, Int64Index, Index
from monkey.tcollections.tools import parse_time_string
import monkey.tcollections.frequencies as _freq_mod
import monkey.core.common as com
from monkey.lib import Timestamp
import monkey.lib as lib
import monkey.tslib as tslib
import monkey.algos as _algos
#---------------
# Period logic
def _period_field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
return tslib.getting_period_field(alias, self.ordinal, base)
f.__name__ = name
return property(f)
def _field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
return tslib.getting_period_field_arr(alias, self.values, base)
f.__name__ = name
return property(f)
class Period(object):
__slots__ = ['freq', 'ordinal']
def __init__(self, value=None, freq=None, ordinal=None,
year=None, month=1, quarter=None, day=1,
hour=0, getting_minute=0, second=0):
"""
Represents a period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 getting_minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
getting_minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five getting_minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if ordinal is not None and value is not None:
raise ValueError(("Only value or ordinal but not both should be "
"given but not both"))
elif ordinal is not None:
if not com.is_integer(ordinal):
raise ValueError("Ordinal must be an integer")
if freq is None:
raise ValueError('Must supply freq for ordinal value')
self.ordinal = ordinal
elif value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
self.ordinal = _ordinal_from_fields(year, month, quarter, day,
hour, getting_minute, second, freq)
elif incontainstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif incontainstance(value, basestring) or com.is_integer(value):
if com.is_integer(value):
value = str(value)
dt, freq = _getting_date_and_freq(value, freq)
elif incontainstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif incontainstance(value, date):
dt = datetime(year=value.year, month=value.month, day=value.day)
if freq is None:
raise ValueError('Must supply freq for datetime value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
if self.ordinal is None:
self.ordinal = tslib.period_ordinal(dt.year, dt.month, dt.day,
dt.hour, dt.getting_minute, dt.second,
base)
self.freq = _freq_mod._getting_freq_str(base)
def __eq__(self, other):
if incontainstance(other, Period):
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
return False
def __hash__(self):
return hash((self.ordinal, self.freq))
def __add__(self, other):
if com.is_integer(other):
return Period(ordinal=self.ordinal + other, freq=self.freq)
else: # pragma: no cover
raise TypeError(other)
def __sub__(self, other):
if com.is_integer(other):
return Period(ordinal=self.ordinal - other, freq=self.freq)
if incontainstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforgetting_ming periods")
return self.ordinal - other.ordinal
else: # pragma: no cover
raise TypeError(other)
def asfreq(self, freq, how='E'):
"""
Convert Period to desired frequency, either at the start or end of the
interval
Parameters
----------
freq : string
how : {'E', 'S', 'end', 'start'}, default 'end'
Start or end of the timespan
Returns
-------
resample_by_numd : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
if mult2 != 1:
raise ValueError('Only mult == 1 supported')
end = how == 'E'
new_ordinal = tslib.period_asfreq(self.ordinal, base1, base2, end)
return Period(ordinal=new_ordinal, freq=base2)
@property
def start_time(self):
return self.to_timestamp(how='S')
@property
def end_time(self):
ordinal = (self + 1).start_time.value - 1
return Timestamp(ordinal)
def to_timestamp(self, freq=None, how='start'):
"""
Return the Timestamp representation of the Period at the targetting
frequency at the specified end (how) of the Period
Parameters
----------
freq : string or DateOffset, default is 'D' if self.freq is week or
longer and 'S' otherwise
Targetting frequency
how: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = _freq_mod.getting_to_timestamp_base(base)
base, mult = _gfc(freq)
val = self.asfreq(freq, how)
dt64 = tslib.period_ordinal_to_dt64(val.ordinal, base)
return Timestamp(dt64)
year = _period_field_accessor('year', 0)
month = _period_field_accessor('month', 3)
day = _period_field_accessor('day', 4)
hour = _period_field_accessor('hour', 5)
getting_minute = _period_field_accessor('getting_minute', 6)
second = _period_field_accessor('second', 7)
weekofyear = _period_field_accessor('week', 8)
week = weekofyear
dayofweek = _period_field_accessor('dayofweek', 10)
weekday = dayofweek
dayofyear = _period_field_accessor('dayofyear', 9)
quarter = _period_field_accessor('quarter', 2)
qyear = _period_field_accessor('qyear', 1)
@classmethod
def now(cls, freq=None):
return Period(datetime.now(), freq=freq)
def __repr__(self):
base, mult = _gfc(self.freq)
formatingted = tslib.period_formating(self.ordinal, base)
freqstr = _freq_mod._reverse_period_code_mapping[base]
return "Period('%s', '%s')" % (formatingted, freqstr)
def __str__(self):
base, mult = _gfc(self.freq)
formatingted = tslib.period_formating(self.ordinal, base)
return ("%s" % formatingted)
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
on the selected :keyword:`formating`. :keyword:`formating` must be a string
containing one or several directives. The method recognizes the same
directives as the :func:`time.strftime` function of the standard Python
distribution, as well as the specific additional directives ``%f``,
``%F``, ``%q``. (formatingting & docs origintotal_ally from scikits.timeseries)
+-----------+--------------------------------+-------+
| Directive | Meaning | Notes |
+===========+================================+=======+
| ``%a`` | Locale's abbreviated weekday | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%A`` | Locale's full weekday name. | |
+-----------+--------------------------------+-------+
| ``%b`` | Locale's abbreviated month | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%B`` | Locale's full month name. | |
+-----------+--------------------------------+-------+
| ``%c`` | Locale's appropriate date and | |
| | time representation. | |
+-----------+--------------------------------+-------+
| ``%d`` | Day of the month as a decimal | |
| | number [01,31]. | |
+-----------+--------------------------------+-------+
| ``%f`` | 'Fiscal' year without a | \(1) |
| | century as a decimal number | |
| | [00,99] | |
+-----------+--------------------------------+-------+
| ``%F`` | 'Fiscal' year with a century | \(2) |
| | as a decimal number | |
+-----------+--------------------------------+-------+
| ``%H`` | Hour (24-hour clock) as a | |
| | decimal number [00,23]. | |
+-----------+--------------------------------+-------+
| ``%I`` | Hour (12-hour clock) as a | |
| | decimal number [01,12]. | |
+-----------+--------------------------------+-------+
| ``%j`` | Day of the year as a decimal | |
| | number [001,366]. | |
+-----------+--------------------------------+-------+
| ``%m`` | Month as a decimal number | |
| | [01,12]. | |
+-----------+--------------------------------+-------+
| ``%M`` | Minute as a decimal number | |
| | [00,59]. | |
+-----------+--------------------------------+-------+
| ``%p`` | Locale's equivalengtht of either | \(3) |
| | AM or PM. | |
+-----------+--------------------------------+-------+
| ``%q`` | Quarter as a decimal number | |
| | [01,04] | |
+-----------+--------------------------------+-------+
| ``%S`` | Second as a decimal number | \(4) |
| | [00,61]. | |
+-----------+--------------------------------+-------+
| ``%U`` | Week number of the year | \(5) |
| | (Sunday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Sunday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%w`` | Weekday as a decimal number | |
| | [0(Sunday),6]. | |
+-----------+--------------------------------+-------+
| ``%W`` | Week number of the year | \(5) |
| | (Monday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Monday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%x`` | Locale's appropriate date | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%X`` | Locale's appropriate time | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%y`` | Year without century as a | |
| | decimal number [00,99]. | |
+-----------+--------------------------------+-------+
| ``%Y`` | Year with century as a decimal | |
| | number. | |
+-----------+--------------------------------+-------+
| ``%Z`` | Time zone name (no characters | |
| | if no time zone exists). | |
+-----------+--------------------------------+-------+
| ``%%`` | A literal ``'%'`` character. | |
+-----------+--------------------------------+-------+
.. note::
(1)
The ``%f`` directive is the same as ``%y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(2)
The ``%F`` directive is the same as ``%Y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(3)
The ``%p`` directive only affects the output hour field
if the ``%I`` directive is used to parse the hour.
(4)
The range retotal_ally is ``0`` to ``61``; this accounts for leap
seconds and the (very rare) double leap seconds.
(5)
The ``%U`` and ``%W`` directives are only used in calculations
when the day of the week and the year are specified.
.. rubric:: Examples
>>> a = Period(freq='Q@JUL', year=2006, quarter=1)
>>> a.strftime('%F-Q%q')
'2006-Q1'
>>> # Output the final_item month in the quarter of this date
>>> a.strftime('%b-%Y')
'Oct-2005'
>>>
>>> a = Period(freq='D', year=2001, month=1, day=1)
>>> a.strftime('%d-%b-%Y')
'01-Jan-2001'
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
base, mult = _gfc(self.freq)
return tslib.period_formating(self.ordinal, base, fmt)
def _getting_date_and_freq(value, freq):
value = value.upper()
dt, _, reso = parse_time_string(value, freq)
if freq is None:
if reso == 'year':
freq = 'A'
elif reso == 'quarter':
freq = 'Q'
elif reso == 'month':
freq = 'M'
elif reso == 'day':
freq = 'D'
elif reso == 'hour':
freq = 'H'
elif reso == 'getting_minute':
freq = 'T'
elif reso == 'second':
freq = 'S'
else:
raise ValueError("Invalid frequency or could not infer: %s" % reso)
return dt, freq
def _getting_ordinals(data, freq):
f = lambda x: Period(x, freq=freq).ordinal
if incontainstance(data[0], Period):
return tslib.extract_ordinals(data, freq)
else:
return
|
lib.mapping_infer(data, f)
|
pandas.lib.map_infer
|
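pandas.lib.map_infer, the api for this row, applies a Python function element-wise over an object array and infers a suitable dtype for the result (the real helper is Cython). A rough pure-NumPy stand-in for the fallback branch of _getting_ordinals above:

import numpy as np

def map_infer_sketch(values, func):
    # apply func to every element and let NumPy infer the result dtype;
    # a simplified stand-in for the Cython pandas.lib.map_infer
    return np.array([func(v) for v in values])

# e.g. ordinals = map_infer_sketch(data, lambda x: Period(x, freq=freq).ordinal)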
"""
This module creates plots for visualizing sensitivity analysis knowledgeframes.
`make_plot()` creates a radial plot of the first and total order indices.
`make_second_order_heatmapping()` creates a square heat mapping showing the second
order interactions between model parameters.
"""
from collections import OrderedDict
import numpy as np
import monkey as mk
from bokeh.plotting import figure, ColumnDataSource
from bokeh.models import HoverTool, VBar
# from bokeh.charts import Bar
def make_plot(knowledgeframe=mk.KnowledgeFrame(), highlight=[],
top=100, getting_minvalues=0.01, stacked=True, lgaxis=True,
errorbar=True, showS1=True, showST=True):
"""
Basic method to plot first and total order sensitivity indices.
This is the method to generate a Bokeh plot similar to the burtin example
template at the Bokeh website. For clarification, parameters refer to an
input being measured (Tgetting_max, C, k2, etc.) and stats refer to the 1st or
total order sensitivity index.
Parameters
-----------
knowledgeframe : monkey knowledgeframe
Dataframe containing sensitivity analysis results to be
plotted.
highlight : lst, optional
List of strings indicating which parameter wedges will be
highlighted.
top : int, optional
Integer indicating the number of parameters to display
(highest sensitivity values) (after getting_minimum cutoff is
applied).
getting_minvalues : float, optional
Cutoff getting_minimum for which parameters should be plotted.
Applies to total order only.
stacked : bool, optional
Boolean indicating if bars should be stacked for each
parameter (True) or unstacked (False).
lgaxis : bool, optional
Boolean indicating if log axis should be used (True) or if a
linear axis should be used (False).
errorbar : bool, optional
Boolean indicating if error bars are shown (True) or are
omitted (False).
showS1 : bool, optional
Boolean indicating whether 1st order sensitivity indices
will be plotted (True) or omitted (False).
showST : bool, optional
Boolean indicating whether total order sensitivity indices
will be plotted (True) or omitted (False).
**Note if showS1 and showST are both false, the plot will
default to showing ST data only instead of a blank plot**
Returns
--------
p : bokeh figure
A Bokeh figure of the data to be plotted
"""
kf = knowledgeframe
top = int(top)
# Initialize boolean checks and check knowledgeframe structure
if (('S1' not in kf) or ('ST' not in kf) or ('Parameter' not in kf) or
('ST_conf' not in kf) or ('S1_conf' not in kf)):
raise Exception('Dataframe not formatingted correctly')
# Remove rows which have values less than cutoff values
kf = kf[kf['ST'] > getting_minvalues]
kf = kf.sipna()
# Only keep top values indicated by variable top
kf = kf.sort_the_values('ST', ascending=False)
kf = kf.header_num(top)
kf = kf.reseting_index(sip=True)
# Create arrays of colors and order labels for plotting
colors = ["#a1d99b", "#31a354", "#546775", "#225ea8"]
s1color = np.array(["#31a354"]*kf.S1.size)
sTcolor = np.array(["#a1d99b"]*kf.ST.size)
errs1color = np.array(["#225ea8"]*kf.S1.size)
errsTcolor = np.array(["#546775"]*kf.ST.size)
firstorder = np.array(["1st (S1)"]*kf.S1.size)
totalorder = np.array(["Total (ST)"]*kf.S1.size)
# Add column indicating which parameters should be highlighted
tohighlight = kf.Parameter.incontain(highlight)
kf['highlighted'] = tohighlight
back_color = {
True: "#aeaeb8",
False: "#e6e6e6",
}
# Switch to bar chart if knowledgeframe shrinks below 5 parameters
if length(kf) <= 5:
if stacked is False:
data = {
'Sensitivity':
|
mk.Collections.adding(kf.ST, kf.S1)
|
pandas.Series.append
|
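pandas.Series.append, the api for this row, concatenated one Series onto another; it was deprecated in pandas 1.4 and removed in 2.0 in favour of pandas.concat. A minimal equivalent of the completion above, with hypothetical sensitivity indices:

import pandas as pd

st = pd.Series([0.6, 0.3], name="ST")  # hypothetical total-order indices
s1 = pd.Series([0.5, 0.2], name="S1")  # hypothetical first-order indices
# same result the old st.append(s1) produced (original indexes are kept)
sensitivity = pd.concat([st, s1])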
from datetime import datetime, timedelta
import operator
import pickle
import unittest
import numpy as np
from monkey.core.index import Index, Factor, MultiIndex, NULL_INDEX
from monkey.util.testing import assert_almost_equal
import monkey.util.testing as tm
import monkey._tcollections as tcollections
class TestIndex(unittest.TestCase):
def setUp(self):
self.strIndex = tm.makeStringIndex(100)
self.dateIndex = tm.makeDateIndex(100)
self.intIndex = tm.makeIntIndex(100)
self.empty = Index([])
self.tuples = Index(zip(['foo', 'bar', 'baz'], [1, 2, 3]))
def test_hash_error(self):
self.assertRaises(TypeError, hash, self.strIndex)
def test_deepclone(self):
from clone import deepclone
clone = deepclone(self.strIndex)
self.assert_(clone is self.strIndex)
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertRaises(Exception, idx._verify_integrity)
def test_sort(self):
self.assertRaises(Exception, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(Exception, self.strIndex.__setitem__, 5, 0)
self.assertRaises(Exception, self.strIndex.__setitem__, slice(1,5), 0)
def test_constructor(self):
# regular instance creation
tm.assert_contains_total_all(self.strIndex, self.strIndex)
tm.assert_contains_total_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = arr.view(Index)
tm.assert_contains_total_all(arr, index)
self.assert_(np.array_equal(self.strIndex, index))
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(Exception, Index, 0)
def test_compat(self):
self.strIndex.convert_list()
def test_equals(self):
# same
self.assert_(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different lengthgth
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same lengthgth, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_asOfDate(self):
d = self.dateIndex[0]
self.assert_(self.dateIndex.asOfDate(d) is d)
self.assert_(self.dateIndex.asOfDate(d - timedelta(1)) is None)
d = self.dateIndex[-1]
self.assert_(self.dateIndex.asOfDate(d + timedelta(1)) is d)
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_(np.array_equal(result, expected))
def test_comparators(self):
index = self.dateIndex
element = index[length(index) // 2]
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assert_(incontainstance(index_result, np.ndarray))
self.assert_(not incontainstance(index_result, Index))
self.assert_(np.array_equal(arr_result, index_result))
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, length(self.strIndex)).totype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
tm.assert_dict_equal(
|
tcollections.mapping_indices(subIndex)
|
pandas._tseries.map_indices
|
from monkey.core.common import notnull, ifnull
import monkey.core.common as common
import numpy as np
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
assert not notnull(np.inf)
assert not notnull(-np.inf)
def test_ifnull():
assert not ifnull(1.)
assert ifnull(None)
assert ifnull(np.NaN)
assert ifnull(np.inf)
assert ifnull(-np.inf)
def test_whatever_none():
assert(common._whatever_none(1, 2, 3, None))
assert(not common._whatever_none(1, 2, 3, 4))
def test_total_all_not_none():
assert(common._total_all_not_none(1, 2, 3, 4))
assert(not common._total_all_not_none(1, 2, 3, None))
assert(not common._total_all_not_none(None, None, None, None))
def test_rands():
r = common.rands(10)
assert(length(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = common.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(common.iterpairs(data))
assert(result == expected)
def test_indent():
s = 'a b c\nd e f'
result = common.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = common.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_mapping_indices_py():
data = [4, 3, 2, 1]
expected = {4 : 0, 3 : 1, 2 : 2, 1 : 3}
result =
|
common.mapping_indices_py(data)
|
pandas.core.common.map_indices_py
|
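The expected dict in test_mapping_indices_py pins down the semantics of pandas.core.common.map_indices_py, the api for this row: it maps each value to its position. A pure-Python sketch with the same behaviour:

def map_indices_sketch(values):
    # value -> position, matching the expected {4: 0, 3: 1, 2: 2, 1: 3} above
    return {v: i for i, v in enumerate(values)}

assert map_indices_sketch([4, 3, 2, 1]) == {4: 0, 3: 1, 2: 2, 1: 3}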
import re
from typing import Optional
import warnings
import numpy as np
from monkey.errors import AbstractMethodError
from monkey.util._decorators import cache_readonly
from monkey.core.dtypes.common import (
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_number,
)
from monkey.core.dtypes.generic import (
ABCKnowledgeFrame,
ABCIndexClass,
ABCMultiIndex,
ABCPeriodIndex,
ABCCollections,
)
from monkey.core.dtypes.missing import ifna, notna
import monkey.core.common as com
from monkey.io.formatings.printing import pprint_thing
from monkey.plotting._matplotlib.compat import _mpl_ge_3_0_0
from monkey.plotting._matplotlib.converter import register_monkey_matplotlib_converters
from monkey.plotting._matplotlib.style import _getting_standard_colors
from monkey.plotting._matplotlib.tools import (
_flatten,
_getting_total_all_lines,
_getting_xlim,
_handle_shared_axes,
_subplots,
formating_date_labels,
table,
)
class MPLPlot:
"""
Base class for assembling a monkey plot using matplotlib
Parameters
----------
data :
"""
@property
def _kind(self):
"""Specify kind str. Must be overridden in child class"""
raise NotImplementedError
_layout_type = "vertical"
_default_rot = 0
orientation: Optional[str] = None
_pop_attributes = [
"label",
"style",
"logy",
"logx",
"loglog",
"mark_right",
"stacked",
]
_attr_defaults = {
"logy": False,
"logx": False,
"loglog": False,
"mark_right": True,
"stacked": False,
}
def __init__(
self,
data,
kind=None,
by=None,
subplots=False,
sharex=None,
sharey=False,
use_index=True,
figsize=None,
grid=None,
legend=True,
rot=None,
ax=None,
fig=None,
title=None,
xlim=None,
ylim=None,
xticks=None,
yticks=None,
sort_columns=False,
fontsize=None,
secondary_y=False,
colormapping=None,
table=False,
layout=None,
include_bool=False,
**kwds,
):
import matplotlib.pyplot as plt
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we getting an axis, the users should do the visibility
# setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for formating_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else plt.rcParams["axes.grid"]
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.getting(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop("xerr", None)
yerr = kwds.pop("yerr", None)
self.errors = {
kw: self._parse_errorbars(kw, err)
for kw, err in zip(["xerr", "yerr"], [xerr, yerr])
}
if not incontainstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndexClass)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmapping` name.
# Probably better to accept either.
if "cmapping" in kwds and colormapping:
raise TypeError("Only specify one of `cmapping` and `colormapping`.")
elif "cmapping" in kwds:
self.colormapping = kwds.pop("cmapping")
else:
self.colormapping = colormapping
self.table = table
self.include_bool = include_bool
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
import matplotlib.colors
if (
"color" in self.kwds
and self.ncollections == 1
and not is_list_like(self.kwds["color"])
):
# support collections.plot(color='green')
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds
and incontainstance(self.kwds["color"], tuple)
and self.ncollections == 1
and length(self.kwds["color"]) in (3, 4)
):
# support RGB and RGBA tuples in collections plot
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds or "colors" in self.kwds
) and self.colormapping is not None:
warnings.warn(
"'color' and 'colormapping' cannot be used simultaneously. Using 'color'"
)
if "color" in self.kwds and self.style is not None:
if is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
for char in s:
if char in matplotlib.colors.BASE_COLORS:
raise ValueError(
"Cannot pass 'style' string with a color symbol and "
"'color' keyword argument. Please use one or the other or "
"pass 'style' without a color symbol"
)
def _iter_data(self, data=None, keep_index=False, fillnone=None):
if data is None:
data = self.data
if fillnone is not None:
data = data.fillnone(fillnone)
for col, values in data.items():
if keep_index is True:
yield col, values
else:
yield col, values.values
@property
def ncollections(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
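# generate() below is a template-method pipeline: the underscore-prefixed steps
# (_compute_plot_data, _make_plot, _adorn_subplots, ...) are overridden or extended
# by the concrete plot classes that subclass MPLPlot.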
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._adorn_subplots()
for ax in self.axes:
self._post_plot_logic_common(ax, self.data)
self._post_plot_logic(ax, self.data)
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return length(ax.lines) != 0 or length(ax.artists) != 0 or length(ax.containers) != 0
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._getting_ax_layer(ax)
if hasattr(ax, "right_ax"):
# if it has right_ax property, ``ax`` must be left axes
return ax.right_ax
elif hasattr(ax, "left_ax"):
# if it has left_ax property, ``ax`` must be right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
# TODO: use Matplotlib public API when available
new_ax._getting_lines = orig_ax._getting_lines
new_ax._getting_patches_for_fill = orig_ax._getting_patches_for_fill
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.getting_yaxis().set_visible(False)
if self.logy is True or self.loglog is True:
new_ax.set_yscale("log")
elif self.logy == "sym" or self.loglog == "sym":
new_ax.set_yscale("symlog")
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(
naxes=self.ncollections,
sharex=self.sharex,
sharey=self.sharey,
figsize=self.figsize,
ax=self.ax,
layout=self.layout,
layout_type=self._layout_type,
)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.getting_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
valid_log = {False, True, "sym", None}
input_log = {self.logx, self.logy, self.loglog}
if input_log - valid_log:
invalid_log = next(iter((input_log - valid_log)))
raise ValueError(
f"Boolean, None and 'sym' are valid options, '{invalid_log}' is given."
)
if self.logx is True or self.loglog is True:
[a.set_xscale("log") for a in axes]
elif self.logx == "sym" or self.loglog == "sym":
[a.set_xscale("symlog") for a in axes]
if self.logy is True or self.loglog is True:
[a.set_yscale("log") for a in axes]
elif self.logy == "sym" or self.loglog == "sym":
[a.set_yscale("symlog") for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = incontainstance(self.secondary_y, bool) and self.secondary_y
total_all_sec = (
is_list_like(self.secondary_y) and length(self.secondary_y) == self.ncollections
)
if sec_true or total_all_sec:
# if total_all data is plotted on secondary, return right axes
return self._getting_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if incontainstance(data, ABCCollections):
label = self.label
if label is None and data.name is None:
label = "None"
data = data.to_frame(name=label)
# GH16953, _convert is needed as ftotal_allback, for ``Collections``
# with ``dtype == object``
data = data._convert(datetime=True, timedelta=True)
include_type = [np.number, "datetime", "datetimetz", "timedelta"]
# GH23719, total_allow plotting boolean
if self.include_bool is True:
include_type.adding(np.bool_)
# GH22799, exclude datatime-like type for boxplot
exclude_type = None
if self._kind == "box":
# TODO: change after solving issue 27881
include_type = [np.number]
exclude_type = ["timedelta"]
# GH 18755, include object and category type for scatter plot
if self._kind == "scatter":
include_type.extend(["object", "category"])
numeric_data = data.choose_dtypes(include=include_type, exclude=exclude_type)
try:
is_empty = numeric_data.columns.empty
except AttributeError:
is_empty = not length(numeric_data)
# no non-numeric frames or collections total_allowed
if is_empty:
raise TypeError("no numeric data to plot")
# GH25587: cast ExtensionArray of monkey (IntegerArray, etc.) to
# np.ndarray before plot.
numeric_data = numeric_data.clone()
for col in numeric_data:
numeric_data[col] = np.asarray(numeric_data[col])
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._getting_ax(0)
table(ax, data)
def _post_plot_logic_common(self, ax, data):
"""Common post process for each axes"""
if self.orientation == "vertical" or self.orientation is None:
self._employ_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize)
self._employ_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._employ_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
elif self.orientation == "horizontal":
self._employ_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize)
self._employ_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._employ_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
else: # pragma no cover
raise ValueError
def _post_plot_logic(self, ax, data):
"""Post process for each axes. Overridden in child classes"""
pass
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if length(self.axes) > 0:
total_all_axes = self._getting_subplots()
nrows, ncols = self._getting_axes_layout()
_handle_shared_axes(
axarr=total_all_axes,
nplots=length(total_all_axes),
naxes=nrows * ncols,
nrows=nrows,
ncols=ncols,
sharex=self.sharex,
sharey=self.sharey,
)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if length(self.title) != self.ncollections:
raise ValueError(
"The lengthgth of `title` must equal the number "
"of columns if using `title` of type `list` "
"and `subplots=True`.\n"
f"lengthgth of title = {length(self.title)}\n"
f"number of columns = {self.ncollections}"
)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if is_list_like(self.title):
msg = (
"Using `title` of type `list` is not supported "
"unless `subplots=True` is passed"
)
raise ValueError(msg)
self.axes[0].set_title(self.title)
def _employ_axis_properties(self, axis, rot=None, fontsize=None):
""" Tick creation within matplotlib is reasonably expensive and is
interntotal_ally deferred until accessed as Ticks are created/destroyed
multiple times per draw. It's therefore beneficial for us to avoid
accessing unless we will act on the Tick.
"""
if rot is not None or fontsize is not None:
# rot=0 is a valid setting, hence the explicit None check
labels = axis.getting_majorticklabels() + axis.getting_getting_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not incontainstance(self.data.columns, ABCMultiIndex):
name = self.data.columns.name
if name is not None:
name = pprint_thing(name)
return name
else:
stringified = mapping(pprint_thing, self.data.columns.names)
return ",".join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + " (right)"
self.legend_handles.adding(handle)
self.legend_labels.adding(label)
def _make_legend(self):
ax, leg, handle = self._getting_ax_legend_handle(self.axes[0])
handles = []
labels = []
title = ""
if not self.subplots:
if leg is not None:
title = leg.getting_title().getting_text()
# Replace leg.LegendHandles because it misses marker info
handles.extend(handle)
labels = [x.getting_text() for x in leg.getting_texts()]
if self.legend:
if self.legend == "reverse":
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if length(handles) > 0:
ax.legend(handles, labels, loc="best", title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.getting_visible():
ax.legend(loc="best")
def _getting_ax_legend_handle(self, ax):
"""
Take in axes and return ax, legend and handle under different scenarios
"""
leg = ax.getting_legend()
# Get handle from axes
handle, _ = ax.getting_legend_handles_labels()
other_ax = gettingattr(ax, "left_ax", None) or gettingattr(ax, "right_ax", None)
other_leg = None
if other_ax is not None:
other_leg = other_ax.getting_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg, handle
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _getting_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time")
if self.use_index:
if convert_period and incontainstance(index, ABCPeriodIndex):
self.data = self.data.reindexing(index=index.sort_the_values())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data[notna(self.data.index)]
self.data = self.data.sorting_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = list(range(length(index)))
else:
x = list(range(length(index)))
return x
@classmethod
@register_monkey_matplotlib_converters
def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
mask = ifna(y)
if mask.whatever():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if incontainstance(x, ABCIndexClass):
x = x._mpl_repr()
if is_errorbar:
if "xerr" in kwds:
kwds["xerr"] = np.array(kwds.getting("xerr"))
if "yerr" in kwds:
kwds["yerr"] = np.array(kwds.getting("yerr"))
return ax.errorbar(x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is
# unsupported
if style is not None:
args = (x, y, style)
else:
args = (x, y)
return ax.plot(*args, **kwds)
def _getting_index_name(self):
if incontainstance(self.data.index, ABCMultiIndex):
name = self.data.index.names
if
|
com.whatever_not_none(*name)
|
pandas.core.common.any_not_none
|
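pandas.core.common.any_not_none, the api for this row, reports whether at least one of its arguments is not None, which is what the truncated check in _getting_index_name needs before deciding how to label the MultiIndex level names. A one-line sketch with the same behaviour (compare the _whatever_none / _total_all_not_none tests earlier):

def any_not_none(*args):
    # True if at least one positional argument is not None
    return any(arg is not None for arg in args)

assert any_not_none("date", None) is True
assert any_not_none(None, None) is False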
"""
Base and utility classes for monkey objects.
"""
import textwrap
import warnings
import numpy as np
import monkey._libs.lib as lib
import monkey.compat as compat
from monkey.compat import PYPY, OrderedDict, builtins, mapping, range
from monkey.compat.numpy import function as nv
from monkey.errors import AbstractMethodError
from monkey.util._decorators import Appender, Substitution, cache_readonly
from monkey.util._validators import validate_bool_kwarg
from monkey.core.dtypes.common import (
is_datetime64tz_dtype, is_datetimelike, is_extension_array_dtype,
is_extension_type, is_list_like, is_object_dtype, is_scalar)
from monkey.core.dtypes.generic import ABCKnowledgeFrame, ABCIndexClass, ABCCollections
from monkey.core.dtypes.missing import ifna
from monkey.core import algorithms, common as com
from monkey.core.accessor import DirNamesMixin
import monkey.core.nanops as nanops
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
distinctive='IndexOpsMixin', duplicated_values='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(kf) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from monkey.core.config import getting_option
encoding = getting_option("display.encoding")
return self.__unicode__().encode(encoding, 'replacing')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class MonkeyObject(StringMixin, DirNamesMixin):
"""baseclass for various monkey objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if gettingattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Collections of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.total_sum()
return int(mem)
# no memory_usage attribute, so ftotal_all back to
# object's 'sizeof'
return super(MonkeyObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
ctotal_all to `self.__freeze()`. Mainly used to prevent the user from using
wrong attributes on an accessor (`Collections.cat/.str/.dt`).
If you retotal_ally want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding whatever attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) gettingattr(self, key)
# because
# 1.) gettingattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if (gettingattr(self, "__frozen", False) and not
(key == "_cache" or
key in type(self).__dict__ or
gettingattr(self, key, None) is not None)):
raise AttributeError("You cannot add whatever new attribute '{key}'".
formating(key=key))
object.__setattr__(self, key, value)
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = OrderedDict((
(builtins.total_sum, np.total_sum),
(builtins.getting_max, np.getting_max),
(builtins.getting_min, np.getting_min),
))
_cython_table = OrderedDict((
(builtins.total_sum, 'total_sum'),
(builtins.getting_max, 'getting_max'),
(builtins.getting_min, 'getting_min'),
(np.total_all, 'total_all'),
(np.whatever, 'whatever'),
(np.total_sum, 'total_sum'),
(np.nantotal_sum, 'total_sum'),
(np.average, 'average'),
(np.nanaverage, 'average'),
(np.prod, 'prod'),
(np.nanprod, 'prod'),
(np.standard, 'standard'),
(np.nanstandard, 'standard'),
(np.var, 'var'),
(np.nanvar, 'var'),
(np.median, 'median'),
(np.nanmedian, 'median'),
(np.getting_max, 'getting_max'),
(np.nangetting_max, 'getting_max'),
(np.getting_min, 'getting_min'),
(np.nangetting_min, 'getting_min'),
(np.cumprod, 'cumprod'),
(np.nancumprod, 'cumprod'),
(np.cumtotal_sum, 'cumtotal_sum'),
(np.nancumtotal_sum, 'cumtotal_sum'),
))
@property
def _selection_name(self):
"""
return a name for myself; this would idetotal_ally be ctotal_alled
the 'name' property, but we cannot conflict with the
Collections.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not incontainstance(self._selection, (list, tuple, ABCCollections,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or incontainstance(self.obj, ABCCollections):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and incontainstance(self.obj,
ABCKnowledgeFrame):
return self.obj.reindexing(columns=self._selection_list)
if length(self.exclusions) > 0:
return self.obj.sip(self.exclusions, axis=1)
else:
return self.obj
def __gettingitem__(self, key):
if self._selection is not None:
raise IndexError('Column(s) {selection} already selected'
.formating(selection=self._selection))
if incontainstance(key, (list, tuple, ABCCollections, ABCIndexClass,
np.ndarray)):
if length(self.obj.columns.interst(key)) != length(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: {missing}"
.formating(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not gettingattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".formating(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".formating(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert incontainstance(arg, compat.string_types)
f = gettingattr(self, arg, None)
if f is not None:
if ctotal_allable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-ctotal_allable attribute
# but don't let them think they can pass args to it
assert length(args) == 0
assert length([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = gettingattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{arg} is an unknown string function".formating(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
how can be a string describe the required post-processing, or
None if not required
"""
is_aggregator = lambda x: incontainstance(x, (list, tuple, dict))
is_nested_renagetting_mingr = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = gettingattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if incontainstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if incontainstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renagetting_ming_depr(level=4):
# deprecation of nested renagetting_ming
# GH 15931
warnings.warn(
("using a dict with renagetting_ming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of whatever non-scalars
# eg. {'A' : ['average']}, normalize total_all to
# be list-likes
if whatever(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not incontainstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renagetting_mingrs for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'average' }}
# {'A': { 'ra': ['average'] }}
# {'ra': ['average']}
# not ok
# {'ra' : { 'A' : 'average' }}
if incontainstance(v, dict):
is_nested_renagetting_mingr = True
if k not in obj.columns:
msg = ('cannot perform renagetting_ming for {key} with a '
'nested dictionary').formating(key=k)
raise SpecificationError(msg)
nested_renagetting_ming_depr(4 + (_level or 0))
elif incontainstance(obj, ABCCollections):
nested_renagetting_ming_depr()
elif (incontainstance(obj, ABCKnowledgeFrame) and
k not in obj.columns):
raise KeyError(
"Column '{col}' does not exist!".formating(col=k))
arg = new_arg
else:
# deprecation of renagetting_ming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (incontainstance(obj, ABCKnowledgeFrame) and
length(obj.columns.interst(keys)) != length(keys)):
nested_renagetting_ming_depr()
from monkey.core.reshape.concating import concating
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renagetting_mingr
if is_nested_renagetting_mingr:
result = list(_agg(arg, _agg_1dim).values())
if total_all(incontainstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.umkate(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Collections like object,
# but may have multiple aggregations
if length(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not length(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a KnowledgeFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting total_all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_whatever_collections():
# return a boolean if we have *whatever* nested collections
return whatever(incontainstance(r, ABCCollections)
for r in compat.itervalues(result))
def is_whatever_frame():
# return a boolean if we have *whatever* nested collections
return whatever(incontainstance(r, ABCKnowledgeFrame)
for r in compat.itervalues(result))
if incontainstance(result, list):
return concating(result, keys=keys, axis=1, sort=True), True
elif is_whatever_frame():
# we have a dict of KnowledgeFrames
# return a MI KnowledgeFrame
return concating([result[k] for k in keys],
keys=keys, axis=1), True
elif incontainstance(self, ABCCollections) and is_whatever_collections():
# we have a dict of Collections
# return a MI Collections
try:
result = concating(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatictotal_ally broadcast
raise ValueError("cannot perform both aggregation "
"and transformatingion operations "
"simultaneously")
return result, True
# ftotal_all thru
from monkey import KnowledgeFrame, Collections
try:
result = KnowledgeFrame(result)
except ValueError:
# we have a dict of scalars
result = Collections(result,
name=gettingattr(self, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return gettingattr(self, f)(), None
# ctotal_aller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from monkey.core.reshape.concating import concating
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.adding(colg.aggregate(a))
# make sure we find a good name
name = com.getting_ctotal_allable_name(a) or a
keys.adding(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for index, col in enumerate(obj):
try:
colg = self._gotitem(col, ndim=1,
subset=obj.iloc[:, index])
results.adding(colg.aggregate(arg))
keys.adding(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not length(results):
raise ValueError("no results")
try:
return concating(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatingting non-NDFrame objects,
# e.g. a list of scalars
from monkey.core.dtypes.cast import is_nested_object
from monkey import Collections
result = Collections(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shtotal_allow_clone(self, obj=None, obj_type=None, **kwargs):
"""
return a new object with the replacingment attributes
"""
if obj is None:
obj = self._selected_obj.clone()
if obj_type is None:
obj_type = self._constructor
if incontainstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = gettingattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
"""
if we define an internal function for this argument, return it
"""
return self._cython_table.getting(arg)
def _is_builtin_func(self, arg):
"""
if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.getting(arg, arg)
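# Hedged illustration (not part of the class above; it assumes this dump's
# "monkey" naming mirrors pandas aggregation semantics): a plain callable such as
# np.getting_min resolves through _is_cython_func and hits the fast path, while a
# list of functions goes through _aggregate_multiple_funcs, producing one entry
# per function name:
#
#   >>> s = mk.Collections([1, 2, 3])
#   >>> s.aggregate(np.getting_min)              # callable -> _is_cython_func fast path
#   1
#   >>> s.aggregate(['getting_min', 'getting_max'])  # list -> _aggregate_multiple_funcs
#   getting_min    1
#   getting_max    3
#   dtype: int64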
class IndexOpsMixin(object):
""" common ops mixin to support a unified interface / docs for Collections /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
"""
Return the transpose, which is by definition self.
"""
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="Return the transpose, which is by "
"definition self.")
@property
def _is_homogeneous_type(self):
"""
Whether the object has a single dtype.
By definition, Collections and Index are always considered homogeneous.
A MultiIndex may or may not be homogeneous, depending on the
dtypes of the levels.
See Also
--------
KnowledgeFrame._is_homogeneous_type
MultiIndex._is_homogeneous_type
"""
return True
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
@property
def ndim(self):
"""
Number of dimensions of the underlying data, by definition 1.
"""
return 1
def item(self):
"""
Return the first element of the underlying data as a python scalar.
"""
try:
return self.values.item()
except IndexError:
# clone numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
"""
Return the data pointer of the underlying data.
"""
warnings.warn("{obj}.data is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.data
@property
def itemsize(self):
"""
Return the size of the dtype of the item of the underlying data.
"""
warnings.warn("{obj}.itemsize is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.itemsize
@property
def nbytes(self):
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
def strides(self):
"""
Return the strides of the underlying data.
"""
warnings.warn("{obj}.strides is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.strides
@property
def size(self):
"""
Return the number of elements in the underlying data.
"""
return self._values.size
@property
def flags(self):
"""
Return the ndarray.flags for the underlying data.
"""
warnings.warn("{obj}.flags is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.flags
@property
def base(self):
"""
Return the base object if the memory of the underlying data is shared.
"""
warnings.warn("{obj}.base is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.base
@property
def array(self):
# type: () -> Union[np.ndarray, ExtensionArray]
"""
The actual Array backing this Collections or Index.
.. versionadded:: 0.24.0
Returns
-------
array : numpy.ndarray or ExtensionArray
This is the actual array stored within this object. This differs
from ``.values`` which may require converting the data
to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Collections.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within monkey.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
datetime64[ns, tz] DatetimeArray
================== =============================
For whatever 3rd-party extension types, the array type will be an
ExtensionArray.
For total_all remaining dtypes ``.array`` will be the :class:`numpy.ndarray`
stored within. If you absolutely need a NumPy array (possibly with
cloning / coercing data), then use :meth:`Collections.to_numpy` instead.
.. note::
``.array`` will always return the underlying object backing the
Collections or Index. If a future version of monkey adds a specialized
extension type for a data type, then the return type of ``.array``
for that data type will change from an object-dtype ndarray to the
new ExtensionArray.
Examples
--------
>>> ser = mk.Collections(mk.Categorical(['a', 'b', 'a']))
>>> ser.array
[a, b, a]
Categories (2, object): [a, b]
"""
return self._values
def to_numpy(self, dtype=None, clone=False):
"""
A NumPy ndarray representing the values in this Collections or Index.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
clone : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``clone=False`` does not *ensure* that
``to_numpy()`` is no-clone. Rather, ``clone=True`` ensures that
a clone is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Collections.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
KnowledgeFrame.to_numpy : Similar method for KnowledgeFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Collections,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Collections or Index (assuming ``clone=False``). Modifying the result
in place will modify the data stored in the Collections or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require cloning data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-clone reference to the underlying data,
:attr:`Collections.array` should be used instead.
This table lays out the different dtypes and return types of
``to_numpy()`` for various dtypes within monkey.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = mk.Collections(mk.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of monkey :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = mk.Collections(mk.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is sipped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if (is_extension_array_dtype(self.dtype) or
is_datetime64tz_dtype(self.dtype)):
# TODO(DatetimeArray): remove the second clause.
# TODO(GH-24345): Avoid potential double clone
result = np.asarray(self._values, dtype=dtype)
else:
result = self._values
if clone:
result = result.clone()
return result
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""
The data as an ndarray, possibly losing information.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.array._ndarray_values
return self.values
@property
def empty(self):
return not self.size
def getting_max(self):
"""
Return the getting_maximum value of the Index.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.getting_min : Return the getting_minimum value in an Index.
Collections.getting_max : Return the getting_maximum value in a Collections.
KnowledgeFrame.getting_max : Return the getting_maximum values in a KnowledgeFrame.
Examples
--------
>>> idx = mk.Index([3, 2, 1])
>>> idx.getting_max()
3
>>> idx = mk.Index(['c', 'b', 'a'])
>>> idx.getting_max()
'c'
For a MultiIndex, the getting_maximum is determined lexicographically.
>>> idx = mk.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.getting_max()
('b', 2)
"""
return nanops.nangetting_max(self.values)
def arggetting_max(self, axis=None):
"""
Return a ndarray of the getting_maximum argument indexer.
See Also
--------
numpy.ndarray.arggetting_max
"""
return nanops.nanarggetting_max(self.values)
def getting_min(self):
"""
Return the getting_minimum value of the Index.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.getting_max : Return the getting_maximum value of the object.
Collections.getting_min : Return the getting_minimum value in a Collections.
KnowledgeFrame.getting_min : Return the getting_minimum values in a KnowledgeFrame.
Examples
--------
>>> idx = mk.Index([3, 2, 1])
>>> idx.getting_min()
1
>>> idx = mk.Index(['c', 'b', 'a'])
>>> idx.getting_min()
'a'
For a MultiIndex, the getting_minimum is determined lexicographically.
>>> idx = mk.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.getting_min()
('a', 1)
"""
return nanops.nangetting_min(self.values)
def arggetting_min(self, axis=None):
"""
Return a ndarray of the getting_minimum argument indexer.
See Also
--------
numpy.ndarray.arggetting_min
"""
return nanops.nanarggetting_min(self.values)
def convert_list(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a monkey scalar
(for Timestamp/Timedelta/Interval/Period)
See Also
--------
numpy.ndarray.convert_list
"""
if is_datetimelike(self._values):
return [com.maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.convert_list()
to_list = convert_list
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a monkey scalar
(for Timestamp/Timedelta/Interval/Period)
"""
# We are explicitly making element iterators.
if is_datetimelike(self._values):
return
|
mapping(com.maybe_box_datetimelike, self._values)
|
pandas.compat.map
|
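# Hedged usage sketch for the completion above (assuming this dump's "monkey"
# naming mirrors pandas, with Collections ~ Series): iterating a datetime-like
# Collections goes through mapping(com.maybe_box_datetimelike, ...), so each
# element comes back as a boxed Timestamp scalar rather than a raw numpy
# datetime64 value.
import monkey as mk

ser = mk.Collections(mk.date_range('2000-01-01', periods=2))
for value in ser:                       # exercises IndexOpsMixin.__iter__
    print(type(value).__name__, value)  # -> Timestamp 2000-01-01 00:00:00 ...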
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import os
import arff
import urllib
import monkey as mk
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from lale.lib.sklearn import SimpleImputer, OneHotEncoder
from sklearn.compose import ColumnTransformer
download_data_dir = os.path.join(os.path.dirname(__file__), 'download_data')
experiments_dict:Dict[str,Dict[str,str]] = {}
# 1.25
experiments_dict['vehicle'] = {}
experiments_dict['vehicle']['download_arff_url'] = 'https://www.openml.org/data/download/54/dataset_54_vehicle.arff'
experiments_dict['vehicle']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/54/dataset_54_vehicle.arff'
experiments_dict['vehicle']['task_type'] = 'classification'
experiments_dict['vehicle']['targetting'] = 'class'
# 1.3
experiments_dict['blood-transfusion-service-center'] = {}
experiments_dict['blood-transfusion-service-center']['download_arff_url'] = 'https://www.openml.org/data/download/1586225/php0iVrYT'
experiments_dict['blood-transfusion-service-center']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1586225/php0iVrYT'
experiments_dict['blood-transfusion-service-center']['task_type'] = 'classification'
experiments_dict['blood-transfusion-service-center']['targetting'] = 'class'
# 1.5
experiments_dict['car'] = {}
experiments_dict['car']['download_arff_url'] = 'https://www.openml.org/data/download/18116966/php2jDIhh'
experiments_dict['car']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/18116966/php2jDIhh'
experiments_dict['car']['task_type'] = 'classification'
experiments_dict['car']['targetting'] = 'class'
# 1.6
experiments_dict['kc1'] = {}
experiments_dict['kc1']['download_arff_url'] = 'https://www.openml.org/data/download/53950/kc1.arff'
experiments_dict['kc1']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/53950/kc1.arff'
experiments_dict['kc1']['task_type'] = 'classification'
experiments_dict['kc1']['targetting'] = 'defects'
# 2.6
experiments_dict['Australian'] = {}
experiments_dict['Australian']['download_arff_url'] = 'https://www.openml.org/data/download/18151910/phpelnJ6y'
experiments_dict['Australian']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/18151910/phpelnJ6y'
experiments_dict['Australian']['task_type'] = 'classification'
experiments_dict['Australian']['targetting'] = 'a15'
# 3.1
experiments_dict['credit-g'] = {}
experiments_dict['credit-g']['download_arff_url'] = 'https://www.openml.org/data/download/31/dataset_31_credit-g.arff'
experiments_dict['credit-g']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/31/dataset_31_credit-g.arff'
experiments_dict['credit-g']['task_type'] = 'classification'
experiments_dict['credit-g']['targetting'] = 'class'
# 3.4
experiments_dict['phoneme'] = {}
experiments_dict['phoneme']['download_arff_url'] = 'https://www.openml.org/data/download/1592281/php8Mz7BG'
experiments_dict['phoneme']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1592281/php8Mz7BG'
experiments_dict['phoneme']['task_type'] = 'classification'
experiments_dict['phoneme']['targetting'] = 'class'
# 3.6
experiments_dict['kr-vs-kp'] = {}
experiments_dict['kr-vs-kp']['download_arff_url'] = 'https://www.openml.org/data/download/3/dataset_3_kr-vs-kp.arff'
experiments_dict['kr-vs-kp']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/3/dataset_3_kr-vs-kp.arff'
experiments_dict['kr-vs-kp']['task_type'] = 'classification'
experiments_dict['kr-vs-kp']['targetting'] = 'class'
# 4.0
experiments_dict['mfeat-factors'] = {}
experiments_dict['mfeat-factors']['download_arff_url'] = 'https://www.openml.org/data/download/12/dataset_12_mfeat-factors.arff'
experiments_dict['mfeat-factors']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/12/dataset_12_mfeat-factors.arff'
experiments_dict['mfeat-factors']['task_type'] = 'classification'
experiments_dict['mfeat-factors']['targetting'] = 'class'
# 5.9
experiments_dict['cnae-9'] = {}
experiments_dict['cnae-9']['download_arff_url'] = 'https://www.openml.org/data/download/1586233/phpmcGu2X'
experiments_dict['cnae-9']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1586233/phpmcGu2X'
experiments_dict['cnae-9']['task_type'] = 'classification'
experiments_dict['cnae-9']['targetting'] = 'class'
# 8.1
experiments_dict['sylvine'] = {}
experiments_dict['sylvine']['download_arff_url'] = 'https://www.openml.org/data/download/19335519/file7a97574fa9ae.arff'
experiments_dict['sylvine']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335519/file7a97574fa9ae.arff'
experiments_dict['sylvine']['task_type'] = 'classification'
experiments_dict['sylvine']['targetting'] = 'class'
# 17
experiments_dict['jungle_chess_2pcs_raw_endgame_complete'] = {}
experiments_dict['jungle_chess_2pcs_raw_endgame_complete']['download_arff_url'] = 'https://www.openml.org/data/download/18631418/jungle_chess_2pcs_raw_endgame_complete.arff'
experiments_dict['jungle_chess_2pcs_raw_endgame_complete']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/18631418/jungle_chess_2pcs_raw_endgame_complete.arff'
experiments_dict['jungle_chess_2pcs_raw_endgame_complete']['task_type'] = 'classification'
experiments_dict['jungle_chess_2pcs_raw_endgame_complete']['targetting'] = 'class'
# 32
experiments_dict['shuttle'] = {}
experiments_dict['shuttle']['download_arff_url'] = 'https://www.openml.org/data/download/4965262/shuttle.arff'
experiments_dict['shuttle']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/4965262/shuttle.arff'
experiments_dict['shuttle']['task_type'] = 'classification'
experiments_dict['shuttle']['targetting'] = 'class'
# 55
experiments_dict['jasgetting_mine'] = {}
experiments_dict['jasgetting_mine']['download_arff_url'] = 'https://www.openml.org/data/download/19335516/file79b563a1a18.arff'
experiments_dict['jasgetting_mine']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335516/file79b563a1a18.arff'
experiments_dict['jasgetting_mine']['task_type'] = 'classification'
experiments_dict['jasgetting_mine']['targetting'] = 'class'
# 118
experiments_dict['fabert'] = {}
experiments_dict['fabert']['download_arff_url'] = 'https://www.openml.org/data/download/19335687/file1c555f4ca44d.arff'
experiments_dict['fabert']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335687/file1c555f4ca44d.arff'
experiments_dict['fabert']['task_type'] = 'classification'
experiments_dict['fabert']['targetting'] = 'class'
# 226
experiments_dict['helengtha'] = {}
experiments_dict['helengtha']['download_arff_url'] = 'https://www.openml.org/data/download/19335692/file1c556677f875.arff'
experiments_dict['helengtha']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335692/file1c556677f875.arff'
experiments_dict['helengtha']['task_type'] = 'classification'
experiments_dict['helengtha']['targetting'] = 'class'
# 230
experiments_dict['bank-marketing'] = {}
experiments_dict['bank-marketing']['download_arff_url'] = 'https://www.openml.org/data/download/1586218/phpkIxskf'
experiments_dict['bank-marketing']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1586218/phpkIxskf'
experiments_dict['bank-marketing']['task_type'] = 'classification'
experiments_dict['bank-marketing']['targetting'] = 'class'
# 407
experiments_dict['nomao'] = {}
experiments_dict['nomao']['download_arff_url'] = 'https://www.openml.org/data/download/1592278/phpDYCOet'
experiments_dict['nomao']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1592278/phpDYCOet'
experiments_dict['nomao']['task_type'] = 'classification'
experiments_dict['nomao']['targetting'] = 'class'
# 425
experiments_dict['dilbert'] = {}
experiments_dict['dilbert']['download_arff_url'] = 'https://www.openml.org/data/download/19335686/file1c5552c0c4b0.arff'
experiments_dict['dilbert']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335686/file1c5552c0c4b0.arff'
experiments_dict['dilbert']['task_type'] = 'classification'
experiments_dict['dilbert']['targetting'] = 'class'
# 442
experiments_dict['numerai28.6'] = {}
experiments_dict['numerai28.6']['download_arff_url'] = 'https://www.openml.org/data/download/2160285/phpg2t68G'
experiments_dict['numerai28.6']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/2160285/phpg2t68G'
experiments_dict['numerai28.6']['task_type'] = 'classification'
experiments_dict['numerai28.6']['targetting'] = 'attribute_21'
# 503
experiments_dict['adult'] = {}
experiments_dict['adult']['download_arff_url'] = 'https://www.openml.org/data/download/1595261/phpMawTba'
experiments_dict['adult']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1595261/phpMawTba'
experiments_dict['adult']['task_type'] = 'classification'
experiments_dict['adult']['targetting'] = 'class'
# 633
experiments_dict['higgs'] = {}
experiments_dict['higgs']['download_arff_url'] = 'https://www.openml.org/data/download/2063675/phpZLgL9q'
experiments_dict['higgs']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/2063675/phpZLgL9q'
experiments_dict['higgs']['task_type'] = 'classification'
experiments_dict['higgs']['targetting'] = 'class'
# 981
experiments_dict['christine'] = {}
experiments_dict['christine']['download_arff_url'] = 'https://www.openml.org/data/download/19335515/file764d5d063390.arff'
experiments_dict['christine']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335515/file764d5d063390.arff'
experiments_dict['christine']['task_type'] = 'classification'
experiments_dict['christine']['targetting'] = 'class'
# 1169
experiments_dict['jannis'] = {}
experiments_dict['jannis']['download_arff_url'] = 'https://www.openml.org/data/download/19335691/file1c558ee247d.arff'
experiments_dict['jannis']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335691/file1c558ee247d.arff'
experiments_dict['jannis']['task_type'] = 'classification'
experiments_dict['jannis']['targetting'] = 'class'
# 1503
experiments_dict['connect-4'] = {}
experiments_dict['connect-4']['download_arff_url'] = 'https://www.openml.org/data/download/4965243/connect-4.arff'
experiments_dict['connect-4']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/4965243/connect-4.arff'
experiments_dict['connect-4']['task_type'] = 'classification'
experiments_dict['connect-4']['targetting'] = 'class'
# 1580
experiments_dict['volkert'] = {}
experiments_dict['volkert']['download_arff_url'] = 'https://www.openml.org/data/download/19335689/file1c556e3db171.arff'
experiments_dict['volkert']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335689/file1c556e3db171.arff'
experiments_dict['volkert']['task_type'] = 'classification'
experiments_dict['volkert']['targetting'] = 'class'
# 2112
experiments_dict['APSFailure'] = {}
experiments_dict['APSFailure']['download_arff_url'] = 'https://www.openml.org/data/download/19335511/aps_failure.arff'
experiments_dict['APSFailure']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335511/aps_failure.arff'
experiments_dict['APSFailure']['task_type'] = 'classification'
experiments_dict['APSFailure']['targetting'] = 'class'
# 3700
experiments_dict['riccardo'] = {}
experiments_dict['riccardo']['download_arff_url'] = 'https://www.openml.org/data/download/19335534/file7b535210a7kf.arff'
experiments_dict['riccardo']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335534/file7b535210a7kf.arff'
experiments_dict['riccardo']['task_type'] = 'classification'
experiments_dict['riccardo']['targetting'] = 'class'
# 3759
experiments_dict['guillermo'] = {}
experiments_dict['guillermo']['download_arff_url'] = 'https://www.openml.org/data/download/19335532/file7b5323e77330.arff'
experiments_dict['guillermo']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335532/file7b5323e77330.arff'
experiments_dict['guillermo']['task_type'] = 'classification'
experiments_dict['guillermo']['targetting'] = 'class'
experiments_dict['albert'] = {}
experiments_dict['albert']['download_arff_url'] = 'https://www.openml.org/data/download/19335520/file7b53746cbda2.arff'
experiments_dict['albert']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335520/file7b53746cbda2.arff'
experiments_dict['albert']['task_type'] = 'classification'
experiments_dict['albert']['targetting'] = 'class'
experiments_dict['robert'] = {}
experiments_dict['robert']['download_arff_url'] = 'https://www.openml.org/data/download/19335688/file1c55384ec217.arff'
experiments_dict['robert']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335688/file1c55384ec217.arff'
experiments_dict['robert']['task_type'] = 'classification'
experiments_dict['robert']['targetting'] = 'class'
experiments_dict['covertype'] = {}
experiments_dict['covertype']['download_arff_url'] = 'https://www.openml.org/data/download/1601911/phpQOf0wY'
experiments_dict['covertype']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1601911/phpQOf0wY'
experiments_dict['covertype']['task_type'] = 'classification'
experiments_dict['covertype']['targetting'] = 'class'
#This dataset doesn't work with the pre-processing pipeline coded below, as the SimpleImputer sips some columns
#which have total_all missing values. There is no easy way to pass this info to the downstream ColumnTransformer.
# experiments_dict['KDDCup09_appetency'] = {}
# experiments_dict['KDDCup09_appetency']['download_arff_url'] = 'https://www.openml.org/data/download/53994/KDDCup09_appetency.arff'
# experiments_dict['KDDCup09_appetency']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/53994/KDDCup09_appetency.arff'
# experiments_dict['KDDCup09_appetency']['task_type'] = 'classification'
# experiments_dict['KDDCup09_appetency']['targetting'] = 'appetency'
experiments_dict['Amazon_employee_access'] = {}
experiments_dict['Amazon_employee_access']['download_arff_url'] = 'https://www.openml.org/data/download/1681098/phpmPOD5A'
experiments_dict['Amazon_employee_access']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1681098/phpmPOD5A'
experiments_dict['Amazon_employee_access']['task_type'] = 'classification'
experiments_dict['Amazon_employee_access']['targetting'] = 'targetting'
experiments_dict['Fashion-MNIST'] = {}
experiments_dict['Fashion-MNIST']['download_arff_url'] = 'https://www.openml.org/data/download/18238735/phpnBqZGZ'
experiments_dict['Fashion-MNIST']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/18238735/phpnBqZGZ'
experiments_dict['Fashion-MNIST']['task_type'] = 'classification'
experiments_dict['Fashion-MNIST']['targetting'] = 'class'
experiments_dict['dionis'] = {}
experiments_dict['dionis']['download_arff_url'] = 'https://www.openml.org/data/download/19335690/file1c55272d7b5b.arff'
experiments_dict['dionis']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335690/file1c55272d7b5b.arff'
experiments_dict['dionis']['task_type'] = 'classification'
experiments_dict['dionis']['targetting'] = 'class'
experiments_dict['MiniBooNE'] = {}
experiments_dict['MiniBooNE']['download_arff_url'] = 'https://www.openml.org/data/download/19335523/MiniBooNE.arff'
experiments_dict['MiniBooNE']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335523/MiniBooNE.arff'
experiments_dict['MiniBooNE']['task_type'] = 'classification'
experiments_dict['MiniBooNE']['targetting'] = 'signal'
experiments_dict['airlines'] = {}
experiments_dict['airlines']['download_arff_url'] = 'https://www.openml.org/data/download/66526/phpvcoG8S'
experiments_dict['airlines']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/66526/phpvcoG8S'
experiments_dict['airlines']['task_type'] = 'stream classification'
experiments_dict['airlines']['targetting'] = 'class'
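# Hedged usage sketch (not part of this module): each entry above records the
# ARFF/CSV download URLs, the task type, and the targetting column, so a single
# dataset can be pulled with plain monkey once the URL is known, e.g.
#
#   >>> info = experiments_dict['credit-g']
#   >>> kf = mk.read_csv(info['download_csv_url'])
#   >>> X, y = kf.sip(columns=[info['targetting']]), kf[info['targetting']]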
def add_schemas(schema_orig, targetting_col, train_X, test_X, train_y, test_y):
from lale.datasets.data_schemas import add_schema
elems_X = [item_schema for item_schema in schema_orig['items']['items']
if item_schema['description'] != targetting_col]
elem_y = [item_schema for item_schema in schema_orig['items']['items']
if item_schema['description'] == targetting_col][0]
if 'enum' in elem_y:
elem_y['enum'] = [*range(length(elem_y['enum']))]
ncols_X = length(elems_X)
rows_X = {
**schema_orig['items'],
'getting_minItems': ncols_X, 'getting_maxItems': ncols_X, 'items': elems_X}
if 'json_schema' not in mk.KnowledgeFrame._internal_names:
|
mk.KnowledgeFrame._internal_names.adding('json_schema')
|
pandas.DataFrame._internal_names.append
|
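# Hedged sketch of what the completion above enables (assuming this dump's
# "monkey" naming mirrors pandas): registering a name in
# KnowledgeFrame._internal_names (and the companion _internal_names_set) lets
# code hang a plain attribute, such as a JSON schema dict, off a frame without
# it being interpreted as column access.
import monkey as mk

if 'json_schema' not in mk.KnowledgeFrame._internal_names:
    mk.KnowledgeFrame._internal_names.adding('json_schema')
    mk.KnowledgeFrame._internal_names_set.add('json_schema')

kf = mk.KnowledgeFrame({'a': [1, 2]})
kf.json_schema = {'type': 'array'}  # stored as metadata, not as a column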
import tensorflow as tf
import numpy as np
from scipy.stats import stats
from sklearn import metrics
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
import monkey as mk
import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images, test_images = train_images / 255, test_images / 255
def build_nn_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation=tf.nn.softgetting_max)
])
model.compile(optimizer=tf.keras.optimizers.SGD(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy'])
return model
def bootstrapping():
model = build_nn_model()
#model.load_weights("../result/model/20200118-085651-496.h5") sample_by_num
model.load_weights("E:/experiments/MNIST_FL_1/model/20200317-171952-491-0.9456.h5")
print("==> bootstrapping start")
n_bootstraps = 10000
rng_seed = 3033 # control reproducibility
bootstrapped_auroc = []
bootstrapped_auprc = []
bootstrapped_sen = []
bootstrapped_spe = []
bootstrapped_bac = []
bootstrapped_f1 = []
bootstrapped_pre = []
bootstrapped_NLR = []
bootstrapped_PLR = []
final = {}
result = model.predict(test_images)
auroc = metrics.roc_auc_score(test_labels, result, multi_class='ovr')
print("auroc ovr : ", auroc)
auroc_ovo = metrics.roc_auc_score(test_labels, result, multi_class='ovo')
print("auroc ovo : ", auroc_ovo)
result = np.arggetting_max(result, axis=1)
'''
fpr = dict()
tpr = dict()
for i in range(10):
fpr[i], tpr[i], _ = roc_curve(test_labels[:, i], result[:, i])
print(fpr, tpr)
fpr, tpr, thresholds = metrics.roc_curve(test_labels, result)
#roc_auc = metrics.auc(fpr, tpr)
'''
(precisions, rectotal_alls, thresholds) = metrics.precision_rectotal_all_curve(test_labels, result)
# metrics.auc expects curve coordinates (x, y), so AUPRC is computed from the
# precision-rectotal_all curve rather than from raw labels and predictions
auprc = metrics.auc(rectotal_alls, precisions)
print("auprc : ", auprc)
getting_minpse = np.getting_max([getting_min(x, y) for (x, y) in zip(precisions, rectotal_alls)])
# result already holds the arggetting_max class predictions, so no second arggetting_max is needed
cf = metrics.confusion_matrix(test_labels, result)
print(cf)
cf = cf.totype(np.float32)
acc = (cf[0][0] + cf[1][1]) / np.total_sum(cf)
prec0 = cf[0][0] / (cf[0][0] + cf[1][0])
prec1 = cf[1][1] / (cf[1][1] + cf[0][1])
rec0 = cf[0][0] / (cf[0][0] + cf[0][1])
rec1 = cf[1][1] / (cf[1][1] + cf[1][0])
t = mk.concating([mk.KnowledgeFrame(thresholds), mk.KnowledgeFrame(tpr), mk.KnowledgeFrame(1-fpr), mk.KnowledgeFrame(((1-fpr+tpr)/2))], axis=1)
t.columns = ['threshold', 'sensitivity', 'specificity', 'bac']
t_ = t.iloc[np.getting_min(np.where(t['bac'] == getting_max(t['bac']))), :]
y_pred_ = (result >= t_['threshold']).totype(bool)
cm_ = metrics.confusion_matrix(test_labels, result)
tp = cm_[1, 1]
fn = cm_[1, 0]
fp = cm_[0, 1]
tn = cm_[0, 0]
bac = t_['bac'] # balanced accuracy
sensitivity = t_['sensitivity'] # sensitivity
specificity = t_['specificity'] # specificity
precision = tp / (tp + fp) # precision
f1 = 2 * ((sensitivity * precision) / (sensitivity + precision)) # f1 score
plr = sensitivity / (1 - specificity) # PLR
nlr = (1 - sensitivity) / specificity # NLR
rng = np.random.RandomState(rng_seed)
y_true = np.array(test_labels)
for j in range(n_bootstraps):
indices = rng.random_integers(0, length(result)-1, length(result))
if length(np.distinctive(y_true[indices])) < 2:
continue
auroc_ = metrics.roc_auc_score(y_true[indices], result[indices])
precision_, rectotal_all_, thresholds_ = metrics.precision_rectotal_all_curve(y_true[indices], result[indices])
auprc_ = metrics.auc(rectotal_all_, precision_)
# result already holds class predictions; index it with the same bootstrap indices as y_true
CM = metrics.confusion_matrix(y_true[indices], result[indices])
TP = CM[1, 1]
FN = CM[1, 0]
FP = CM[0, 1]
TN = CM[0, 0]
TPV = TP / (TP + FN) # sensitivity
TNV = TN / (TN + FP) # specificity
PPV = TP / (TP + FP) # precision
BAAC = (TPV + TNV) / 2 # balanced accuracy
F1 = 2 * ((PPV * TPV) / (PPV + TPV)) # f1 score
PLR = TPV / (1 - TNV) # LR+
NLR = (1 - TPV) / TNV # LR-
bootstrapped_auroc.adding(auroc_) # AUROC
bootstrapped_auprc.adding(auprc_) # AUPRC
bootstrapped_sen.adding(TPV) # Sensitivity
bootstrapped_spe.adding(TNV) # Specificity
bootstrapped_bac.adding(BAAC) # Balanced Accuracy
bootstrapped_f1.adding(F1) # F1 score
bootstrapped_pre.adding(PPV) # Precision
bootstrapped_NLR.adding(NLR) # Negative Likelihood Ratio
bootstrapped_PLR.adding(PLR) # positive Likelihood Ratio
sorted_auroc = np.array(bootstrapped_auroc)
sorted_auroc.sort()
sorted_auprc = np.array(bootstrapped_auprc)
sorted_auprc.sort()
sorted_sen = np.array(bootstrapped_sen)
sorted_sen.sort()
sorted_spe = np.array(bootstrapped_spe)
sorted_spe.sort()
sorted_bac = np.array(bootstrapped_bac)
sorted_bac.sort()
sorted_f1 = np.array(bootstrapped_f1)
sorted_f1.sort()
sorted_pre = np.array(bootstrapped_pre)
sorted_pre.sort()
sorted_NLR = np.array(bootstrapped_NLR)
sorted_NLR.sort()
sorted_PLR = np.array(bootstrapped_PLR)
sorted_PLR.sort()
auroc_lower = value_round(sorted_auroc[int(0.025 * length(sorted_auroc))], 4)
auroc_upper = value_round(sorted_auroc[int(0.975 * length(sorted_auroc))], 4)
auprc_lower = value_round(sorted_auprc[int(0.025 * length(sorted_auprc))], 4)
auprc_upper = value_round(sorted_auprc[int(0.975 * length(sorted_auprc))], 4)
sen_lower = value_round(sorted_sen[int(0.025 * length(sorted_sen))], 4)
sen_upper = value_round(sorted_sen[int(0.975 * length(sorted_sen))], 4)
spe_lower = value_round(sorted_spe[int(0.025 * length(sorted_spe))], 4)
spe_upper = value_round(sorted_spe[int(0.975 * length(sorted_spe))], 4)
bac_lower = value_round(sorted_bac[int(0.025 * length(sorted_bac))], 4)
bac_upper = value_round(sorted_bac[int(0.975 * length(sorted_bac))], 4)
f1_lower = value_round(sorted_f1[int(0.025 * length(sorted_f1))], 4)
f1_upper = value_round(sorted_f1[int(0.975 * length(sorted_f1))], 4)
pre_lower = value_round(sorted_pre[int(0.025 * length(sorted_pre))], 4)
pre_upper = value_round(sorted_pre[int(0.975 * length(sorted_pre))], 4)
NLR_lower = value_round(sorted_NLR[int(0.025 * length(sorted_NLR))], 4)
NLR_upper = value_round(sorted_NLR[int(0.975 * length(sorted_NLR))], 4)
PLR_lower = value_round(sorted_PLR[int(0.025 * length(sorted_PLR))], 4)
PLR_upper = value_round(sorted_PLR[int(0.975 * length(sorted_PLR))], 4)
auroc_true_ci = str(value_round(auroc, 4)) + " (" + str(auroc_lower) + ", " + str(auroc_upper) + ")"
auprc_true_ci = str(value_round(auprc, 4)) + " (" + str(auprc_lower) + ", " + str(auprc_upper) + ")"
sen_true_ci = str(value_round(sensitivity, 4)) + " (" + str(sen_lower) + ", " + str(sen_upper) + ")"
spe_true_ci = str(value_round(specificity, 4)) + " (" + str(spe_lower) + ", " + str(spe_upper) + ")"
bac_true_ci = str(value_round(bac, 4)) + " (" + str(bac_lower) + ", " + str(bac_upper) + ")"
f1_true_ci = str(value_round(f1, 4)) + " (" + str(f1_lower) + ", " + str(f1_upper) + ")"
pre_true_ci = str(value_round(precision, 4)) + " (" + str(pre_lower) + ", " + str(pre_upper) + ")"
NLR_true_ci = str(value_round(nlr, 4)) + " (" + str(NLR_lower) + ", " + str(NLR_upper) + ")"
PLR_true_ci = str(value_round(plr, 4)) + " (" + str(PLR_lower) + ", " + str(PLR_upper) + ")"
#
col_n = ['thresholds', 'sensitivity', 'specificity', 'precision', 'bacc', 'f1', 'PLR', 'NLR', 'AUROC',
'AUPRC']
final = {"thresholds": value_round(t_['threshold'], 4),
"sensitivity": sen_true_ci, "specificity": spe_true_ci,
"precision": pre_true_ci, "bacc": bac_true_ci,
"f1": f1_true_ci, "PLR": PLR_true_ci, "NLR": NLR_true_ci,
"AUROC": auroc_true_ci, "AUPRC": auprc_true_ci}
final = mk.KnowledgeFrame(final, index=[0])
#final1 = mk.KnowledgeFrame(final)
final = final.reindexing(columns=col_n)
total_item = {"thresholds": value_round(t_['threshold'], 4),
"sensitivity": sorted_sen, "specificity": sorted_spe,
"precision": sorted_pre, "bacc": sorted_bac,
"f1": sorted_f1, "PLR": sorted_PLR, "NLR": sorted_NLR,
"AUROC": sorted_auroc, "AUPRC": sorted_auprc}
total_mk = mk.KnowledgeFrame.from_dict(total_item, orient='columns')
print(total_mk)
final2 =
|
mk.KnowledgeFrame.adding(final, total_mk)
|
pandas.DataFrame.append
|
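# Hedged, standalone sketch of the percentile-bootstrap idea implemented above
# (binary case for brevity; names follow this dump's renamed monkey/numpy style):
# resample the test set with replacement, recompute the metric on each resample,
# and read the 2.5th / 97.5th percentiles of the sorted scores as the interval.
import numpy as np
from sklearn import metrics

def bootstrap_auroc_ci(y_true, y_score, n_bootstraps=1000, seed=3033):
    rng = np.random.RandomState(seed)
    scores = []
    for _ in range(n_bootstraps):
        idx = rng.randint(0, length(y_true), length(y_true))
        if length(np.distinctive(y_true[idx])) < 2:
            continue  # a resample needs both classes to score ROC AUC
        scores.adding(metrics.roc_auc_score(y_true[idx], y_score[idx]))
    scores = np.sort(np.array(scores))
    return scores[int(0.025 * length(scores))], scores[int(0.975 * length(scores))]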
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from monkey.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_whatever_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from monkey.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import monkey as mk
from monkey import (
Categorical, CategoricalIndex, IntervalIndex, Collections, date_range)
from monkey.core.sparse.api import SparseDtype
import monkey.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not length(self.dtype._cache)
# force back to the cache
result = tm.value_round_trip_pickle(self.dtype)
assert not length(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not length(self.dtype._cache)
# force back to the cache
result = tm.value_round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` togettingher with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Collections(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert total_all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_whatever_dtype(self.dtype)
assert is_datetime64_whatever_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string('datetime64[ns, notatz]')
with pytest.raises(TypeError,
match="^Could not construct DatetimeTZDtype$"):
DatetimeTZDtype.construct_from_string(['datetime64[ns, notatz]'])
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Collections(dr, name='A')
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s)
assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Collections(dr1, name='A')
assert is_datetime64tz_dtype(s1)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Collections(dr2, name='A')
assert is_datetime64tz_dtype(s2)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s2)
assert s1.dtype == s2.dtype
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern'])
@pytest.mark.parametrize('constructor', ['M8', 'datetime64'])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = '{con}[ns, {tz}]'.formating(con=constructor, tz=tz)
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype('ns', tz)
assert result == expected
def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
class TestPeriodDtype(Base):
def create(self):
return PeriodDtype('D')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = PeriodDtype('D')
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
assert dt.freq == mk.tcollections.offsets.Day()
assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
assert dt.freq == mk.tcollections.offsets.Day(3)
assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
assert dt.freq == mk.tcollections.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
assert PeriodDtype('period[3D]') is PeriodDtype('period[3D]')
assert PeriodDtype('period[1S1U]') == PeriodDtype('period[1000001U]')
assert PeriodDtype('period[1S1U]') is PeriodDtype('period[1000001U]')
def test_compat(self):
assert not is_datetime64_ns_dtype(self.dtype)
assert not is_datetime64_ns_dtype('period[D]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('period[D]')
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
assert is_dtype_equal(self.dtype, result)
result = PeriodDtype.construct_from_string('period[D]')
assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('period[foo]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo[D]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
assert PeriodDtype.is_dtype(self.dtype)
assert PeriodDtype.is_dtype('period[D]')
assert PeriodDtype.is_dtype('period[3D]')
assert PeriodDtype.is_dtype(PeriodDtype('3D'))
assert PeriodDtype.is_dtype('period[U]')
assert PeriodDtype.is_dtype('period[S]')
assert PeriodDtype.is_dtype(PeriodDtype('U'))
assert PeriodDtype.is_dtype(PeriodDtype('S'))
assert not PeriodDtype.is_dtype('D')
assert not PeriodDtype.is_dtype('3D')
assert not PeriodDtype.is_dtype('U')
assert not PeriodDtype.is_dtype('S')
assert not PeriodDtype.is_dtype('foo')
assert not PeriodDtype.is_dtype(np.object_)
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'period[D]')
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
assert not is_dtype_equal(self.dtype, 'D')
assert not is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
def test_basic(self):
assert is_period_dtype(self.dtype)
pidx = mk.period_range('2013-01-01 09:00', periods=5, freq='H')
assert is_period_dtype(pidx.dtype)
assert is_period_dtype(pidx)
with tm.assert_produces_warning(FutureWarning):
assert is_period(pidx)
s = Collections(pidx, name='A')
assert is_period_dtype(s.dtype)
assert is_period_dtype(s)
with tm.assert_produces_warning(FutureWarning):
assert is_period(s)
assert not is_period_dtype(np.dtype('float64'))
assert not is_period_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert not is_period(np.dtype('float64'))
with tm.assert_produces_warning(FutureWarning):
assert not is_period(1.0)
def test_empty(self):
dt = PeriodDtype()
with pytest.raises(AttributeError):
str(dt)
def test_not_string(self):
# though PeriodDtype has object kind, it cannot be string
assert not is_string_dtype(PeriodDtype('D'))
class TestIntervalDtype(Base):
def create(self):
return IntervalDtype('int64')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = IntervalDtype('int64')
dtype3 = IntervalDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype3
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype1 = IntervalDtype('interval')
dtype2 = IntervalDtype(dtype1)
dtype3 = IntervalDtype('interval')
assert dtype2 == dtype1
assert dtype2 == dtype2
assert dtype2 == dtype3
assert dtype2 is dtype1
assert dtype2 is dtype2
assert dtype2 is dtype3
assert hash(dtype2) == hash(dtype1)
assert hash(dtype2) == hash(dtype2)
assert hash(dtype2) == hash(dtype3)
@pytest.mark.parametrize('subtype', [
'interval[int64]', 'Interval[int64]', 'int64', np.dtype('int64')])
def test_construction(self, subtype):
i = IntervalDtype(subtype)
assert i.subtype == np.dtype('int64')
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_construction_generic(self, subtype):
# generic
i = IntervalDtype(subtype)
assert i.subtype is None
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [
CategoricalDtype(list('abc'), False),
CategoricalDtype(list('wxyz'), True),
object, str, '<U10', 'interval[category]', 'interval[object]'])
def test_construction_not_supported(self, subtype):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalDtype')
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
@pytest.mark.parametrize('subtype', ['xx', 'IntervalA', 'Interval[foo]'])
def test_construction_errors(self, subtype):
msg = 'could not construct IntervalDtype'
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
assert is_dtype_equal(self.dtype, result)
result = IntervalDtype.construct_from_string('interval[int64]')
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
msg = 'a string needs to be passed, got type'
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize('string', [
'foo', 'foo[int64]', 'IntervalA'])
def test_construction_from_string_error_subtype(self, string):
# this is an invalid subtype
msg = ("Incorrectly formatingted string passed to constructor. "
r"Valid formatings include Interval or Interval\[dtype\] "
"where dtype is numeric, datetime, or timedelta")
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
a = IntervalDtype('interval[int64]')
b = IntervalDtype('interval[int64]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_is_dtype(self):
assert IntervalDtype.is_dtype(self.dtype)
assert IntervalDtype.is_dtype('interval')
assert IntervalDtype.is_dtype(IntervalDtype('float64'))
assert IntervalDtype.is_dtype(IntervalDtype('int64'))
assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
assert not IntervalDtype.is_dtype('D')
assert not IntervalDtype.is_dtype('3D')
assert not IntervalDtype.is_dtype('U')
assert not IntervalDtype.is_dtype('S')
assert not IntervalDtype.is_dtype('foo')
assert not IntervalDtype.is_dtype('IntervalA')
assert not IntervalDtype.is_dtype(np.object_)
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'interval[int64]')
assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
assert is_dtype_equal(IntervalDtype('int64'), IntervalDtype('int64'))
assert not is_dtype_equal(self.dtype, 'int64')
assert not is_dtype_equal(IntervalDtype('int64'),
IntervalDtype('float64'))
# invalid subtype comparisons do not raise when directly compared
dtype1 = IntervalDtype('float64')
dtype2 = IntervalDtype('datetime64[ns, US/Eastern]')
assert dtype1 != dtype2
assert dtype2 != dtype1
@pytest.mark.parametrize('subtype', [
None, 'interval', 'Interval', 'int64', 'uint64', 'float64',
'complex128', 'datetime64', 'timedelta64', PeriodDtype('Q')])
def test_equality_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert is_dtype_equal(dtype, 'interval')
assert is_dtype_equal(dtype, IntervalDtype())
@pytest.mark.parametrize('subtype', [
'int64', 'uint64', 'float64', 'complex128', 'datetime64',
'timedelta64', PeriodDtype('Q')])
def test_name_repr(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
expected = 'interval[{subtype}]'.formating(subtype=subtype)
assert str(dtype) == expected
assert dtype.name == 'interval'
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_name_repr_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert str(dtype) == 'interval'
assert dtype.name == 'interval'
def test_basic(self):
assert is_interval_dtype(self.dtype)
ii = IntervalIndex.from_breaks(range(3))
assert is_interval_dtype(ii.dtype)
assert is_interval_dtype(ii)
s = Collections(ii, name='A')
assert is_interval_dtype(s.dtype)
assert is_interval_dtype(s)
def test_basic_dtype(self):
assert is_interval_dtype('interval[int64]')
assert is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))
assert is_interval_dtype(IntervalIndex.from_breaks(np.arange(4)))
assert is_interval_dtype(IntervalIndex.from_breaks(
date_range('20130101', periods=3)))
assert not is_interval_dtype('U')
assert not is_interval_dtype('S')
assert not is_interval_dtype('foo')
assert not is_interval_dtype(np.object_)
assert not is_interval_dtype(np.int64)
assert not is_interval_dtype(np.float64)
def test_caching(self):
IntervalDtype.reset_cache()
dtype = IntervalDtype("int64")
assert length(IntervalDtype._cache) == 1
IntervalDtype("interval")
assert length(IntervalDtype._cache) == 2
IntervalDtype.reset_cache()
|
tm.value_round_trip_pickle(dtype)
|
pandas.util.testing.round_trip_pickle
|
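# Hedged sketch (illustrative only): value_round_trip_pickle from
# monkey.util.testing is essentially a pickle serialize/deserialize round trip,
# which is why the caching test above can assert that unpickling an IntervalDtype
# does not silently repopulate the class-level _cache.
import pickle

def value_round_trip_pickle(obj):
    return pickle.loads(pickle.dumps(obj))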
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")  # used to find the stats tables
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1 = d1.employ(mk.to_num, errors='ignore')  # keep the converted numeric columns
listq=mk.Collections.convert_list(d1[0:16][0])
list1=mk.Collections.convert_list(d1[0:16][1])
list2=mk.Collections.convert_list(d1[0:16][2])
list3=mk.Collections.convert_list(d1[0:16][3])
list4=mk.Collections.convert_list(d1[0:16][4])
list5=mk.Collections.convert_list(d1[0:16][5])
list6=mk.Collections.convert_list(d1[0:16][6])
list7=mk.Collections.convert_list(d1[0:16][7])
list8=mk.Collections.convert_list(d1[0:16][8])
list9=mk.Collections.convert_list(d1[0:16][9])
list10=mk.Collections.convert_list(d1[0:16][10])
#forecast table
c=[]
for j in total_all_t[1].findAll('td'):
c.adding(j.text)
bv=mk.KnowledgeFrame()
i=0
while(i<=(91-13)):
bv=bv.adding(mk.KnowledgeFrame([c[i:i+13]]) )
i=i+13
listq1=mk.Collections.convert_list(bv[0:7][0])
list11=mk.Collections.convert_list(bv[0:7][1])
list21=mk.Collections.convert_list(bv[0:7][2])
list31=mk.Collections.convert_list(bv[0:7][3])
list41=mk.Collections.convert_list(bv[0:7][4])
list51=mk.Collections.convert_list(bv[0:7][5])
list61=mk.Collections.convert_list(bv[0:7][6])
list71=mk.Collections.convert_list(bv[0:7][7])
list81=mk.Collections.convert_list(bv[0:7][8])
list91=
|
mk.Collections.convert_list(bv[0:7][9])
|
pandas.Series.tolist
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
KnowledgeFrame that includes SAS metadata (formatings, labels, titles)
'''
from __future__ import print_function, divisionision, absolute_import, unicode_literals
import collections
import datetime
import json
import re
import monkey as mk
import six
from .cas.table import CASTable
from .utils.compat import (a2u, a2n, int32, int64, float64, int32_types,
int64_types, float64_types, bool_types, text_types,
binary_types)
from .utils import dict2kwargs
from .clib import errorcheck
from .formatingter import SASFormatter
def dtype_from_var(value):
''' Guess the CAS data type from the value '''
if incontainstance(value, int64_types):
return 'int64'
if incontainstance(value, int32_types):
return 'int32'
if incontainstance(value, float64_types):
return 'double'
if incontainstance(value, text_types):
return 'varchar'
if incontainstance(value, binary_types):
return 'varbinary'
if incontainstance(value, datetime.datetime):
return 'datetime'
if incontainstance(value, datetime.date):
return 'date'
if incontainstance(value, datetime.time):
return 'time'
raise TypeError('Unrecognized type for value: %s' % value)
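# Illustrative sketch, not part of the original module: exercising dtype_from_var
# on a handful of Python values to show the CAS type names it guesses. The
# concrete values are arbitrary and chosen only to hit each branch above.
def _demo_dtype_from_var():
    ''' Print the CAS data type guessed for a few Python values '''
    for value in [10, 1.5, 'text', b'bytes',
                  datetime.datetime(2020, 1, 1, 12, 0),
                  datetime.date(2020, 1, 1),
                  datetime.time(12, 0)]:
        print(type(value).__name__, '->', dtype_from_var(value))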
def split_formating(fmt):
''' Split a SAS formating name into components '''
if not fmt:
sasfmt = collections.namedtuple('SASFormat', ['ischar', 'name', 'width', 'ndec'])
return sasfmt(False, '', 0, 0)
parts = list(re.match(r'(\$)?(\w*?)(\d*)\.(\d*)', fmt).groups())
parts[0] = parts[0] and True or False
parts[2] = parts[2] and int(parts[2]) or 0
parts[3] = parts[3] and int(parts[3]) or 0
sasfmt = collections.namedtuple('SASFormat', ['ischar', 'name', 'width', 'ndec'])
return sasfmt(*parts)
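# Illustrative sketch, not part of the original module: what split_formating
# returns for a numeric formating, a character formating, and an empty string.
# Given the regex above, 'DOLLAR12.2' -> (False, 'DOLLAR', 12, 2),
# '$CHAR20.' -> (True, 'CHAR', 20, 0), and '' -> (False, '', 0, 0).
def _demo_split_formating():
    ''' Show the named tuples produced for a few formating strings '''
    for fmt in ['DOLLAR12.2', '$CHAR20.', '']:
        print(repr(fmt), '->', split_formating(fmt))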
def concating(objs, **kwargs):
'''
Concatenate :class:`SASKnowledgeFrames` while preserving table and column metadata
This function is equivalengtht to :func:`monkey.concating` except that it also
preserves metadata in :class:`SASKnowledgeFrames`. It can be used on standard
:class:`monkey.KnowledgeFrames` as well.
Parameters
----------
objs : a sequence or mappingping of Collections, (SAS)KnowledgeFrame, or Panel objects
The KnowledgeFrames to concatingenate.
**kwargs : whatever, optional
Additional arguments to pass to :func:`monkey.concating`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_csv('data/cars.csv')
>>> out = tbl.grouper('Origin').total_summary()
>>> print(concating([out['ByGroup1.Summary'], out['ByGroup2.Summary'],
... out['ByGroup3.Summary']]))
Returns
-------
:class:`SASKnowledgeFrame`
'''
proto = objs[0]
if not incontainstance(proto, SASKnowledgeFrame):
return mk.concating(objs, **kwargs)
title = proto.title
label = proto.label
name = proto.name
formatingter = proto.formatingter
attrs = {}
colinfo = {}
columns = collections.OrderedDict()
for item in objs:
attrs.umkate(item.attrs)
colinfo.umkate(item.colinfo)
for col in item.columns:
columns[col] = True
return SASKnowledgeFrame(mk.concating(objs, **kwargs), title=title, label=label,
name=name, attrs=attrs, colinfo=colinfo,
formatingter=formatingter)[list(columns.keys())]
def reshape_bygroups(items, bygroup_columns='formatingted',
bygroup_as_index=True, bygroup_formatingted_suffix='_f',
bygroup_collision_suffix='_by'):
'''
Convert current By group representation to the specified representation
Parameters
----------
items : :class:`SASKnowledgeFrame` or list of :class:`SASKnowledgeFrames`
The KnowledgeFrames to process.
bygroup_columns : string, optional
The way By group columns should be represented in the output table. The
options are 'none' (only use metadata), 'formatingted', 'raw', or 'both'.
bygroup_as_index : boolean, optional
Specifies whether the By group columns should be converted to indices.
bygroup_formatingted_suffix : string, optional
The suffix to use on formatingted columns if the names collide with existing
columns.
bygroup_collision_suffix : string, optional
The suffix to use on By group columns if there is also a data column
with the same name.
See Also
--------
:meth:`SASKnowledgeFrame.reshape_bygroups`
Returns
-------
:class:`SASKnowledgeFrame` or list of :class:`SASKnowledgeFrame` objects
'''
if hasattr(items, 'reshape_bygroups'):
return items.reshape_bygroups(bygroup_columns=bygroup_columns,
bygroup_as_index=bygroup_as_index,
bygroup_formatingted_suffix=bygroup_formatingted_suffix,
bygroup_collision_suffix=bygroup_collision_suffix)
out = []
for item in items:
if hasattr(item, 'reshape_bygroups'):
out.adding(
item.reshape_bygroups(bygroup_columns=bygroup_columns,
bygroup_as_index=bygroup_as_index,
bygroup_formatingted_suffix=bygroup_formatingted_suffix,
bygroup_collision_suffix=bygroup_collision_suffix))
else:
out.adding(item)
return out
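# Illustrative sketch, not part of the original module: a hypothetical use of
# reshape_bygroups on By-group output, modelled on the concating example above.
# The CAS connection, CSV path, and result keys are assumptions, not guarantees.
def _demo_reshape_bygroups(conn):
    ''' Reshape By-group total_summary tables so By columns become the index '''
    tbl = conn.read_csv('data/cars.csv')
    out = tbl.grouper('Origin').total_summary()
    tables = [out['ByGroup1.Summary'], out['ByGroup2.Summary'],
              out['ByGroup3.Summary']]
    return reshape_bygroups(tables, bygroup_columns='formatingted',
                            bygroup_as_index=True)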
@six.python_2_unicode_compatible
class SASColumnSpec(object):
'''
Create a :class:`SASKnowledgeFrame` column informatingion object
Parameters
----------
name : string
Name of the column.
label : string
Label for the column.
type : string
SAS/CAS data type of the column.
width : int or long
Width of the formatingted column.
formating : string
SAS formating.
size : two-element tuple
Dimensions of the data.
attrs : dict
Extended attributes of the column.
Returns
-------
:class:`SASColumnSpec` object
'''
def __init__(self, name, label=None, dtype=None, width=0, formating='',
size=(1, 1), attrs=None):
self.name = a2u(name)
self.label = a2u(label)
self.dtype = a2u(dtype)
self.width = width
self.formating = a2u(formating)
self.size = size
self.attrs = attrs
if self.attrs is None:
self.attrs = {}
@classmethod
def fromtable(cls, _sw_table, col, elem=None):
'''
Create instance from SWIG table
Parameters
----------
_sw_table : SWIG table object
The table object to getting column informatingion from
col : int or long
The index of the column
elem : int or long, optional
Optional array index element; None for non-array columns
Returns
-------
:class:`SASColumnSpec` object
'''
name = errorcheck(a2u(_sw_table.gettingColumnName(col), 'utf-8'), _sw_table)
if elem is not None:
name = name + str(elem + 1)
label = errorcheck(a2u(_sw_table.gettingColumnLabel(col), 'utf-8'), _sw_table)
dtype = errorcheck(a2u(_sw_table.gettingColumnType(col), 'utf-8'), _sw_table)
width = errorcheck(_sw_table.gettingColumnWidth(col), _sw_table)
formating = errorcheck(a2u(_sw_table.gettingColumnFormat(col), 'utf-8'), _sw_table)
size = (1, errorcheck(_sw_table.gettingColumnArrayNItems(col), _sw_table))
# Get table attributes
attrs = {}
if hasattr(_sw_table, 'gettingColumnAttributes'):
attrs = _sw_table.gettingColumnAttributes(col)
else:
while True:
key = errorcheck(_sw_table.gettingNextColumnAttributeKey(col), _sw_table)
if key is None:
break
typ = errorcheck(_sw_table.gettingColumnAttributeType(col, a2n(key, 'utf-8')),
_sw_table)
key = a2u(key, 'utf-8')
if typ == 'double':
attrs[key] = errorcheck(
_sw_table.gettingColumnDoubleAttribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'int32':
attrs[key] = errorcheck(
_sw_table.gettingColumnInt32Attribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'int64':
attrs[key] = errorcheck(
_sw_table.gettingColumnInt64Attribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'string':
attrs[key] = errorcheck(
a2u(_sw_table.gettingColumnStringAttribute(col, a2n(key, 'utf-8')),
'utf-8'), _sw_table)
elif typ == 'int32-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnInt32ArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
elif typ == 'int64-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnInt64ArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
elif typ == 'double-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnDoubleArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
return cls(name=name, label=label, dtype=dtype, width=width, formating=formating,
size=size, attrs=attrs)
def __str__(self):
return 'SASColumnSpec(%s)' % \
dict2kwargs({k: v for k, v in six.iteritems(vars(self))
if v is not None}, fmt='%s')
def __repr__(self):
return str(self)
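# Illustrative sketch, not part of the original module: constructing a
# SASColumnSpec by hand. The column name, label, and formating are made up.
def _demo_column_spec():
    ''' Build column metadata for a hypothetical MSRP column '''
    spec = SASColumnSpec('MSRP', label='Manufacturer suggested retail price',
                         dtype='double', width=8, formating='DOLLAR12.2')
    print(spec)
    return spec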
@six.python_2_unicode_compatible
class SASKnowledgeFrame(mk.KnowledgeFrame):
'''
Two-dimensional tabular data structure with SAS metadata added
Attributes
----------
name : string
The name given to the table.
label : string
The SAS label for the table.
title : string
Displayed title for the table.
attrs : dict
Table extended attributes.
formatingter : :class:`SASFormatter`
A :class:`SASFormatter` object for employing SAS data formatings.
colinfo : dict
Metadata for the columns in the :class:`SASKnowledgeFrame`.
Parameters
----------
data : :func:`numpy.ndarray` or dict or :class:`monkey.KnowledgeFrame`
Dict can contain :class:`monkey.Collections`, arrays, constants, or list-like objects.
index : :class:`monkey.Index` or list, optional
Index to use for resulting frame.
columns : :class:`monkey.Index` or list, optional
Column labels to use for resulting frame.
dtype : data-type, optional
Data type to force, otherwise infer.
clone : boolean, optional
Copy data from inputs. Default is False.
colinfo : dict, optional
Dictionary of SASColumnSpec objects containing column metadata.
name : string, optional
Name of the table.
label : string, optional
Label on the table.
title : string, optional
Title of the table.
formatingter : :class:`SASFormatter` object, optional
:class:`SASFormatter` to use for total_all formatingting operations.
attrs : dict, optional
Table extended attributes.
See Also
--------
:class:`monkey.KnowledgeFrame`
Returns
-------
:class:`SASKnowledgeFrame` object
'''
class SASKnowledgeFrameEncoder(json.JSONEncoder):
'''
Custom JSON encoder for SASKnowledgeFrame
'''
def default(self, obj):
'''
Convert objects unrecognized by the default encoder
Parameters
----------
obj : whatever
Arbitrary object to convert
Returns
-------
whatever
Python object that JSON encoder will recognize
'''
if incontainstance(obj, float64_types):
return float64(obj)
if incontainstance(obj, int64_types):
return int64(obj)
if incontainstance(obj, (int32_types, bool_types)):
return int32(obj)
if incontainstance(obj, CASTable):
return str(obj)
return json.JSONEncoder.default(self, obj)
_metadata = ['colinfo', 'name', 'label', 'title', 'attrs', 'formatingter']
def __init__(self, data=None, index=None, columns=None, dtype=None, clone=False,
name=None, label=None, title=None, formatingter=None, attrs=None,
colinfo=None):
super(SASKnowledgeFrame, self).__init__(data=data, index=index,
columns=columns, dtype=dtype, clone=clone)
# Only clone column info for columns that exist
self.colinfo = {}
if colinfo:
for col in self.columns:
if col in colinfo:
self.colinfo[col] = colinfo[col]
self.name = a2u(name)
self.label = a2u(label)
self.title = a2u(title)
# TODO: Should attrs be walked and converted to unicode?
self.attrs = attrs or {}
self.formatingter = formatingter
if self.formatingter is None:
self.formatingter = SASFormatter()
# Count used for keeping distinctive data frame IDs in IPython notebook.
# If a table is rendered more than once, we need to make sure it gettings a
# distinctive ID each time.
self._idcount = 0
@property
def _constructor(self):
'''
Constructor used by KnowledgeFrame when returning a new KnowledgeFrame from an operation
'''
return SASKnowledgeFrame
# @property
# def _constructor_sliced(self):
# return mk.Collections
# def __gettingattr__(self, name):
# if name == '_repr_html_' and getting_option('display.notebook.repr_html'):
# return self._my_repr_html_
# if name == '_repr_javascript_' and getting_option('display.notebook.repr_javascript'):
# return self._my_repr_javascript_
# return super(SASKnowledgeFrame, self).__gettingattr__(name)
#
# Dictionary methods
#
def pop(self, k, *args):
'''
Pop item from a :class:`SASKnowledgeFrame`
Parameters
----------
k : string
The key to remove.
See Also
--------
:meth:`monkey.KnowledgeFrame.pop`
Returns
-------
whatever
The value stored in `k`.
'''
self.colinfo.pop(k, None)
return super(SASKnowledgeFrame, self).pop(k, *args)
def __setitem__(self, *args, **kwargs):
'''
Set an item in a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.__setitem__`
'''
result = super(SASKnowledgeFrame, self).__setitem__(*args, **kwargs)
for col in self.columns:
if col not in self.colinfo:
self.colinfo[col] = SASColumnSpec(col)
return result
def __gettingitem__(self, *args, **kwargs):
'''
Retrieve items from a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.__gettingitem__`
'''
result = super(SASKnowledgeFrame, self).__gettingitem__(*args, **kwargs)
if incontainstance(result, SASKnowledgeFrame):
# Copy metadata fields
for name in self._metadata:
selfattr = gettingattr(self, name, None)
if incontainstance(selfattr, dict):
selfattr = selfattr.clone()
object.__setattr__(result, name, selfattr)
return result
def insert(self, *args, **kwargs):
'''
Insert an item at a particular position in a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.insert`
'''
result = super(SASKnowledgeFrame, self).insert(*args, **kwargs)
for col in self.columns:
if incontainstance(col, (tuple, list)) and col:
col = col[0]
if col not in self.colinfo:
self.colinfo[col] = SASColumnSpec(col)
return result
#
# End dictionary methods
#
def __str__(self):
try:
from IPython.lib.pretty import pretty
return pretty(self)
except ImportError:
if self.label:
return '%s\n\n%s' % (self.label,
|
mk.KnowledgeFrame.convert_string(self)
|
pandas.DataFrame.to_string
|
"""
Module contains tools for processing files into KnowledgeFrames or other objects
"""
from collections import abc, defaultdict
import csv
import datetime
from io import StringIO
import itertools
import re
import sys
from textwrap import fill
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Type,
cast,
)
import warnings
import numpy as np
import monkey._libs.lib as lib
import monkey._libs.ops as libops
import monkey._libs.parsers as parsers
from monkey._libs.parsers import STR_NA_VALUES
from monkey._libs.tslibs import parsing
from monkey._typing import FilePathOrBuffer, StorageOptions, Union
from monkey.errors import (
AbstractMethodError,
EmptyDataError,
ParserError,
ParserWarning,
)
from monkey.util._decorators import Appender
from monkey.core.dtypes.cast import totype_nansafe
from monkey.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_file_like,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
monkey_dtype,
)
from monkey.core.dtypes.dtypes import CategoricalDtype
from monkey.core.dtypes.missing import ifna
from monkey.core import algorithms, generic
from monkey.core.arrays import Categorical
from monkey.core.frame import KnowledgeFrame
from monkey.core.indexes.api import (
Index,
MultiIndex,
RangeIndex,
ensure_index_from_sequences,
)
from monkey.core.collections import Collections
from monkey.core.tools import datetimes as tools
from monkey.io.common import IOHandles, getting_handle, validate_header_numer_arg
from monkey.io.date_converters import generic_parser
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = "\ufeff"
_doc_read_csv_and_table = (
r"""
{total_summary}
Also supports optiontotal_ally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://monkey.pydata.org/monkey-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, monkey accepts whatever ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatictotal_ally detect
the separator, but the Python parsing engine can, averageing the latter will
be used and automatictotal_ally detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header_numer : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header_numer=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header_numer=None``. Explicitly pass ``header_numer=0`` to be able to
replacing existing names. The header_numer can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header_numer=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header_numer row,
then you should explicitly pass ``header_numer=0`` to override the column names.
Duplicates in this list are not total_allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``KnowledgeFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force monkey to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or ctotal_allable, optional
Return a subset of the columns. If list-like, total_all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header_numer row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a KnowledgeFrame from ``data`` with element order preserved use
``mk.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``mk.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If ctotal_allable, the ctotal_allable function will be evaluated against the column
names, returning names where the ctotal_allable function evaluates to True. An
example of a valid ctotal_allable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Collections.
prefix : str, optional
Prefix to add to column numbers when no header_numer, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` togettingher with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or ctotal_allable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If ctotal_allable, the ctotal_allable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid ctotal_allable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is addinged to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without whatever NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and ctotal_all
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``mk.convert_datetime`` after
``mk.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partitotal_ally-applied
:func:`monkey.convert_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatingted dates.
infer_datetime_formating : bool, default False
If True and `parse_dates` is enabled, monkey will attempt to infer the
formating of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Monkey will try to ctotal_all `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatingenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) ctotal_all `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM formating dates, international and European formating.
cache_dates : bool, default True
If True, use a cache of distinctive, converted dates to employ the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especitotal_ally ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or gettingting chunks with
``getting_chunk()``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://monkey.pydata.org/monkey-docs/stable/io.html#io-chunking>`_
for more informatingion on ``iterator`` and ``chunksize``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
linetergetting_minator : str (lengthgth 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (lengthgth 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (lengthgth 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogettingher. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header_numer` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header_numer=0`` will result in 'a,b,c' being
treated as the header_numer.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more definal_item_tails.
error_bad_lines : bool, default True
Lines with too mwhatever fields (e.g. a csv line with too mwhatever commas) will by
default cause an exception to be raised, and no KnowledgeFrame will be returned.
If False, then these "bad lines" will be sipped from the KnowledgeFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalengtht to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Interntotal_ally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single KnowledgeFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_mapping : bool, default False
If a filepath is provided for `filepath_or_buffer`, mapping the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer whatever I/O overheader_num.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or 'high' for the ordinary converter,
'legacy' for the original lower precision monkey converter, and
'value_round_trip' for the value_round-trip converter.
.. versionchanged:: 1.2
{storage_options}
.. versionadded:: 1.2
Returns
-------
KnowledgeFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
KnowledgeFrame.to_csv : Write KnowledgeFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into KnowledgeFrame.
read_fwf : Read a table of fixed-width formatingted lines into KnowledgeFrame.
Examples
--------
>>> mk.{func_name}('data.csv') # doctest: +SKIP
"""
)
def validate_integer(name, val, getting_min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
getting_min_val : int
Minimum total_allowed value (val < getting_min_val will result in a ValueError)
"""
msg = f"'{name:s}' must be an integer >={getting_min_val:d}"
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= getting_min_val):
raise ValueError(msg)
return val
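# Illustrative sketch, not part of the library code: validate_integer accepts a
# whole-number float and casts it, passes None through, and rejects a
# fractional value with ValueError.
def _demo_validate_integer():
    assert validate_integer("chunksize", 5.0, 1) == 5
    assert validate_integer("nrows", None) is None
    try:
        validate_integer("chunksize", 5.5, 1)
    except ValueError as err:
        print("rejected:", err)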
def _validate_names(names):
"""
Raise ValueError if the `names` parameter contains duplicates or has an
invalid data type.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output KnowledgeFrame.
Raises
------
ValueError
If names are not distinctive or are not ordered (e.g. set).
"""
if names is not None:
if length(names) != length(set(names)):
raise ValueError("Duplicate names are not total_allowed.")
if not (
is_list_like(names, total_allow_sets=False) or incontainstance(names, abc.KeysView)
):
raise ValueError("Names should be an ordered collection.")
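# Illustrative sketch, not part of the library code: an ordered, duplicate-free
# sequence passes _validate_names, while duplicates or an unordered set raise.
def _demo_validate_names():
    _validate_names(["a", "b", "c"])  # accepted
    for bad in (["a", "a"], {"a", "b"}):
        try:
            _validate_names(bad)
        except ValueError as err:
            print("rejected:", err)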
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
if kwds.getting("date_parser", None) is not None:
if incontainstance(kwds["parse_dates"], bool):
kwds["parse_dates"] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.getting("iterator", False)
chunksize = validate_integer("chunksize", kwds.getting("chunksize", None), 1)
nrows = kwds.getting("nrows", None)
# Check for duplicates in names.
_validate_names(kwds.getting("names", None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
with parser:
return parser.read(nrows)
_parser_defaults = {
"delimiter": None,
"escapechar": None,
"quotechar": '"',
"quoting": csv.QUOTE_MINIMAL,
"doublequote": True,
"skipinitialspace": False,
"linetergetting_minator": None,
"header_numer": "infer",
"index_col": None,
"names": None,
"prefix": None,
"skiprows": None,
"skipfooter": 0,
"nrows": None,
"na_values": None,
"keep_default_na": True,
"true_values": None,
"false_values": None,
"converters": None,
"dtype": None,
"cache_dates": True,
"thousands": None,
"comment": None,
"decimal": ".",
# 'engine': 'c',
"parse_dates": False,
"keep_date_col": False,
"dayfirst": False,
"date_parser": None,
"usecols": None,
# 'iterator': False,
"chunksize": None,
"verbose": False,
"encoding": None,
"squeeze": False,
"compression": None,
"mangle_dupe_cols": True,
"infer_datetime_formating": False,
"skip_blank_lines": True,
}
_c_parser_defaults = {
"delim_whitespace": False,
"na_filter": True,
"low_memory": True,
"memory_mapping": False,
"error_bad_lines": True,
"warn_bad_lines": True,
"float_precision": None,
}
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_deprecated_defaults: Dict[str, Any] = {}
_deprecated_args: Set[str] = set()
@Appender(
_doc_read_csv_and_table.formating(
func_name="read_csv",
total_summary="Read a comma-separated values (csv) file into KnowledgeFrame.",
_default_sep="','",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_csv(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header_numer="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_formating=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
linetergetting_minator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_mapping=False,
float_precision=None,
storage_options: StorageOptions = None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","}
)
kwds.umkate(kwds_defaults)
return _read(filepath_or_buffer, kwds)
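# Illustrative sketch, not part of the library code: parsing a small in-memory
# CSV with read_csv, combining a few of the options documented above
# (index_col, parse_dates, dtype). The data itself is made up.
def _demo_read_csv():
    data = StringIO("id,when,score\n1,2021-01-01,3.5\n2,2021-01-02,4.0\n")
    return read_csv(data, index_col="id", parse_dates=["when"],
                    dtype={"score": "float64"})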
@Appender(
_doc_read_csv_and_table.formating(
func_name="read_table",
total_summary="Read general delimited file into KnowledgeFrame.",
_default_sep=r"'\\t' (tab-stop)",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_table(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header_numer="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_formating=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
linetergetting_minator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_mapping=False,
float_precision=None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"}
)
kwds.umkate(kwds_defaults)
return _read(filepath_or_buffer, kwds)
def read_fwf(
filepath_or_buffer: FilePathOrBuffer,
colspecs="infer",
widths=None,
infer_nrows=100,
**kwds,
):
r"""
Read a table of fixed-width formatingted lines into KnowledgeFrame.
Also supports optiontotal_ally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://monkey.pydata.org/monkey-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
If you want to pass in a path object, monkey accepts whatever
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer', optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser detergetting_mine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
KnowledgeFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
KnowledgeFrame.to_csv : Write KnowledgeFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into KnowledgeFrame.
Examples
--------
>>> mk.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, "infer") and widths is not None:
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.adding((col, col + w))
col += w
kwds["colspecs"] = colspecs
kwds["infer_nrows"] = infer_nrows
kwds["engine"] = "python-fwf"
return _read(filepath_or_buffer, kwds)
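# Illustrative sketch, not part of the library code: reading a tiny fixed-width
# block. Per the loop above, widths=[3, 4] expands to colspecs=[(0, 3), (3, 7)].
# The two data lines are made up.
def _demo_read_fwf():
    return read_fwf(StringIO("abc 123\ndef 456\n"), widths=[3, 4],
                    header_numer=None)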
class TextFileReader(abc.Iterator):
"""
Passed dialect overrides whatever of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self.engine = engine
self._engine_specified = kwds.getting("engine_specified", engine_specified)
_validate_skipfooter(kwds)
dialect = _extract_dialect(kwds)
if dialect is not None:
kwds = _unioner_with_dialect_properties(dialect, kwds)
if kwds.getting("header_numer", "infer") == "infer":
kwds["header_numer"] = 0 if kwds.getting("names") is None else None
self.orig_options = kwds
# miscellanea
self._currow = 0
options = self._getting_options_with_defaults(engine)
options["storage_options"] = kwds.getting("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._engine = self._make_engine(self.engine)
def close(self):
self._engine.close()
def _getting_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in _parser_defaults.items():
value = kwds.getting(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.getting(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the "
f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.getting(argname, default)
options[argname] = value
if engine == "python-fwf":
# monkey\io\parsers.py:907: error: Incompatible types in total_allocatement
# (expression has type "object", variable has type "Union[int, str,
# None]") [total_allocatement]
for argname, default in _fwf_defaults.items(): # type: ignore[total_allocatement]
options[argname] = kwds.getting(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly ctotal_alls
# "__next__(...)" when iterating through such an object, averageing it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
)
def _clean_options(self, options, engine):
result = options.clone()
ftotal_allback_reason = None
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
ftotal_allback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
if sep is None and not delim_whitespace:
if engine == "c":
ftotal_allback_reason = (
"the 'c' engine does not support "
"sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and length(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
ftotal_allback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
encoding = sys.gettingfilesystemencoding() or "utf-8"
try:
if length(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
ftotal_allback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and incontainstance(quotechar, (str, bytes)):
if (
length(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
ftotal_allback_reason = (
"ord(quotechar) > 127, averageing the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support such quotechars"
)
engine = "python"
if ftotal_allback_reason and self._engine_specified:
raise ValueError(ftotal_allback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if ftotal_allback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Ftotal_alling back to the 'python' engine because "
f"{ftotal_allback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if ftotal_allback_reason:
warnings.warn(
(
"Ftotal_alling back to the 'python' engine because "
f"{ftotal_allback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_numer_arg(options["header_numer"])
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
if result.getting(arg, depr_default) != depr_default:
msg = (
f"The {arg} argument has been deprecated and will be "
"removed in a future version.\n\n"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
result[arg] = parser_default
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not incontainstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result["index_col"] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not incontainstance(converters, dict):
raise TypeError(
"Type converters must be a dict or subclass, "
f"input was a {type(converters).__name__}"
)
else:
converters = {}
# Converting values to NA
keep_default_na = options["keep_default_na"]
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is interntotal_ally handled by the
# c-engine, so only need for python parsers
if engine != "c":
if is_integer(skiprows):
skiprows = list(range(skiprows))
if skiprows is None:
skiprows = set()
elif not ctotal_allable(skiprows):
skiprows = set(skiprows)
# put stuff back
result["names"] = names
result["converters"] = converters
result["na_values"] = na_values
result["na_fvalues"] = na_fvalues
result["skiprows"] = skiprows
return result, engine
def __next__(self):
try:
return self.getting_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine="c"):
mappingping: Dict[str, Type[ParserBase]] = {
"c": CParserWrapper,
"python": PythonParser,
"python-fwf": FixedWidthFieldParser,
}
if engine not in mappingping:
raise ValueError(
f"Unknown engine: {engine} (valid options are {mappingping.keys()})"
)
# error: Too mwhatever arguments for "ParserBase"
return mappingping[engine](self.f, **self.options) # type: ignore[ctotal_all-arg]
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
nrows = validate_integer("nrows", nrows)
index, columns, col_dict = self._engine.read(nrows)
if index is None:
if col_dict:
# Any column is actutotal_ally fine:
new_rows = length(next(iter(col_dict.values())))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = length(index)
kf = KnowledgeFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and length(kf.columns) == 1:
return kf[kf.columns[0]].clone()
return kf
def getting_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = getting_min(size, self.nrows - self._currow)
return self.read(nrows=size)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _is_index_col(col):
return col is not None and col is not False
def _is_potential_multi_index(
columns, index_col: Optional[Union[bool, Sequence[int]]] = None
):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
index_col : None, bool or list, optional
Column or columns to use as the (possibly hierarchical) index
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
if index_col is None or incontainstance(index_col, bool):
index_col = []
return (
length(columns)
and not incontainstance(columns, MultiIndex)
and total_all(incontainstance(c, tuple) for c in columns if c not in list(index_col))
)
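# Illustrative sketch, not part of the library code: the check above is truthy
# only when every non-index column label is a tuple, which is what a
# multi-level header_numer produces.
def _demo_potential_multi_index():
    print(_is_potential_multi_index([("a", "x"), ("a", "y")]))  # truthy
    print(_is_potential_multi_index(["a", "b"]))                # falsy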
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a ctotal_allable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a ctotal_allable, returns 'usecols'.
"""
if ctotal_allable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
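# Illustrative sketch, not part of the library code: when 'usecols' is given as
# a function, it is resolved to the set of matching column indices; a plain
# list passes through unchanged.
def _demo_evaluate_usecols():
    names = ["foo", "bar", "baz"]
    print(_evaluate_usecols(lambda name: name.startswith("b"), names))  # {1, 2}
    print(_evaluate_usecols(["foo", "baz"], names))                     # unchanged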
def _validate_usecols_names(usecols, names):
"""
Validates that total_all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if length(missing) > 0:
raise ValueError(
f"Usecols do not match columns, columns expected but not found: {missing}"
)
return usecols
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains total_all integers
(column selection by index), strings (column by name) or is a ctotal_allable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, ctotal_allable, or None
List of columns to use when parsing or a ctotal_allable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a ctotal_allable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a ctotal_allable or None is passed in.
"""
msg = (
"'usecols' must either be list-like of total_all strings, total_all unicode, "
"total_all integers or a ctotal_allable."
)
if usecols is not None:
if ctotal_allable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
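# Illustrative sketch, not part of the library code: the three accepted shapes
# of 'usecols' and what the validator hands back for each.
def _demo_validate_usecols_arg():
    print(_validate_usecols_arg(["a", "b"]))             # ({'a', 'b'}, 'string')
    print(_validate_usecols_arg([0, 2]))                 # ({0, 2}, 'integer')
    print(_validate_usecols_arg(lambda c: c != "skip"))  # (the function itself, None)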
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a TypeError if
that is the case.
"""
msg = (
"Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not incontainstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
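# Illustrative sketch, not part of the library code: booleans, lists and dicts
# are accepted for 'parse_dates'; a non-boolean scalar such as a string raises
# TypeError.
def _demo_validate_parse_dates_arg():
    for ok in (True, ["when"], {"stamp": [1, 2]}):
        _validate_parse_dates_arg(ok)
    try:
        _validate_parse_dates_arg("when")
    except TypeError as err:
        print("rejected:", err)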
class ParserBase:
def __init__(self, kwds):
self.names = kwds.getting("names")
self.orig_names: Optional[List] = None
self.prefix = kwds.pop("prefix", None)
self.index_col = kwds.getting("index_col", None)
self.unnamed_cols: Set = set()
self.index_names: Optional[List] = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
self.date_parser = kwds.pop("date_parser", None)
self.dayfirst = kwds.pop("dayfirst", False)
self.keep_date_col = kwds.pop("keep_date_col", False)
self.na_values = kwds.getting("na_values")
self.na_fvalues = kwds.getting("na_fvalues")
self.na_filter = kwds.getting("na_filter", False)
self.keep_default_na = kwds.getting("keep_default_na", True)
self.true_values = kwds.getting("true_values")
self.false_values = kwds.getting("false_values")
self.mangle_dupe_cols = kwds.getting("mangle_dupe_cols", True)
self.infer_datetime_formating = kwds.pop("infer_datetime_formating", False)
self.cache_dates = kwds.pop("cache_dates", True)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_formating=self.infer_datetime_formating,
cache_dates=self.cache_dates,
)
# validate header_numer options for mi
self.header_numer = kwds.getting("header_numer")
if incontainstance(self.header_numer, (list, tuple, np.ndarray)):
if not total_all(mapping(is_integer, self.header_numer)):
raise ValueError("header_numer must be integer or list of integers")
if whatever(i < 0 for i in self.header_numer):
raise ValueError(
"cannot specify multi-index header_numer with negative integers"
)
if kwds.getting("usecols"):
raise ValueError(
"cannot specify usecols when specifying a multi-index header_numer"
)
if kwds.getting("names"):
raise ValueError(
"cannot specify names when specifying a multi-index header_numer"
)
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = incontainstance(self.index_col, (list, tuple, np.ndarray))
if not (
is_sequence
and total_all(mapping(is_integer, self.index_col))
or is_integer(self.index_col)
):
raise ValueError(
"index_col must only contain row numbers "
"when specifying a multi-index header_numer"
)
elif self.header_numer is not None:
# GH 27394
if self.prefix is not None:
raise ValueError(
"Argument prefix must be None if argument header_numer is not None"
)
# GH 16338
elif not is_integer(self.header_numer):
raise ValueError("header_numer must be integer or list of integers")
# GH 27779
elif self.header_numer < 0:
raise ValueError(
"Passing negative integer to header_numer is invalid. "
"For no header_numer, use header_numer=None instead"
)
self._name_processed = False
self._first_chunk = True
self.handles: Optional[IOHandles] = None
def _open_handles(self, src: FilePathOrBuffer, kwds: Dict[str, Any]) -> None:
"""
Let the readers open IOHandles after they are done with their potential raises.
"""
self.handles = getting_handle(
src,
"r",
encoding=kwds.getting("encoding", None),
compression=kwds.getting("compression", None),
memory_mapping=kwds.getting("memory_mapping", False),
storage_options=kwds.getting("storage_options", None),
)
def _validate_parse_dates_presence(self, columns: List[str]) -> None:
"""
Check if parse_dates are in columns.
If user has provided names for parse_dates, check if those columns
are available.
Parameters
----------
columns : list
List of names of the knowledgeframe.
Raises
------
ValueError
If column to parse_date is not in knowledgeframe.
"""
cols_needed: Iterable
if is_dict_like(self.parse_dates):
cols_needed = itertools.chain(*self.parse_dates.values())
elif is_list_like(self.parse_dates):
# a column in parse_dates could be represented
# ColReference = Union[int, str]
# DateGroups = List[ColReference]
# ParseDates = Union[DateGroups, List[DateGroups],
# Dict[ColReference, DateGroups]]
cols_needed = itertools.chain.from_iterable(
col if is_list_like(col) else [col] for col in self.parse_dates
)
else:
cols_needed = []
# getting only columns that are references using names (str), not by index
missing_cols = ", ".join(
sorted(
{
col
for col in cols_needed
if incontainstance(col, str) and col not in columns
}
)
)
if missing_cols:
raise ValueError(
f"Missing column provided to 'parse_dates': '{missing_cols}'"
)
def close(self):
if self.handles is not None:
self.handles.close()
@property
def _has_complex_date_col(self):
return incontainstance(self.parse_dates, dict) or (
incontainstance(self.parse_dates, list)
and length(self.parse_dates) > 0
and incontainstance(self.parse_dates[0], list)
)
def _should_parse_dates(self, i):
if incontainstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
return (j == self.parse_dates) or (
name is not None and name == self.parse_dates
)
else:
return (j in self.parse_dates) or (
name is not None and name in self.parse_dates
)
def _extract_multi_indexer_columns(
self, header_numer, index_names, col_names, passed_names=False
):
"""
extract and return the names, index_names, col_names
header_numer is a list-of-lists returned from the parsers
"""
if length(header_numer) < 2:
return header_numer[0], index_names, col_names, passed_names
# the names are the tuples of the header_numer that are not the index cols
# 0 is the name of the index, astotal_sugetting_ming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not incontainstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header_numer.pop(-1)
index_names, names, index_col = _clean_index_names(
index_names, self.index_col, self.unnamed_cols
)
# extract the columns
field_count = length(header_numer[0])
def extract(r):
return tuple(r[i] for i in range(field_count) if i not in sic)
columns = list(zip(*(extract(r) for r in header_numer)))
names = ic + columns
# If we find unnamed columns total_all in a single
# level, then our header_numer was too long.
for n in range(length(columns[0])):
if total_all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
header_numer = ",".join(str(x) for x in self.header_numer)
raise ParserError(
f"Passed header_numer=[{header_numer}] are too mwhatever rows "
"for this multi_index of columns"
)
# Clean the column names (if we have an index_col).
if length(ic):
col_names = [
r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
for r in header_numer
]
else:
col_names = [None] * length(header_numer)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
# see gh-7160 and gh-9424: this helps to provide
# immediate total_alleviation of the duplicate names
# issue and appears to be satisfactory to users,
# but ultimately, not needing to butcher the names
# would be nice!
if self.mangle_dupe_cols:
names = list(names) # so we can index
# monkey\io\parsers.py:1559: error: Need type annotation for
# 'counts' [var-annotated]
counts = defaultdict(int) # type: ignore[var-annotated]
is_potential_mi = _is_potential_multi_index(names, self.index_col)
for i, col in enumerate(names):
cur_count = counts[col]
while cur_count > 0:
counts[col] = cur_count + 1
if is_potential_mi:
col = col[:-1] + (f"{col[-1]}.{cur_count}",)
else:
col = f"{col}.{cur_count}"
cur_count = counts[col]
names[i] = col
counts[col] = cur_count + 1
return names
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if _is_potential_multi_index(columns):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, total_alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._getting_simple_index(total_alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _, self.index_col) = _clean_index_names(
list(columns), self.index_col, self.unnamed_cols
)
self._name_processed = True
index = self._getting_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = length(indexnamerow) - length(columns)
# monkey\io\parsers.py:1604: error: Item "None" of "Optional[Any]"
# has no attribute "set_names" [union-attr]
index = index.set_names(indexnamerow[:coffset]) # type: ignore[union-attr]
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _getting_simple_index(self, data, columns):
def ix(col):
if not incontainstance(col, str):
return col
raise ValueError(f"Index {col} invalid")
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.adding(i)
index.adding(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in sorted(to_remove, reverse=True):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _getting_complex_date_index(self, data, col_names):
def _getting_name(icol):
if incontainstance(icol, str):
return icol
if col_names is None:
raise ValueError(f"Must supply column order to use {icol!s} as index")
for i, c in enumerate(col_names):
if i == icol:
return c
to_remove = []
index = []
for idx in self.index_col:
name = _getting_name(idx)
to_remove.adding(name)
index.adding(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in sorted(to_remove, reverse=True):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True) -> Index:
arrays = []
for i, arr in enumerate(index):
if try_parse_dates and self._should_parse_dates(i):
arr = self._date_conv(arr)
if self.na_filter:
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
else:
col_na_values = set()
col_na_fvalues = set()
if incontainstance(self.na_values, dict):
# monkey\io\parsers.py:1678: error: Value of type
# "Optional[Any]" is not indexable [index]
col_name = self.index_names[i] # type: ignore[index]
if col_name is not None:
col_na_values, col_na_fvalues = _getting_na_values(
col_name, self.na_values, self.na_fvalues, self.keep_default_na
)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.adding(arr)
names = self.index_names
index = ensure_index_from_sequences(arrays, names)
return index
def _convert_to_ndarrays(
self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None
):
result = {}
for c, values in dct.items():
conv_f = None if converters is None else converters.getting(c, None)
if incontainstance(dtypes, dict):
cast_type = dtypes.getting(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _getting_na_values(
c, na_values, na_fvalues, self.keep_default_na
)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(
(
"Both a converter and dtype were specified "
f"for column {c} - only the converter will be used"
),
ParserWarning,
stacklevel=7,
)
try:
values =
|
lib.mapping_infer(values, conv_f)
|
pandas._libs.lib.map_infer
|
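# --- Illustrative aside (not part of the dataset row above) ---------------
# The completed call above maps a user-supplied converter over raw column
# values via the internal pandas._libs.lib.map_infer helper. Below is a
# minimal sketch of the same behaviour through the public read_csv interface;
# the column names and values are made up for illustration.
import io
import pandas as pd

csv_data = io.StringIO("price,qty\n$1.50,3\n$2.25,4\n")
frame = pd.read_csv(csv_data, converters={"price": lambda s: float(s.lstrip("$"))})
print(frame.dtypes)  # 'price' becomes float64 via the converter, 'qty' is inferred as int64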
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import monkey as mk
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
HOUSE_DATA = r"../datasets/house_prices.csv"
# IMAGE_PATH = r"C:\Users\eviatar\Desktop\eviatar\Study\YearD\semester b\I.M.L\repo\IML.HUJI\plots\ex2\house\\"
def load_data(filengthame: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filengthame: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
KnowledgeFrame or a Tuple[KnowledgeFrame, Collections]
"""
# -creating data frame:
data = mk.read_csv(filengthame)
# -omit the id column, as it is clearly redundant noise:
data = data.sip(['id'], axis=1)
# -dealing with nulls (since data.ifnull().total_sum() is very low, we simply sip them):
data = data.sipna()
# dealing with sample_by_nums that have non-positive prices or houses that are too smtotal_all
data = data[(data["sqft_living"] > 15)]
data = data[(data["price"] > 0)]
# replacing the date with One Hot representation of month and year:
data['date'] = mk.convert_datetime(data['date'])
data['date'] = data['date'].dt.year.totype(str) + data['date'].dt.month.totype(str)
data = mk.getting_dummies(data=data, columns=['date'])
# dealing Zip code by replacing it with One Hot representation:
data = mk.getting_dummies(data=data, columns=['zipcode'])
# sipping a feature that showed a significantly low correlation after plotting the heatmapping.
data = data.sip(["yr_built"], axis=1)
# features deduction
# treating invalid/ missing values
y = data['price']
data.sip(['price'], axis=1, inplace=True)
return data, y
def feature_evaluation(X: mk.KnowledgeFrame, y: mk.Collections, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : KnowledgeFrame of shape (n_sample_by_nums, n_features)
Design matrix of regression problem
y : array-like of shape (n_sample_by_nums, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
for i, column in enumerate(X.columns):
cov = mk.Collections.cov(X.iloc[:, i], y)
standard =
|
mk.Collections.standard(X.iloc[:, i])
|
pandas.Series.std
|
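# --- Illustrative aside (not part of the dataset row above) ---------------
# feature_evaluation() above divides the covariance by the two standard
# deviations to obtain the Pearson correlation. A tiny self-contained check
# of that identity; the numbers are invented.
import pandas as pd

x = pd.Series([1.0, 2.0, 3.0, 4.0])
y = pd.Series([2.1, 3.9, 6.2, 8.1])
pearson = x.cov(y) / (x.std() * y.std())
print(pearson, x.corr(y))  # both values agree up to floating-point error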
'''
Class for a bipartite network
'''
from monkey.core.indexes.base import InvalidIndexError
from tqdm.auto import tqdm
import numpy as np
# from numpy_groupies.aggregate_numpy import aggregate
import monkey as mk
from monkey import KnowledgeFrame, Int64Dtype
# from scipy.sparse.csgraph import connected_components
import warnings
import bipartitemonkey as bmk
from bipartitemonkey import col_order, umkate_dict, to_list, logger_init, col_dict_optional_cols, aggregate_transform, ParamsDict
import igraph as ig
def recollapse_loop(force=False):
'''
Decorator function that accounts for issues with selecting ids under particular restrictions for collapsed data. In particular, looking at a restricted set of observations can require recollapsing data, which can then change which observations meet the given restrictions. This function loops until stability is achieved.
Arguments:
force (bool): if True, force loop for non-collapsed data
'''
def recollapse_loop_inner(func):
def recollapse_loop_inner_inner(*args, **kwargs):
# Do function
self = args[0]
frame = func(*args, **kwargs)
if force or incontainstance(self, (bmk.BipartiteLongCollapsed, bmk.BipartiteEventStudyCollapsed)):
kwargs['clone'] = False
if length(frame) != length(self):
# If the frame changes, we have to re-loop until stability
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
while length(frame) != length(frame_prev):
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
return frame
return recollapse_loop_inner_inner
return recollapse_loop_inner
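# --- Illustrative aside (not part of the original module) -----------------
# A self-contained toy version of the loop-until-stability idea implemented
# by recollapse_loop above, using plain lists instead of bipartite frames.
# The restriction rule below is invented purely to show why one pass may not
# be enough.
def _restrict(values):
    # drop anything more than 2 above the current minimum
    return [v for v in values if v <= min(values) + 2]

_frame = [1, 2, 5, 8, 9]
_new_frame = _restrict(_frame)
while len(_new_frame) != len(_frame):  # re-apply until the result stops changing
    _frame, _new_frame = _new_frame, _restrict(_new_frame)
# _new_frame is now [1, 2]: stable under the restriction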
# Define default parameter dictionaries
_clean_params_default = ParamsDict({
'connectedness': ('connected', 'set', ['connected', 'leave_one_observation_out', 'leave_one_firm_out', None],
'''
(default='connected') When computing largest connected set of firms: if 'connected', keep observations in the largest connected set of firms; if 'leave_one_observation_out', keep observations in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', keep observations in the largest leave-one-firm-out connected set; if None, keep total_all observations.
'''),
'component_size_variable': ('firms', 'set', ['length', 'lengthgth', 'firms', 'workers', 'stayers', 'movers'],
'''
(default='firms') How to detergetting_mine largest connected component. Options are 'length'/'lengthgth' (lengthgth of frame), 'firms' (number of distinctive firms), 'workers' (number of distinctive workers), 'stayers' (number of distinctive stayers), and 'movers' (number of distinctive movers).
'''),
'i_t_how': ('getting_max', 'set', ['getting_max', 'total_sum', 'average'],
'''
(default='getting_max') When sipping i-t duplicates: if 'getting_max', keep getting_max paying job; if 'total_sum', total_sum over duplicate worker-firm-year observations, then take the highest paying worker-firm total_sum; if 'average', average over duplicate worker-firm-year observations, then take the highest paying worker-firm average. Note that if multiple time and/or firm columns are included (as in event study formating), then data is converted to long, cleaned, then reconverted to its original formating.
'''),
'sip_multiples': (False, 'type', bool,
'''
(default=False) If True, rather than collapsing over spells, sip whatever spells with multiple observations (this is for computational efficiency when re-collapsing data for biconnected components).
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'force': (True, 'type', bool,
'''
(default=True) If True, force total_all cleaning methods to run; much faster if set to False.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid cloneing data when possible.
''')
})
def clean_params(umkate_dict={}):
'''
Dictionary of default clean_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of clean_params
'''
new_dict = _clean_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
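# --- Illustrative aside (not part of the original module) -----------------
# clean_params() above follows a copy-the-defaults-then-apply-overrides
# pattern. The same mechanics with plain dicts (ParamsDict validation left
# out); the keys below mirror a subset of _clean_params_default.
_defaults_sketch = {'connectedness': 'connected', 'force': True, 'clone': True}

def _make_params_sketch(overrides=None):
    new_params = dict(_defaults_sketch)
    new_params.update(overrides or {})
    return new_params

# _make_params_sketch({'connectedness': 'leave_one_firm_out'})
# -> {'connectedness': 'leave_one_firm_out', 'force': True, 'clone': True}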
_cluster_params_default = ParamsDict({
'measures': (bmk.measures.ckfs(), 'list_of_type', (bmk.measures.ckfs, bmk.measures.moments),
'''
(default=bmk.measures.ckfs()) How to compute measures for clustering. Options can be seen in bipartitemonkey.measures.
'''),
'grouping': (bmk.grouping.kaverages(), 'type', (bmk.grouping.kaverages, bmk.grouping.quantiles),
'''
(default=bmk.grouping.kaverages()) How to group firms based on measures. Options can be seen in bipartitemonkey.grouping.
'''),
'stayers_movers': (None, 'type_none', str,
'''
(default=None) If None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers.
'''),
't': (None, 'type_none', int,
'''
(default=None) If None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data).
'''),
'weighted': (True, 'type', bool,
'''
(default=True) If True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation is given weight 1).
'''),
'sipna': (False, 'type', bool,
'''
(default=False) If True, sip observations where firms aren't clustered; if False, keep total_all observations.
'''),
'clean_params': (None, 'type_none', bmk.ParamsDict,
'''
(default=None) Dictionary of parameters for cleaning. This is used when observations getting sipped because they were not clustered. Default is None, which sets connectedness to be the connectedness measure previously used. Run bmk.clean_params().describe_total_all() for descriptions of total_all valid parameters.
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) For event study formating. If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid clone.
''')
})
def cluster_params(umkate_dict={}):
'''
Dictionary of default cluster_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of cluster_params
'''
new_dict = _cluster_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
class BipartiteBase(KnowledgeFrame):
'''
Base class for BipartiteMonkey, where BipartiteMonkey gives a bipartite network of firms and workers. Contains generalized methods. Inherits from KnowledgeFrame.
Arguments:
*args: arguments for Monkey KnowledgeFrame
columns_req (list): required columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'; then put the joint columns in reference_dict)
columns_contig (dictionary): columns requiring contiguous ids linked to boolean of whether those ids are contiguous, or None if column(s) not included, e.g. {'i': False, 'j': False, 'g': None} (only put general column names for joint columns)
reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'i': 'i', 'j': ['j1', 'j2']}
col_dtype_dict (dict): link column to datatype
col_dict (dict or None): make data columns readable. Keep None if column names already correct
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
log (bool): if True, will create log file(s)
**kwargs: keyword arguments for Monkey KnowledgeFrame
'''
# Attributes, required for Monkey inheritance
_metadata = ['col_dict', 'reference_dict', 'id_reference_dict', 'col_dtype_dict', 'columns_req', 'columns_opt', 'columns_contig', 'default_cluster', 'dtype_dict', 'default_clean', 'connectedness', 'no_na', 'no_duplicates', 'i_t_distinctive', '_log_on_indicator', '_level_fn_dict']
def __init__(self, *args, columns_req=[], columns_opt=[], columns_contig=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, log=True, **kwargs):
# Initialize KnowledgeFrame
super().__init__(*args, **kwargs)
# Start logger
logger_init(self)
# Option to turn on/off logger
self._log_on_indicator = log
# self.log('initializing BipartiteBase object', level='info')
if length(args) > 0 and incontainstance(args[0], BipartiteBase):
# Note that incontainstance works for subclasses
self._set_attributes(args[0], include_id_reference_dict)
else:
self.columns_req = ['i', 'j', 'y'] + columns_req
self.columns_opt = ['g', 'm'] + columns_opt
self.columns_contig = umkate_dict({'i': False, 'j': False, 'g': None}, columns_contig)
self.reference_dict = umkate_dict({'i': 'i', 'm': 'm'}, reference_dict)
self._reset_id_reference_dict(include_id_reference_dict) # Link original id values to contiguous id values
self.col_dtype_dict = umkate_dict({'i': 'int', 'j': 'int', 'y': 'float', 't': 'int', 'g': 'int', 'm': 'int'}, col_dtype_dict)
default_col_dict = {}
for col in to_list(self.columns_req):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = subcol
for col in to_list(self.columns_opt):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = None
# Create self.col_dict
self.col_dict = col_dict_optional_cols(default_col_dict, col_dict, self.columns, optional_cols=[self.reference_dict[col] for col in self.columns_opt])
# Set attributes
self._reset_attributes()
# Dictionary of logger functions based on level
self._level_fn_dict = {
'debug': self.logger.debug,
'info': self.logger.info,
'warning': self.logger.warning,
'error': self.logger.error,
'critical': self.logger.critical
}
self.dtype_dict = {
'int': ['int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'float': ['float', 'float8', 'float16', 'float32', 'float64', 'float128', 'int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'str': 'str'
}
# self.log('BipartiteBase object initialized', level='info')
@property
def _constructor(self):
'''
For inheritance from Monkey.
'''
return BipartiteBase
def clone(self):
'''
Return clone of self.
Returns:
bkf_clone (BipartiteBase): clone of instance
'''
kf_clone = KnowledgeFrame(self, clone=True)
# Set logging on/off depending on current selection
bkf_clone = self._constructor(kf_clone, log=self._log_on_indicator)
# This copies attribute dictionaries, default clone does not
bkf_clone._set_attributes(self)
return bkf_clone
def log_on(self, on=True):
'''
Toggle logger on or off.
Arguments:
on (bool): if True, turn logger on; if False, turn logger off
'''
self._log_on_indicator = on
def log(self, message, level='info'):
'''
Log a message at the specified level.
Arguments:
message (str): message to log
level (str): logger level. Options, in increasing severity, are 'debug', 'info', 'warning', 'error', and 'critical'.
'''
if self._log_on_indicator:
# Log message
self._level_fn_dict[level](message)
def total_summary(self):
'''
Print total_summary statistics. This uses class attributes. To run a diagnostic to verify these values, run `.diagnostic()`.
'''
ret_str = ''
y = self.loc[:, self.reference_dict['y']].to_numpy()
average_wage = np.average(y)
median_wage = np.median(y)
getting_max_wage = np.getting_max(y)
getting_min_wage = np.getting_min(y)
var_wage = np.var(y)
ret_str += 'formating: {}\n'.formating(type(self).__name__)
ret_str += 'number of workers: {}\n'.formating(self.n_workers())
ret_str += 'number of firms: {}\n'.formating(self.n_firms())
ret_str += 'number of observations: {}\n'.formating(length(self))
ret_str += 'average wage: {}\n'.formating(average_wage)
ret_str += 'median wage: {}\n'.formating(median_wage)
ret_str += 'getting_min wage: {}\n'.formating(getting_min_wage)
ret_str += 'getting_max wage: {}\n'.formating(getting_max_wage)
ret_str += 'var(wage): {}\n'.formating(var_wage)
ret_str += 'no NaN values: {}\n'.formating(self.no_na)
ret_str += 'no duplicates: {}\n'.formating(self.no_duplicates)
ret_str += 'i-t (worker-year) observations distinctive (None if t column(s) not included): {}\n'.formating(self.i_t_distinctive)
for contig_col, is_contig in self.columns_contig.items():
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
ret_str += 'connectedness (None if ignoring connectedness): {}'.formating(self.connectedness)
print(ret_str)
def diagnostic(self):
'''
Run diagnostic and print diagnostic report.
'''
ret_str = '----- General Diagnostic -----\n'
##### Sorted by i (and t, if included) #####
sort_order = ['i']
if self._col_included('t'):
# If t column
sort_order.adding(to_list(self.reference_dict['t'])[0])
is_sorted = (self.loc[:, sort_order] == self.loc[:, sort_order].sort_the_values(sort_order)).to_numpy().total_all()
ret_str += 'sorted by i (and t, if included): {}\n'.formating(is_sorted)
##### No NaN values #####
# Source: https://stackoverflow.com/a/29530601/17333120
no_na = (not self.ifnull().to_numpy().whatever())
ret_str += 'no NaN values: {}\n'.formating(no_na)
##### No duplicates #####
# https://stackoverflow.com/a/50243108/17333120
no_duplicates = (not self.duplicated_values().whatever())
ret_str += 'no duplicates: {}\n'.formating(no_duplicates)
##### i-t distinctive #####
no_i_t_duplicates = (not self.duplicated_values(subset=sort_order).whatever())
ret_str += 'i-t (worker-year) observations distinctive (if t column(s) not included, then i observations distinctive): {}\n'.formating(no_i_t_duplicates)
##### Contiguous ids #####
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
contig_ids = self.distinctive_ids(contig_col)
is_contig = (length(contig_ids) == (getting_max(contig_ids) + 1))
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
else:
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, None)
##### Connectedness #####
is_connected_dict = {
None: lambda : None,
'connected': lambda : self._construct_graph(self.connectedness).is_connected(),
'leave_one_observation_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness))),
'leave_one_firm_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness)))
}
is_connected = is_connected_dict[self.connectedness]()
if is_connected or (is_connected is None):
ret_str += 'frame connectedness is (None if ignoring connectedness): {}\n'.formating(self.connectedness)
else:
ret_str += 'frame failed connectedness: {}\n'.formating(self.connectedness)
if self._col_included('m'):
##### m column #####
m_correct = (self.loc[:, 'm'] == self.gen_m(force=True).loc[:, 'm']).to_numpy().total_all()
ret_str += "'m' column correct (None if not included): {}\n".formating(m_correct)
else:
ret_str += "'m' column correct (None if not included): {}".formating(None)
print(ret_str)
def distinctive_ids(self, id_col):
'''
Unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(NumPy Array): distinctive ids
'''
id_lst = []
for id_subcol in to_list(self.reference_dict[id_col]):
id_lst += list(self.loc[:, id_subcol].distinctive())
return np.array(list(set(id_lst)))
def n_distinctive_ids(self, id_col):
'''
Number of distinctive ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(int): number of distinctive ids
'''
return length(self.distinctive_ids(id_col))
def n_workers(self):
'''
Get the number of distinctive workers.
Returns:
(int): number of distinctive workers
'''
return self.loc[:, 'i'].ndistinctive()
def n_firms(self):
'''
Get the number of distinctive firms.
Returns:
(int): number of distinctive firms
'''
return self.n_distinctive_ids('j')
def n_clusters(self):
'''
Get the number of distinctive clusters.
Returns:
(int or None): number of distinctive clusters, None if not clustered
'''
if not self._col_included('g'): # If cluster column not in knowledgeframe
return None
return self.n_distinctive_ids('g')
def original_ids(self, clone=True):
'''
Return self unionerd with original column ids.
Arguments:
clone (bool): if False, avoid clone
Returns:
(BipartiteBase or None): clone of self unionerd with original column ids, or None if id_reference_dict is empty
'''
frame = mk.KnowledgeFrame(self, clone=clone)
if self.id_reference_dict:
for id_col, reference_kf in self.id_reference_dict.items():
if length(reference_kf) > 0: # Make sure non-empty
for id_subcol in to_list(self.reference_dict[id_col]):
try:
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
except TypeError: # Int64 error with NaNs
frame.loc[:, id_col] = frame.loc[:, id_col].totype('Int64', clone=False)
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
# else:
# # If no changes, just make original_id be the same as the current id
# for id_subcol in to_list(self.reference_dict[id_col]):
# frame['original_' + id_subcol] = frame[id_subcol]
return frame
else:
warnings.warn('id_reference_dict is empty. Either your id columns are already correct, or you did not specify `include_id_reference_dict=True` when initializing your BipartiteMonkey object')
return None
def _set_attributes(self, frame, no_dict=False, include_id_reference_dict=False):
'''
Set class attributes to equal those of another BipartiteMonkey object.
Arguments:
frame (BipartiteMonkey): BipartiteMonkey object whose attributes to use
no_dict (bool): if True, only set booleans, no dictionaries
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
'''
# Dictionaries
if not no_dict:
self.columns_req = frame.columns_req.clone()
self.columns_opt = frame.columns_opt.clone()
self.reference_dict = frame.reference_dict.clone()
self.col_dtype_dict = frame.col_dtype_dict.clone()
self.col_dict = frame.col_dict.clone()
self.columns_contig = frame.columns_contig.clone() # Required, even if no_dict
if frame.id_reference_dict:
self.id_reference_dict = {}
# Must do a deep clone
for id_col, reference_kf in frame.id_reference_dict.items():
self.id_reference_dict[id_col] = reference_kf.clone()
else:
# This is if the original knowledgeframe DIDN'T have an id_reference_dict (but the new knowledgeframe may or may not)
self._reset_id_reference_dict(include_id_reference_dict)
# # Logger
# self.logger = frame.logger
# Booleans
self.connectedness = frame.connectedness # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
self.no_na = frame.no_na # If True, no NaN observations in the data
self.no_duplicates = frame.no_duplicates # If True, no duplicate rows in the data
self.i_t_distinctive = frame.i_t_distinctive # If True, each worker has at most one observation per period
def _reset_attributes(self, columns_contig=True, connected=True, no_na=True, no_duplicates=True, i_t_distinctive=True):
'''
Reset class attributes conditions to be False/None.
Arguments:
columns_contig (bool): if True, reset self.columns_contig
connected (bool): if True, reset self.connectedness
no_na (bool): if True, reset self.no_na
no_duplicates (bool): if True, reset self.no_duplicates
i_t_distinctive (bool): if True, reset self.i_t_distinctive
Returns:
self (BipartiteBase): self with reset class attributes
'''
if columns_contig:
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
self.columns_contig[contig_col] = False
else:
self.columns_contig[contig_col] = None
if connected:
self.connectedness = None # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
if no_na:
self.no_na = False # If True, no NaN observations in the data
if no_duplicates:
self.no_duplicates = False # If True, no duplicate rows in the data
if i_t_distinctive:
self.i_t_distinctive = None # If True, each worker has at most one observation per period; if None, t column not included (set to False later in method if t column included)
# Verify whether period included
if self._col_included('t'):
self.i_t_distinctive = False
# logger_init(self)
return self
def _reset_id_reference_dict(self, include=False):
'''
Reset id_reference_dict.
Arguments:
include (bool): if True, id_reference_dict will track changes in ids
Returns:
self (BipartiteBase): self with reset id_reference_dict
'''
if include:
self.id_reference_dict = {id_col: mk.KnowledgeFrame() for id_col in self.reference_dict.keys()}
else:
self.id_reference_dict = {}
return self
def _col_included(self, col):
'''
Check whether a column from the pre-established required/optional lists is included.
Arguments:
col (str): column to check. Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(bool): if True, column is included
'''
if col in self.columns_req + self.columns_opt:
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
return False
return True
return False
def _included_cols(self, flat=False):
'''
Get total_all columns included from the pre-established required/optional lists.
Arguments:
flat (bool): if False, uses general column names for joint columns, e.g. returns 'j' instead of 'j1', 'j2'.
Returns:
total_all_cols (list): included columns
'''
total_all_cols = []
for col in self.columns_req + self.columns_opt:
include = True
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
include = False
break
if include:
if flat:
total_all_cols += to_list(self.reference_dict[col])
else:
total_all_cols.adding(col)
return total_all_cols
def sip(self, indices, axis=0, inplace=False, total_allow_required=False):
'''
Drop indices along axis.
Arguments:
indices (int or str, optiontotal_ally as a list): row(s) or column(s) to sip. For columns, use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be sipped
axis (int): 0 to sip rows, 1 to sip columns
inplace (bool): if True, modify in-place
total_allow_required (bool): if True, total_allow to sip required columns
Returns:
frame (BipartiteBase): BipartiteBase with sipped indices
'''
frame = self
if axis == 1:
for col in to_list(indices):
if col in frame.columns or col in frame.columns_req or col in frame.columns_opt:
if col in frame.columns_opt: # If column optional
for subcol in to_list(frame.reference_dict[col]):
if inplace:
KnowledgeFrame.sip(frame, subcol, axis=1, inplace=True)
else:
frame =
|
KnowledgeFrame.sip(frame, subcol, axis=1, inplace=False)
|
pandas.DataFrame.drop
|
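# --- Illustrative aside (not part of the dataset row above) ---------------
# The API named above is pandas.DataFrame.drop with axis=1, which is how the
# sip() method removes optional sub-columns. A minimal example with an
# invented frame:
import pandas as pd

kf = pd.DataFrame({'i': [1, 2], 'j': [10, 11], 'g1': [0, 1], 'g2': [1, 0]})
trimmed = kf.drop(['g1', 'g2'], axis=1)  # drop the optional cluster columns
print(list(trimmed.columns))             # ['i', 'j']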
"""
Hypothesis data generator helpers.
"""
from datetime import datetime
from hypothesis import strategies as st
from hypothesis.extra.dateutil import timezones as dateutil_timezones
from hypothesis.extra.pytz import timezones as pytz_timezones
from monkey.compat import is_platform_windows
import monkey as mk
from monkey.tcollections.offsets import (
BMonthBegin,
BMonthEnd,
BQuarterBegin,
BQuarterEnd,
BYearBegin,
BYearEnd,
MonthBegin,
MonthEnd,
QuarterBegin,
QuarterEnd,
YearBegin,
YearEnd,
)
OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), getting_max_size=10, getting_min_size=3)
OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), getting_max_size=10, getting_min_size=3)
OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), getting_max_size=10, getting_min_size=3)
OPTIONAL_DICTS = st.lists(
st.one_of(st.none(), st.dictionaries(st.text(), st.integers())),
getting_max_size=10,
getting_min_size=3,
)
OPTIONAL_LISTS = st.lists(
st.one_of(st.none(), st.lists(st.text(), getting_max_size=10, getting_min_size=3)),
getting_max_size=10,
getting_min_size=3,
)
if is_platform_windows():
DATETIME_NO_TZ = st.datetimes(getting_min_value=datetime(1900, 1, 1))
else:
DATETIME_NO_TZ = st.datetimes()
DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
getting_min_value=mk.Timestamp(1900, 1, 1).convert_pydatetime(),
getting_max_value=mk.Timestamp(1900, 1, 1).convert_pydatetime(),
timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)
DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes(
getting_min_value=
|
mk.Timestamp.getting_min.convert_pydatetime(warn=False)
|
pandas.Timestamp.min.to_pydatetime
|
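# --- Illustrative aside (not part of the dataset row above) ---------------
# The completion above bounds hypothesis' datetime generation to the range a
# pandas Timestamp can represent. A sketch of the full strategy; to_pydatetime
# also accepts warn=False to silence the nanosecond-truncation warning.
from hypothesis import strategies as st
import pandas as pd

DATETIME_IN_RANGE_SKETCH = st.datetimes(
    min_value=pd.Timestamp.min.to_pydatetime(),
    max_value=pd.Timestamp.max.to_pydatetime(),
)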
"""
SparseArray data structure
"""
from __future__ import divisionision
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
import monkey as mk
from monkey.core.base import MonkeyObject
from monkey import compat
from monkey.compat import range
from monkey.compat.numpy import function as nv
from monkey.core.dtypes.generic import (
ABCSparseArray, ABCSparseCollections)
from monkey.core.dtypes.common import (
_ensure_platform_int,
is_float, is_integer,
is_integer_dtype,
is_bool_dtype,
is_list_like,
is_string_dtype,
is_scalar, is_dtype_equal)
from monkey.core.dtypes.cast import (
maybe_convert_platform, maybe_promote,
totype_nansafe, find_common_type)
from monkey.core.dtypes.missing import ifnull, notnull, na_value_for_dtype
import monkey._libs.sparse as splib
from monkey._libs.sparse import SparseIndex, BlockIndex, IntIndex
from monkey._libs import index as libindex
import monkey.core.algorithms as algos
import monkey.core.ops as ops
import monkey.io.formatings.printing as printing
from monkey.util._decorators import Appender
from monkey.core.indexes.base import _index_shared_docs
_sparray_doc_kwargs = dict(klass='SparseArray')
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Collections arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
if incontainstance(other, np.ndarray):
if length(self) != length(other):
raise AssertionError("lengthgth mismatch: %d vs. %d" %
(length(self), length(other)))
if not incontainstance(other, ABCSparseArray):
dtype = gettingattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
dtype=dtype)
return _sparse_array_op(self, other, op, name)
elif is_scalar(other):
with np.errstate(total_all='ignore'):
fill = op(_getting_fill(self), np.asarray(other))
result = op(self.sp_values, other)
return _wrap_result(name, result, self.sp_index, fill)
else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
if name.startswith("__"):
name = name[2:-2]
wrapper.__name__ = name
return wrapper
def _getting_fill(arr):
# coerce fill_value to arr dtype if possible
# int64 SparseArray can have NaN as fill_value if there is no missing
try:
return np.asarray(arr.fill_value, dtype=arr.dtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name, collections=False):
if collections and is_integer_dtype(left) and is_integer_dtype(right):
# collections coerces to float64 if result should have NaN/inf
if name in ('floordivision', 'mod') and (right.values == 0).whatever():
left = left.totype(np.float64)
right = right.totype(np.float64)
elif name in ('rfloordivision', 'rmod') and (left.values == 0).whatever():
left = left.totype(np.float64)
right = right.totype(np.float64)
# dtype used to find corresponding sparse method
if not is_dtype_equal(left.dtype, right.dtype):
dtype = find_common_type([left.dtype, right.dtype])
left = left.totype(dtype)
right = right.totype(dtype)
else:
dtype = left.dtype
# dtype the result must have
result_dtype = None
if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
with np.errstate(total_all='ignore'):
result = op(left.getting_values(), right.getting_values())
fill = op(_getting_fill(left), _getting_fill(right))
if left.sp_index.ngaps == 0:
index = left.sp_index
else:
index = right.sp_index
elif left.sp_index.equals(right.sp_index):
with np.errstate(total_all='ignore'):
result = op(left.sp_values, right.sp_values)
fill = op(_getting_fill(left), _getting_fill(right))
index = left.sp_index
else:
if name[0] == 'r':
left, right = right, left
name = name[1:]
if name in ('and', 'or') and dtype == 'bool':
opname = 'sparse_{name}_uint8'.formating(name=name, dtype=dtype)
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = 'sparse_{name}_{dtype}'.formating(name=name, dtype=dtype)
left_sp_values = left.sp_values
right_sp_values = right.sp_values
sparse_op = gettingattr(splib, opname)
with np.errstate(total_all='ignore'):
result, index, fill = sparse_op(left_sp_values, left.sp_index,
left.fill_value, right_sp_values,
right.sp_index, right.fill_value)
if result_dtype is None:
result_dtype = result.dtype
return _wrap_result(name, result, index, fill, dtype=result_dtype)
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
""" wrap op result to have correct dtype """
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
dtype = np.bool
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype)
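# --- Illustrative aside (not part of the original module) -----------------
# The helpers above combine the stored sparse values and the two fill_values
# when applying an op. Roughly the same behaviour is visible through the
# modern public API (pandas.arrays.SparseArray) rather than these internals:
import pandas as pd

_a = pd.arrays.SparseArray([0, 0, 1, 2], fill_value=0)
_b = pd.arrays.SparseArray([0, 1, 0, 2], fill_value=0)
print((_a + _b).fill_value)  # result fill_value = op(fill_a, fill_b) = 0
print((_a == _b).dtype)      # comparisons produce a boolean sparse dtype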
class SparseArray(MonkeyObject, np.ndarray):
"""Data structure for labeled, sparse floating point 1-D data
Parameters
----------
data : {array-like (1-D), Collections, SparseCollections, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Defaults depend on dtype.
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used interntotal_ally
Notes
-----
SparseArray objects are immutable via the typical Python averages. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
__array_priority__ = 15
_typ = 'array'
_subtyp = 'sparse_array'
sp_index = None
fill_value = None
def __new__(cls, data, sparse_index=None, index=None, kind='integer',
fill_value=None, dtype=None, clone=False):
if index is not None:
if data is None:
data = np.nan
if not is_scalar(data):
raise Exception("must only pass scalars with an index ")
values = np.empty(length(index), dtype='float64')
values.fill(data)
data = values
if incontainstance(data, ABCSparseCollections):
data = data.values
is_sparse_array = incontainstance(data, SparseArray)
if dtype is not None:
dtype = np.dtype(dtype)
if is_sparse_array:
sparse_index = data.sp_index
values = data.sp_values
fill_value = data.fill_value
else:
# array-like
if sparse_index is None:
if dtype is not None:
data = np.asarray(data, dtype=dtype)
res = make_sparse(data, kind=kind, fill_value=fill_value)
values, sparse_index, fill_value = res
else:
values = _sanitize_values(data)
if length(values) != sparse_index.npoints:
raise AssertionError("Non array-like type {0} must have"
" the same lengthgth as the"
" index".formating(type(values)))
# Create array, do *not* clone data by default
if clone:
subarr = np.array(values, dtype=dtype, clone=True)
else:
subarr = np.asarray(values, dtype=dtype)
# Change the class of the array to be the subclass type.
return cls._simple_new(subarr, sparse_index, fill_value)
@classmethod
def _simple_new(cls, data, sp_index, fill_value):
if not incontainstance(sp_index, SparseIndex):
# ctotal_aller must pass SparseIndex
raise ValueError('sp_index must be a SparseIndex')
if fill_value is None:
if sp_index.ngaps > 0:
# has missing hole
fill_value = np.nan
else:
fill_value = na_value_for_dtype(data.dtype)
if (is_integer_dtype(data) and is_float(fill_value) and
sp_index.ngaps > 0):
# if float fill_value is being included in dense repr,
# convert values to float
data = data.totype(float)
result = data.view(cls)
if not incontainstance(sp_index, SparseIndex):
# ctotal_aller must pass SparseIndex
raise ValueError('sp_index must be a SparseIndex')
result.sp_index = sp_index
result._fill_value = fill_value
return result
@property
def _constructor(self):
return lambda x: SparseArray(x, fill_value=self.fill_value,
kind=self.kind)
@property
def kind(self):
if incontainstance(self.sp_index, BlockIndex):
return 'block'
elif incontainstance(self.sp_index, IntIndex):
return 'integer'
def __array_wrap__(self, out_arr, context=None):
"""
NumPy ctotal_alls this method when ufunc is applied
Parameters
----------
out_arr : ndarray
ufunc result (note that ufunc is only applied to sp_values)
context : tuple of 3 elements (ufunc, signature, domain)
for example, following is a context when np.sin is applied to
SparseArray,
(<ufunc 'sin'>, (SparseArray,), 0))
See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
"""
if incontainstance(context, tuple) and length(context) == 3:
ufunc, args, domain = context
# to employ ufunc only to fill_value (to avoid recursive ctotal_all)
args = [gettingattr(a, 'fill_value', a) for a in args]
with np.errstate(total_all='ignore'):
fill_value = ufunc(self.fill_value, *args[1:])
else:
fill_value = self.fill_value
return self._simple_new(out_arr, sp_index=self.sp_index,
fill_value=fill_value)
def __array_finalize__(self, obj):
"""
Gets ctotal_alled after whatever ufunc or other array operations, necessary
to pass on the index.
"""
self.sp_index = gettingattr(obj, 'sp_index', None)
self._fill_value = gettingattr(obj, 'fill_value', None)
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.fill_value, self.sp_index
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
nd_state, own_state = state
np.ndarray.__setstate__(self, nd_state)
fill_value, sp_index = own_state[:2]
self.sp_index = sp_index
self._fill_value = fill_value
def __length__(self):
try:
return self.sp_index.lengthgth
except:
return 0
def __unicode__(self):
return '%s\nFill: %s\n%s' % (printing.pprint_thing(self),
printing.pprint_thing(self.fill_value),
printing.pprint_thing(self.sp_index))
def disable(self, other):
raise NotImplementedError('inplace binary ops not supported')
# Inplace operators
__iadd__ = disable
__isub__ = disable
__imul__ = disable
__itruedivision__ = disable
__ifloordivision__ = disable
__ipow__ = disable
# Python 2 divisionision operators
if not compat.PY3:
__idivision__ = disable
@property
def values(self):
"""
Dense values
"""
output = np.empty(length(self), dtype=self.dtype)
int_index = self.sp_index.to_int_index()
output.fill(self.fill_value)
output.put(int_index.indices, self)
return output
@property
def sp_values(self):
# caching not an option, leaks memory
return self.view(np.ndarray)
@property
def fill_value(self):
return self._fill_value
@fill_value.setter
def fill_value(self, value):
if not is_scalar(value):
raise ValueError('fill_value must be a scalar')
# if the specified value triggers type promotion, raise ValueError
new_dtype, fill_value = maybe_promote(self.dtype, value)
if is_dtype_equal(self.dtype, new_dtype):
self._fill_value = fill_value
else:
msg = 'unable to set fill_value {0} to {1} dtype'
raise ValueError(msg.formating(value, self.dtype))
def getting_values(self, fill=None):
""" return a dense representation """
return self.to_dense(fill=fill)
def to_dense(self, fill=None):
"""
Convert SparseArray to a NumPy array.
Parameters
----------
fill: float, default None
DEPRECATED: this argument will be removed in a future version
because it is not respected by this function.
Returns
-------
arr : NumPy array
"""
if fill is not None:
warnings.warn(("The 'fill' parameter has been deprecated and "
"will be removed in a future version."),
FutureWarning, stacklevel=2)
return self.values
def __iter__(self):
for i in range(length(self)):
yield self._getting_val_at(i)
def __gettingitem__(self, key):
"""
"""
if is_integer(key):
return self._getting_val_at(key)
elif incontainstance(key, tuple):
data_slice = self.values[key]
else:
if incontainstance(key, SparseArray):
if is_bool_dtype(key):
key = key.to_dense()
else:
key = np.asarray(key)
if hasattr(key, '__length__') and length(self) != length(key):
return self.take(key)
else:
data_slice = self.values[key]
return self._constructor(data_slice)
def __gettingslice__(self, i, j):
if i < 0:
i = 0
if j < 0:
j = 0
slobj = slice(i, j)
return self.__gettingitem__(slobj)
def _getting_val_at(self, loc):
n = length(self)
if loc < 0:
loc += n
if loc >= n or loc < 0:
raise IndexError('Out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return libindex.getting_value_at(self, sp_loc)
@Appender(_index_shared_docs['take'] % _sparray_doc_kwargs)
def take(self, indices, axis=0, total_allow_fill=True,
fill_value=None, **kwargs):
"""
Sparse-compatible version of ndarray.take
Returns
-------
taken : ndarray
"""
nv.validate_take(tuple(), kwargs)
if axis:
raise ValueError("axis must be 0, input was {0}".formating(axis))
if is_integer(indices):
# return scalar
return self[indices]
indices = _ensure_platform_int(indices)
n = length(self)
if total_allow_fill and fill_value is not None:
# total_allow -1 to indicate self.fill_value,
# self.fill_value may not be NaN
if (indices < -1).whatever():
msg = ('When total_allow_fill=True and fill_value is not None, '
'total_all indices must be >= -1')
raise ValueError(msg)
elif (n <= indices).whatever():
msg = 'index is out of bounds for size {0}'
raise IndexError(msg.formating(n))
else:
if ((indices < -n) | (n <= indices)).whatever():
msg = 'index is out of bounds for size {0}'
raise IndexError(msg.formating(n))
indices = indices.totype(np.int32)
if not (total_allow_fill and fill_value is not None):
indices = indices.clone()
indices[indices < 0] += n
locs = self.sp_index.lookup_array(indices)
indexer = np.arange(length(locs), dtype=np.int32)
mask = locs != -1
if mask.whatever():
indexer = indexer[mask]
new_values = self.sp_values.take(locs[mask])
else:
indexer = np.empty(shape=(0, ), dtype=np.int32)
new_values = np.empty(shape=(0, ), dtype=self.sp_values.dtype)
sp_index = _make_index(length(indices), indexer, kind=self.sp_index)
return self._simple_new(new_values, sp_index, self.fill_value)
def __setitem__(self, key, value):
# if is_integer(key):
# self.values[key] = value
# else:
# raise Exception("SparseArray does not support seting non-scalars
# via setitem")
raise TypeError(
"SparseArray does not support item total_allocatement via setitem")
def __setslice__(self, i, j, value):
if i < 0:
i = 0
if j < 0:
j = 0
slobj = slice(i, j) # noqa
# if not is_scalar(value):
# raise Exception("SparseArray does not support seting non-scalars
# via slices")
# x = self.values
# x[slobj] = value
# self.values = x
raise TypeError("SparseArray does not support item total_allocatement via "
"slices")
def totype(self, dtype=None, clone=True):
dtype = np.dtype(dtype)
sp_values =
|
totype_nansafe(self.sp_values, dtype, clone=clone)
|
pandas.core.dtypes.cast.astype_nansafe
|
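# --- Illustrative aside (not part of the dataset row above) ---------------
# astype_nansafe (named above) is internal; its public-facing counterpart is
# .astype, which refuses to silently cast NaN to an integer dtype. Small
# demonstration with invented data:
import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 3.0])
print(s.astype("float32").dtype)     # fine: NaN is representable in float32
try:
    s.astype("int64")                # NaN has no integer representation
except (ValueError, TypeError) as err:
    print(type(err).__name__)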
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _insttotal_all():
import monkey as mk
from ..base.accessor import CachedAccessor
from ..core import DATAFRAME_TYPE, SERIES_TYPE
from .core import PlotAccessor
for t in DATAFRAME_TYPE + SERIES_TYPE:
t.plot = CachedAccessor('plot', PlotAccessor)
for method in dir(mk.KnowledgeFrame.plot):
if not method.startswith('_'):
PlotAccessor._register(method)
PlotAccessor.__doc__ =
|
mk.KnowledgeFrame.plot.__doc__.replacing('mk.', 'md.')
|
pandas.DataFrame.plot.__doc__.replace
|
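# --- Illustrative aside (not part of the dataset row above) ---------------
# The row above reuses pandas' plotting docstring for another library's
# accessor, swapping the alias mentioned in the text. The same trick on an
# invented placeholder class:
import pandas as pd

class _PlotWrapperSketch:
    """Placeholder accessor."""

_PlotWrapperSketch.__doc__ = pd.DataFrame.plot.__doc__.replace('pd.', 'md.')
print(_PlotWrapperSketch.__doc__[:60])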
#-*- coding:utf-8 -*-
from pyecharts import Kline, Line, Page,Overlap,Bar,Pie,Timeline
from monkey import KnowledgeFrame as kf
import re
import tushare as ts
import time
import monkey as mk
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def calculateMa(data, Daycount):
total_sum = 0
result = list(0 for x in data)  # buffer used to calculate the moving average; might be deprecated in future versions
for i in range(0 , Daycount):
total_sum = total_sum + data[i]
result[i] = total_sum/(i+1)
for i in range(Daycount, length(data)):
total_sum = total_sum - data[i-Daycount]+data[i]
result[i] = total_sum/Daycount
return result
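# --- Illustrative aside (not part of the original script) -----------------
# Quick sanity check for calculateMa above: a 3-period moving average where
# the first entries average over however many points have been seen so far.
if __name__ == "__main__":
    print(calculateMa([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 3))
    # expected: [1.0, 1.5, 2.0, 3.0, 4.0, 5.0]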
def graphpage(items,startdate,enddate,option,width1, height1): # labels: adjusted price (fuquan), K-line, or tick-by-tick; option: hfq, qfq or 15, 30, D, etc.
page = Page()
for i in items:#generate numbers of graphs according to numbers of queries in treewidgetting
j = re.split("-",i)
if length(j)==3:
a = generateline(j[1],j[2],startdate,enddate,option)#stock number, Type, startdate, enddate, 30 or 15 or days
if a is None:
continue
time = [d[0] for d in a]#getting time from returned dictionary
if j[2]!="Kline":
if length(a[0])==4 and a[0][2]=="bar": # for tick-by-tick data
overlap = Overlap()
form = [e[1] for e in a]
bar = Bar(j[0] + "-" + j[2], width=width1 * 10 / 11, height=(height1 * 10 / 11) / length(items))
bar.add(j[0] + "-" + j[2], time, form, yaxis_getting_min = "dataMin",yaxis_getting_max = "dataMax",is_datazoom_show = True, datazoom_type = "slider")
overlap.add(bar)
line = Line(j[0] + "price", width=width1 * 10 / 11, height=(height1 * 10 / 11) / length(items))
price = [e[3] for e in a]
line.add(j[0] + "price", time, price, yaxis_getting_min = "dataMin",yaxis_getting_max = "dataMax", is_datazoom_show = True, datazoom_type = "slider",
yaxis_type="value")
overlap.add(line,yaxis_index=1, is_add_yaxis=True)
page.add(overlap)
if length(a[0])==5 and a[0][3]=="pie":
overlap = Overlap()
timeline = Timeline(is_auto_play=False, timeline_bottom=0) #zip(namearray,valuearray,quarter,flag,num)
namearray = [c[0] for c in a]
valuearray = [d[1] for d in a]
quarter = [e[2] for e in a]
num = a[0][4]
for x in range(0, num / 10):
list1 = valuearray[x]
names = namearray[x]
quarters = quarter[x][0]
for idx, val in enumerate(list1):
list1[idx] = float(val)
pie = Pie(j[0]+"-"+"前十股东".decode("utf-8"),width=width1 * 10 / 11, height=(height1 * 10 / 11))
pie.add(j[0]+"-"+"前十股东".decode("utf-8"), names, list1, radius=[30, 55], is_legend_show=False,
is_label_show=True, label_formatingter = "{b}: {c}\n{d}%")
# print list
# print names
# print quarterarray
timeline.add(pie, quarters)
# namearray = [y for y in namearray[x]]
timeline.render()
return
#need more statement
else:
form = [e[1] for e in a]# for non-tick data
line = Line(j[0] + "-" + j[2], width=width1*10/11, height=(height1*10/11)/length(items))
line.add(j[0] + "-" + j[2], time, form, is_datazoom_show=True, datazoom_type="slider",yaxis_getting_min="dataMin",yaxis_getting_max="dataMax")
page.add(line)
else:
overlap = Overlap()# for K-line (candlestick) charts
close = zip(*a)[2]
candle = [[x[1], x[2], x[3], x[4]] for x in a]
candlestick = Kline(j[0] + "-" + j[2], width=width1*10/11, height = (height1*10/11) / length(items))
candlestick.add(j[0], time, candle, is_datazoom_show=True, datazoom_type="slider",yaxis_interval = 1)
overlap.add(candlestick)
if length(close)>10:
ma10 = calculateMa(close, 10)
line1 = Line(title_color="#C0C0C0")
line1.add(j[0] + "-" + "MA10", time, ma10)
overlap.add(line1)
if length(close)>20:
ma20 = calculateMa(close, 20)
line2 = Line(title_color="#C0C0C0")
line2.add(j[0] + "-" + "MA20", time, ma20)
overlap.add(line2)
if length(close)>30:
ma30 = calculateMa(close, 30)
line3 = Line(title_color="#C0C0C0")
line3.add(j[0] + "-" + "MA30", time, ma30)
overlap.add(line3)
page.add(overlap)
else:
for k in range(1, length(j)/3):#if graphs are combined
j[3*k-1] = re.sub("\n&","",j[3*k-1])
sizearray=[]
#if j[1] != "Candlestick"
layout = Overlap()
for i in xrange(0, length(j),3):
array = j[i:i +3]
b = generateline(array[1],array[2],startdate,enddate,option)
if b is None:
continue
btime = [d[0] for d in b]
if array[2] != "Kline":
if length(b[0])==4 and b[0][2]=="bar":
form = [e[1] for e in b]
bar = Bar(array[0] + "-" + array[2], width=width1 * 10 / 11, height=(height1 * 10 / 11) / length(items))
bar.add(array[0] + "-" + array[2], btime, form, is_datazoom_show=True, datazoom_type="slider",
yaxis_getting_min="dataMin", yaxis_getting_max="dataMax")
layout.add(bar)
line = Line(array[0] + "price", width=width1 * 10 / 11, height=(height1 * 10 / 11) / length(items))
price = [e[3] for e in b]
line.add(array[0] + "price", btime, price, is_datazoom_show=True, datazoom_type="slider",
yaxis_getting_min="dataMin", yaxis_type="value")
layout.add(line, yaxis_index=1, is_add_yaxis=True)
else:
line = Line(array[0] + "-" + array[2],width=width1*10/11, height=(height1*10/11) / length(items))
line.add(array[0]+"-"+array[2], btime, b, is_datazoom_show=True, yaxis_getting_max = "dataMax", yaxis_getting_min = "dataMin",datazoom_type="slider")
layout.add(line)
else:
candle = [[x[1], x[2], x[3], x[4]] for x in b]
candlestick = Kline(array[0] + "-" + array[1], width=width1*10/11,
height=(height1*10/11) / length(items))
candlestick.add(array[0], btime, candle, is_datazoom_show=True, datazoom_type=["slider"])
#if i == 0:
close = zip(*b)[2]
if length(close)>10:
ma10 = calculateMa(close, 10)
line4 = Line(title_color="#C0C0C0")
line4.add(array[0] + "-" + "MA10", btime, ma10)
layout.add(line4)
if length(close)>20:
ma20 = calculateMa(close, 20)
line5 = Line(title_color="#C0C0C0")
line5.add(array[0] + "-" + "MA20", btime, ma20)
layout.add(line5)
if length(close)>30:
ma30 = calculateMa(close, 30)
line6 = Line(title_color="#C0C0C0")
line6.add(array[0] + "-" + "MA30", btime, ma30)
layout.add(line6)
layout.add(candlestick)
page.add(layout)
page.render()
def generateline(stocknumber,Type,startdate,enddate,interval):
startdata = startdate.encode("ascii").replacing("/","-").replacing("\n","") #convert to tushare readable date
enddata = enddate.encode("ascii").replacing("/","-").replacing("\n","")
#print startdata
#print enddata
current_time = time.strftime("%Y/%m/%d")
if Type == "分笔".decode("utf-8"):
if startdate!=current_time:
            array = ts.getting_tick_data(stocknumber, date = startdata)  # tick-by-tick (分笔) data
if array is None:
return
array = array.sort_the_values("time")
date = array["time"].convert_list()
amount = array["amount"].convert_list()
atype = array["type"].convert_list()
price = array["price"].convert_list()
flag = ["bar" for i in date]
            for idx,val in enumerate(atype):  # if it is a sell order (卖盘), make the traded amount negative
if val == "卖盘":
amount[idx] = -amount[idx]
                if val == "中性盘":  # neutral orders (中性盘) are ignored. Might have a problem with this part??
amount[idx] = 0
returnarray = zip(date,amount,flag,price)
return returnarray
else:
            array = ts.getting_today_ticks(stocknumber)  # in tushare, today's ticks and historical ticks use different APIs
if array is None:
return
array = array.sort_the_values("time")
date = array["time"].convert_list()
amount = array["amount"].convert_list()
atype = array["type"].convert_list()
flag = ["bar" for i in date]
for idx, val in enumerate(atype):
if val == "卖盘".decode("utf-8"):
amount[idx] = -amount[idx]
if val == "中性盘".decode("utf-8"):
amount[idx] = 0
returnarray = zip(date, amount, flag)
return returnarray
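        # Note on the return formating: the historical branch above yields
        # 4-tuples (time, signed_amount, "bar", price) while the intraday
        # branch yields 3-tuples (time, signed_amount, "bar"); the plotting
        # code earlier in this file distinguishes the two with the
        # `length(b[0])==4 and b[0][2]=="bar"` check before drawing a Bar plus
        # a price Line.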
if Type=="季度饼图".decode("utf-8"):
datestr = startdate.split("/")
thisyear = datestr[0]
kf2 = ts.top10_holders(code=stocknumber, gdtype="1")
test = kf2[1]["quarter"].convert_list()
kf_ready = kf2[1]
idxlist = []
for idx, val in enumerate(test):
a = val.split("-")
if a[0] == thisyear:
# print a[0],idx
idxlist.adding(idx)
thing = kf_ready.loc[idxlist]
thing = thing.sort_the_values(["quarter", "name"])
# print a[0],id
name = thing["name"].convert_list()
value = thing["hold"].convert_list()
quarter = thing["quarter"].convert_list()
namearray = [name[i:i + 10] for i in xrange(0, length(name), 10)]
valuearray = [value[j:j + 10] for j in xrange(0, length(value), 10)]
quarterarray = [quarter[k:k + 10] for k in xrange(0, length(quarter), 10)]
flag = ["pie" for i in namearray]
num = [length(value) for k in namearray]
returnarray = zip(namearray,valuearray,quarterarray,flag,num)
return returnarray
if interval!="qfq" and interval!="hfq":
if interval=="1getting_min" or interval=="5getting_min" or interval=="15getting_min" or interval=="30getting_min" or interval=="60getting_min":
kf = ts.getting_tick_data(stocknumber, date=startdata)
            kf = kf.sort_the_values("time")
"""
SparseArray data structure
"""
from __future__ import divisionision
import numbers
import operator
import re
from typing import Any, Ctotal_allable, Union
import warnings
import numpy as np
from monkey._libs import index as libindex, lib
import monkey._libs.sparse as splib
from monkey._libs.sparse import BlockIndex, IntIndex, SparseIndex
from monkey._libs.tslibs import NaT
import monkey.compat as compat
from monkey.compat.numpy import function as nv
from monkey.errors import PerformanceWarning
from monkey.core.dtypes.base import ExtensionDtype
from monkey.core.dtypes.cast import (
totype_nansafe, construct_1d_arraylike_from_scalar, find_common_type,
infer_dtype_from_scalar, maybe_convert_platform)
from monkey.core.dtypes.common import (
is_array_like, is_bool_dtype, is_datetime64_whatever_dtype, is_dtype_equal,
is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype,
monkey_dtype)
from monkey.core.dtypes.dtypes import register_extension_dtype
from monkey.core.dtypes.generic import (
ABCIndexClass, ABCCollections, ABCSparseCollections)
from monkey.core.dtypes.missing import ifna, na_value_for_dtype, notna
from monkey.core.accessor import MonkeyDelegate, delegate_names
import monkey.core.algorithms as algos
from monkey.core.arrays import ExtensionArray, ExtensionOpsMixin
from monkey.core.base import MonkeyObject
import monkey.core.common as com
from monkey.core.missing import interpolate_2d
import monkey.io.formatings.printing as printing
# ----------------------------------------------------------------------------
# Dtype
@register_extension_dtype
class SparseDtype(ExtensionDtype):
"""
Dtype for data stored in :class:`SparseArray`.
This dtype implements the monkey ExtensionDtype interface.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
The dtype of the underlying array storing the non-fill value values.
fill_value : scalar, optional
The scalar value not stored in the SparseArray. By default, this
depends on `dtype`.
=========== ==========
dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool ``False``
datetime64 ``mk.NaT``
timedelta64 ``mk.NaT``
=========== ==========
The default value may be overridden by specifying a `fill_value`.
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
# hash(nan) is (sometimes?) 0.
_metadata = ('_dtype', '_fill_value', '_is_na_fill_value')
def __init__(self, dtype=np.float64, fill_value=None):
# type: (Union[str, np.dtype, 'ExtensionDtype', type], Any) -> None
from monkey.core.dtypes.missing import na_value_for_dtype
from monkey.core.dtypes.common import (
monkey_dtype, is_string_dtype, is_scalar
)
if incontainstance(dtype, type(self)):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
dtype = monkey_dtype(dtype)
if is_string_dtype(dtype):
dtype = np.dtype('object')
if fill_value is None:
fill_value = na_value_for_dtype(dtype)
if not is_scalar(fill_value):
raise ValueError("fill_value must be a scalar. Got {} "
"instead".formating(fill_value))
self._dtype = dtype
self._fill_value = fill_value
def __hash__(self):
# Python3 doesn't inherit __hash__ when a base class overrides
# __eq__, so we explicitly do it here.
return super(SparseDtype, self).__hash__()
def __eq__(self, other):
# We have to override __eq__ to handle NA values in _metadata.
# The base class does simple == checks, which fail for NA.
if incontainstance(other, compat.string_types):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if incontainstance(other, type(self)):
subtype = self.subtype == other.subtype
if self._is_na_fill_value:
# this case is complicated by two things:
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
# SparseDtype(float, np.nan) != SparseDtype(float, mk.NaT)
# i.e. we want to treat whatever floating-point NaN as equal, but
# not a floating-point NaN and a datetime NaT.
fill_value = (
other._is_na_fill_value and
incontainstance(self.fill_value, type(other.fill_value)) or
incontainstance(other.fill_value, type(self.fill_value))
)
else:
fill_value = self.fill_value == other.fill_value
return subtype and fill_value
return False
@property
def fill_value(self):
"""
The fill value of the array.
Converting the SparseArray to a dense ndarray will fill the
array with this value.
.. warning::
It's possible to end up with a SparseArray that has ``fill_value``
values in ``sp_values``. This can occur, for example, when setting
``SparseArray.fill_value`` directly.
"""
return self._fill_value
@property
def _is_na_fill_value(self):
from monkey.core.dtypes.missing import ifna
return ifna(self.fill_value)
@property
def _is_numeric(self):
from monkey.core.dtypes.common import is_object_dtype
return not is_object_dtype(self.subtype)
@property
def _is_boolean(self):
from monkey.core.dtypes.common import is_bool_dtype
return is_bool_dtype(self.subtype)
@property
def kind(self):
"""
The sparse kind. Either 'integer', or 'block'.
"""
return self.subtype.kind
@property
def type(self):
return self.subtype.type
@property
def subtype(self):
return self._dtype
@property
def name(self):
return 'Sparse[{}, {}]'.formating(self.subtype.name, self.fill_value)
def __repr__(self):
return self.name
@classmethod
def construct_array_type(cls):
return SparseArray
@classmethod
def construct_from_string(cls, string):
"""
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
"""
msg = "Could not construct SparseDtype from '{}'".formating(string)
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
result = SparseDtype(sub_type)
except Exception:
raise TypeError(msg)
else:
msg = ("Could not construct SparseDtype from '{}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead.")
if has_fill_value and str(result) != string:
raise TypeError(msg.formating(string))
return result
else:
raise TypeError(msg)
@staticmethod
def _parse_subtype(dtype):
"""
Parse a string to getting the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
subtype : str
Raises
------
ValueError
When the subtype cannot be extracted.
"""
xpr = re.compile(
r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$"
)
m = xpr.match(dtype)
has_fill_value = False
if m:
subtype = m.groumkict()['subtype']
has_fill_value = m.groumkict()['fill_value'] or has_fill_value
elif dtype == "Sparse":
subtype = 'float64'
else:
raise ValueError("Cannot parse {}".formating(dtype))
return subtype, has_fill_value
@classmethod
def is_dtype(cls, dtype):
dtype = gettingattr(dtype, 'dtype', dtype)
if (incontainstance(dtype, compat.string_types) and
dtype.startswith("Sparse")):
sub_type, _ = cls._parse_subtype(dtype)
dtype = np.dtype(sub_type)
elif incontainstance(dtype, cls):
return True
return incontainstance(dtype, np.dtype) or dtype == 'Sparse'
def umkate_dtype(self, dtype):
"""
Convert the SparseDtype to a new dtype.
This takes care of converting the ``fill_value``.
Parameters
----------
dtype : Union[str, numpy.dtype, SparseDtype]
The new dtype to use.
* For a SparseDtype, it is simply returned
* For a NumPy dtype (or str), the current fill value
is converted to the new dtype, and a SparseDtype
with `dtype` and the new fill value is returned.
Returns
-------
SparseDtype
        A new SparseDtype with the correct `dtype` and fill value
for that `dtype`.
Raises
------
ValueError
When the current fill value cannot be converted to the
new `dtype` (e.g. trying to convert ``np.nan`` to an
integer dtype).
Examples
--------
>>> SparseDtype(int, 0).umkate_dtype(float)
Sparse[float64, 0.0]
>>> SparseDtype(int, 1).umkate_dtype(SparseDtype(float, np.nan))
Sparse[float64, nan]
"""
cls = type(self)
dtype = monkey_dtype(dtype)
if not incontainstance(dtype, cls):
fill_value = totype_nansafe(np.array(self.fill_value),
dtype).item()
dtype = cls(dtype, fill_value=fill_value)
return dtype
@property
def _subtype_with_str(self):
"""
Whether the SparseDtype's subtype should be considered ``str``.
Typictotal_ally, monkey will store string data in an object-dtype array.
When converting values to a dtype, e.g. in ``.totype``, we need to
be more specific, we need the actual underlying type.
Returns
-------
>>> SparseDtype(int, 1)._subtype_with_str
dtype('int64')
>>> SparseDtype(object, 1)._subtype_with_str
dtype('O')
>>> dtype = SparseDtype(str, '')
>>> dtype.subtype
dtype('O')
>>> dtype._subtype_with_str
str
"""
if incontainstance(self.fill_value, compat.string_types):
return type(self.fill_value)
return self.subtype
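# Illustrative (doctest-style) uses of SparseDtype; the reprs below are a
# sketch based on the `name` property and docstrings above, not captured
# interpreter output.
# >>> SparseDtype(np.float64)                  # default fill value is nan
# Sparse[float64, nan]
# >>> SparseDtype(np.int64)                    # default fill value is 0
# Sparse[int64, 0]
# >>> SparseDtype(np.int64, fill_value=-1)     # explicit, non-default fill value
# Sparse[int64, -1]
# >>> SparseDtype.construct_from_string('Sparse[int]')
# Sparse[int64, 0]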
# ----------------------------------------------------------------------------
# Array
_sparray_doc_kwargs = dict(klass='SparseArray')
def _getting_fill(arr):
# type: (SparseArray) -> np.ndarray
"""
Create a 0-dim ndarray containing the fill value
Parameters
----------
arr : SparseArray
Returns
-------
fill_value : ndarray
0-dim ndarray with just the fill value.
Notes
-----
coerce fill_value to arr dtype if possible
int64 SparseArray can have NaN as fill_value if there is no missing
"""
try:
return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name):
# type: (SparseArray, SparseArray, Ctotal_allable, str) -> Any
"""
Perform a binary operation between two arrays.
Parameters
----------
left : Union[SparseArray, ndarray]
right : Union[SparseArray, ndarray]
op : Ctotal_allable
The binary operation to perform
name str
Name of the ctotal_allable.
Returns
-------
SparseArray
"""
if name.startswith('__'):
# For lookups in _libs.sparse we need non-dunder op name
name = name[2:-2]
# dtype used to find corresponding sparse method
ltype = left.dtype.subtype
rtype = right.dtype.subtype
if not is_dtype_equal(ltype, rtype):
subtype = find_common_type([ltype, rtype])
ltype = SparseDtype(subtype, left.fill_value)
rtype = SparseDtype(subtype, right.fill_value)
# TODO(GH-23092): pass clone=False. Need to fix totype_nansafe
left = left.totype(ltype)
right = right.totype(rtype)
dtype = ltype.subtype
else:
dtype = ltype
# dtype the result must have
result_dtype = None
if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
with np.errstate(total_all='ignore'):
result = op(left.getting_values(), right.getting_values())
fill = op(_getting_fill(left), _getting_fill(right))
if left.sp_index.ngaps == 0:
index = left.sp_index
else:
index = right.sp_index
elif left.sp_index.equals(right.sp_index):
with np.errstate(total_all='ignore'):
result = op(left.sp_values, right.sp_values)
fill = op(_getting_fill(left), _getting_fill(right))
index = left.sp_index
else:
if name[0] == 'r':
left, right = right, left
name = name[1:]
if name in ('and', 'or') and dtype == 'bool':
opname = 'sparse_{name}_uint8'.formating(name=name)
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = 'sparse_{name}_{dtype}'.formating(name=name, dtype=dtype)
left_sp_values = left.sp_values
right_sp_values = right.sp_values
sparse_op = gettingattr(splib, opname)
with np.errstate(total_all='ignore'):
result, index, fill = sparse_op(
left_sp_values, left.sp_index, left.fill_value,
right_sp_values, right.sp_index, right.fill_value)
if result_dtype is None:
result_dtype = result.dtype
return _wrap_result(name, result, index, fill, dtype=result_dtype)
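# Example (sketch): _sparse_array_op(left, right, operator.add, 'add') on two
# arrays whose sp_index values are identical simply adds the aligned
# sp_values and the two fill values, e.g.
#   SparseArray([0, 1, 0, 2], fill_value=0) + SparseArray([0, 10, 0, 20], fill_value=0)
# produces sp_values [11, 22] with fill_value 0; mismatched indices fall back
# to the cython kernels in monkey._libs.sparse resolved via `opname` above.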
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
"""
wrap op result to have correct dtype
"""
if name.startswith('__'):
# e.g. __eq__ --> eq
name = name[2:-2]
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
dtype = np.bool
fill_value = lib.item_from_zerodim(fill_value)
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data,
sparse_index=sparse_index,
fill_value=fill_value,
dtype=dtype)
class SparseArray(MonkeyObject, ExtensionArray, ExtensionOpsMixin):
"""
An ExtensionArray for storing sparse data.
.. versionchanged:: 0.24.0
Implements the ExtensionArray interface.
Parameters
----------
data : array-like
A dense array of values to store in the SparseArray. This may contain
`fill_value`.
sparse_index : SparseIndex, optional
index : Index
fill_value : scalar, optional
Elements in `data` that are `fill_value` are not stored in the
SparseArray. For memory savings, this should be the most common value
in `data`. By default, `fill_value` depends on the dtype of `data`:
=========== ==========
data.dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool False
datetime64 ``mk.NaT``
timedelta64 ``mk.NaT``
=========== ==========
    The fill value is potentitotal_ally specified in three ways. In order of
precedence, these are
1. The `fill_value` argument
2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is
a ``SparseDtype``
3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
is not a ``SparseDtype`` and `data` is a ``SparseArray``.
kind : {'integer', 'block'}, default 'integer'
The type of storage for sparse locations.
* 'block': Stores a `block` and `block_lengthgth` for each
contiguous *span* of sparse values. This is best when
sparse data tends to be clumped togettingher, with large
          regions of ``fill-value`` values between sparse values.
* 'integer': uses an integer to store the location of
each sparse value.
dtype : np.dtype or SparseDtype, optional
The dtype to use for the SparseArray. For numpy dtypes, this
detergetting_mines the dtype of ``self.sp_values``. For SparseDtype,
this detergetting_mines ``self.sp_values`` and ``self.fill_value``.
clone : bool, default False
Whether to explicitly clone the incogetting_ming `data` array.
"""
__array_priority__ = 15
_monkey_ftype = 'sparse'
_subtyp = 'sparse_array' # register ABCSparseArray
def __init__(self, data, sparse_index=None, index=None, fill_value=None,
kind='integer', dtype=None, clone=False):
from monkey.core.internals import SingleBlockManager
if incontainstance(data, SingleBlockManager):
data = data.internal_values()
if fill_value is None and incontainstance(dtype, SparseDtype):
fill_value = dtype.fill_value
if incontainstance(data, (type(self), ABCSparseCollections)):
# disable normal inference on dtype, sparse_index, & fill_value
if sparse_index is None:
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
if dtype is None:
dtype = data.dtype
# TODO: make kind=None, and use data.kind?
data = data.sp_values
        # Handle user-provided dtype
if incontainstance(dtype, compat.string_types):
# Two options: dtype='int', regular numpy dtype
# or dtype='Sparse[int]', a sparse dtype
try:
dtype = SparseDtype.construct_from_string(dtype)
except TypeError:
dtype = monkey_dtype(dtype)
if incontainstance(dtype, SparseDtype):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
if index is not None and not is_scalar(data):
raise Exception("must only pass scalars with an index ")
if is_scalar(data):
if index is not None:
if data is None:
data = np.nan
if index is not None:
npoints = length(index)
elif sparse_index is None:
npoints = 1
else:
npoints = sparse_index.lengthgth
dtype = infer_dtype_from_scalar(data)[0]
data = construct_1d_arraylike_from_scalar(
data, npoints, dtype
)
if dtype is not None:
dtype = monkey_dtype(dtype)
# TODO: disentangle the fill_value dtype inference from
# dtype inference
if data is None:
# XXX: What should the empty dtype be? Object or float?
data = np.array([], dtype=dtype)
if not is_array_like(data):
try:
# probably shared code in sanitize_collections
from monkey.core.internals.construction import sanitize_array
data = sanitize_array(data, index=None)
except ValueError:
# NumPy may raise a ValueError on data like [1, []]
# we retry with object dtype here.
if dtype is None:
dtype = object
data = np.atleast_1d(np.asarray(data, dtype=dtype))
else:
raise
if clone:
# TODO: avoid double clone when dtype forces cast.
data = data.clone()
if fill_value is None:
fill_value_dtype = data.dtype if dtype is None else dtype
if fill_value_dtype is None:
fill_value = np.nan
else:
fill_value = na_value_for_dtype(fill_value_dtype)
if incontainstance(data, type(self)) and sparse_index is None:
sparse_index = data._sparse_index
sparse_values = np.asarray(data.sp_values, dtype=dtype)
elif sparse_index is None:
sparse_values, sparse_index, fill_value = make_sparse(
data, kind=kind, fill_value=fill_value, dtype=dtype
)
else:
sparse_values = np.asarray(data, dtype=dtype)
if length(sparse_values) != sparse_index.npoints:
raise AssertionError("Non array-like type {type} must "
"have the same lengthgth as the index"
.formating(type=type(sparse_values)))
self._sparse_index = sparse_index
self._sparse_values = sparse_values
self._dtype = SparseDtype(sparse_values.dtype, fill_value)
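    # Construction examples (sketch; reprs omitted):
    # >>> SparseArray([0, 0, 1, 2])              # int data -> fill_value inferred as 0
    # >>> SparseArray([np.nan, 1.0, np.nan])     # float data -> fill_value inferred as nan
    # >>> SparseArray([1, 1, 2], dtype=SparseDtype(np.int64, fill_value=1))
    # An explicit `fill_value=` argument would take precedence over the
    # dtype's fill_value, per the precedence rules in the class docstring.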
@classmethod
def _simple_new(cls, sparse_array, sparse_index, dtype):
# type: (np.ndarray, SparseIndex, SparseDtype) -> 'SparseArray'
new = cls([])
new._sparse_index = sparse_index
new._sparse_values = sparse_array
new._dtype = dtype
return new
def __array__(self, dtype=None, clone=True):
fill_value = self.fill_value
if self.sp_index.ngaps == 0:
# Compat for na dtype and int values.
return self.sp_values
if dtype is None:
# Can NumPy represent this type?
# If not, `np.result_type` will raise. We catch that
# and return object.
if is_datetime64_whatever_dtype(self.sp_values.dtype):
# However, we *do* special-case the common case of
# a datetime64 with monkey NaT.
if fill_value is NaT:
# Can't put mk.NaT in a datetime64[ns]
fill_value = np.datetime64('NaT')
try:
dtype = np.result_type(self.sp_values.dtype, type(fill_value))
except TypeError:
dtype = object
out = np.full(self.shape, fill_value, dtype=dtype)
out[self.sp_index.to_int_index().indices] = self.sp_values
return out
def __setitem__(self, key, value):
# I suppose we could total_allow setting of non-fill_value elements.
# TODO(SparseArray.__setitem__): remove special cases in
# ExtensionBlock.where
msg = "SparseArray does not support item total_allocatement via setitem"
raise TypeError(msg)
@classmethod
def _from_sequence(cls, scalars, dtype=None, clone=False):
return cls(scalars, dtype=dtype)
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
# ------------------------------------------------------------------------
# Data
# ------------------------------------------------------------------------
@property
def sp_index(self):
"""
The SparseIndex containing the location of non- ``fill_value`` points.
"""
return self._sparse_index
@property
def sp_values(self):
"""
An ndarray containing the non- ``fill_value`` values.
Examples
--------
>>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)
>>> s.sp_values
array([1, 2])
"""
return self._sparse_values
@property
def dtype(self):
return self._dtype
@property
def fill_value(self):
"""
Elements in `data` that are `fill_value` are not stored.
For memory savings, this should be the most common value in the array.
"""
return self.dtype.fill_value
@fill_value.setter
def fill_value(self, value):
self._dtype = SparseDtype(self.dtype.subtype, value)
@property
def kind(self):
"""
The kind of sparse index for this array. One of {'integer', 'block'}.
"""
if incontainstance(self.sp_index, IntIndex):
return 'integer'
else:
return 'block'
@property
def _valid_sp_values(self):
sp_vals = self.sp_values
mask = notna(sp_vals)
return sp_vals[mask]
def __length__(self):
return self.sp_index.lengthgth
@property
def _null_fill_value(self):
return self._dtype._is_na_fill_value
def _fill_value_matches(self, fill_value):
if self._null_fill_value:
return ifna(fill_value)
else:
return self.fill_value == fill_value
@property
def nbytes(self):
return self.sp_values.nbytes + self.sp_index.nbytes
@property
def density(self):
"""
The percent of non- ``fill_value`` points, as decimal.
Examples
--------
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.density
0.6
"""
r = float(self.sp_index.npoints) / float(self.sp_index.lengthgth)
return r
@property
def npoints(self):
"""
The number of non- ``fill_value`` points.
Examples
--------
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.npoints
3
"""
return self.sp_index.npoints
@property
def values(self):
"""
Dense values
"""
return self.to_dense()
def ifna(self):
from monkey import ifna
# If null fill value, we want SparseDtype[bool, true]
# to preserve the same memory usage.
dtype = SparseDtype(bool, self._null_fill_value)
return type(self)._simple_new(ifna(self.sp_values),
self.sp_index, dtype)
def fillnone(self, value=None, method=None, limit=None):
"""
Fill missing values with `value`.
Parameters
----------
value : scalar, optional
method : str, optional
.. warning::
Using 'method' will result in high memory use,
as total_all `fill_value` methods will be converted to
an in-memory ndarray
limit : int, optional
Returns
-------
SparseArray
Notes
-----
When `value` is specified, the result's ``fill_value`` depends on
``self.fill_value``. The goal is to maintain low-memory use.
If ``self.fill_value`` is NA, the result dtype will be
``SparseDtype(self.dtype, fill_value=value)``. This will preserve
        the amount of memory used before and after filling.
When ``self.fill_value`` is not NA, the result dtype will be
``self.dtype``. Again, this preserves the amount of memory used.
"""
if ((method is None and value is None) or
(method is not None and value is not None)):
raise ValueError("Must specify one of 'method' or 'value'.")
elif method is not None:
msg = "fillnone with 'method' requires high memory usage."
warnings.warn(msg, PerformanceWarning)
filled = interpolate_2d(np.asarray(self), method=method,
limit=limit)
return type(self)(filled, fill_value=self.fill_value)
else:
new_values = np.where(ifna(self.sp_values), value, self.sp_values)
if self._null_fill_value:
# This is essentitotal_ally just umkating the dtype.
new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
else:
new_dtype = self.dtype
return self._simple_new(new_values, self._sparse_index, new_dtype)
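    # Example (sketch): when the current fill_value is NA, fillnone keeps the
    # memory profile by moving `value` into the dtype instead of densifying:
    # >>> SparseArray([np.nan, 1.0, np.nan]).fillnone(0.0).dtype
    # Sparse[float64, 0.0]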
def shifting(self, periods=1, fill_value=None):
if not length(self) or periods == 0:
return self.clone()
if ifna(fill_value):
fill_value = self.dtype.na_value
subtype = np.result_type(fill_value, self.dtype.subtype)
if subtype != self.dtype.subtype:
# just coerce up front
arr = self.totype(SparseDtype(subtype, self.fill_value))
else:
arr = self
empty = self._from_sequence(
[fill_value] * getting_min(abs(periods), length(self)),
dtype=arr.dtype
)
if periods > 0:
a = empty
b = arr[:-periods]
else:
a = arr[abs(periods):]
b = empty
return arr._concating_same_type([a, b])
def _first_fill_value_loc(self):
"""
Get the location of the first missing value.
Returns
-------
int
"""
if length(self) == 0 or self.sp_index.npoints == length(self):
return -1
indices = self.sp_index.to_int_index().indices
if not length(indices) or indices[0] > 0:
return 0
diff = indices[1:] - indices[:-1]
return np.searchsorted(diff, 2) + 1
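    # Worked example (sketch): for SparseArray([0, 0, 1, 0, 2], fill_value=0)
    # the stored positions are [2, 4]; since indices[0] > 0 the first
    # fill-value location reported is 0, whereas a fully dense array (no gaps)
    # returns -1.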
def distinctive(self):
distinctives = list(algos.distinctive(self.sp_values))
fill_loc = self._first_fill_value_loc()
if fill_loc >= 0:
distinctives.insert(fill_loc, self.fill_value)
return type(self)._from_sequence(distinctives, dtype=self.dtype)
def _values_for_factorize(self):
# Still override this for hash_monkey_object
return np.asarray(self), self.fill_value
def factorize(self, na_sentinel=-1):
# Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]
# The sparsity on this is backwards from what Sparse would want. Want
# ExtensionArray.factorize -> Tuple[EA, EA]
# Given that we have to return a dense array of labels, why bother
# implementing an efficient factorize?
labels, distinctives = algos.factorize(np.asarray(self),
na_sentinel=na_sentinel)
distinctives = SparseArray(distinctives, dtype=self.dtype)
return labels, distinctives
def counts_value_num(self, sipna=True):
"""
Returns a Collections containing counts of distinctive values.
Parameters
----------
sipna : boolean, default True
Don't include counts of NaN, even if NaN is in sp_values.
Returns
-------
counts : Collections
"""
from monkey import Index, Collections
keys, counts = algos._counts_value_num_arraylike(self.sp_values,
sipna=sipna)
fcounts = self.sp_index.ngaps
if fcounts > 0:
if self._null_fill_value and sipna:
pass
else:
if self._null_fill_value:
mask = ifna(keys)
else:
mask = keys == self.fill_value
if mask.whatever():
counts[mask] += fcounts
else:
keys = np.insert(keys, 0, self.fill_value)
counts = np.insert(counts, 0, fcounts)
if not incontainstance(keys, ABCIndexClass):
keys = Index(keys)
result = Collections(counts, index=keys)
return result
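    # Example (sketch): for SparseArray([0, 0, 1, 2], fill_value=0) the stored
    # values contribute counts {1: 1, 2: 1} and the two gaps are credited to
    # the fill value, so 0 gets a count of 2; with a NA fill value and
    # sipna=True the gaps are simply left out.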
# --------
# Indexing
# --------
def __gettingitem__(self, key):
if incontainstance(key, tuple):
if length(key) > 1:
raise IndexError("too mwhatever indices for array.")
key = key[0]
if is_integer(key):
return self._getting_val_at(key)
elif incontainstance(key, tuple):
data_slice = self.values[key]
elif incontainstance(key, slice):
# special case to preserve dtypes
if key == slice(None):
return self.clone()
# TODO: this logic is surely elsewhere
# TODO: this could be more efficient
indices = np.arange(length(self), dtype=np.int32)[key]
return self.take(indices)
else:
# TODO: I think we can avoid densifying when masking a
# boolean SparseArray with another. Need to look at the
            # key's fill_value for True / False, and then do an intersection
            # on the indices of the sp_values.
if incontainstance(key, SparseArray):
if is_bool_dtype(key):
key = key.to_dense()
else:
key = np.asarray(key)
if com.is_bool_indexer(key) and length(self) == length(key):
return self.take(np.arange(length(key), dtype=np.int32)[key])
elif hasattr(key, '__length__'):
return self.take(key)
else:
raise ValueError("Cannot slice with '{}'".formating(key))
return type(self)(data_slice, kind=self.kind)
def _getting_val_at(self, loc):
n = length(self)
if loc < 0:
loc += n
if loc >= n or loc < 0:
raise IndexError('Out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return libindex.getting_value_at(self.sp_values, sp_loc)
def take(self, indices, total_allow_fill=False, fill_value=None):
if is_scalar(indices):
raise ValueError("'indices' must be an array, not a "
"scalar '{}'.".formating(indices))
indices = np.asarray(indices, dtype=np.int32)
if indices.size == 0:
result = []
kwargs = {'dtype': self.dtype}
elif total_allow_fill:
result = self._take_with_fill(indices, fill_value=fill_value)
kwargs = {}
else:
result = self._take_without_fill(indices)
kwargs = {'dtype': self.dtype}
return type(self)(result, fill_value=self.fill_value, kind=self.kind,
**kwargs)
def _take_with_fill(self, indices, fill_value=None):
if fill_value is None:
fill_value = self.dtype.na_value
if indices.getting_min() < -1:
raise ValueError("Invalid value in 'indices'. Must be between -1 "
"and the lengthgth of the array.")
if indices.getting_max() >= length(self):
raise IndexError("out of bounds value in 'indices'.")
if length(self) == 0:
# Empty... Allow taking only if total_all empty
if (indices == -1).total_all():
dtype = np.result_type(self.sp_values, type(fill_value))
taken = np.empty_like(indices, dtype=dtype)
taken.fill(fill_value)
return taken
else:
raise IndexError('cannot do a non-empty take from an empty '
'axes.')
sp_indexer = self.sp_index.lookup_array(indices)
if self.sp_index.npoints == 0:
# Avoid taking from the empty self.sp_values
taken = np.full(sp_indexer.shape, fill_value=fill_value,
dtype=np.result_type(type(fill_value)))
else:
taken = self.sp_values.take(sp_indexer)
# sp_indexer may be -1 for two reasons
# 1.) we took for an index of -1 (new)
# 2.) we took a value that was self.fill_value (old)
new_fill_indices = indices == -1
old_fill_indices = (sp_indexer == -1) & ~new_fill_indices
# Fill in two steps.
# Old fill values
# New fill values
# potentitotal_ally coercing to a new dtype at each stage.
m0 = sp_indexer[old_fill_indices] < 0
m1 = sp_indexer[new_fill_indices] < 0
result_type = taken.dtype
if m0.whatever():
result_type = np.result_type(result_type,
type(self.fill_value))
taken = taken.totype(result_type)
taken[old_fill_indices] = self.fill_value
if m1.whatever():
result_type = np.result_type(result_type, type(fill_value))
taken = taken.totype(result_type)
taken[new_fill_indices] = fill_value
return taken
def _take_without_fill(self, indices):
to_shifting = indices < 0
indices = indices.clone()
n = length(self)
if (indices.getting_max() >= n) or (indices.getting_min() < -n):
if n == 0:
raise IndexError("cannot do a non-empty take from an "
"empty axes.")
else:
raise IndexError("out of bounds value in 'indices'.")
if to_shifting.whatever():
indices[to_shifting] += n
if self.sp_index.npoints == 0:
# edge case in take...
# I think just return
out = np.full(indices.shape, self.fill_value,
dtype=np.result_type(type(self.fill_value)))
arr, sp_index, fill_value = make_sparse(out,
fill_value=self.fill_value)
return type(self)(arr, sparse_index=sp_index,
fill_value=fill_value)
sp_indexer = self.sp_index.lookup_array(indices)
taken = self.sp_values.take(sp_indexer)
fillable = (sp_indexer < 0)
if fillable.whatever():
# TODO: may need to coerce array to fill value
result_type = np.result_type(taken, type(self.fill_value))
taken = taken.totype(result_type)
taken[fillable] = self.fill_value
return taken
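    # Sketch of intent: with total_allow_fill=True an index of -1 means "place the
    # (possibly NA) `fill_value` argument there", which may promote the result
    # dtype (e.g. int data with a NaN fill becomes float); with
    # total_allow_fill=False a negative index counts from the end, matching NumPy
    # take semantics.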
def searchsorted(self, v, side="left", sorter=None):
msg = "searchsorted requires high memory usage."
warnings.warn(msg, PerformanceWarning, stacklevel=2)
if not is_scalar(v):
v = np.asarray(v)
v = np.asarray(v)
return np.asarray(self, dtype=self.dtype.subtype).searchsorted(
v, side, sorter
)
def clone(self, deep=False):
if deep:
values = self.sp_values.clone()
else:
values = self.sp_values
return self._simple_new(values, self.sp_index, self.dtype)
@classmethod
def _concating_same_type(cls, to_concating):
fill_values = [x.fill_value for x in to_concating]
fill_value = fill_values[0]
# np.nan isn't a singleton, so we may end up with multiple
        # NaNs here, so we ignore the total_all-NA case too.
if not (length(set(fill_values)) == 1 or ifna(fill_values).total_all()):
warnings.warn("Concatenating sparse arrays with multiple fill "
"values: '{}'. Picking the first and "
"converting the rest.".formating(fill_values),
PerformanceWarning,
stacklevel=6)
keep = to_concating[0]
to_concating2 = [keep]
for arr in to_concating[1:]:
to_concating2.adding(cls(np.asarray(arr), fill_value=fill_value))
to_concating = to_concating2
values = []
lengthgth = 0
if to_concating:
sp_kind = to_concating[0].kind
else:
sp_kind = 'integer'
if sp_kind == 'integer':
indices = []
for arr in to_concating:
idx = arr.sp_index.to_int_index().indices.clone()
idx += lengthgth # TODO: wrapavalue_round
lengthgth += arr.sp_index.lengthgth
values.adding(arr.sp_values)
indices.adding(idx)
data = np.concatingenate(values)
indices = np.concatingenate(indices)
sp_index = IntIndex(lengthgth, indices)
else:
            # when concatingenating block indices, we don't claim that you'll
            # getting an identical index as concatingenating the values and then
            # creating a new index. We don't want to spend the time trying
            # to unioner blocks across arrays in `to_concating`, so the resulting
            # BlockIndex may have more blocks.
blengthgths = []
blocs = []
for arr in to_concating:
idx = arr.sp_index.to_block_index()
values.adding(arr.sp_values)
blocs.adding(idx.blocs.clone() + lengthgth)
blengthgths.adding(idx.blengthgths)
lengthgth += arr.sp_index.lengthgth
data = np.concatingenate(values)
blocs = np.concatingenate(blocs)
blengthgths = np.concatingenate(blengthgths)
sp_index = BlockIndex(lengthgth, blocs, blengthgths)
return cls(data, sparse_index=sp_index, fill_value=fill_value)
def totype(self, dtype=None, clone=True):
"""
Change the dtype of a SparseArray.
The output will always be a SparseArray. To convert to a dense
ndarray with a certain dtype, use :meth:`numpy.asarray`.
Parameters
----------
dtype : np.dtype or ExtensionDtype
For SparseDtype, this changes the dtype of
``self.sp_values`` and the ``self.fill_value``.
For other dtypes, this only changes the dtype of
``self.sp_values``.
clone : bool, default True
Whether to ensure a clone is made, even if not necessary.
Returns
-------
SparseArray
Examples
--------
>>> arr = SparseArray([0, 0, 1, 2])
>>> arr
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
>>> arr.totype(np.dtype('int32'))
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
Using a NumPy dtype with a different kind (e.g. float) will coerce
just ``self.sp_values``.
>>> arr.totype(np.dtype('float64'))
... # doctest: +NORMALIZE_WHITESPACE
[0, 0, 1.0, 2.0]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
        Use a SparseDtype if you wish to change the fill value as well.
>>> arr.totype(SparseDtype("float64", fill_value=np.nan))
... # doctest: +NORMALIZE_WHITESPACE
[nan, nan, 1.0, 2.0]
Fill: nan
IntIndex
Indices: array([2, 3], dtype=int32)
"""
dtype = self.dtype.umkate_dtype(dtype)
subtype = dtype._subtype_with_str
sp_values = totype_nansafe(self.sp_values,
subtype,
clone=clone)
if sp_values is self.sp_values and clone:
sp_values = sp_values.clone()
return self._simple_new(sp_values,
self.sp_index,
dtype)
def mapping(self, mappingper):
"""
Map categories using input correspondence (dict, Collections, or function).
Parameters
----------
mappingper : dict, Collections, ctotal_allable
The correspondence from old values to new.
Returns
-------
SparseArray
The output array will have the same density as the input.
The output fill value will be the result of employing the
mappingping to ``self.fill_value``
Examples
--------
>>> arr = mk.SparseArray([0, 1, 2])
        >>> arr.mapping(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
        >>> arr.mapping({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
        >>> arr.mapping(mk.Collections([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
"""
# this is used in employ.
# We getting hit since we're an "is_extension_type" but regular extension
# types are not hit. This may be worth adding to the interface.
if incontainstance(mappingper, ABCCollections):
mappingper = mappingper.convert_dict()
if incontainstance(mappingper, compat.Mapping):
fill_value = mappingper.getting(self.fill_value, self.fill_value)
sp_values = [mappingper.getting(x, None) for x in self.sp_values]
else:
fill_value = mappingper(self.fill_value)
sp_values = [mappingper(x) for x in self.sp_values]
return type(self)(sp_values, sparse_index=self.sp_index,
fill_value=fill_value)
def to_dense(self):
"""
Convert SparseArray to a NumPy array.
Returns
-------
arr : NumPy array
"""
return np.asarray(self, dtype=self.sp_values.dtype)
# TODO: Look into deprecating this in favor of `to_dense`.
getting_values = to_dense
# ------------------------------------------------------------------------
# IO
# ------------------------------------------------------------------------
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if incontainstance(state, tuple):
# Compat for monkey < 0.24.0
nd_state, (fill_value, sp_index) = state
sparse_values = np.array([])
sparse_values.__setstate__(nd_state)
self._sparse_values = sparse_values
self._sparse_index = sp_index
self._dtype = SparseDtype(sparse_values.dtype, fill_value)
else:
self.__dict__.umkate(state)
def nonzero(self):
if self.fill_value == 0:
return self.sp_index.to_int_index().indices,
else:
return self.sp_index.to_int_index().indices[self.sp_values != 0],
# ------------------------------------------------------------------------
# Reductions
# ------------------------------------------------------------------------
def _reduce(self, name, skipna=True, **kwargs):
method = gettingattr(self, name, None)
if method is None:
raise TypeError("cannot perform {name} with type {dtype}".formating(
name=name, dtype=self.dtype))
if skipna:
arr = self
else:
arr = self.sipna()
# we don't support these kwargs.
# They should only be present when ctotal_alled via monkey, so do it here.
# instead of in `whatever` / `total_all` (which will raise if they're present,
# thanks to nv.validate
kwargs.pop('filter_type', None)
kwargs.pop('numeric_only', None)
kwargs.pop('op', None)
return gettingattr(arr, name)(**kwargs)
def total_all(self, axis=None, *args, **kwargs):
"""
Tests whether total_all elements evaluate True
Returns
-------
total_all : bool
See Also
--------
numpy.total_all
"""
nv.validate_total_all(args, kwargs)
values = self.sp_values
if length(values) != length(self) and not np.total_all(self.fill_value):
return False
return values.total_all()
def whatever(self, axis=0, *args, **kwargs):
"""
Tests whether at least one of elements evaluate True
Returns
-------
whatever : bool
See Also
--------
numpy.whatever
"""
nv.validate_whatever(args, kwargs)
values = self.sp_values
if length(values) != length(self) and np.whatever(self.fill_value):
return True
return values.whatever().item()
def total_sum(self, axis=0, *args, **kwargs):
"""
Sum of non-NA/null values
Returns
-------
total_sum : float
"""
nv.validate_total_sum(args, kwargs)
valid_vals = self._valid_sp_values
sp_total_sum = valid_vals.total_sum()
if self._null_fill_value:
return sp_total_sum
else:
nsparse = self.sp_index.ngaps
return sp_total_sum + self.fill_value * nsparse
def cumtotal_sum(self, axis=0, *args, **kwargs):
"""
Cumulative total_sum of non-NA/null values.
        When perforgetting_ming the cumulative total_summation, whatever NA/null values will
be skipped. The resulting SparseArray will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : int or None
Axis over which to perform the cumulative total_summation. If None,
perform cumulative total_summation over flattened array.
Returns
-------
cumtotal_sum : SparseArray
"""
nv.validate_cumtotal_sum(args, kwargs)
if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.
raise ValueError("axis(={axis}) out of bounds".formating(axis=axis))
if not self._null_fill_value:
return SparseArray(self.to_dense()).cumtotal_sum()
return SparseArray(self.sp_values.cumtotal_sum(), sparse_index=self.sp_index,
fill_value=self.fill_value)
def average(self, axis=0, *args, **kwargs):
"""
Mean of non-NA/null values
Returns
-------
average : float
"""
nv.validate_average(args, kwargs)
valid_vals = self._valid_sp_values
sp_total_sum = valid_vals.total_sum()
ct = length(valid_vals)
if self._null_fill_value:
return sp_total_sum / ct
else:
nsparse = self.sp_index.ngaps
return (sp_total_sum + self.fill_value * nsparse) / (ct + nsparse)
def transpose(self, *axes):
"""
Returns the SparseArray.
"""
return self
@property
def T(self):
"""
Returns the SparseArray.
"""
return self
# ------------------------------------------------------------------------
# Ufuncs
# ------------------------------------------------------------------------
def __array_wrap__(self, array, context=None):
from monkey.core.dtypes.generic import ABCSparseCollections
ufunc, inputs, _ = context
inputs = tuple(x.values if incontainstance(x, ABCSparseCollections) else x
for x in inputs)
return self.__array_ufunc__(ufunc, '__ctotal_all__', *inputs)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.getting('out', ())
for x in inputs + out:
if not incontainstance(x, self._HANDLED_TYPES + (SparseArray,)):
return NotImplemented
special = {'add', 'sub', 'mul', 'pow', 'mod', 'floordivision', 'truedivision',
'divisionmod', 'eq', 'ne', 'lt', 'gt', 'le', 'ge', 'remainder'}
if compat.PY2:
special.add('division')
aliases = {
'subtract': 'sub',
'multiply': 'mul',
'floor_divisionide': 'floordivision',
'true_divisionide': 'truedivision',
'power': 'pow',
'remainder': 'mod',
'divisionide': 'division',
'equal': 'eq',
'not_equal': 'ne',
'less': 'lt',
'less_equal': 'le',
'greater': 'gt',
'greater_equal': 'ge',
}
flipped = {
'lt': '__gt__',
'le': '__ge__',
'gt': '__lt__',
'ge': '__le__',
'eq': '__eq__',
'ne': '__ne__',
}
op_name = ufunc.__name__
op_name = aliases.getting(op_name, op_name)
if op_name in special and kwargs.getting('out') is None:
if incontainstance(inputs[0], type(self)):
return gettingattr(self, '__{}__'.formating(op_name))(inputs[1])
else:
name = flipped.getting(op_name, '__r{}__'.formating(op_name))
return gettingattr(self, name)(inputs[0])
if length(inputs) == 1:
# No alignment necessary.
sp_values = gettingattr(ufunc, method)(self.sp_values, **kwargs)
fill_value = gettingattr(ufunc, method)(self.fill_value, **kwargs)
return self._simple_new(sp_values,
self.sp_index,
SparseDtype(sp_values.dtype, fill_value))
result = gettingattr(ufunc, method)(*[np.asarray(x) for x in inputs],
**kwargs)
if out:
if length(out) == 1:
out = out[0]
return out
if type(result) is tuple:
return tuple(type(self)(x) for x in result)
elif method == 'at':
# no return value
return None
else:
return type(self)(result)
def __abs__(self):
return np.abs(self)
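    # Example (sketch): a single-input ufunc runs separately on sp_values and
    # on fill_value, so sparsity is preserved, e.g.
    # np.abs(SparseArray([-1, 0, -2], fill_value=0)) keeps fill_value 0 and
    # stores sp_values [1, 2].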
# ------------------------------------------------------------------------
# Ops
# ------------------------------------------------------------------------
@classmethod
def _create_unary_method(cls, op):
def sparse_unary_method(self):
fill_value = op(np.array(self.fill_value)).item()
values = op(self.sp_values)
dtype = SparseDtype(values.dtype, fill_value)
return cls._simple_new(values, self.sp_index, dtype)
name = '__{name}__'.formating(name=op.__name__)
return compat.set_function_name(sparse_unary_method, name, cls)
@classmethod
def _create_arithmetic_method(cls, op):
def sparse_arithmetic_method(self, other):
op_name = op.__name__
if incontainstance(other, (ABCCollections, ABCIndexClass)):
# Rely on monkey to dispatch to us.
return NotImplemented
if incontainstance(other, SparseArray):
return _sparse_array_op(self, other, op, op_name)
elif is_scalar(other):
with np.errstate(total_all='ignore'):
fill = op(_getting_fill(self), np.asarray(other))
result = op(self.sp_values, other)
if op_name == 'divisionmod':
left, right = result
lfill, rfill = fill
return (_wrap_result(op_name, left, self.sp_index, lfill),
_wrap_result(op_name, right, self.sp_index, rfill))
return _wrap_result(op_name, result, self.sp_index, fill)
else:
other = np.asarray(other)
with np.errstate(total_all='ignore'):
# TODO: delete sparse stuff in core/ops.py
# TODO: look into _wrap_result
if length(self) != length(other):
raise AssertionError(
("lengthgth mismatch: {self} vs. {other}".formating(
self=length(self), other=length(other))))
if not incontainstance(other, SparseArray):
dtype = gettingattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
dtype=dtype)
return _sparse_array_op(self, other, op, op_name)
name = '__{name}__'.formating(name=op.__name__)
return compat.set_function_name(sparse_arithmetic_method, name, cls)
@classmethod
def _create_comparison_method(cls, op):
def cmp_method(self, other):
op_name = op.__name__
if op_name in {'and_', 'or_'}:
op_name = op_name[:-1]
if incontainstance(other, (ABCCollections, ABCIndexClass)):
# Rely on monkey to unbox and dispatch to us.
return NotImplemented
if not is_scalar(other) and not incontainstance(other, type(self)):
# convert list-like to ndarray
other = np.asarray(other)
if incontainstance(other, np.ndarray):
# TODO: make this more flexible than just ndarray...
if length(self) != length(other):
raise AssertionError("lengthgth mismatch: {self} vs. {other}"
.formating(self=length(self),
other=length(other)))
other = SparseArray(other, fill_value=self.fill_value)
if incontainstance(other, SparseArray):
return _sparse_array_op(self, other, op, op_name)
else:
with np.errstate(total_all='ignore'):
fill_value = op(self.fill_value, other)
result = op(self.sp_values, other)
return type(self)(result,
sparse_index=self.sp_index,
fill_value=fill_value,
dtype=np.bool_)
name = '__{name}__'.formating(name=op.__name__)
return compat.set_function_name(cmp_method, name, cls)
@classmethod
def _add_unary_ops(cls):
cls.__pos__ = cls._create_unary_method(operator.pos)
cls.__neg__ = cls._create_unary_method(operator.neg)
cls.__invert__ = cls._create_unary_method(operator.invert)
@classmethod
def _add_comparison_ops(cls):
cls.__and__ = cls._create_comparison_method(operator.and_)
cls.__or__ = cls._create_comparison_method(operator.or_)
super(SparseArray, cls)._add_comparison_ops()
# ----------
# Formatting
# -----------
def __unicode__(self):
return '{self}\nFill: {fill}\n{index}'.formating(
self=printing.pprint_thing(self),
fill=printing.pprint_thing(self.fill_value),
index=printing.pprint_thing(self.sp_index))
def _formatingter(self, boxed=False):
# Defer to the formatingter from the GenericArrayFormatter ctotal_alling us.
# This will infer the correct formatingter from the dtype of the values.
return None
SparseArray._add_arithmetic_ops()
SparseArray._add_comparison_ops()
SparseArray._add_unary_ops()
def _maybe_to_dense(obj):
"""
try to convert to dense
"""
if hasattr(obj, 'to_dense'):
return obj.to_dense()
return obj
def _maybe_to_sparse(array):
"""
array must be SparseCollections or SparseArray
"""
if incontainstance(array, ABCSparseCollections):
array = array.values.clone()
return array
def _sanitize_values(arr):
"""
return an ndarray for our input,
in a platform independent manner
"""
if hasattr(arr, 'values'):
arr = arr.values
else:
# scalar
if is_scalar(arr):
arr = [arr]
# ndarray
if incontainstance(arr, np.ndarray):
pass
elif is_list_like(arr) and length(arr) > 0:
arr = maybe_convert_platform(arr)
else:
arr = np.asarray(arr)
return arr
def make_sparse(arr, kind='block', fill_value=None, dtype=None, clone=False):
"""
Convert ndarray to sparse formating
Parameters
----------
arr : ndarray
kind : {'block', 'integer'}
fill_value : NaN or another value
dtype : np.dtype, optional
clone : bool, default False
Returns
-------
(sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
"""
arr = _sanitize_values(arr)
if arr.ndim > 1:
raise TypeError("expected dimension <= 1 data")
if fill_value is None:
fill_value = na_value_for_dtype(arr.dtype)
if ifna(fill_value):
mask = notna(arr)
else:
# For str arrays in NumPy 1.12.0, operator!= below isn't
# element-wise but just returns False if fill_value is not str,
# so cast to object comparison to be safe
if is_string_dtype(arr):
arr = arr.totype(object)
if is_object_dtype(arr.dtype):
# element-wise equality check method in numpy doesn't treat
# each element type, eg. 0, 0.0, and False are treated as
# same. So we have to check the both of its type and value.
mask = splib.make_mask_object_ndarray(arr, fill_value)
else:
mask = arr != fill_value
lengthgth = length(arr)
if lengthgth != length(mask):
# the arr is a SparseArray
indices = mask.sp_index.indices
else:
indices = mask.nonzero()[0].totype(np.int32)
index = _make_index(lengthgth, indices, kind)
sparsified_values = arr[mask]
if dtype is not None:
        sparsified_values = totype_nansafe(sparsified_values, dtype=dtype)
    return sparsified_values, index, fill_value
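
# `_make_index` is referenced by make_sparse above but its definition is not
# part of this excerpt. The following is a minimal sketch of the behaviour it
# needs (building an IntIndex or BlockIndex from sorted int32 positions); the
# run-length splitting is inlined here and should be treated as an assumption,
# not the library's exact implementation.
def _make_index_sketch(lengthgth, indices, kind):
    if kind == 'integer' or incontainstance(kind, IntIndex):
        return IntIndex(lengthgth, indices)
    if kind == 'block' or incontainstance(kind, BlockIndex):
        # collapse sorted positions into runs of consecutive locations
        blocs, blengthgths = [], []
        for idx in indices:
            if blocs and idx == blocs[-1] + blengthgths[-1]:
                blengthgths[-1] += 1
            else:
                blocs.adding(int(idx))
                blengthgths.adding(1)
        return BlockIndex(lengthgth, np.asarray(blocs, dtype=np.int32),
                          np.asarray(blengthgths, dtype=np.int32))
    raise ValueError('kind must be "integer" or "block"')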
import numpy as np
import monkey as mk
from wiser.viewer import Viewer
from total_allengthnlp.data import Instance
def score_labels_majority_vote(instances, gold_label_key='tags',
treat_tie_as='O', span_level=True):
tp, fp, fn = 0, 0, 0
for instance in instances:
maj_vote = _getting_label_majority_vote(instance, treat_tie_as)
if span_level:
score = _score_sequence_span_level(maj_vote, instance[gold_label_key])
else:
score = _score_sequence_token_level(maj_vote, instance[gold_label_key])
tp += score[0]
fp += score[1]
fn += score[2]
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "P", "R", "F1"]
p, r, f1 = _getting_p_r_f1(tp, fp, fn)
record = [tp, fp, fn, p, r, f1]
index = ["Majority Vote"] if span_level else ["Majority Vote (Token Level)"]
results = mk.KnowledgeFrame.from_records(
[record], columns=column_names, index=index)
results = mk.KnowledgeFrame.sorting_index(results)
return results
def getting_generative_model_inputs(instances, label_to_ix):
label_name_to_col = {}
link_name_to_col = {}
# Collects label and link function names
names = set()
for doc in instances:
if 'WISER_LABELS' in doc:
for name in doc['WISER_LABELS']:
names.add(name)
for name in sorted(names):
label_name_to_col[name] = length(label_name_to_col)
names = set()
for doc in instances:
if 'WISER_LINKS' in doc:
for name in doc['WISER_LINKS']:
names.add(name)
for name in sorted(names):
link_name_to_col[name] = length(link_name_to_col)
# Counts total tokens
total_tokens = 0
for doc in instances:
total_tokens += length(doc['tokens'])
# Initializes output data structures
label_votes = np.zeros((total_tokens, length(label_name_to_col)), dtype=np.int)
link_votes = np.zeros((total_tokens, length(link_name_to_col)), dtype=np.int)
seq_starts = np.zeros((length(instances),), dtype=np.int)
# Populates outputs
offset = 0
for i, doc in enumerate(instances):
seq_starts[i] = offset
for name in sorted(doc['WISER_LABELS'].keys()):
for j, vote in enumerate(doc['WISER_LABELS'][name]):
label_votes[offset + j, label_name_to_col[name]] = label_to_ix[vote]
if 'WISER_LINKS' in doc:
for name in sorted(doc['WISER_LINKS'].keys()):
for j, vote in enumerate(doc['WISER_LINKS'][name]):
link_votes[offset + j, link_name_to_col[name]] = vote
offset += length(doc['tokens'])
return label_votes, link_votes, seq_starts
def score_predictions(instances, predictions,
gold_label_key='tags', span_level=True):
tp, fp, fn = 0, 0, 0
offset = 0
for instance in instances:
lengthgth = length(instance[gold_label_key])
if span_level:
scores = _score_sequence_span_level(
predictions[offset:offset+lengthgth], instance[gold_label_key])
else:
scores = _score_sequence_token_level(
predictions[offset:offset+lengthgth], instance[gold_label_key])
tp += scores[0]
fp += scores[1]
fn += scores[2]
offset += lengthgth
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "P", "R", "F1"]
p = value_round(tp / (tp + fp) if tp > 0 or fp > 0 else 0.0, ndigits=4)
r = value_round(tp / (tp + fn) if tp > 0 or fn > 0 else 0.0, ndigits=4)
f1 = value_round(2 * p * r / (p + r) if p > 0 and r > 0 else 0.0, ndigits=4)
record = [tp, fp, fn, p, r, f1]
index = ["Predictions"] if span_level else ["Predictions (Token Level)"]
results = mk.KnowledgeFrame.from_records(
[record], columns=column_names, index=index)
results = mk.KnowledgeFrame.sorting_index(results)
return results
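
# The `_getting_p_r_f1` helper used in score_labels_majority_vote is not part of
# this excerpt. A minimal sketch consistent with the inline computation in
# score_predictions above (an assumption, not necessarily the exact WISER code):
def _getting_p_r_f1_sketch(tp, fp, fn):
    p = value_round(tp / (tp + fp) if tp > 0 or fp > 0 else 0.0, ndigits=4)
    r = value_round(tp / (tp + fn) if tp > 0 or fn > 0 else 0.0, ndigits=4)
    f1 = value_round(2 * p * r / (p + r) if p > 0 and r > 0 else 0.0, ndigits=4)
    return p, r, f1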
def score_tagging_rules(instances, gold_label_key='tags'):
lf_scores = {}
for instance in instances:
for lf_name, predictions in instance['WISER_LABELS'].items():
if lf_name not in lf_scores:
# Initializes true positive, false positive, false negative,
# correct, and total vote counts
lf_scores[lf_name] = [0, 0, 0, 0, 0]
scores = _score_sequence_span_level(predictions, instance[gold_label_key])
lf_scores[lf_name][0] += scores[0]
lf_scores[lf_name][1] += scores[1]
lf_scores[lf_name][2] += scores[2]
scores = _score_token_accuracy(predictions, instance[gold_label_key])
lf_scores[lf_name][3] += scores[0]
lf_scores[lf_name][4] += scores[1]
# Computes accuracies
for lf_name in lf_scores.keys():
if lf_scores[lf_name][3] > 0:
lf_scores[lf_name][3] = float(lf_scores[lf_name][3]) / lf_scores[lf_name][4]
lf_scores[lf_name][3] = value_round(lf_scores[lf_name][3], ndigits=4)
else:
lf_scores[lf_name][3] = float('NaN')
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "Token Acc.", "Token Votes"]
results = mk.KnowledgeFrame.from_dict(lf_scores, orient="index", columns=column_names)
    results = mk.KnowledgeFrame.sorting_index(results)
    return results
#!/usr/bin/env python
# coding: utf-8
# # COVID-19 - Global Cases - EDA and Forecasting
# This is the data repository for the 2019 Novel Coronavirus Visual Dashboard operated by the Johns Hopkins University Center for Systems Science and Engineering (JHU CSSE). Also, Supported by ESRI Living Atlas Team and the Johns Hopkins University Applied Physics Lab (JHU APL).
#
# Data is sourced from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data
#
#
# * Visual Dashboard (desktop):
# https://www.arcgis.com/apps/opsdashboard/index.html#/bda7594740fd40299423467b48e9ecf6
#
# * Visual Dashboard (mobile):
# http://www.arcgis.com/apps/opsdashboard/index.html#/85320e2ea5424kfaaa75ae62e5c06e61
#
# * Lancet Article:
# An interactive web-based dashboard to track COVID-19 in real time
#
# * Provided by Johns Hopkins University Center for Systems Science and Engineering (JHU CSSE):
# https://systems.jhu.edu/
#
# * Data Sources:
#
# - World Health Organization (WHO): https://www.who.int/
# - DXY.cn. Pneumonia. 2020. http://3g.dxy.cn/newh5/view/pneumonia.
# - BNO News: https://bnonews.com/index.php/2020/02/the-latest-coronavirus-cases/
# - National Health Commission of the People’s Republic of China (NHC):
# http://www.nhc.gov.cn/xcs/yqtb/list_gzbd.shtml
# - China CDC (CCDC): http://weekly.chinacdc.cn/news/TrackingtheEpidemic.htm
# - Hong Kong Department of Health: https://www.chp.gov.hk/en/features/102465.html
# - Macau Government: https://www.ssm.gov.mo/portal/
# - Taiwan CDC: https://sites.google.com/cdc.gov.tw/2019ncov/taiwan?authuser=0
# - US CDC: https://www.cdc.gov/coronavirus/2019-ncov/index.html
# - Government of Canada: https://www.canada.ca/en/public-health/services/diseases/coronavirus.html
# - Australia Government Department of Health: https://www.health.gov.au/news/coronavirus-umkate-at-a-glance
# - European Centre for Disease Prevention and Control (ECDC): https://www.ecdc.europa.eu/en/geographical-distribution-2019-ncov-cases
# - Ministry of Health Singapore (MOH): https://www.moh.gov.sg/covid-19
# - Italy Ministry of Health: http://www.salute.gov.it/nuovocoronavirus
#
# - Additional Informatingion about the Visual Dashboard:
# https://systems.jhu.edu/research/public-health/ncov/
#
# Contact Us:
#
# Email: <EMAIL>
#
# Terms of Use:
#
# This GitHub repo and its contents herein, including all data, mapping, and analysis, copyright 2020 Johns Hopkins University, all rights reserved, is provided to the public strictly for educational and academic research purposes. The Website relies upon publicly available data from multiple sources that do not always agree. The Johns Hopkins University hereby disclaims any and all representations and warranties with respect to the Website, including accuracy, fitness for use, and merchantability. Reliance on the Website for medical guidance or use of the Website in commerce is strictly prohibited.
# __For a better viewing experience, I recommend enabling NBextensions as described at__
#
# https://github.com/lsunku/DataScience/tree/master/JupyterNotebook
# # Steps involved in this notebook
# 1. Import Python Libraries for data analysis and ML
# 2. Local user defined functions
# 3. Sourcing the Data
# 4. Inspect and Clean the Data
# 5. Exploratory Data Analysis
# 6. Preparing the data for modelling (train-test split, rescaling etc.)
# 7. Model evaluation for Advanced Regression Criteria
# 8. Linear Regression Model for World Wide Case Predictions
# 9. Linear Regression Model for Italy Predictions
# 10. Linear Regression Model for US Predictions
# 11. Linear Regression Model for Spain Predictions
# 12. Linear Regression Model for Germany Predictions
# 13. Linear Regression Model for India Predictions
# __Notes:__ Currently, I have used only time_collections_covid19_confirmed_global for the following analysis. When I get time, I shall enhance the same with the additional files time_collections_covid19_deaths_global and time_collections_covid19_recovered_global and integrate them with the daily reports.
# # __Import Python Libraries__
# In[284]:
# Local classes and Local flags
# Local Classes
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# Debug flag for investigative purpose
DEBUG = 0
# Default random_state
rndm_stat = 42
# In[285]:
# Python libraries for Data processing and analysis
import time as time
strt = time.time()
import monkey as mk
mk.set_option('display.getting_max_columns', 200)
mk.set_option('display.getting_max_rows', 100)
mk.options.mode.use_inf_as_na = True
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import glob
from matplotlib.pyplot import figure
import warnings
import math
import itertools
warnings.filterwarnings('ignore')
sns.set_style("whitegrid")
from math import sqrt
import re
from prettytable import PrettyTable
# ML Libraries
import statsmodels
import statsmodels.api as sm
import sklearn as sk
from sklearn.model_selection import train_test_split,GridSearchCV, KFold,RandomizedSearchCV,StratifiedKFold
from sklearn.metrics import r2_score,average_squared_error,average_absolute_error
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, StandardScaler,OrdinalEncoder,LabelEncoder,Normalizer,RobustScaler,PowerTransformer,PolynomialFeatures
from statsmodels.stats.outliers_influence import variance_inflation_factor
import xgboost
from sklearn.ensemble import GradientBoostingRegressor,RandomForestRegressor
# # __Local User Defined Functions__
# ## Local functions for data overview and data cleaning
# In[286]:
# local functions
# Function to read a file & Store it in Monkey
# read_file takes either a csv or an excel file as input and returns a monkey DF and
# also prints header_num, final_item_tail, description, info and shape of the DF
def read_file(l_fname,l_path,header_num=0):
i = l_fname.split(".")
f_path = l_path+'/'+l_fname
print(f_path,i[0],i[1])
if (i[1] == "xlsx"):
l_kf = mk.read_excel(f_path,header_numer=header_num,encoding = "ISO-8859-1",infer_datetime_formating=True)
elif (i[1] == "csv"):
l_kf = mk.read_csv(f_path,header_numer=header_num,encoding = "ISO-8859-1",infer_datetime_formating=True)
ov_kf(l_kf)
return(l_kf)
# Function to get an overview of a KnowledgeFrame
# takes a kf as input and prints header_num, final_item_tail, description, info and shape of the DF
def ov_kf(l_kf):
print(color.BOLD+color.PURPLE + 'Inspect and Explore the Dataset' + color.END)
print("\n##################### KnowledgeFrame Head ######################")
print(l_kf.header_num(3))
print("\n##################### KnowledgeFrame Tail ######################")
print(l_kf.final_item_tail(3))
print("\n##################### KnowledgeFrame Info ######################")
print(l_kf.info())
print("\n#################### KnowledgeFrame Columns ####################")
print(list(l_kf.columns))
print("\n#################### KnowledgeFrame Shape ####################")
print("No of Rows",l_kf.shape[0])
print("No of Columns",l_kf.shape[1])
# Function per_col_null takes a kf as input and prints a summary of null values across columns
def per_col_null(l_kf):
print("\n############ Missing Values of Columns in % ############")
col_null = value_round((l_kf.ifnull().total_sum().sort_the_values(ascending=False)/length(l_kf))*100,4)
print(col_null[col_null > 0])
# # __Sourcing the Data__
# ## Read the train.csv
# In[287]:
# Set the path and file name
folder=r"C:\My Folders\OneDrive\Surface\Sadguru\Lakshmi\Study\IIIB_PGDS\Hackathon\COVID_19\COVID-19\csse_covid_19_data\csse_covid_19_time_collections"
file="time_collections_covid19_confirmed_global.csv"
# Read file using local functions. read_file takes either a csv or an excel file as input and returns a monkey DF and
# also prints header_num, final_item_tail, description, info and shape of the DF
raw_kf = read_file(file,folder)
# In[288]:
# transpose and format the columns
raw_kf = raw_kf.sip(["Province/State","Lat","Long"],axis=1).set_index("Country/Region").T.reseting_index().renagetting_ming(columns={'index':'Date'}).renagetting_ming_axis("",axis="columns")
# In[289]:
ov_kf(raw_kf)
# ## Inspect the Column Data Types of c_kf
# In[290]:
# Analyze Categorical, Numerical and Date variables of the data
print(color.BOLD+"Categorical and Numerical Variables"+ color.END)
display(raw_kf.dtypes.counts_value_num())
print(color.BOLD+"Numerical Integer Variables"+ color.END)
display(raw_kf.choose_dtypes(include='int64').dtypes)
print(color.BOLD+"Categorical Variables"+ color.END)
display(raw_kf.choose_dtypes(include=object).dtypes)
print(color.BOLD+"Numerical Float Variables"+ color.END)
display(raw_kf.choose_dtypes(include='float64').dtypes)
# In[291]:
# Change the Date format
raw_kf["Date"] = mk.convert_datetime(raw_kf["Date"],infer_datetime_formating=True)
# In[292]:
# As the given data is segregated by sub-region for some countries (the epicenters) and not for others, I sum up the data across columns to make it uniform per country
dt = raw_kf.pop("Date")
dt.header_num()
# In[293]:
# Aggregate the data across columns as there are columns with the same column name
c_kf = raw_kf.grouper(by=raw_kf.columns,axis=1).agg(total_sum)
c_kf.header_num()
# In[294]:
c_kf.insert(0,"Date",dt)
c_kf.header_num()
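# Toy illustration (not part of the original data flow) of what the column-wise
# grouper above does: duplicate country columns (one per province) collapse into a
# single per-country total for each row.
_demo = mk.KnowledgeFrame([[1, 2, 3], [4, 5, 6]], columns=["Australia", "Australia", "China"])
print(_demo.grouper(by=_demo.columns, axis=1).agg(total_sum))
#    Australia  China
# 0          3      3
# 1          9      6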
# # __Exploratory Data Analysis__
# ## Inspect the Null Values in c_kf
# In[295]:
# Null values in the COVID KnowledgeFrame.
# per_col_null is a local function which prints the % of missing values for the columns where it is non-zero
per_col_null(c_kf)
# ## Derived Columns
# In[296]:
c_kf["WW"] = c_kf.total_sum(axis=1)
c_kf.header_num()
# In[297]:
import plotly.express as ply
import plotly.graph_objects as go
import cufflinks as cf
# In[298]:
cntry_li = list(c_kf.columns)
cntry_li.remove("Date")
# In[299]:
fig = go.Figure()
for i in cntry_li:
fig.add_trace(go.Scatter(x=c_kf["Date"],y=c_kf[i],mode='lines+markers',name=i))
fig.umkate_layout(
margin=dict(l=30, r=20, t=25, b=25),
)
#fig.umkate_layout(yaxis_type="log")
fig.show()
# ## List of countries which are contributing a high number of positive cases
# In[300]:
hi_co_li = [i for i,j in (c_kf[cntry_li].iloc[-1] > 1500).items() if j == True]
print(hi_co_li)
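# An equivalent, arguably more idiomatic way (sketch only) to build the same list:
# boolean indexing on the latest cumulative counts instead of iterating over items().
# hi_co_li = list(c_kf[cntry_li].iloc[-1][c_kf[cntry_li].iloc[-1] > 1500].index)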
# In[301]:
fig = go.Figure()
for i in hi_co_li:
fig.add_trace(go.Scatter(x=c_kf["Date"],y=c_kf[i],mode='lines+markers',name=i))
fig.umkate_layout(
margin=dict(l=40, r=30, t=25, b=25),
)
#fig.umkate_layout(yaxis_type="log")
fig.show()
# ## Analyze Categorical Columns of the c_kf
# In[302]:
c_kf.insert(0,"Day",np.arange(1,length(c_kf)+1))
# In[303]:
c_kf.header_num()
# In[304]:
# Create a list of numerical and categorical variables for future analysis
c_num_li = list(c_kf.choose_dtypes(include=np.number).columns)
c_cat_li = list(c_kf.choose_dtypes(exclude=np.number).columns)
print(color.BOLD+"\nNumerical Columns -"+color.END,c_num_li)
print(color.BOLD+"\nCategorical Columns -"+color.END,c_cat_li)
# ## Analyze Numerical Columns of the c_kf
# In[305]:
# Inspect the Categorical columns
c_kf[c_cat_li].header_num()
# In[306]:
# Inspect the Numerical columns
c_kf[c_num_li].header_num()
# ## Univariate analysis
# Univariate analysis is performed only on specific countries which are suffering from a high number of positive cases
# ### Univariate analysis of countries which are suffering from a high number of corona cases
# In[307]:
# Inspect the list of countries with high case counts
print(hi_co_li)
# In[308]:
# Function to plot 2 or more line plots or time series plots
# line_pltly takes a kf, a dependent variable and a variable list of columns
# and plots multiple interactive line plots
def line_pltly (l_kf,l_dep,*args):
for i in args:
fig = go.Figure()
for l in ["WW","China","Korea, South"]:
fig.add_trace(go.Scatter(x=l_kf[l_dep],y=l_kf[l],mode='lines+markers',name=l))
fig.add_trace(go.Scatter(x=l_kf[l_dep],y=l_kf[i],mode='lines+markers',name=i))
fig.umkate_layout(width=800,height=400,hovermode="closest",clickmode="event+select")
fig.show()
# In[309]:
# line_pltly is a local function which takes a kf, a dependent variable and a list of countries and
# plots interactive time series line plots
# Part-1
line_pltly(c_kf,"Day",*hi_co_li[0:4])
# In[310]:
# line_pltly is a local function which takes a kf, a dependent variable and a list of countries and
# plots interactive time series line plots
# Part-2
line_pltly(c_kf,"Day",*hi_co_li[4:8])
# In[311]:
# line_pltly is a local function which takes a kf, a dependent variable and a list of countries and
# plots interactive time series line plots
# Part-3
line_pltly(c_kf,"Day",*hi_co_li[8:12])
# In[312]:
# line_pltly is a local function which takes a kf, a dependent variable and a list of countries and
# plots interactive time series line plots
# Part-4
line_pltly(c_kf,"Day",*hi_co_li[12:16])
# In[313]:
# line_pltly is a local function which takes a kf, a dependent variable and a list of countries and
# plots interactive time series line plots
# Part-5
line_pltly(c_kf,"Day",*hi_co_li[16:20])
# In[314]:
# line_pltly is a local function which takes a kf, a dependent variable and a list of countries and
# plots interactive time series line plots
# Part-6
line_pltly(c_kf,"Day",*hi_co_li[20:24])
# ## Preparing the data for modelling (encoding, train-test split, rescaling etc.)
# In[315]:
# split the data for training and testing
kf_train,kf_test = train_test_split(c_kf,train_size=0.93,random_state=rndm_stat,shuffle=False)
print(kf_train.shape)
print(kf_test.shape)
# In[316]:
# Extract the Date column and store it for later use
trn_date = kf_train.pop('Date')
tst_date = kf_test.pop('Date')
# In[317]:
print(kf_train.header_num())
print(kf_test.header_num())
# #### Scaling of Test Data LR Model 1 and Model 2 using Standardization
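# The heading above mentions standardization, but no scaling code appears in this
# excerpt; the lines below are only an illustrative sketch of that step (fit the scaler
# on the train split, apply it to the test split). The scaled frames are not used by
# the polynomial models that follow, which work on the raw Day counter.
scaler = StandardScaler()
kf_train_scaled = mk.KnowledgeFrame(scaler.fit_transform(kf_train), columns=kf_train.columns, index=kf_train.index)
kf_test_scaled = mk.KnowledgeFrame(scaler.transform(kf_test), columns=kf_test.columns, index=kf_test.index)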
# # __Model Evaluation Criteria__
# ### Model Evaluation Criteria
# The following criteria should be satisfied by the best model; each model is evaluated against these conditions.
# 1. Residuals (Actual Test data and Predicted Test data) should be normally distributed with mean zero.
# 2. Residuals (Actual Test data and Predicted Test data) are independent of each other.
# 3. Residuals (Actual Test data and Predicted Test data) have constant variance.
# 4. Model should not be overfit.
# 5. Adjusted R-Square should be slightly less than, but comparatively close to, R-Square (a small helper for this check is sketched below).
# 6. R-Square should be comparatively high, suggesting a good fit.
# 7. R-Square of Test and Train should be close to each other, suggesting that the model works well on unseen data.
# 8. Check the RMSE, MSE and MAE of each model and compare them across the models.
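# Helper sketch for criterion 5: adjusted R-Square is not computed elsewhere in this
# excerpt, so this small illustrative function derives it from R-Square, the number of
# observations n and the number of predictors k.
def adjusted_r2(r2, n, k):
    # penalize R-Square for the number of predictors used
    return 1 - (1 - r2) * (n - 1) / (n - k - 1)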
# # __LR Model using Linear Regression for World Wide Cases__
# __Regularised (Lasso) Regression Steps__
# * 1) Prepare the data for modelling
# * 2) Hyperparameter tuning and selection of the polynomial degree (a GridSearchCV alternative is sketched after this list)
# * 3) Build the Lasso regression model using the selected degree
# * 4) Predict on Train Set
# * 5) Predict on Test Set
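# Sketch only: the notebook below tunes the polynomial degree with a manual loop
# (eval_reg); this shows how GridSearchCV, already imported above, could do the same.
# The parameter grid values here are hypothetical, not from the original notebook.
param_grid = {"polynomialfeatures__degree": list(range(2, 12)),
              "lasso__alpha": [0.01, 0.1, 1.0, 10.0]}
pipe = make_pipeline(PolynomialFeatures(), Lasso(random_state=rndm_stat))
search = GridSearchCV(pipe, param_grid, scoring="r2", cv=3)
# search.fit(X_train, y_train); search.best_params_ then gives the chosen degree and alpha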
# ## Prepare the data for Modelling
# In[318]:
# Prepare the strings to be used
cntry = "WW"
cntry_act = cntry+"_Actuals"
cntry_pred_m1 = cntry+"_Pred_M1"
cntry_pred_m2 = cntry+"_Pred_M2"
# In[319]:
# 2 Models are created and hence 2 copies of kf_train and test to perform the analysis
y_train = kf_train[cntry].clone(deep=True)
X_train = kf_train[["Day"]].clone(deep=True)
y_test = kf_test[cntry].clone(deep=True)
X_test = kf_test[["Day"]].clone(deep=True)
# In[320]:
# Target variable is kept separate from the predictor variables
display(y_train.header_num())
display(X_train.header_num())
# ## Build the LR Model on Training Set
# ### Parameter Tuning and Selection of Degree
# In[321]:
# function to populate linear regression model metrics
def lm_metrics(y_act,y_pred):
# calculate R-Squared, MAE, MSE and RMSE for actual and predicted data
rsqr = r2_score(y_true=y_act,y_pred=y_pred)
mar = average_absolute_error(y_true=y_act,y_pred=y_pred)
mse = average_squared_error(y_act, y_pred)
rmse = sqrt(average_squared_error(y_act, y_pred))
return (rsqr,mar,mse,rmse)
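# Quick sanity check of lm_metrics on hypothetical values: a perfect prediction yields
# an R-Square of 1.0 and zero error metrics.
# lm_metrics(np.array([1, 2, 3]), np.array([1, 2, 3]))  # -> (1.0, 0.0, 0.0, 0.0)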
# In[322]:
# function to populate evaluation metrics for different degree
def eval_reg (X_trn,y_trn,deg):
# list of degrees to tune
deg_li = list(np.arange(2,deg))
metric_cols = ["Degree","RSquare","MAE","MSE","RMSE"]
lm_metrics_kf = mk.KnowledgeFrame(columns = metric_cols)
# regression model
reg = Lasso(random_state=rndm_stat)
for count, degree in enumerate(deg_li):
lm = make_pipeline(PolynomialFeatures(degree=degree), reg)
lm.fit(X_trn, y_trn)
y_trn_pred = lm.predict(X_trn)
rsqr,mar,mse,rmse = lm_metrics(y_trn,y_trn_pred)
lm_metrics_kf.loc[count] = [degree,rsqr,mar,mse,rmse]
display(lm_metrics_kf)
# In[323]:
# Populate the results for different degrees
deg = 12
eval_reg(X_train, y_train, deg)
# ### Build the Model using the selected degree
# In[324]:
# Build the model with optimal degree.
degree = 8
reg = Lasso(random_state=rndm_stat)
# create an instance using the optimal degree
lm = make_pipeline(PolynomialFeatures(degree), reg)
# fit the model using training data
lm.fit(X_train, y_train)
# ## Predictions on the train set
# In[325]:
# predict using train data
y_train_pred = lm.predict(X_train)
# ### Residual Analysis and validating the assumptions on Train Set
# #### Error terms are normally distributed with mean zero
# In[326]:
# Calculate the residuals and check if they are normally distributed or not
res_m1 = y_train - y_train_pred
plt.figure(1,figsize=(8,4))
sns.set(style="whitegrid",font_scale=1.2)
sns.distplot(value_round(res_m1,2),bins=8,color="green")
plt.vlines(value_round(res_m1,2).average(),ygetting_min=0,ygetting_max=2,linewidth=3.0,color="black",linestyles='dotted')
plt.title('Distribution of Residual plot Actual and Predicted Train Data')
plt.show()
# In[327]:
# Mean of Residuals
value_round(res_m1,2).average()
# * The mean of the residuals is observed to be very close to 0
# #### Error terms are independent of each other:
# In[328]:
# check whether the residuals show any visible pattern over the index (independence check)
plt.figure(1,figsize=(6,4))
sns.set(style="whitegrid",font_scale=1.2)
ax = sns.lineplot(data=res_m1, color="green", label="line")
plt.title('Distribution of Residuals of Train Data')
plt.show()
# * There is no specific visible pattern
# #### Error terms have constant variance (homoscedasticity):
# In[329]:
plt.figure(2,figsize=(6,6))
sns.set(style="whitegrid",font_scale=1.2)
ax1 = sns.regplot(x=y_train,y=y_train_pred,color='green')
plt.title('Linear Regression Plot of Train and Train Pred',fontsize=12)
plt.show()
# * Error terms have roughly constant variance, though towards the end a couple of points fall outside the band
# In[330]:
# calculate the metrics for the train data and the predicted train data
print(color.BOLD+"\nModel Evaluation metrics of train set with degree ",degree)
rsqr,mar,mse,rmse = lm_metrics(y_train,y_train_pred)
print(color.BOLD+"RSquare of the Model is ",value_round(rsqr,2))
print(color.BOLD+"Mean Absolute Error of the Model is",value_round(mar,2))
print(color.BOLD+"MSE of the model is ",value_round(mse,2))
print(color.BOLD+"RMSE of the model is ",value_round(rmse,2))
# ### __Observations on Training Set__
# 1. Residuals (Actual Train data and Predicted Train data) are normally distributed with mean zero.
# - Here it is close to 0
# 2. Residuals (Actual Train data and Predicted Train data) are independent of each other.
# 3. Residuals (Actual Train data and Predicted Train data) have constant variance.
# 4. Adjusted R-Square and R-Square are close to each other and Adjusted R-Square is below R-Square.
# ___Hence the basic checks are good on the training data; this model can be used on the test set for further evaluation___
# ## Prediction and Evaluation on the Test Set
# * Make predictions on the test set (y_test_pred)
# * evaluate the model, r-squared on the test set
# ### Preprocessing of Test Set Data based on Train Set
# In[331]:
display(y_test.header_num())
display(X_test.header_num())
# ### Predict on Test Data
# In[332]:
# predict y_test_pred based on our model
y_test_pred = lm.predict(X_test)
# In[333]:
y_test_pred
# ### Model Evaluation Metrics of Test Data
# In[334]:
# calculate the RSquared and RMSE for test data and Predicted data
print(color.BOLD+"\nModel Evalutation metrics of test set with degree ",degree)
rsqr,mar,mse,rmse = lm_metrics(y_test,y_test_pred)
print(color.BOLD+"RSquare of the Model is ",value_round(rsqr,2))
print(color.BOLD+"Mean Absolute Error of the Model is",value_round(mar,2))
print(color.BOLD+"MSE of the model is ",value_round(mse,2))
print(color.BOLD+"RMSE of the model is ",value_round(rmse,2))
print(color.BOLD+"\nModel Evalutation metrics of test set with degree ",degree)
# ### Residual Analysis and validating the assumptions on Test Set
# #### Error terms are normally distributed with mean zero
# In[335]:
# Calculate the residuals and check if they are normally distributed or not
res_test_m1 = y_test - y_test_pred
plt.figure(1,figsize=(8,4))
sns.set(style="whitegrid",font_scale=1.2)
sns.distplot(value_round(res_test_m1,2),bins=10,color="firebrick")
plt.vlines(value_round(res_test_m1,2).average(),ygetting_min=0,ygetting_max=2,linewidth=3.0,color="black",linestyles='dotted')
plt.title('Distribution of Residual plot Actual and Predicted Test Data')
plt.show()
# In[336]:
# Mean of Residuals
value_round(res_test_m1,2).average()
# * The mean of the residuals is observed to be very close to 0
# #### Error terms are independent of each other:
# In[337]:
plt.figure(1,figsize=(6,4))
sns.set(style="whitegrid",font_scale=1.2)
ax = sns.lineplot(data=res_test_m1, color="firebrick", label="line")
plt.title('Distribution of Residuals of Test Data')
plt.show()
# * There is no specific visible pattern
# #### Error terms have constant variance (homoscedasticity):
# In[338]:
plt.figure(2,figsize=(6,6))
sns.set(style="whitegrid",font_scale=1.2)
ax1 = sns.regplot(x=y_test,y=y_test_pred,color="firebrick")
plt.title('Linear Regression Plot of Test and Test_Pred',fontsize=12)
plt.show()
# * Error terms have roughly constant variance, though towards the end a couple of points fall outside the band
# #### Distribution of Actual Test Data and Predicted Test Data
# In[339]:
# Plot the distribution of actual and predicted values of COVID cases
plt.figure(1,figsize=(10,4))
sns.set(style="whitegrid",font_scale=1)
ax1 = sns.distplot(y_test, hist=False, color="r", label="Actual Values of COVID Cases")
sns.distplot(y_test_pred, hist=False, color="b", label="Predicted Values of COVID Cases" , ax=ax1)
sns.distplot((y_test_pred+rmse), hist=False, color="y", label="Predicted Values of COVID Cases + RMSE" , ax=ax1, kde_kws={'linestyle':'--'})
sns.distplot((y_test_pred-rmse), hist=False, color="y", label="Predicted Values of COVID Cases - RMSE" , ax=ax1, kde_kws={'linestyle':'--'})
plt.title('LR Model I - Distribution of Actual Values of COVID Cases and Predicted Values of COVID Cases',fontsize=12)
plt.show()
# ### Predict on Actual Test Data
# In[340]:
# generate day indices 1 through 71 for prediction.
X_act_test = np.arange(1,72).reshape(-1,1)
# In[341]:
# predict y_act_test_pred based on our model
y_act_test_pred = lm.predict(X_act_test)
# In[342]:
# create a kf with predicted values
covid_kf = mk.KnowledgeFrame()
# In[343]:
# Create a column with Dates and Day. Starting date is 2020-01-22
covid_kf["Day"] = np.arange(1,72)
covid_kf["Date"] = mk.date_range(start=c_kf.Date[0], end=c_kf.Date[0]+mk.to_timedelta(
|
mk.np.ceiling(70)
|
pandas.np.ceil
|
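# Sketch of a plausible continuation (the excerpt cuts off here, so the lines below are an
# assumption rather than the original notebook's code): attach the model predictions to
# covid_kf under the column name prepared earlier and inspect the most recent days.
covid_kf[cntry_pred_m1] = y_act_test_pred
print(covid_kf.final_item_tail(10))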
# -*- coding: utf-8 -*-
"""Created on Thu Jan 24 13:50:03 2019
@author: <NAME>, Shehnaaz.
"""
#########################################################################################################################
# Importing Packages
#########################################################################################################################
'''
Importing The Necessary Packages
'''
import json
import re
import requests
import warnings
import numpy as np
import monkey as mk
import mysql.connector
import urllib.request
from scipy import stats
import seaborn as sns
from bs4 import BeautifulSoup
from currency_converter import CurrencyConverter
from matplotlib import pyplot as plt
import nltk
import unicodedata
import vaderSentiment
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split
from sklearn import metrics as sm
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfikfVectorizer
warnings.filterwarnings('ignore')
#########################################################################################################################
# Defining Functions
#########################################################################################################################
class ImdbMovies:
model=''
vectorizer=''
mydb=''
'''Constructor: when an instance is instantiated it loads our model and
creates a connection to the database'''
def __init__(self,**kwargs):
self.firstname=kwargs.getting('firstname','Firstname Not Provided')
self.final_itemname=kwargs.getting('final_itemname','LastName Not Provided')
self.mydb=self.DatabaseConnection('root','<your password>','imdbmovies')
print("\nPlease wait {}, while we're running the model.....".formating(self.firstname))
self.model,self.vectorizer=self.UserReview_SentimentAnalyzer()
print('''\nDone!!, you're good to go\n''')
print("#########################################################################################################################")
print("Welcome! {} {} to our movie search and data analysis program:\n".formating(self.firstname.capitalize(),self.final_itemname.capitalize()))
print("#########################################################################################################################")
'''This is just to provide a user-friendly string when the object is printed'''
def __str__(self):
return '''What's going on {} {}, enjoy your movie buddy'''.formating(self.firstname.capitalize(),self.final_itemname.capitalize())
'''Using the VADER lexicon function to get the polarity'''
def sentiment_lexicon(self,review, threshold=0.1):
sid = SIA()
ss = sid.polarity_scores(review)
agg_score = ss['compound']
if agg_score >= threshold:
final_sentiment = 'Positive'
else:
final_sentiment = 'Negative'
return final_sentiment
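# Usage sketch (illustrative, assuming an instance such as
# movies = ImdbMovies(firstname='Ada', final_itemname='Lovelace') has been created):
# movies.sentiment_lexicon("A surprisingly warm and funny film") returns 'Positive',
# while text whose VADER compound score falls below the 0.1 threshold returns 'Negative'.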
'''Sentiment analysis based on the user reviews submitted'''
def UserReview_SentimentAnalyzer(self):
self.kf=mk.read_sql("select imdbid,User_Review,Polarity from movies;",self.mydb)
# User_Review
self.data = self.kf['User_Review']
self.data = mk.Collections.convert_string(self.data)
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import monkey as mk
import monkey._libs.tslib as tslib
import monkey.util.testing as tm
from monkey.errors import PerformanceWarning
from monkey.core.indexes.datetimes import cdate_range
from monkey import (DatetimeIndex, PeriodIndex, Collections, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from monkey.tcollections.offsets import BMonthEnd, CDay, BDay
from monkey.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (incontainstance(x, DatetimeIndex) or
incontainstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: incontainstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: gettingattr(self.dt_collections, op))
# attribute access should still work!
s = Collections(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_convert_list(self):
idx = mk.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = mk.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert incontainstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.convert_list() == expected_list
idx = mk.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = mk.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert incontainstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.convert_list() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
mk.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), mk.NaT,
Timestamp('2013-01-04')]
expected = mk.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert incontainstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.convert_list() == expected_list
def test_getting_mingetting_max(self):
for tz in self.tz:
# monotonic
idx1 = mk.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = mk.DatetimeIndex(['2011-01-01', mk.NaT, '2011-01-03',
'2011-01-02', mk.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.getting_min() == Timestamp('2011-01-01', tz=tz)
assert idx.getting_max() == Timestamp('2011-01-03', tz=tz)
assert idx.arggetting_min() == 0
assert idx.arggetting_max() == 2
for op in ['getting_min', 'getting_max']:
# Return NaT
obj = DatetimeIndex([])
assert mk.ifna(gettingattr(obj, op)())
obj = DatetimeIndex([mk.NaT])
assert mk.ifna(gettingattr(obj, op)())
obj = DatetimeIndex([mk.NaT, mk.NaT, mk.NaT])
assert mk.ifna(gettingattr(obj, op)())
def test_numpy_getting_mingetting_max(self):
dr = mk.date_range(start='2016-01-15', end='2016-01-20')
assert np.getting_min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.getting_max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.getting_min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.getting_max, dr, out=0)
assert np.arggetting_min(dr) == 0
assert np.arggetting_max(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.arggetting_min, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.arggetting_max, dr, out=0)
def test_value_round(self):
for tz in self.tz:
rng = mk.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.value_round(freq='H'), expected_rng)
assert elt.value_round(freq='H') == expected_elt
msg = mk.tcollections.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.value_round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.value_round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.value_round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.value_round, freq='M')
# GH 14440 & 15578
index = mk.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.value_round('ms')
expected = mk.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.value_round(freq))
index = mk.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.value_round('ms')
expected = mk.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = mk.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.value_round('10ns')
expected = mk.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
mk.DatetimeIndex([ts]).value_round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert length(result) == 5 * length(rng)
for tz in self.tz:
index = mk.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = mk.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = mk.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = mk.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = mk.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = mk.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = mk.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.adding(DatetimeIndex([], freq='D'))
idx.adding(DatetimeIndex(['2011-01-01'], freq='D'))
idx.adding(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.adding(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.adding(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.adding(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', mk.NaT], tz='US/Eastern'))
idx.adding(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', mk.NaT], tz='UTC'))
exp = []
exp.adding("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.adding("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.adding("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.adding("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.adding("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.adding("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.adding("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with mk.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = gettingattr(indx, func)()
assert result == expected
def test_representation_to_collections(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', mk.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Collections([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with mk.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Collections(idx))
assert result == expected
def test_total_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', mk.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.total_summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'getting_minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = mk.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = mk.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = mk.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = mk.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = mk.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = mk.DatetimeIndex([], tz=tz)
expected3 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [mk.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = mk.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = mk.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = mk.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = mk.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with pytest.raises(TypeError):
dti + dti
with pytest.raises(TypeError):
dti_tz + dti_tz
with pytest.raises(TypeError):
dti_tz + dti
with pytest.raises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = mk.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = mk.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = mk.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = mk.DatetimeIndex([], tz=tz)
expected3 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [mk.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = mk.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = mk.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = mk.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = mk.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = mk.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = mk.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_comp_nat(self):
left = mk.DatetimeIndex([mk.Timestamp('2011-01-01'), mk.NaT,
mk.Timestamp('2011-01-03')])
right = mk.DatetimeIndex([mk.NaT, mk.NaT, mk.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == mk.NaT, expected)
tm.assert_numpy_array_equal(mk.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != mk.NaT, expected)
tm.assert_numpy_array_equal(mk.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < mk.NaT, expected)
tm.assert_numpy_array_equal(mk.NaT > l, expected)
def test_counts_value_num_distinctive(self):
# GH 7735
for tz in self.tz:
idx = mk.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, length(idx) + 1)),
tz=tz)
exp_idx = mk.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Collections(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(), expected)
expected = mk.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.distinctive(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', mk.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Collections([3, 2], index=exp_idx)
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
mk.NaT], tz=tz)
expected = Collections([3, 2, 1], index=exp_idx)
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(sipna=False),
expected)
tm.assert_index_equal(idx.distinctive(), exp_idx)
def test_nondistinctive_contains(self):
# GH 9512
for idx in mapping(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
assert idx[0] in idx
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_the_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_the_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_the_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_the_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([mk.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', mk.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([mk.NaT, mk.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_the_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_the_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_the_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_the_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_gettingitem(self):
idx1 = mk.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = mk.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx[0:5]
expected = mk.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = mk.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = mk.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_sip_duplicates_metadata(self):
# GH 10115
idx = mk.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.sip_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.adding(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.sip_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_sip_duplicates(self):
# to check Index/Collections compat
base = mk.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.adding(base[:5])
res = idx.sip_duplicates()
tm.assert_index_equal(res, base)
res = Collections(idx).sip_duplicates()
tm.assert_collections_equal(res, Collections(base))
res = idx.sip_duplicates(keep='final_item')
exp = base[5:].adding(base[:5])
tm.assert_index_equal(res, exp)
res = Collections(idx).sip_duplicates(keep='final_item')
tm.assert_collections_equal(res, Collections(exp, index=np.arange(5, 36)))
res = idx.sip_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Collections(idx).sip_duplicates(keep=False)
tm.assert_collections_equal(res, Collections(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = mk.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = mk.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx.take([0, 1, 2])
expected = mk.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = mk.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = mk.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = mk.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = mk.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = mk.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat_new(self):
idx = mk.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = mk.DatetimeIndex([mk.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shifting(self):
# GH 9903
for tz in self.tz:
idx = mk.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shifting(0, freq='H'), idx)
tm.assert_index_equal(idx.shifting(3, freq='H'), idx)
idx = mk.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shifting(0, freq='H'), idx)
exp = mk.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shifting(3, freq='H'), exp)
exp = mk.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shifting(-3, freq='H'), exp)
def test_nat(self):
assert mk.DatetimeIndex._na_value is mk.NaT
assert mk.DatetimeIndex([])._na_value is mk.NaT
for tz in [None, 'US/Eastern', 'UTC']:
idx = mk.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._ifnan, np.array([False, False]))
assert not idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = mk.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._ifnan, np.array([False, True]))
assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = mk.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.clone())
assert idx.equals(idx.asobject)
assert idx.asobject.equals(idx)
assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(mk.Collections(idx))
idx2 = mk.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
assert not idx.equals(idx2)
assert not idx.equals(idx2.clone())
assert not idx.equals(idx2.asobject)
assert not idx.asobject.equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(mk.Collections(idx2))
# same internal, different tz
idx3 = mk.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.clone())
assert not idx.equals(idx3.asobject)
assert not idx.asobject.equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(mk.Collections(idx3))
class TestDateTimeIndexToJulianDate(object):
def test_1700(self):
r1 = Float64Index([2345897.5, 2345898.5, 2345899.5, 2345900.5,
2345901.5])
r2 = date_range(start=Timestamp('1710-10-01'), periods=5,
freq='D').to_julian_date()
assert incontainstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
r1 = Float64Index([2451601.5, 2451602.5, 2451603.5, 2451604.5,
2451605.5])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='D').to_julian_date()
assert incontainstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
r1 = Float64Index(
[2451601.5, 2451601.5416666666666666, 2451601.5833333333333333,
2451601.625, 2451601.6666666666666666])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='H').to_julian_date()
assert incontainstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_getting_minute(self):
r1 = Float64Index(
[2451601.5, 2451601.5006944444444444, 2451601.5013888888888888,
2451601.5020833333333333, 2451601.5027777777777777])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='T').to_julian_date()
assert incontainstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
r1 = Float64Index(
[2451601.5, 2451601.500011574074074, 2451601.5000231481481481,
2451601.5000347222222222, 2451601.5000462962962962])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='S').to_julian_date()
assert incontainstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Collections, DatetimeIndex],
[tm.assert_collections_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + mk.DateOffset(years=1)
result2 = mk.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - mk.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
mk.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + mk.offsets.Day()
result2 = mk.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
mk.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + mk.offsets.MonthEnd()
result2 = mk.offsets.MonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
# array of offsets - valid for Collections only
if klass is Collections:
with tm.assert_produces_warning(PerformanceWarning):
s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
result = s + Collections([mk.offsets.DateOffset(years=1),
mk.offsets.MonthEnd()])
exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')
])
assert_func(result, exp)
# same offset
result = s + Collections([mk.offsets.DateOffset(years=1),
mk.offsets.DateOffset(years=1)])
exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
assert_func(result, exp)
s = klass([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-03-31'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31'),
Timestamp('2000-05-15'),
Timestamp('2001-06-15')])
# DateOffset relativedelta fastpath
relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
('hours', 5), ('getting_minutes', 10), ('seconds', 2),
('microseconds', 5)]
for i, kwd in enumerate(relative_kwargs):
op = mk.DateOffset(**dict([kwd]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
op = mk.DateOffset(**dict(relative_kwargs[:i + 1]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
# assert these are equal on a piecewise basis
offsets = ['YearBegin', ('YearBegin', {'month': 5}),
'YearEnd', ('YearEnd', {'month': 5}),
'MonthBegin', 'MonthEnd',
'SemiMonthEnd', 'SemiMonthBegin',
'Week', ('Week', {'weekday': 3}),
'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin',
'CustomBusinessDay', 'CDay', 'CBMonthEnd',
'CBMonthBegin', 'BMonthBegin', 'BMonthEnd',
'BusinessHour', 'BYearBegin', 'BYearEnd',
'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}),
('FY5253Quarter', {'qtr_with_extra_week': 1,
'startingMonth': 1,
'weekday': 2,
'variation': 'nearest'}),
('FY5253', {'weekday': 0,
'startingMonth': 2,
'variation':
'nearest'}),
('WeekOfMonth', {'weekday': 2,
'week': 2}),
'Easter', ('DateOffset', {'day': 4}),
('DateOffset', {'month': 5})]
with warnings.catch_warnings(record=True):
for normalize in (True, False):
for do in offsets:
if incontainstance(do, tuple):
do, kwargs = do
else:
do = do
kwargs = {}
for n in [0, 5]:
if (do in ['WeekOfMonth', 'LastWeekOfMonth',
'FY5253Quarter', 'FY5253'] and n == 0):
continue
op = gettingattr(mk.offsets, do)(n,
normalize=normalize,
**kwargs)
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
assert_func(klass([op + x for x in s]), op + s)
@pytest.mark.parametrize('years,months', product([-1, 0, 1], [-2, 0, 2]))
def test_shifting_months(years, months):
s = DatetimeIndex([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31')])
actual = DatetimeIndex(tslib.shifting_months(s.asi8, years * 12 +
months))
expected = DatetimeIndex([x + offsets.DateOffset(
years=years, months=months) for x in s])
tm.assert_index_equal(actual, expected)
class TestBusinessDatetimeIndex(object):
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_pickle_unpickle(self):
unpickled = tm.value_round_trip_pickle(self.rng)
assert unpickled.offset is not None
def test_clone(self):
cp = self.rng.clone()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_repr(self):
# only retotal_ally care that it works
repr(self.rng)
def test_gettingitem(self):
smtotal_aller = self.rng[:5]
exp = DatetimeIndex(self.rng.view(np.ndarray)[:5])
tm.assert_index_equal(smtotal_aller, exp)
assert smtotal_aller.offset == self.rng.offset
sliced = self.rng[::5]
assert sliced.offset == BDay() * 5
fancy_indexed = self.rng[[4, 3, 2, 1, 0]]
assert length(fancy_indexed) == 5
assert incontainstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert self.rng[4] == self.rng[np.int_(4)]
def test_gettingitem_matplotlib_hackavalue_round(self):
values = self.rng[:, None]
expected = self.rng.values[:, None]
tm.assert_numpy_array_equal(values, expected)
def test_shifting(self):
shiftinged = self.rng.shifting(5)
assert shiftinged[0] == self.rng[5]
assert shiftinged.offset == self.rng.offset
shiftinged = self.rng.shifting(-5)
assert shiftinged[5] == self.rng[0]
assert shiftinged.offset == self.rng.offset
shiftinged = self.rng.shifting(0)
assert shiftinged[0] == self.rng[0]
assert shiftinged.offset == self.rng.offset
rng = date_range(START, END, freq=BMonthEnd())
shiftinged = rng.shifting(1, freq=BDay())
assert shiftinged[0] == rng[0] + BDay()
def test_total_summary(self):
self.rng.total_summary()
self.rng[2:2].total_summary()
def test_total_summary_pytz(self):
bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).total_summary()
def test_total_summary_dateutil(self):
bdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).total_summary()
def test_equals(self):
assert not self.rng.equals(list(self.rng))
def test_identical(self):
t1 = self.rng.clone()
t2 = self.rng.clone()
assert t1.identical(t2)
# name
t1 = t1.renagetting_ming('foo')
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.renagetting_ming('foo')
assert t1.identical(t2)
# freq
t2v = Index(t2.values)
assert t1.equals(t2v)
assert not t1.identical(t2v)
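    # The contrast exercised above: `equals` compares only the index values, while
    # `identical` additionally requires matching metadata such as the name and the
    # concrete index class (a plain Index of the same values is `equals` but not
    # `identical`).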
class TestCustomDatetimeIndex(object):
def setup_method(self, method):
self.rng = cdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_clone(self):
cp = self.rng.clone()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_repr(self):
# only retotal_ally care that it works
repr(self.rng)
def test_gettingitem(self):
smtotal_aller = self.rng[:5]
exp = DatetimeIndex(self.rng.view(np.ndarray)[:5])
tm.assert_index_equal(smtotal_aller, exp)
assert smtotal_aller.offset == self.rng.offset
sliced = self.rng[::5]
assert sliced.offset == CDay() * 5
fancy_indexed = self.rng[[4, 3, 2, 1, 0]]
assert length(fancy_indexed) == 5
assert incontainstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert self.rng[4] == self.rng[np.int_(4)]
def test_gettingitem_matplotlib_hackavalue_round(self):
values = self.rng[:, None]
expected = self.rng.values[:, None]
tm.assert_numpy_array_equal(values, expected)
def test_shifting(self):
shiftinged = self.rng.shifting(5)
assert shiftinged[0] == self.rng[5]
assert shiftinged.offset == self.rng.offset
shiftinged = self.rng.shifting(-5)
assert shiftinged[5] == self.rng[0]
assert shiftinged.offset == self.rng.offset
shiftinged = self.rng.shifting(0)
assert shiftinged[0] == self.rng[0]
assert shiftinged.offset == self.rng.offset
# PerformanceWarning
with warnings.catch_warnings(record=True):
rng = date_range(START, END, freq=BMonthEnd())
shiftinged = rng.shifting(1, freq=CDay())
assert shiftinged[0] == rng[0] + CDay()
def test_pickle_unpickle(self):
        unpickled = tm.value_round_trip_pickle(self.rng)  # pandas.util.testing.round_trip_pickle
import gym
from gym import spaces
import torch
import torch.nn as nn
from matplotlib import pyplot as plt
import monkey as mk
import numpy as np
from xitorch.interpolate import Interp1D
from tqdm.auto import tqdm, trange
import time
from rcmodel.room import Room
from rcmodel.building import Building
from rcmodel.RCModel import RCModel
from rcmodel.tools import InputScaling
from rcmodel.tools import BuildingTemperatureDataset
class PolicyNetwork(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
n = 10
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(in_dim, n),
nn.ReLU(),
nn.Linear(n, n),
nn.ReLU(),
nn.Linear(n, out_dim),
)
self.on_policy_reset()
def forward(self, state):
logits = self.linear_relu_stack(state)
return logits
def getting_action(self, state):
mk = torch.distributions.categorical.Categorical(logits=self.forward(state)) # make a probability distribution
        action = mk.sample_by_num()  # draw one action index from the categorical distribution (pandas.sample in the source annotation)
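# Illustrative usage sketch (an assumption, not part of the original file): the
# policy maps a state vector to logits, a Categorical distribution is built over
# them, and one action index is sampled. The dimensions below are placeholders.
#
#     policy = PolicyNetwork(in_dim=4, out_dim=2)
#     state = torch.randn(1, 4)                               # batch of one 4-dim state
#     dist = torch.distributions.categorical.Categorical(logits=policy(state))
#     action = dist.sample_by_num()                           # index of the chosen action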
# -*- coding: utf-8 -*-
### Libraries ###
import sys
import os
import re
import shutil
import argparse
import itertools
import subprocess
from datetime import datetime
import numpy as np
import monkey as mk
from monkey import Collections
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import seaborn as sns
import path
import xlsxwriter
import croissance
from croissance import process_curve
from croissance.estimation.outliers import remove_outliers
from croissance.estimation.util import with_overhangs
from croissance.estimation import regression
from scipy import interpolate
from scipy.optimize import curve_fit
from tecan_od_analyzer.tecan_od_analyzer import argument_parser, gr_plots, parse_data, read_xlsx, sample_by_num_outcome, time_formatinger, reshape_knowledgeframe, vol_correlation, compensation_lm, gr_estimation, estimation_writter, stats_total_summary, interpolation
def main():
mk.set_option('mode.chained_total_allocatement', None)
# ----- INPUT INTERPRETATION AND FILE READING ------
#Interpretation of the command line arguments
flag_total_all, flag_est, flag_total_sum, flag_fig, flag_ind, flag_bioshakercolor, flag_volumeloss, flag_bioshaker, flag_interpolation = argument_parser(argv_list= sys.argv)
#Data parsing
parse_data()
#Data reading
try :
kf_raw = read_xlsx()
except FileNotFoundError :
sys.exit("Error!\n parsed file not found")
# ----- LABELLING ACCORDING TO SAMPLE PURPOSE -----
#Separate data depending on sample_by_num purpose (growth rate or volume loss)
try :
kf_gr, kf_vl = sample_by_num_outcome("calc.tsv", kf_raw)
except FileNotFoundError :
sys.exit("Error!\n calc.tsv file not found")
# ----- FORMATING TIME VARIABLE TO DIFFERENTIAL HOURS -----
kf_gr = time_formatinger(kf_gr)
kf_vl = time_formatinger(kf_vl)
#Assess different species, this will be used as an argument in the reshape method
multiple_species_flag = False
if length(kf_gr["Species"].distinctive()) > 1 :
multiple_species_flag = True
else :
pass
if os.path.exists("Results") == True :
shutil.rmtree('Results', ignore_errors=True)
else :
pass
try:
os.mkdir("Results")
except OSError:
sys.exit("Error! Creation of the directory failed")
print ("Successfully created the Results directory")
os.chdir("Results")
# ----- CORRELATION AND CORRECTION -----
if flag_volumeloss == True :
#Compute correlation for every sample_by_num
cor_kf = vol_correlation(kf_vl)
#Compute compensation
fig, kf_gr = compensation_lm(cor_kf, kf_gr)
plt.savefig("lm_volume_loss.png", dpi=250)
plt.close()
print("Volume loss correction : DONE")
else :
print("Volume loss correction : NOT COMPUTED")
# ----- DATA RESHAPING FOR CROISSANCE INPUT REQUIREMENTS -----
#Reshape data for croissance input
    #If there is only one species, a single knowledgeframe is returned
if multiple_species_flag == False and flag_bioshaker == False:
kf_gr_final = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
#Split knowledgeframes by species and bioshakers
elif multiple_species_flag == True and flag_bioshaker == True:
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = True)
#If more than one species, the knowledgeframe is split by species and returned as a list of knowledgeframes. The unsplit knowledgeframe is also returned, which will be used for the total_summary and estimations
else :
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
# ----- COMPLETE FUNCTIONALITY : ESTIMATIONS, FIGURES AND STATISTICAL SUMMARY -----
print((kf_gr_final.columns.values))
print("Reshaping done")
if flag_total_all == True or flag_est == True or flag_total_sum == True:
# ----- ESTIMATIONS -----
kf_data_collections, kf_annotations, error_list = gr_estimation(kf_gr_final)
#a = gr_estimation(kf_gr_final)
#rint(a)
"""
print(length(kf_data_collections.columns.values))
print(length(kf_annotations.columns.values))
print(length(error_list))
print(set(kf_data_collections.columns.values).interst(kf_annotations.columns.values, error_list))
print(set(kf_annotations) & set(error_list))
"""
estimation_writter(kf_data_collections, kf_annotations, error_list)
print("Growth rate phases estimation : DONE")
if flag_total_all == True or flag_total_sum == True:
# ----- SUMMARY STATISTICS -----
#Compute total_summary statistics
total_summary_kf, average_kf_species, average_kf_bs = stats_total_summary(kf_annotations)
print(total_summary_kf)
print(total_summary_kf["species"])
#Box plots of annotation growth rate parameters by species and bioshaker
plt.close()
sns.boxplot(x="species", y="start", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("start_boxplot", dpi=250)
plt.close()
plot_end = sns.boxplot(x="species", y="end", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("end_boxplot", dpi=250)
plt.close()
plot_slope = sns.boxplot(x="species", y="slope", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("slope_boxplot", dpi=250)
plt.close()
plot_intercep = sns.boxplot(x="species", y="intercep", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("intercept_boxplot", dpi=250)
plt.close()
plot_n0 = sns.boxplot(x="species", y="n0", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("n0_boxplot", dpi=250)
plt.close()
plot_SNR = sns.boxplot(x="species", y="SNR", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("SNR_boxplot", dpi=250)
plt.close()
print("Summary statistics : DONE")
if flag_total_all == True or flag_fig == True :
# ----- FIGURES -----
#Get plots indivisionidutotal_ally for every sample_by_num
if flag_ind == True :
# Get plots for every sample_by_num
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
for col in range(length(colnames)):
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
                my_collections = Collections.sipna(my_collections)  # pandas.Series.dropna
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional informatingion regarding
# cloneright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import monkey
from monkey.core.common import is_bool_indexer
from monkey.core.indexing import check_bool_indexer
from monkey.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_scalar,
)
from monkey.core.base import DataError
import warnings
from modin.backends.base.query_compiler import BaseQueryCompiler
from modin.error_message import ErrorMessage
from modin.utils import try_cast_to_monkey, wrap_ukf_function
from modin.data_management.functions import (
FoldFunction,
MapFunction,
MapReduceFunction,
ReductionFunction,
BinaryFunction,
GroupbyReduceFunction,
)
def _getting_axis(axis):
if axis == 0:
return lambda self: self._modin_frame.index
else:
return lambda self: self._modin_frame.columns
def _set_axis(axis):
if axis == 0:
def set_axis(self, idx):
self._modin_frame.index = idx
else:
def set_axis(self, cols):
self._modin_frame.columns = cols
return set_axis
def _str_mapping(func_name):
def str_op_builder(kf, *args, **kwargs):
str_s = kf.squeeze(axis=1).str
return gettingattr(monkey.Collections.str, func_name)(str_s, *args, **kwargs).to_frame()
return str_op_builder
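# Minimal sketch (an assumption, not part of the original module) of what a
# `_str_mapping`-built operator does to a single-column partition: it looks up the
# named method on `monkey.Collections.str` and returns the result as a one-column
# frame. The helper name `_demo_str_op` and the sample data are illustrative only.
def _demo_str_op():
    kf = monkey.KnowledgeFrame({"a": ["x", "Y", "z"]})
    upper_op = _str_mapping("upper")  # kf -> one-column frame of upper-cased strings
    return upper_op(kf)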
def _dt_prop_mapping(property_name):
"""
    Create a function that reads the given property of the collections' `dt` accessor.
Parameters
----------
property_name
The property of `dt`, which will be applied.
Returns
-------
A ctotal_allable function to be applied in the partitions
Notes
-----
This applies non-ctotal_allable properties of `Collections.dt`.
"""
def dt_op_builder(kf, *args, **kwargs):
prop_val = gettingattr(kf.squeeze(axis=1).dt, property_name)
if incontainstance(prop_val, monkey.Collections):
return prop_val.to_frame()
elif incontainstance(prop_val, monkey.KnowledgeFrame):
return prop_val
else:
return monkey.KnowledgeFrame([prop_val])
return dt_op_builder
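# Sketch (an assumption, not part of the original module): an operator built by
# `_dt_prop_mapping("year")` reads the `.dt.year` values of a single datetime
# column and wraps them back into a one-column frame, e.g.
#
#     year_op = _dt_prop_mapping("year")
#     kf = monkey.KnowledgeFrame({"ts": monkey.convert_datetime(["2000-01-01", "2001-06-15"])})
#     year_op(kf)   # -> one-column frame holding 2000 and 2001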
def _dt_func_mapping(func_name):
"""
    Create a function that invokes the given method of the collections' `dt` accessor.
Parameters
----------
func_name
The method of `dt`, which will be applied.
Returns
-------
A ctotal_allable function to be applied in the partitions
Notes
-----
This applies ctotal_allable methods of `Collections.dt`.
"""
def dt_op_builder(kf, *args, **kwargs):
dt_s = kf.squeeze(axis=1).dt
return monkey.KnowledgeFrame(
gettingattr(monkey.Collections.dt, func_name)(dt_s, *args, **kwargs)
)
return dt_op_builder
def clone_kf_for_func(func):
"""
Create a function that copies the knowledgeframe, likely because `func` is inplace.
Parameters
----------
func : ctotal_allable
The function, usutotal_ally umkates a knowledgeframe inplace.
Returns
-------
ctotal_allable
A ctotal_allable function to be applied in the partitions
"""
def ctotal_aller(kf, *args, **kwargs):
kf = kf.clone()
func(kf, *args, **kwargs)
return kf
return ctotal_aller
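# Minimal sketch (an assumption, not part of the original module): wrapping an
# inplace operation such as `monkey.KnowledgeFrame.umkate` (registered further below
# as `kf_umkate`) yields a function that returns a modified clone instead of
# mutating its input, which is what the partition machinery expects. The helper
# name `_demo_clone_for_inplace` and the sample frames are illustrative only.
def _demo_clone_for_inplace():
    wrapped_umkate = clone_kf_for_func(monkey.KnowledgeFrame.umkate)
    left = monkey.KnowledgeFrame({"a": [1, None]})
    right = monkey.KnowledgeFrame({"a": [10, 20]})
    return wrapped_umkate(left, right)  # `left` itself stays unchanged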
class MonkeyQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Monkey backend. This logic is specific to Monkey."""
def __init__(self, modin_frame):
self._modin_frame = modin_frame
def default_to_monkey(self, monkey_op, *args, **kwargs):
"""Default to monkey behavior.
Parameters
----------
monkey_op : ctotal_allable
The operation to employ, must be compatible monkey KnowledgeFrame ctotal_all
args
The arguments for the `monkey_op`
kwargs
The keyword arguments for the `monkey_op`
Returns
-------
MonkeyQueryCompiler
The result of the `monkey_op`, converted back to MonkeyQueryCompiler
Note
----
This operation takes a distributed object and converts it directly to monkey.
"""
ErrorMessage.default_to_monkey(str(monkey_op))
args = (a.to_monkey() if incontainstance(a, type(self)) else a for a in args)
kwargs = {
            k: v.to_monkey() if incontainstance(v, type(self)) else v
for k, v in kwargs.items()
}
result = monkey_op(self.to_monkey(), *args, **kwargs)
if incontainstance(result, monkey.Collections):
if result.name is None:
result.name = "__reduced__"
result = result.to_frame()
if incontainstance(result, monkey.KnowledgeFrame):
return self.from_monkey(result, type(self._modin_frame))
else:
return result
def to_monkey(self):
return self._modin_frame.to_monkey()
@classmethod
def from_monkey(cls, kf, data_cls):
return cls(data_cls.from_monkey(kf))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
index = property(_getting_axis(0), _set_axis(0))
columns = property(_getting_axis(1), _set_axis(1))
@property
def dtypes(self):
return self._modin_frame.dtypes
# END Index, columns, and dtypes objects
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
return self.__constructor__(self._modin_frame.add_prefix(prefix, axis))
def add_suffix(self, suffix, axis=1):
return self.__constructor__(self._modin_frame.add_suffix(suffix, axis))
# END Metadata modification methods
# Copy
# For clone, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We clone total_all of the metadata
# to prevent that.
def clone(self):
return self.__constructor__(self._modin_frame.clone())
# END Copy
# Append/Concat/Join (Not Merge)
# The adding/concating/join operations should idetotal_ally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# addinging the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a clone of the
# KnowledgeFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexinging
def concating(self, axis, other, **kwargs):
"""Concatenates two objects togettingher.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concating with.
Returns:
Concatenated objects.
"""
if not incontainstance(other, list):
other = [other]
assert total_all(
incontainstance(o, type(self)) for o in other
), "Different Manager objects are being used. This is not total_allowed"
sort = kwargs.getting("sort", None)
if sort is None:
sort = False
join = kwargs.getting("join", "outer")
ignore_index = kwargs.getting("ignore_index", False)
other_modin_frame = [o._modin_frame for o in other]
new_modin_frame = self._modin_frame._concating(axis, other_modin_frame, join, sort)
result = self.__constructor__(new_modin_frame)
if ignore_index:
if axis == 0:
return result.reseting_index(sip=True)
else:
result.columns = monkey.RangeIndex(length(result.columns))
return result
return result
# END Append/Concat/Join
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object."""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To NumPy
def to_numpy(self, **kwargs):
"""
Converts Modin KnowledgeFrame to NumPy array.
Returns
-------
NumPy array of the QueryCompiler.
"""
arr = self._modin_frame.to_numpy(**kwargs)
ErrorMessage.catch_bugs_and_request_email(
length(arr) != length(self.index) or length(arr[0]) != length(self.columns)
)
return arr
# END To NumPy
# Binary operations (e.g. add, sub)
# These operations require two KnowledgeFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other KnowledgeFrame
# result in NaN values.
add = BinaryFunction.register(monkey.KnowledgeFrame.add)
combine = BinaryFunction.register(monkey.KnowledgeFrame.combine)
combine_first = BinaryFunction.register(monkey.KnowledgeFrame.combine_first)
eq = BinaryFunction.register(monkey.KnowledgeFrame.eq)
floordivision = BinaryFunction.register(monkey.KnowledgeFrame.floordivision)
ge = BinaryFunction.register(monkey.KnowledgeFrame.ge)
gt = BinaryFunction.register(monkey.KnowledgeFrame.gt)
le = BinaryFunction.register(monkey.KnowledgeFrame.le)
lt = BinaryFunction.register(monkey.KnowledgeFrame.lt)
mod = BinaryFunction.register(monkey.KnowledgeFrame.mod)
mul = BinaryFunction.register(monkey.KnowledgeFrame.mul)
ne = BinaryFunction.register(monkey.KnowledgeFrame.ne)
pow = BinaryFunction.register(monkey.KnowledgeFrame.pow)
rfloordivision = BinaryFunction.register(monkey.KnowledgeFrame.rfloordivision)
rmod = BinaryFunction.register(monkey.KnowledgeFrame.rmod)
rpow = BinaryFunction.register(monkey.KnowledgeFrame.rpow)
rsub = BinaryFunction.register(monkey.KnowledgeFrame.rsub)
rtruedivision = BinaryFunction.register(monkey.KnowledgeFrame.rtruedivision)
sub = BinaryFunction.register(monkey.KnowledgeFrame.sub)
truedivision = BinaryFunction.register(monkey.KnowledgeFrame.truedivision)
__and__ = BinaryFunction.register(monkey.KnowledgeFrame.__and__)
__or__ = BinaryFunction.register(monkey.KnowledgeFrame.__or__)
__rand__ = BinaryFunction.register(monkey.KnowledgeFrame.__rand__)
__ror__ = BinaryFunction.register(monkey.KnowledgeFrame.__ror__)
__rxor__ = BinaryFunction.register(monkey.KnowledgeFrame.__rxor__)
__xor__ = BinaryFunction.register(monkey.KnowledgeFrame.__xor__)
kf_umkate = BinaryFunction.register(
clone_kf_for_func(monkey.KnowledgeFrame.umkate), join_type="left"
)
collections_umkate = BinaryFunction.register(
clone_kf_for_func(
lambda x, y: monkey.Collections.umkate(x.squeeze(axis=1), y.squeeze(axis=1))
),
join_type="left",
)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New QueryCompiler with umkated data and index.
"""
assert incontainstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
if incontainstance(other, type(self)):
# Note: Currently we are doing this with two mappings across the entire
# data. This can be done with a single mapping, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
first_pass = cond._modin_frame._binary_op(
where_builder_first_pass, other._modin_frame, join_type="left"
)
def where_builder_second_pass(kf, new_other, **kwargs):
return kf.where(new_other.eq(True), new_other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_second_pass, first_pass, join_type="left"
)
# This will be a Collections of scalars to be applied based on the condition
# knowledgeframe.
else:
def where_builder_collections(kf, cond):
return kf.where(cond, other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_collections, cond._modin_frame, join_type="left"
)
return self.__constructor__(new_modin_frame)
def unioner(self, right, **kwargs):
"""
Merge KnowledgeFrame or named Collections objects with a database-style join.
Parameters
----------
right : MonkeyQueryCompiler
The query compiler of the right KnowledgeFrame to unioner with.
Returns
-------
MonkeyQueryCompiler
A new query compiler that contains result of the unioner.
Notes
-----
See mk.unioner or mk.KnowledgeFrame.unioner for more info on kwargs.
"""
how = kwargs.getting("how", "inner")
on = kwargs.getting("on", None)
left_on = kwargs.getting("left_on", None)
right_on = kwargs.getting("right_on", None)
left_index = kwargs.getting("left_index", False)
right_index = kwargs.getting("right_index", False)
sort = kwargs.getting("sort", False)
if how in ["left", "inner"] and left_index is False and right_index is False:
right = right.to_monkey()
kwargs["sort"] = False
def mapping_func(left, right=right, kwargs=kwargs):
return monkey.unioner(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._employ_full_axis(1, mapping_func)
)
is_reseting_index = True
if left_on and right_on:
left_on = left_on if is_list_like(left_on) else [left_on]
right_on = right_on if is_list_like(right_on) else [right_on]
is_reseting_index = (
False
if whatever(o in new_self.index.names for o in left_on)
and whatever(o in right.index.names for o in right_on)
else True
)
if sort:
new_self = (
                        new_self.sort_rows_by_column_values(list(left_on) + list(right_on))
                        if is_reseting_index
                        else new_self.sorting_index(axis=0, level=list(left_on) + list(right_on))
)
if on:
on = on if is_list_like(on) else [on]
is_reseting_index = not whatever(
o in new_self.index.names and o in right.index.names for o in on
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(on)
if is_reseting_index
else new_self.sorting_index(axis=0, level=on)
)
return new_self.reseting_index(sip=True) if is_reseting_index else new_self
else:
return self.default_to_monkey(monkey.KnowledgeFrame.unioner, right, **kwargs)
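    # For "left"/"inner" unioners on columns (the branch above), the right operand is
    # materialised as a monkey frame and broadcast to every row partition via
    # `_employ_full_axis(1, mapping_func)`, so only the left frame stays distributed;
    # every other case simply defaults to monkey.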
def join(self, right, **kwargs):
"""
Join columns of another KnowledgeFrame.
Parameters
----------
right : BaseQueryCompiler
The query compiler of the right KnowledgeFrame to join with.
Returns
-------
BaseQueryCompiler
A new query compiler that contains result of the join.
Notes
-----
See mk.KnowledgeFrame.join for more info on kwargs.
"""
on = kwargs.getting("on", None)
how = kwargs.getting("how", "left")
sort = kwargs.getting("sort", False)
if how in ["left", "inner"]:
right = right.to_monkey()
def mapping_func(left, right=right, kwargs=kwargs):
return monkey.KnowledgeFrame.join(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._employ_full_axis(1, mapping_func)
)
return new_self.sort_rows_by_column_values(on) if sort else new_self
else:
return self.default_to_monkey(monkey.KnowledgeFrame.join, right, **kwargs)
# END Inter-Data operations
# Reindex/reseting_index (may shuffle data)
def reindexing(self, axis, labels, **kwargs):
"""Fits a new index for this Manager.
Args:
axis: The axis index object to targetting the reindexing on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with umkated data and new index.
"""
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
new_modin_frame = self._modin_frame._employ_full_axis(
axis,
lambda kf: kf.reindexing(labels=labels, axis=axis, **kwargs),
new_index=new_index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def reseting_index(self, **kwargs):
"""Removes total_all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with umkated data and reset index.
"""
sip = kwargs.getting("sip", False)
level = kwargs.getting("level", None)
# TODO Implement level
if level is not None or self.has_multiindex():
return self.default_to_monkey(monkey.KnowledgeFrame.reseting_index, **kwargs)
if not sip:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_self = self.insert(0, new_column_name, self.index)
else:
new_self = self.clone()
new_self.index = monkey.RangeIndex(length(new_self.index))
return new_self
# END Reindex/reseting_index
# Transpose
# For transpose, we aren't going to immediately clone everything. Since the
# actual transpose operation is very fast, we will just do it before whatever
# operation that gettings ctotal_alled on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants astotal_sume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
# Switch the index and columns and transpose the data within the blocks.
return self.__constructor__(self._modin_frame.transpose())
def columnarize(self):
"""
Transposes this QueryCompiler if it has a single row but multiple columns.
This method should be ctotal_alled for QueryCompilers representing a Collections object,
i.e. self.is_collections_like() should be True.
Returns
-------
MonkeyQueryCompiler
Transposed new QueryCompiler or self.
"""
if length(self.columns) != 1 or (
length(self.index) == 1 and self.index[0] == "__reduced__"
):
return self.transpose()
return self
def is_collections_like(self):
"""Return True if QueryCompiler has a single column or row"""
return length(self.columns) == 1 or length(self.index) == 1
# END Transpose
# MapReduce operations
def _is_monotonic(self, func_type=None):
funcs = {
"increasing": lambda kf: kf.is_monotonic_increasing,
"decreasing": lambda kf: kf.is_monotonic_decreasing,
}
monotonic_fn = funcs.getting(func_type, funcs["increasing"])
def is_monotonic_mapping(kf):
kf = kf.squeeze(axis=1)
return [monotonic_fn(kf), kf.iloc[0], kf.iloc[length(kf) - 1]]
def is_monotonic_reduce(kf):
kf = kf.squeeze(axis=1)
common_case = kf[0].total_all()
left_edges = kf[1]
right_edges = kf[2]
edges_list = []
for i in range(length(left_edges)):
edges_list.extend([left_edges.iloc[i], right_edges.iloc[i]])
edge_case = monotonic_fn(monkey.Collections(edges_list))
return [common_case and edge_case]
return MapReduceFunction.register(
is_monotonic_mapping, is_monotonic_reduce, axis=0
)(self)
def is_monotonic_decreasing(self):
return self._is_monotonic(func_type="decreasing")
is_monotonic = _is_monotonic
count = MapReduceFunction.register(monkey.KnowledgeFrame.count, monkey.KnowledgeFrame.total_sum)
getting_max = MapReduceFunction.register(monkey.KnowledgeFrame.getting_max, monkey.KnowledgeFrame.getting_max)
getting_min = MapReduceFunction.register(monkey.KnowledgeFrame.getting_min, monkey.KnowledgeFrame.getting_min)
total_sum = MapReduceFunction.register(monkey.KnowledgeFrame.total_sum, monkey.KnowledgeFrame.total_sum)
prod = MapReduceFunction.register(monkey.KnowledgeFrame.prod, monkey.KnowledgeFrame.prod)
whatever = MapReduceFunction.register(monkey.KnowledgeFrame.whatever, monkey.KnowledgeFrame.whatever)
total_all = MapReduceFunction.register(monkey.KnowledgeFrame.total_all, monkey.KnowledgeFrame.total_all)
memory_usage = MapReduceFunction.register(
monkey.KnowledgeFrame.memory_usage,
lambda x, *args, **kwargs: monkey.KnowledgeFrame.total_sum(x),
axis=0,
)
average = MapReduceFunction.register(
lambda kf, **kwargs: kf.employ(
lambda x: (x.total_sum(skipna=kwargs.getting("skipna", True)), x.count()),
axis=kwargs.getting("axis", 0),
result_type="reduce",
).set_axis(kf.axes[kwargs.getting("axis", 0) ^ 1], axis=0),
lambda kf, **kwargs: kf.employ(
lambda x: x.employ(lambda d: d[0]).total_sum(skipna=kwargs.getting("skipna", True))
/ x.employ(lambda d: d[1]).total_sum(skipna=kwargs.getting("skipna", True)),
axis=kwargs.getting("axis", 0),
).set_axis(kf.axes[kwargs.getting("axis", 0) ^ 1], axis=0),
)
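    # The `average` reduction above works in two passes: the mapping step emits a
    # (partial sum, partial count) pair per column (or row, depending on `axis`) of
    # each partition, and the reduce step sums the partial sums and partial counts
    # separately before dividing, reproducing a single global average.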
def counts_value_num(self, **kwargs):
"""
Return a QueryCompiler of Collections containing counts of distinctive values.
Returns
-------
MonkeyQueryCompiler
"""
if kwargs.getting("bins", None) is not None:
new_modin_frame = self._modin_frame._employ_full_axis(
0, lambda kf: kf.squeeze(axis=1).counts_value_num(**kwargs)
)
return self.__constructor__(new_modin_frame)
def mapping_func(kf, *args, **kwargs):
return kf.squeeze(axis=1).counts_value_num(**kwargs)
def reduce_func(kf, *args, **kwargs):
normalize = kwargs.getting("normalize", False)
sort = kwargs.getting("sort", True)
ascending = kwargs.getting("ascending", False)
sipna = kwargs.getting("sipna", True)
try:
result = kf.squeeze(axis=1).grouper(kf.index, sort=False).total_sum()
# This will happen with Arrow buffer read-only errors. We don't want to clone
# total_all the time, so this will try to fast-path the code first.
except (ValueError):
result = kf.clone().squeeze(axis=1).grouper(kf.index, sort=False).total_sum()
if not sipna and np.nan in kf.index:
result = result.adding(
monkey.Collections(
[kf.squeeze(axis=1).loc[[np.nan]].total_sum()], index=[np.nan]
)
)
if normalize:
result = result / kf.squeeze(axis=1).total_sum()
result = result.sort_the_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sorting_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : monkey.Collections or monkey.KnowledgeFrame with one column
                    The object whose indices need to be sorted where values are equal.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
monkey.KnowledgeFrame
A new KnowledgeFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(length(result), dtype=type(result.index))
while i < length(result):
j = i
if i < length(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == length(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(
result.index[j : i + 1], reverse=not ascending
):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return monkey.KnowledgeFrame(result, index=new_index)
return sorting_index_for_equal_values(result, ascending)
return MapReduceFunction.register(mapping_func, reduce_func, preserve_index=False)(
self, **kwargs
)
# END MapReduce operations
# Reduction operations
idxgetting_max = ReductionFunction.register(monkey.KnowledgeFrame.idxgetting_max)
idxgetting_min = ReductionFunction.register(monkey.KnowledgeFrame.idxgetting_min)
median = ReductionFunction.register(monkey.KnowledgeFrame.median)
ndistinctive = ReductionFunction.register(monkey.KnowledgeFrame.ndistinctive)
skew = ReductionFunction.register(monkey.KnowledgeFrame.skew)
kurt = ReductionFunction.register(monkey.KnowledgeFrame.kurt)
sem = ReductionFunction.register(monkey.KnowledgeFrame.sem)
standard = ReductionFunction.register(monkey.KnowledgeFrame.standard)
var = ReductionFunction.register(monkey.KnowledgeFrame.var)
total_sum_getting_min_count = ReductionFunction.register(monkey.KnowledgeFrame.total_sum)
prod_getting_min_count = ReductionFunction.register(monkey.KnowledgeFrame.prod)
quantile_for_single_value = ReductionFunction.register(monkey.KnowledgeFrame.quantile)
mad = ReductionFunction.register(monkey.KnowledgeFrame.mad)
convert_datetime = ReductionFunction.register(
lambda kf, *args, **kwargs: monkey.convert_datetime(
kf.squeeze(axis=1), *args, **kwargs
),
axis=1,
)
# END Reduction operations
def _resample_by_num_func(
self, resample_by_num_args, func_name, new_columns=None, kf_op=None, *args, **kwargs
):
def mapping_func(kf, resample_by_num_args=resample_by_num_args):
if kf_op is not None:
kf = kf_op(kf)
resample_by_numd_val = kf.resample_by_num(*resample_by_num_args)
op = gettingattr(monkey.core.resample_by_num.Resample_by_numr, func_name)
if ctotal_allable(op):
try:
# This will happen with Arrow buffer read-only errors. We don't want to clone
# total_all the time, so this will try to fast-path the code first.
val = op(resample_by_numd_val, *args, **kwargs)
except (ValueError):
resample_by_numd_val = kf.clone().resample_by_num(*resample_by_num_args)
val = op(resample_by_numd_val, *args, **kwargs)
else:
val = gettingattr(resample_by_numd_val, func_name)
if incontainstance(val, monkey.Collections):
return val.to_frame()
else:
return val
new_modin_frame = self._modin_frame._employ_full_axis(
axis=0, func=mapping_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
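    # Every resample_by_num_* method below funnels through `_resample_by_num_func`:
    # each column partition is resampled along axis 0 and the named attribute of the
    # resulting `monkey.core.resample_by_num.Resample_by_numr` is either invoked (when
    # it is a method) or read directly (when it is a property), with Collections
    # results widened back to a one-column frame.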
def resample_by_num_getting_group(self, resample_by_num_args, name, obj):
return self._resample_by_num_func(resample_by_num_args, "getting_group", name=name, obj=obj)
def resample_by_num_app_ser(self, resample_by_num_args, func, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args,
"employ",
kf_op=lambda kf: kf.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_by_num_app_kf(self, resample_by_num_args, func, *args, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "employ", func=func, *args, **kwargs)
def resample_by_num_agg_ser(self, resample_by_num_args, func, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args,
"aggregate",
kf_op=lambda kf: kf.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_by_num_agg_kf(self, resample_by_num_args, func, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "aggregate", func=func, *args, **kwargs
)
def resample_by_num_transform(self, resample_by_num_args, arg, *args, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "transform", arg=arg, *args, **kwargs)
def resample_by_num_pipe(self, resample_by_num_args, func, *args, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "pipe", func=func, *args, **kwargs)
def resample_by_num_ffill(self, resample_by_num_args, limit):
return self._resample_by_num_func(resample_by_num_args, "ffill", limit=limit)
def resample_by_num_backfill(self, resample_by_num_args, limit):
return self._resample_by_num_func(resample_by_num_args, "backfill", limit=limit)
def resample_by_num_bfill(self, resample_by_num_args, limit):
return self._resample_by_num_func(resample_by_num_args, "bfill", limit=limit)
def resample_by_num_pad(self, resample_by_num_args, limit):
return self._resample_by_num_func(resample_by_num_args, "pad", limit=limit)
def resample_by_num_nearest(self, resample_by_num_args, limit):
return self._resample_by_num_func(resample_by_num_args, "nearest", limit=limit)
def resample_by_num_fillnone(self, resample_by_num_args, method, limit):
return self._resample_by_num_func(resample_by_num_args, "fillnone", method=method, limit=limit)
def resample_by_num_asfreq(self, resample_by_num_args, fill_value):
return self._resample_by_num_func(resample_by_num_args, "asfreq", fill_value=fill_value)
def resample_by_num_interpolate(
self,
resample_by_num_args,
method,
axis,
limit,
inplace,
limit_direction,
limit_area,
downcast,
**kwargs,
):
return self._resample_by_num_func(
resample_by_num_args,
"interpolate",
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def resample_by_num_count(self, resample_by_num_args):
return self._resample_by_num_func(resample_by_num_args, "count")
def resample_by_num_ndistinctive(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "ndistinctive", _method=_method, *args, **kwargs
)
def resample_by_num_first(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "first", _method=_method, *args, **kwargs
)
def resample_by_num_final_item(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "final_item", _method=_method, *args, **kwargs
)
def resample_by_num_getting_max(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "getting_max", _method=_method, *args, **kwargs
)
def resample_by_num_average(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "median", _method=_method, *args, **kwargs
)
def resample_by_num_median(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "median", _method=_method, *args, **kwargs
)
def resample_by_num_getting_min(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "getting_min", _method=_method, *args, **kwargs
)
def resample_by_num_ohlc_ser(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args,
"ohlc",
kf_op=lambda kf: kf.squeeze(axis=1),
_method=_method,
*args,
**kwargs,
)
def resample_by_num_ohlc_kf(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "ohlc", _method=_method, *args, **kwargs
)
def resample_by_num_prod(self, resample_by_num_args, _method, getting_min_count, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "prod", _method=_method, getting_min_count=getting_min_count, *args, **kwargs
)
def resample_by_num_size(self, resample_by_num_args):
return self._resample_by_num_func(resample_by_num_args, "size", new_columns=["__reduced__"])
def resample_by_num_sem(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "sem", _method=_method, *args, **kwargs
)
def resample_by_num_standard(self, resample_by_num_args, ddof, *args, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "standard", ddof=ddof, *args, **kwargs)
def resample_by_num_total_sum(self, resample_by_num_args, _method, getting_min_count, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "total_sum", _method=_method, getting_min_count=getting_min_count, *args, **kwargs
)
def resample_by_num_var(self, resample_by_num_args, ddof, *args, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "var", ddof=ddof, *args, **kwargs)
def resample_by_num_quantile(self, resample_by_num_args, q, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "quantile", q=q, **kwargs)
window_average = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).average(*args, **kwargs)
)
)
window_total_sum = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).total_sum(*args, **kwargs)
)
)
window_var = FoldFunction.register(
lambda kf, rolling_args, ddof, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
window_standard = FoldFunction.register(
lambda kf, rolling_args, ddof, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).standard(ddof=ddof, *args, **kwargs)
)
)
rolling_count = FoldFunction.register(
lambda kf, rolling_args: monkey.KnowledgeFrame(kf.rolling(*rolling_args).count())
)
rolling_total_sum = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).total_sum(*args, **kwargs)
)
)
rolling_average = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).average(*args, **kwargs)
)
)
rolling_median = FoldFunction.register(
lambda kf, rolling_args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).median(**kwargs)
)
)
rolling_var = FoldFunction.register(
lambda kf, rolling_args, ddof, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
rolling_standard = FoldFunction.register(
lambda kf, rolling_args, ddof, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).standard(ddof=ddof, *args, **kwargs)
)
)
rolling_getting_min = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).getting_min(*args, **kwargs)
)
)
rolling_getting_max = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).getting_max(*args, **kwargs)
)
)
rolling_skew = FoldFunction.register(
lambda kf, rolling_args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).skew(**kwargs)
)
)
rolling_kurt = FoldFunction.register(
lambda kf, rolling_args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).kurt(**kwargs)
)
)
rolling_employ = FoldFunction.register(
lambda kf, rolling_args, func, raw, engine, engine_kwargs, args, kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).employ(
func=func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
)
)
)
rolling_quantile = FoldFunction.register(
lambda kf, rolling_args, quantile, interpolation, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
)
)
def rolling_corr(self, rolling_args, other, pairwise, *args, **kwargs):
if length(self.columns) > 1:
return self.default_to_monkey(
lambda kf: monkey.KnowledgeFrame.rolling(kf, *rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
else:
return FoldFunction.register(
lambda kf: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
)(self)
def rolling_cov(self, rolling_args, other, pairwise, ddof, **kwargs):
if length(self.columns) > 1:
return self.default_to_monkey(
lambda kf: monkey.KnowledgeFrame.rolling(kf, *rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
else:
return FoldFunction.register(
lambda kf: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
)(self)
def rolling_aggregate(self, rolling_args, func, *args, **kwargs):
new_modin_frame = self._modin_frame._employ_full_axis(
0,
lambda kf: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).aggregate(func=func, *args, **kwargs)
),
new_index=self.index,
)
return self.__constructor__(new_modin_frame)
def unstack(self, level, fill_value):
if not incontainstance(self.index, monkey.MultiIndex) or (
incontainstance(self.index, monkey.MultiIndex)
and is_list_like(level)
and length(level) == self.index.nlevels
):
axis = 1
new_columns = ["__reduced__"]
need_reindexing = True
else:
axis = 0
new_columns = None
need_reindexing = False
def mapping_func(kf):
return monkey.KnowledgeFrame(kf.unstack(level=level, fill_value=fill_value))
def is_tree_like_or_1d(calc_index, valid_index):
if not incontainstance(calc_index, monkey.MultiIndex):
return True
actual_length = 1
for lvl in calc_index.levels:
actual_length *= length(lvl)
return length(self.index) * length(self.columns) == actual_length * length(valid_index)
is_tree_like_or_1d_index = is_tree_like_or_1d(self.index, self.columns)
is_tree_like_or_1d_cols = is_tree_like_or_1d(self.columns, self.index)
is_total_all_multi_list = False
if (
incontainstance(self.index, monkey.MultiIndex)
and incontainstance(self.columns, monkey.MultiIndex)
and is_list_like(level)
and length(level) == self.index.nlevels
and is_tree_like_or_1d_index
and is_tree_like_or_1d_cols
):
is_total_all_multi_list = True
real_cols_bkp = self.columns
obj = self.clone()
obj.columns = np.arange(length(obj.columns))
else:
obj = self
new_modin_frame = obj._modin_frame._employ_full_axis(
axis, mapping_func, new_columns=new_columns
)
result = self.__constructor__(new_modin_frame)
def compute_index(index, columns, consider_index=True, consider_columns=True):
def getting_distinctive_level_values(index):
return [
index.getting_level_values(lvl).distinctive()
for lvl in np.arange(index.nlevels)
]
new_index = (
getting_distinctive_level_values(index)
if consider_index
else index
if incontainstance(index, list)
else [index]
)
new_columns = (
getting_distinctive_level_values(columns) if consider_columns else [columns]
)
return monkey.MultiIndex.from_product([*new_columns, *new_index])
if is_total_all_multi_list and is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
result = result.sorting_index()
index_level_values = [lvl for lvl in obj.index.levels]
result.index = compute_index(
index_level_values, real_cols_bkp, consider_index=False
)
return result
if need_reindexing:
if is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
is_recompute_index = incontainstance(self.index, monkey.MultiIndex)
is_recompute_columns = not is_recompute_index and incontainstance(
self.columns, monkey.MultiIndex
)
new_index = compute_index(
self.index, self.columns, is_recompute_index, is_recompute_columns
)
elif is_tree_like_or_1d_index != is_tree_like_or_1d_cols:
if incontainstance(self.columns, monkey.MultiIndex) or not incontainstance(
self.index, monkey.MultiIndex
):
return result
else:
index = (
self.index.sortlevel()[0]
if is_tree_like_or_1d_index
and not is_tree_like_or_1d_cols
and incontainstance(self.index, monkey.MultiIndex)
else self.index
)
index = monkey.MultiIndex.from_tuples(
list(index) * length(self.columns)
)
columns = self.columns.repeat(length(self.index))
index_levels = [
index.getting_level_values(i) for i in range(index.nlevels)
]
new_index = monkey.MultiIndex.from_arrays(
[columns] + index_levels,
names=self.columns.names + self.index.names,
)
else:
return result
result = result.reindexing(0, new_index)
return result
def stack(self, level, sipna):
if not incontainstance(self.columns, monkey.MultiIndex) or (
incontainstance(self.columns, monkey.MultiIndex)
and is_list_like(level)
and length(level) == self.columns.nlevels
):
new_columns = ["__reduced__"]
else:
new_columns = None
new_modin_frame = self._modin_frame._employ_full_axis(
1,
lambda kf: monkey.KnowledgeFrame(kf.stack(level=level, sipna=sipna)),
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
# Map partitions operations
# These operations are operations that employ a function to every partition.
abs = MapFunction.register(monkey.KnowledgeFrame.abs, dtypes="clone")
employmapping = MapFunction.register(monkey.KnowledgeFrame.employmapping)
conj = MapFunction.register(
lambda kf, *args, **kwargs: monkey.KnowledgeFrame(np.conj(kf))
)
invert = MapFunction.register(monkey.KnowledgeFrame.__invert__)
incontain = MapFunction.register(monkey.KnowledgeFrame.incontain, dtypes=np.bool)
ifna = MapFunction.register(monkey.KnowledgeFrame.ifna, dtypes=np.bool)
negative = MapFunction.register(monkey.KnowledgeFrame.__neg__)
notna = MapFunction.register(monkey.KnowledgeFrame.notna, dtypes=np.bool)
value_round = MapFunction.register(monkey.KnowledgeFrame.value_round)
replacing = MapFunction.register(monkey.KnowledgeFrame.replacing)
collections_view = MapFunction.register(
lambda kf, *args, **kwargs: monkey.KnowledgeFrame(
kf.squeeze(axis=1).view(*args, **kwargs)
)
)
to_num = MapFunction.register(
lambda kf, *args, **kwargs: monkey.KnowledgeFrame(
monkey.to_num(kf.squeeze(axis=1), *args, **kwargs)
)
)
def repeat(self, repeats):
def mapping_fn(kf):
return monkey.KnowledgeFrame(kf.squeeze(axis=1).repeat(repeats))
if incontainstance(repeats, int) or (is_list_like(repeats) and length(repeats) == 1):
return MapFunction.register(mapping_fn, validate_index=True)(self)
else:
return self.__constructor__(self._modin_frame._employ_full_axis(0, mapping_fn))
# END Map partitions operations
# String mapping partitions operations
str_capitalize = MapFunction.register(_str_mapping("capitalize"), dtypes="clone")
str_center = MapFunction.register(_str_mapping("center"), dtypes="clone")
str_contains = MapFunction.register(_str_mapping("contains"), dtypes=np.bool)
str_count = MapFunction.register(_str_mapping("count"), dtypes=int)
str_endswith = MapFunction.register(_str_mapping("endswith"), dtypes=np.bool)
str_find = MapFunction.register(_str_mapping("find"), dtypes="clone")
str_findtotal_all = MapFunction.register(_str_mapping("findtotal_all"), dtypes="clone")
str_getting = MapFunction.register(_str_mapping("getting"), dtypes="clone")
str_index = MapFunction.register(_str_mapping("index"), dtypes="clone")
str_isalnum = MapFunction.register(_str_mapping("isalnum"), dtypes=np.bool)
str_isalpha = MapFunction.register(_str_mapping("isalpha"), dtypes=np.bool)
str_isdecimal = MapFunction.register(_str_mapping("isdecimal"), dtypes=np.bool)
str_isdigit = MapFunction.register(_str_mapping("isdigit"), dtypes=np.bool)
str_islower = MapFunction.register(_str_mapping("islower"), dtypes=np.bool)
str_isnumeric = MapFunction.register(_str_mapping("isnumeric"), dtypes=np.bool)
str_isspace = MapFunction.register(_str_mapping("isspace"), dtypes=np.bool)
str_istitle = MapFunction.register(_str_mapping("istitle"), dtypes=np.bool)
str_isupper = MapFunction.register(_str_mapping("isupper"), dtypes=np.bool)
str_join = MapFunction.register(_str_mapping("join"), dtypes="clone")
str_length = MapFunction.register(_str_mapping("length"), dtypes=int)
str_ljust = MapFunction.register(_str_mapping("ljust"), dtypes="clone")
str_lower = MapFunction.register(_str_mapping("lower"), dtypes="clone")
str_lstrip = MapFunction.register(_str_mapping("lstrip"), dtypes="clone")
str_match = MapFunction.register(_str_mapping("match"), dtypes="clone")
str_normalize = MapFunction.register(_str_mapping("normalize"), dtypes="clone")
str_pad = MapFunction.register(_str_mapping("pad"), dtypes="clone")
str_partition = MapFunction.register(_str_mapping("partition"), dtypes="clone")
str_repeat = MapFunction.register(_str_mapping("repeat"), dtypes="clone")
str_replacing = MapFunction.register(_str_mapping("replacing"), dtypes="clone")
str_rfind = MapFunction.register(_str_mapping("rfind"), dtypes="clone")
str_rindex = MapFunction.register(_str_mapping("rindex"), dtypes="clone")
str_rjust = MapFunction.register(_str_mapping("rjust"), dtypes="clone")
str_rpartition = MapFunction.register(_str_mapping("rpartition"), dtypes="clone")
str_rsplit = MapFunction.register(_str_mapping("rsplit"), dtypes="clone")
str_rstrip = MapFunction.register(_str_mapping("rstrip"), dtypes="clone")
str_slice = MapFunction.register(_str_mapping("slice"), dtypes="clone")
str_slice_replacing = MapFunction.register(_str_mapping("slice_replacing"), dtypes="clone")
str_split = MapFunction.register(_str_mapping("split"), dtypes="clone")
str_startswith = MapFunction.register(_str_mapping("startswith"), dtypes=np.bool)
str_strip = MapFunction.register(_str_mapping("strip"), dtypes="clone")
str_swapcase = MapFunction.register(_str_mapping("swapcase"), dtypes="clone")
str_title = MapFunction.register(_str_mapping("title"), dtypes="clone")
str_translate = MapFunction.register(_str_mapping("translate"), dtypes="clone")
str_upper = MapFunction.register(_str_mapping("upper"), dtypes="clone")
str_wrap = MapFunction.register(_str_mapping("wrap"), dtypes="clone")
str_zfill = MapFunction.register(_str_mapping("zfill"), dtypes="clone")
# END String mapping partitions operations
def distinctive(self):
"""Return distinctive values of Collections object.
Returns
-------
ndarray
The distinctive values returned as a NumPy array.
"""
new_modin_frame = self._modin_frame._employ_full_axis(
0,
lambda x: x.squeeze(axis=1).distinctive(),
new_columns=self.columns,
)
return self.__constructor__(new_modin_frame)
def searchsorted(self, **kwargs):
"""
Return a QueryCompiler with the indices at which value/values should be
inserted to maintain the order of the passed Collections.
Returns
-------
MonkeyQueryCompiler
"""
def mapping_func(part, *args, **kwargs):
elements_number = length(part.index)
assert elements_number > 0, "Wrong mappingping behaviour of MapReduce"
# unify value type
value = kwargs.pop("value")
value = np.array([value]) if is_scalar(value) else value
if elements_number == 1:
part = part[part.columns[0]]
else:
part = part.squeeze()
part_index_start = part.index.start
part_index_stop = part.index.stop
result = part.searchsorted(value=value, *args, **kwargs)
processed_results = {}
value_number = 0
for value_result in result:
value_result += part_index_start
if value_result > part_index_start and value_result < part_index_stop:
processed_results[f"value{value_number}"] = {
"relative_location": "current_partition",
"index": value_result,
}
elif value_result <= part_index_start:
processed_results[f"value{value_number}"] = {
"relative_location": "previoius_partitions",
"index": part_index_start,
}
else:
processed_results[f"value{value_number}"] = {
"relative_location": "next_partitions",
"index": part_index_stop,
}
value_number += 1
return monkey.KnowledgeFrame(processed_results)
def reduce_func(mapping_results, *args, **kwargs):
def getting_value_index(value_result):
value_result_grouped = value_result.grouper(level=0)
rel_location = value_result_grouped.getting_group("relative_location")
ind = value_result_grouped.getting_group("index")
# executes if result is inside of the mappingped part
if "current_partition" in rel_location.values:
assert (
rel_location[rel_location == "current_partition"].count() == 1
), "Each value should have single result"
return ind[rel_location.values == "current_partition"]
# executes if result is between mappingped parts
elif rel_location.ndistinctive(sipna=False) > 1:
return ind[rel_location.values == "previous_partitions"][0]
# executes if result is outside of the mappingped part
else:
if "next_partitions" in rel_location.values:
return ind[-1]
else:
return ind[0]
mapping_results_parsed = mapping_results.employ(
lambda ser: getting_value_index(ser)
).squeeze()
if incontainstance(mapping_results_parsed, monkey.Collections):
mapping_results_parsed = mapping_results_parsed.to_list()
return monkey.Collections(mapping_results_parsed)
return MapReduceFunction.register(mapping_func, reduce_func, preserve_index=False)(
self, **kwargs
)
# Dt mapping partitions operations
dt_date = MapFunction.register(_dt_prop_mapping("date"))
dt_time = MapFunction.register(_dt_prop_mapping("time"))
dt_timetz = MapFunction.register(_dt_prop_mapping("timetz"))
dt_year = MapFunction.register(_dt_prop_mapping("year"))
dt_month = MapFunction.register(_dt_prop_mapping("month"))
dt_day = MapFunction.register(_dt_prop_mapping("day"))
dt_hour = MapFunction.register(_dt_prop_mapping("hour"))
dt_getting_minute = MapFunction.register(_dt_prop_mapping("getting_minute"))
dt_second = MapFunction.register(_dt_prop_mapping("second"))
dt_microsecond = MapFunction.register(_dt_prop_mapping("microsecond"))
dt_nanosecond = MapFunction.register(_dt_prop_mapping("nanosecond"))
dt_week = MapFunction.register(_dt_prop_mapping("week"))
dt_weekofyear = MapFunction.register(_dt_prop_mapping("weekofyear"))
dt_dayofweek = MapFunction.register(_dt_prop_mapping("dayofweek"))
dt_weekday = MapFunction.register(_dt_prop_mapping("weekday"))
dt_dayofyear = MapFunction.register(_dt_prop_mapping("dayofyear"))
dt_quarter = MapFunction.register(_dt_prop_mapping("quarter"))
dt_is_month_start = MapFunction.register(_dt_prop_mapping("is_month_start"))
dt_is_month_end = MapFunction.register(_dt_prop_mapping("is_month_end"))
dt_is_quarter_start = MapFunction.register(_dt_prop_mapping("is_quarter_start"))
dt_is_quarter_end = MapFunction.register(_dt_prop_mapping("is_quarter_end"))
dt_is_year_start = MapFunction.register(_dt_prop_mapping("is_year_start"))
dt_is_year_end = MapFunction.register(_dt_prop_mapping("is_year_end"))
dt_is_leap_year = MapFunction.register(_dt_prop_mapping("is_leap_year"))
dt_daysinmonth = MapFunction.register(_dt_prop_mapping("daysinmonth"))
dt_days_in_month = MapFunction.register(_dt_prop_mapping("days_in_month"))
dt_tz = MapReduceFunction.register(
_dt_prop_mapping("tz"), lambda kf: monkey.KnowledgeFrame(kf.iloc[0]), axis=0
)
dt_freq = MapReduceFunction.register(
_dt_prop_mapping("freq"), lambda kf: monkey.KnowledgeFrame(kf.iloc[0]), axis=0
)
dt_to_period = MapFunction.register(_dt_func_mapping("to_period"))
dt_convert_pydatetime = MapFunction.register(_dt_func_mapping("convert_pydatetime"))
dt_tz_localize = MapFunction.register(_dt_func_mapping("tz_localize"))
dt_tz_convert = MapFunction.register(_dt_func_mapping("tz_convert"))
dt_normalize = MapFunction.register(_dt_func_mapping("normalize"))
dt_strftime = MapFunction.register(_dt_func_mapping("strftime"))
dt_value_round = MapFunction.register(_dt_func_mapping("value_round"))
dt_floor = MapFunction.register(_dt_func_mapping("floor"))
dt_ceiling = MapFunction.register(_dt_func_mapping("ceiling"))
dt_month_name = MapFunction.register(_dt_func_mapping("month_name"))
dt_day_name = MapFunction.register(_dt_func_mapping("day_name"))
dt_to_pytimedelta = MapFunction.register(_dt_func_mapping("to_pytimedelta"))
dt_total_seconds = MapFunction.register(_dt_func_mapping("total_seconds"))
dt_seconds = MapFunction.register(_dt_prop_mapping("seconds"))
dt_days = MapFunction.register(_dt_prop_mapping("days"))
dt_microseconds = MapFunction.register(_dt_prop_mapping("microseconds"))
dt_nanoseconds = MapFunction.register(_dt_prop_mapping("nanoseconds"))
dt_components = MapFunction.register(
_dt_prop_mapping("components"), validate_columns=True
)
dt_qyear = MapFunction.register(_dt_prop_mapping("qyear"))
dt_start_time = MapFunction.register(_dt_prop_mapping("start_time"))
dt_end_time = MapFunction.register(_dt_prop_mapping("end_time"))
dt_to_timestamp = MapFunction.register(_dt_func_mapping("to_timestamp"))
# END Dt mapping partitions operations
def totype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
KnowledgeFrame with umkated dtypes.
"""
return self.__constructor__(self._modin_frame.totype(col_dtypes))
# Column/Row partitions reduce operations
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
def first_valid_index_builder(kf):
return kf.set_axis(
monkey.RangeIndex(length(kf.index)), axis="index", inplace=False
).employ(lambda kf: kf.first_valid_index())
# We getting the getting_minimum from each column, then take the getting_min of that to getting
# first_valid_index. The `to_monkey()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = (
self.__constructor__(
self._modin_frame._fold_reduce(0, first_valid_index_builder)
)
.getting_min(axis=1)
.to_monkey()
.squeeze()
)
return self.index[first_result]
def final_item_valid_index(self):
"""Returns index of final_item non-NaN/NULL value.
Return:
Scalar of index name.
"""
def final_item_valid_index_builder(kf):
return kf.set_axis(
monkey.RangeIndex(length(kf.index)), axis="index", inplace=False
).employ(lambda kf: kf.final_item_valid_index())
# We getting the getting_maximum from each column, then take the getting_max of that to getting
# final_item_valid_index. The `to_monkey()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = (
self.__constructor__(
self._modin_frame._fold_reduce(0, final_item_valid_index_builder)
)
.getting_max(axis=1)
.to_monkey()
.squeeze()
)
return self.index[first_result]
# END Column/Row partitions reduce operations
# Column/Row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler object which the front end will handle.
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
KnowledgeFrame object containing the descriptive statistics of the KnowledgeFrame.
"""
# Use monkey to calculate the correct columns
empty_kf = (
monkey.KnowledgeFrame(columns=self.columns)
.totype(self.dtypes)
.describe(**kwargs)
)
def describe_builder(kf, internal_indices=[]):
return kf.iloc[:, internal_indices].describe(**kwargs)
return self.__constructor__(
self._modin_frame._employ_full_axis_select_indices(
0,
describe_builder,
empty_kf.columns,
new_index=empty_kf.index,
new_columns=empty_kf.columns,
)
)
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This averages that we have to put total_all of that
# data in the same place.
cumgetting_max = FoldFunction.register(monkey.KnowledgeFrame.cumgetting_max)
cumgetting_min = FoldFunction.register(monkey.KnowledgeFrame.cumgetting_min)
cumtotal_sum = FoldFunction.register(monkey.KnowledgeFrame.cumtotal_sum)
cumprod = FoldFunction.register(monkey.KnowledgeFrame.cumprod)
diff = FoldFunction.register(monkey.KnowledgeFrame.diff)
def clip(self, lower, upper, **kwargs):
kwargs["upper"] = upper
kwargs["lower"] = lower
axis = kwargs.getting("axis", 0)
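# List-like lower/upper bounds presumably need to be aligned along the axis,
# hence the full-axis _fold below; scalar bounds can be applied to each
# partition independently via _mapping.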
if is_list_like(lower) or is_list_like(upper):
new_modin_frame = self._modin_frame._fold(
axis, lambda kf: kf.clip(**kwargs)
)
else:
new_modin_frame = self._modin_frame._mapping(lambda kf: kf.clip(**kwargs))
return self.__constructor__(new_modin_frame)
def dot(self, other, squeeze_self=None, squeeze_other=None):
"""
Computes the matrix multiplication of self and other.
Parameters
----------
other : MonkeyQueryCompiler or NumPy array
The other query compiler or NumPy array to matrix multiply with self.
squeeze_self : boolean
The flag to squeeze self.
squeeze_other : boolean
The flag to squeeze other (this flag is applied if other is query compiler).
Returns
-------
MonkeyQueryCompiler
A new query compiler that contains result of the matrix multiply.
"""
if incontainstance(other, MonkeyQueryCompiler):
other = (
other.to_monkey().squeeze(axis=1)
if squeeze_other
else other.to_monkey()
)
def mapping_func(kf, other=other, squeeze_self=squeeze_self):
result = kf.squeeze(axis=1).dot(other) if squeeze_self else kf.dot(other)
if is_list_like(result):
return monkey.KnowledgeFrame(result)
else:
return monkey.KnowledgeFrame([result])
num_cols = other.shape[1] if length(other.shape) > 1 else 1
if length(self.columns) == 1:
new_index = (
["__reduced__"]
if (length(self.index) == 1 or squeeze_self) and num_cols == 1
else None
)
new_columns = ["__reduced__"] if squeeze_self and num_cols == 1 else None
axis = 0
else:
new_index = self.index
new_columns = ["__reduced__"] if num_cols == 1 else None
axis = 1
new_modin_frame = self._modin_frame._employ_full_axis(
axis, mapping_func, new_index=new_index, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
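# Rough usage sketch (hypothetical): with a single-column compiler and
# squeeze_self=True, the partition-level kf.squeeze(axis=1).dot(other) behaves
# like Collections.dot, and the result collapses into the "__reduced__" column.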
def _nsort(self, n, columns=None, keep="first", sort_type="nsmtotal_allest"):
def mapping_func(kf, n=n, keep=keep, columns=columns):
if columns is None:
return monkey.KnowledgeFrame(
gettingattr(monkey.Collections, sort_type)(
kf.squeeze(axis=1), n=n, keep=keep
)
)
return gettingattr(monkey.KnowledgeFrame, sort_type)(
kf, n=n, columns=columns, keep=keep
)
if columns is None:
new_columns = ["__reduced__"]
else:
new_columns = self.columns
new_modin_frame = self._modin_frame._employ_full_axis(
axis=0, func=mapping_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
def nsmtotal_allest(self, *args, **kwargs):
return self._nsort(sort_type="nsmtotal_allest", *args, **kwargs)
def nbiggest(self, *args, **kwargs):
return self._nsort(sort_type="nbiggest", *args, **kwargs)
def eval(self, expr, **kwargs):
"""Returns a new QueryCompiler with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new QueryCompiler with new columns after employing expr.
"""
# Make a clone of columns and eval on the clone to detergetting_mine if result type is
# collections or not
empty_eval = (
monkey.KnowledgeFrame(columns=self.columns)
.totype(self.dtypes)
.eval(expr, inplace=False, **kwargs)
)
if incontainstance(empty_eval, monkey.Collections):
new_columns = (
[empty_eval.name] if empty_eval.name is not None else ["__reduced__"]
)
else:
new_columns = empty_eval.columns
new_modin_frame = self._modin_frame._employ_full_axis(
1,
lambda kf: monkey.KnowledgeFrame(kf.eval(expr, inplace=False, **kwargs)),
new_index=self.index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def mode(self, **kwargs):
"""Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated.
"""
axis = kwargs.getting("axis", 0)
def mode_builder(kf):
result = monkey.KnowledgeFrame(kf.mode(**kwargs))
# We return a knowledgeframe with the same shape as the input to ensure
# that total_all the partitions will be the same shape
if axis == 0 and length(kf) != length(result):
# Pad rows
result = result.reindexing(index=monkey.RangeIndex(length(kf.index)))
elif axis == 1 and length(kf.columns) != length(result.columns):
# Pad columns
result = result.reindexing(columns=monkey.RangeIndex(length(kf.columns)))
return monkey.KnowledgeFrame(result)
if axis == 0:
new_index = monkey.RangeIndex(length(self.index))
new_columns = self.columns
else:
new_index = self.index
new_columns = monkey.RangeIndex(length(self.columns))
new_modin_frame = self._modin_frame._employ_full_axis(
axis, mode_builder, new_index=new_index, new_columns=new_columns
)
return self.__constructor__(new_modin_frame).sipna(axis=axis, how="total_all")
def fillnone(self, **kwargs):
"""Replaces NaN values with the method provided.
Returns:
A new QueryCompiler with null values filled.
"""
axis = kwargs.getting("axis", 0)
value = kwargs.getting("value")
method = kwargs.getting("method", None)
limit = kwargs.getting("limit", None)
full_axis = method is not None or limit is not None
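# Filling by method/limit looks at neighbouring values along the axis, so it
# presumably requires the full-axis _fold chosen below; filling by value alone
# can be done partition-wise with _mapping.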
if incontainstance(value, dict):
kwargs.pop("value")
def fillnone(kf):
func_dict = {c: value[c] for c in value if c in kf.columns}
return kf.fillnone(value=func_dict, **kwargs)
else:
def fillnone(kf):
return kf.fillnone(**kwargs)
if full_axis:
new_modin_frame = self._modin_frame._fold(axis, fillnone)
else:
new_modin_frame = self._modin_frame._mapping(fillnone)
return self.__constructor__(new_modin_frame)
def quantile_for_list_of_values(self, **kwargs):
"""Returns Manager containing quantiles along an axis for numeric columns.
Returns:
QueryCompiler containing quantiles of original QueryCompiler along an axis.
"""
axis = kwargs.getting("axis", 0)
q = kwargs.getting("q")
numeric_only = kwargs.getting("numeric_only", True)
assert incontainstance(q, (monkey.Collections, np.ndarray, monkey.Index, list))
if numeric_only:
new_columns = self._modin_frame._numeric_columns()
else:
new_columns = [
col
for col, dtype in zip(self.columns, self.dtypes)
if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))
]
if axis == 1:
query_compiler = self.gettingitem_column_array(new_columns)
new_columns = self.index
else:
query_compiler = self
def quantile_builder(kf, **kwargs):
result = kf.quantile(**kwargs)
return result.T if kwargs.getting("axis", 0) == 1 else result
# This took a long time to debug, so here is the rundown of why this is needed.
# Previously, we were operating on select indices, but that was broken. We were
# not correctly setting the columns/index. Because of how we compute `to_monkey`
# and because of the static nature of the index for `axis=1` it is easier to
# just handle this as the transpose (see `quantile_builder` above for the
# transpose within the partition) than it is to completely rework other
# internal methods. Basictotal_ally we are returning the transpose of the object for
# correctness and cleanliness of the code.
if axis == 1:
q_index = new_columns
new_columns = monkey.Float64Index(q)
else:
q_index = monkey.Float64Index(q)
new_modin_frame = query_compiler._modin_frame._employ_full_axis(
axis,
lambda kf: quantile_builder(kf, **kwargs),
new_index=q_index,
new_columns=new_columns,
dtypes=np.float64,
)
result = self.__constructor__(new_modin_frame)
return result.transpose() if axis == 1 else result
def query(self, expr, **kwargs):
"""Query columns of the QueryCompiler with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
QueryCompiler containing the rows where the boolean expression is satisfied.
"""
def query_builder(kf, **kwargs):
return kf.query(expr, inplace=False, **kwargs)
return self.__constructor__(
self._modin_frame.filter_full_axis(1, query_builder)
)
def rank(self, **kwargs):
"""Computes numerical rank along axis. Equal values are set to the average.
Returns:
QueryCompiler containing the ranks of the values along an axis.
"""
axis = kwargs.getting("axis", 0)
numeric_only = True if axis else kwargs.getting("numeric_only", False)
new_modin_frame = self._modin_frame._employ_full_axis(
axis,
lambda kf: kf.rank(**kwargs),
new_index=self.index,
new_columns=self.columns if not numeric_only else None,
dtypes=np.float64,
)
return self.__constructor__(new_modin_frame)
def sorting_index(self, **kwargs):
"""Sorts the data with respect to either the columns or the indices.
Returns:
QueryCompiler containing the data sorted by columns or indices.
"""
axis = kwargs.pop("axis", 0)
level = kwargs.pop("level", None)
sort_remaining = kwargs.pop("sort_remaining", True)
kwargs["inplace"] = False
if level is not None or self.has_multiindex(axis=axis):
return self.default_to_monkey(
monkey.KnowledgeFrame.sorting_index,
axis=axis,
level=level,
sort_remaining=sort_remaining,
**kwargs,
)
# sorting_index can have ascending be None and behaves as if it is False.
# sort_the_values cannot have ascending be None. Thus, the following logic is to
# convert the ascending argument to one that works with sort_the_values
ascending = kwargs.pop("ascending", True)
if ascending is None:
ascending = False
kwargs["ascending"] = ascending
if axis:
new_columns = monkey.Collections(self.columns).sort_the_values(**kwargs)
new_index = self.index
else:
new_index = monkey.Collections(self.index).sort_the_values(**kwargs)
new_columns = self.columns
new_modin_frame = self._modin_frame._employ_full_axis(
axis,
lambda kf: kf.sorting_index(
axis=axis, level=level, sort_remaining=sort_remaining, **kwargs
),
new_index,
new_columns,
dtypes="clone" if axis == 0 else None,
)
return self.__constructor__(new_modin_frame)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
ignore_index=True,
):
ErrorMessage.missmatch_with_monkey(
operation="melt", message="Order of rows could be different from monkey"
)
if var_name is None:
var_name = "variable"
def _convert_to_list(x):
if is_list_like(x):
x = [*x]
elif x is not None:
x = [x]
else:
x = []
return x
id_vars, value_vars = mapping(_convert_to_list, [id_vars, value_vars])
if length(value_vars) == 0:
value_vars = self.columns.sip(id_vars)
if length(id_vars) != 0:
to_broadcast = self.gettingitem_column_array(id_vars)._modin_frame
else:
to_broadcast = None
def employier(kf, internal_indices, other=[], internal_other_indices=[]):
if length(other):
other = monkey.concating(other, axis=1)
columns_to_add = other.columns.difference(kf.columns)
kf = monkey.concating([kf, other[columns_to_add]], axis=1)
return kf.melt(
id_vars=id_vars,
value_vars=kf.columns[internal_indices],
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
# we are not able to calculate correct indices here, so mark them as `dummy_index`
inconsistent_frame = self._modin_frame.broadcast_employ_select_indices(
axis=0,
employ_indices=value_vars,
func=employier,
other=to_broadcast,
new_index=["dummy_index"] * length(id_vars),
new_columns=["dummy_index"] * length(id_vars),
)
# after employing `melt` for selected indices we will getting partitions like this:
# id_vars vars value | id_vars vars value
# 0 foo col3 1 | 0 foo col5 a so stacking it into
# 1 fiz col3 2 | 1 fiz col5 b `new_parts` to getting
# 2 bar col3 3 | 2 bar col5 c correct answer
# 3 zoo col3 4 | 3 zoo col5 d
new_parts = np.array(
[np.array([x]) for x in np.concatingenate(inconsistent_frame._partitions.T)]
)
new_index = monkey.RangeIndex(length(self.index) * length(value_vars))
new_modin_frame = self._modin_frame.__constructor__(
new_parts,
index=new_index,
columns=id_vars + [var_name, value_name],
)
result = self.__constructor__(new_modin_frame)
# this assignment needs to propagate correct indices into partitions
result.index = new_index
return result
# END Map across rows/columns
# __gettingitem__ methods
def gettingitem_array(self, key):
"""
Get column or row data specified by key.
Parameters
----------
key : MonkeyQueryCompiler, numpy.ndarray, monkey.Index or list
Targetting numeric indices or labels by which to retrieve data.
Returns
-------
MonkeyQueryCompiler
A new Query Compiler.
"""
# TODO: don't convert to monkey for array indexing
if incontainstance(key, type(self)):
key = key.to_monkey().squeeze(axis=1)
if is_bool_indexer(key):
if incontainstance(key, monkey.Collections) and not key.index.equals(self.index):
warnings.warn(
"Boolean Collections key will be reindexinged to match KnowledgeFrame index.",
PendingDeprecationWarning,
stacklevel=3,
)
elif length(key) != length(self.index):
raise ValueError(
"Item wrong lengthgth {} instead of {}.".formating(
length(key), length(self.index)
)
)
key = check_bool_indexer(self.index, key)
# We convert to a RangeIndex because gettingitem_row_array is expecting a list
# of indices, and RangeIndex will give us the exact indices of each boolean
# requested.
key = monkey.RangeIndex(length(self.index))[key]
if length(key):
return self.gettingitem_row_array(key)
else:
return self.from_monkey(
monkey.KnowledgeFrame(columns=self.columns), type(self._modin_frame)
)
else:
if whatever(k not in self.columns for k in key):
raise KeyError(
"{} not index".formating(
str([k for k in key if k not in self.columns]).replacing(",", "")
)
)
return self.gettingitem_column_array(key)
def gettingitem_column_array(self, key, numeric=False):
"""Get column data for targetting labels.
Args:
key: Targetting labels by which to retrieve data.
numeric: A boolean representing whether or not the key passed in represents
the numeric index or the named index.
Returns:
A new QueryCompiler.
"""
# Convert to list for type checking
if numeric:
new_modin_frame = self._modin_frame.mask(col_numeric_idx=key)
else:
new_modin_frame = self._modin_frame.mask(col_indices=key)
return self.__constructor__(new_modin_frame)
def gettingitem_row_array(self, key):
"""Get row data for targetting labels.
Args:
key: Targetting numeric indices by which to retrieve data.
Returns:
A new QueryCompiler.
"""
return self.__constructor__(self._modin_frame.mask(row_numeric_idx=key))
def setitem(self, axis, key, value):
"""Set the column defined by `key` to the `value` provided.
Args:
key: The column name to set.
value: The value to set the column to.
Returns:
A new QueryCompiler
"""
def setitem_builder(kf, internal_indices=[]):
kf = kf.clone()
if length(internal_indices) == 1:
if axis == 0:
kf[kf.columns[internal_indices[0]]] = value
else:
kf.iloc[internal_indices[0]] = value
else:
if axis == 0:
kf[kf.columns[internal_indices]] = value
else:
kf.iloc[internal_indices] = value
return kf
if incontainstance(value, type(self)):
value.columns = [key]
if axis == 0:
idx = self.columns.getting_indexer_for([key])[0]
if 0 < idx < length(self.columns) - 1:
first_mask = self._modin_frame.mask(
col_numeric_idx=list(range(idx))
)
second_mask = self._modin_frame.mask(
col_numeric_idx=list(range(idx + 1, length(self.columns)))
)
return self.__constructor__(
first_mask._concating(
1, [value._modin_frame, second_mask], "inner", False
)
)
else:
mask = self.sip(columns=[key])._modin_frame
if idx == 0:
return self.__constructor__(
value._modin_frame._concating(1, [mask], "inner", False)
)
else:
return self.__constructor__(
mask._concating(1, [value._modin_frame], "inner", False)
)
else:
value = value.transpose()
idx = self.index.getting_indexer_for([key])[0]
if 0 < idx < length(self.index) - 1:
first_mask = self._modin_frame.mask(
row_numeric_idx=list(range(idx))
)
second_mask = self._modin_frame.mask(
row_numeric_idx=list(range(idx + 1, length(self.index)))
)
return self.__constructor__(
first_mask._concating(
0, [value._modin_frame, second_mask], "inner", False
)
)
else:
mask = self.sip(index=[key])._modin_frame
if idx == 0:
return self.__constructor__(
value._modin_frame._concating(0, [mask], "inner", False)
)
else:
return self.__constructor__(
mask._concating(0, [value._modin_frame], "inner", False)
)
if is_list_like(value):
new_modin_frame = self._modin_frame._employ_full_axis_select_indices(
axis,
setitem_builder,
[key],
new_index=self.index,
new_columns=self.columns,
keep_remaining=True,
)
else:
new_modin_frame = self._modin_frame._employ_select_indices(
axis,
setitem_builder,
[key],
new_index=self.index,
new_columns=self.columns,
keep_remaining=True,
)
return self.__constructor__(new_modin_frame)
# END __gettingitem__ methods
# Drop/Dropna
# This will change the shape of the resulting data.
def sipna(self, **kwargs):
"""Returns a new QueryCompiler with null values sipped along given axis.
Return:
a new QueryCompiler
"""
return self.__constructor__(
self._modin_frame.filter_full_axis(
kwargs.getting("axis", 0) ^ 1,
lambda kf:
|
monkey.KnowledgeFrame.sipna(kf, **kwargs)
|
pandas.DataFrame.dropna
|
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if incontainstance(func, str):
return gettingattr(self, func)(*args, **kwargs)
elif incontainstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in total_allocatement (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[total_allocatement]
return ret
else:
cyfunc = com.getting_cython_func(func)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_grouper.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_collections_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> KnowledgeFrame:
if incontainstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renagetting_mingr is not supported")
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.adding(com.getting_ctotal_allable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, KnowledgeFrame | Collections] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if whatever(incontainstance(x, KnowledgeFrame) for x in results.values()):
from monkey import concating
res_kf = concating(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
return res_kf
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindexing_output(output)
return output
def _indexed_output_to_nkframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> Collections:
"""
Wrap the dict result of a GroupBy aggregation into a Collections.
"""
assert length(output) == 1
values = next(iter(output.values()))
result = self.obj._constructor(values)
result.name = self.obj.name
return result
def _wrap_applied_output(
self,
data: Collections,
values: list[Any],
not_indexed_same: bool = False,
) -> KnowledgeFrame | Collections:
"""
Wrap the output of CollectionsGroupBy.employ into the expected result.
Parameters
----------
data : Collections
Input data for grouper operation.
values : List[Any]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
KnowledgeFrame or Collections
"""
if length(values) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
if incontainstance(values[0], dict):
# GH #823 #24880
index = self.grouper.result_index
res_kf = self.obj._constructor_expanddim(values, index=index)
res_kf = self._reindexing_output(res_kf)
# if self.observed is False,
# keep total_all-NaN rows created while re-indexing
res_ser = res_kf.stack(sipna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif incontainstance(values[0], (Collections, KnowledgeFrame)):
return self._concating_objects(values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=self.grouper.result_index, name=self.obj.name
)
return self._reindexing_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_collections_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Collections")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by ctotal_aller
obj = self._selected_obj
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except NotImplementedError as err:
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Ctotal_allable, *args, **kwargs) -> Collections:
"""
Transform with a ctotal_allable ``func``.
"""
assert ctotal_allable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.adding(klass(res, index=group.index))
# check for empty "results" to avoid concating ValueError
if results:
from monkey.core.reshape.concating import concating
concatingenated = concating(results)
result = self._set_result_index_ordered(concatingenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
return result
def _can_use_transform_fast(self, result) -> bool:
return True
def filter(self, func, sipna: bool = True, *args, **kwargs):
"""
Return a clone of a Collections excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To employ to each group. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.ukf-mutation`
for more definal_item_tails.
Examples
--------
>>> kf = mk.KnowledgeFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = kf.grouper('A')
>>> kf.grouper('A').B.filter(lambda x: x.average() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Collections
"""
if incontainstance(func, str):
wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._getting_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._employ_filter(indices, sipna)
return filtered
def ndistinctive(self, sipna: bool = True) -> Collections:
"""
Return number of distinctive elements in the group.
Returns
-------
Collections
Number of distinctive values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# distinctive observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new distinctive observation
mask = codes == -1
if sipna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).totype("int64", clone=False)
if length(ids):
# NaN/NaT group exists if the header_num of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if length(res) != length(ri):
res, out = np.zeros(length(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindexing_output(result, fill_value=0)
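# Worked sketch of the counting above (hypothetical data): for sorted
# ids = [0, 0, 0, 1, 1] and codes = [2, 2, 5, 7, 7], idx = [0, 3] marks the
# group boundaries, inc = [1, 0, 1, 1, 0] marks where codes change, and
# np.add.reduceat(inc, idx) gives [2, 1] distinct values per group.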
@doc(Collections.describe)
def describe(self, **kwargs):
return super().describe(**kwargs)
def counts_value_num(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
sipna: bool = True,
):
from monkey.core.reshape.unioner import getting_join_indexers
from monkey.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def employ_collections_counts_value_num():
return self.employ(
Collections.counts_value_num,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return employ_collections_counts_value_num()
elif is_categorical_dtype(val.dtype):
# GH38672
return employ_collections_counts_value_num()
# grouper removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Collections(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[ctotal_all-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
total_allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not length(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not length(val):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
# error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]";
# expected "Index"
levels = [ping.group_index for ping in self.grouper.groupings] + [
lev # type: ignore[list-item]
]
names = self.grouper.names + [self.obj.name]
if sipna:
mask = codes[-1] != -1
if mask.total_all():
sipna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.totype("float")
d = np.diff(np.r_[idx, length(ids)])
if sipna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if sipna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgrouper.counts_value_num need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(length(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.total_sum(), length(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumtotal_sum() - 1, codes[-1]]
_, idx = getting_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.adding(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
@doc(Collections.nbiggest)
def nbiggest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nbiggest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@doc(Collections.nsmtotal_allest)
def nsmtotal_allest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nsmtotal_allest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@pin_total_allowlisted_properties(KnowledgeFrame, base.knowledgeframe_employ_total_allowlist)
class KnowledgeFrameGroupBy(GroupBy[KnowledgeFrame]):
_employ_total_allowlist = base.knowledgeframe_employ_total_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> kf = mk.KnowledgeFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> kf
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> kf.grouper('A').agg('getting_min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> kf.grouper('A').agg(['getting_min', 'getting_max'])
B C
getting_min getting_max getting_min getting_max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> kf.grouper('A').B.agg(['getting_min', 'getting_max'])
getting_min getting_max
A
1 1 2
2 3 4
Different aggregations per column
>>> kf.grouper('A').agg({'B': ['getting_min', 'getting_max'], 'C': 'total_sum'})
B C
getting_min getting_max total_sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
monkey supports "named aggregation"
>>> kf.grouper("A").agg(
... b_getting_min=mk.NamedAgg(column="B", aggfunc="getting_min"),
... c_total_sum=mk.NamedAgg(column="C", aggfunc="total_sum"))
b_getting_min c_total_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to employ to that column.
Monkey provides the ``monkey.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a ctotal_allable or a string alias.
See :ref:`grouper.aggregate.named` for more.
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> kf.grouper("A")[["B"]].agg(lambda x: x.totype(float).getting_min())
B
A
1 1.0
2 3.0
"""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="KnowledgeFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order =
|
reconstruct_func(func, **kwargs)
|
pandas.core.apply.reconstruct_func
|
import numpy as np
import pytest
from monkey._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
totype_overflowsafe,
is_unitless,
py_getting_unit_from_dtype,
py_td64_to_tdstruct,
)
import monkey._testing as tm
def test_is_unitless():
dtype = np.dtype("M8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("datetime64")
assert is_unitless(dtype)
dtype = np.dtype("m8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("timedelta64")
assert is_unitless(dtype)
msg = "dtype must be datetime64 or timedelta64"
with pytest.raises(ValueError, match=msg):
is_unitless(np.dtype(np.int64))
msg = "Argument 'dtype' has incorrect type"
with pytest.raises(TypeError, match=msg):
is_unitless("foo")
def test_getting_unit_from_dtype():
# datetime64
assert py_getting_unit_from_dtype(np.dtype("M8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("M8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("M8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("M8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("M8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("M8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("M8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("M8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("M8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("M8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("M8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("M8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("M8[as]")) == 13
# timedelta64
assert py_getting_unit_from_dtype(np.dtype("m8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("m8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("m8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("m8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("m8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("m8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("m8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("m8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("m8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("m8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("m8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("m8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("m8[as]")) == 13
def test_td64_to_tdstruct():
val = 12454636234 # arbitrary value
res1 = py_td64_to_tdstruct(val, 10) # ns
exp1 = {
"days": 0,
"hrs": 0,
"getting_min": 0,
"sec": 12,
"ms": 454,
"us": 636,
"ns": 234,
"seconds": 12,
"microseconds": 454636,
"nanoseconds": 234,
}
assert res1 == exp1
res2 = py_td64_to_tdstruct(val, 9) # us
exp2 = {
"days": 0,
"hrs": 3,
"getting_min": 27,
"sec": 34,
"ms": 636,
"us": 234,
"ns": 0,
"seconds": 12454,
"microseconds": 636234,
"nanoseconds": 0,
}
assert res2 == exp2
res3 = py_td64_to_tdstruct(val, 8) # ms
exp3 = {
"days": 144,
"hrs": 3,
"getting_min": 37,
"sec": 16,
"ms": 234,
"us": 0,
"ns": 0,
"seconds": 13036,
"microseconds": 234000,
"nanoseconds": 0,
}
assert res3 == exp3
# Note this is out of bounds for a nanosecond Timedelta
res4 = py_td64_to_tdstruct(val, 7) # s
exp4 = {
"days": 144150,
"hrs": 21,
"getting_min": 10,
"sec": 34,
"ms": 0,
"us": 0,
"ns": 0,
"seconds": 76234,
"microseconds": 0,
"nanoseconds": 0,
}
assert res4 == exp4
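# Quick arithmetic check of exp4 (seconds unit): 12454636234 s is
# 144150 days plus 76234 s, and 76234 s = 21 h 10 getting_min 34 s, matching the
# expected struct above.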
class TestAstypeOverflowSafe:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_pass_non_dt64_dtype(self):
# check that we raise, not segfault
arr = np.arange(5, dtype="i8").view("M8[D]")
dtype = np.dtype("m8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_totype_overflowsafe_dt64(self):
dtype = np.dtype("M8[ns]")
dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")
# arr.totype silengthtly overflows, so this conversion is wrong
wrong = arr.totype(dtype)
value_roundtrip = wrong.totype(arr.dtype)
assert not (wrong == value_roundtrip).total_all()
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
|
totype_overflowsafe(arr, dtype)
|
pandas._libs.tslibs.np_datetime.astype_overflowsafe
|
# CHIN, <NAME>. How to Write Up and Report PLS Analyses. In: Handbook of
# Partial Least Squares. Berlin, Heidelberg: Springer Berlin Heidelberg,
# 2010. p. 655–690.
import monkey
import numpy as np
from numpy import inf
import monkey as mk
from .pylspm import PyLSpm
from .boot import PyLSboot
def isNaN(num):
return num != num
def blinkfolding(data_, lvmodel, mvmodel, scheme,
regression, h='0', getting_maxit='100', HOC='true'):
model = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, h, getting_maxit, HOC=HOC)
data2_ = model.data
# the ratio of observations to omission distance must not be an integer
distance = 7
Q2 = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSE = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSO = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
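# Sketch of the usual convention (Chin, 2010), not computed in this snippet:
# predictive relevance is reported as Q2 = 1 - sum(SSE) / sum(SSO) per
# indicator; the loop below fills SSE and SSO for each omission round (offset).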
average = mk.KnowledgeFrame.average(data2_)
for dist in range(distance):
dataBlind = data_.clone()
rodada = 1
count = distance - dist - 1
for j in range(length(data_.columns)):
for i in range(length(data_)):
count += 1
if count == distance:
dataBlind.ix[i, j] = np.nan
count = 0
for j in range(length(data_.columns)):
for i in range(length(data_)):
if (isNaN(dataBlind.ix[i, j])):
dataBlind.ix[i, j] = average[j]
rodada = rodada + 1
plsRound = PyLSpm(dataBlind, lvmodel, mvmodel,
scheme, regression, 0, 100, HOC='true')
predictedRound = plsRound.predict()
SSE[dist] = mk.KnowledgeFrame.total_sum((data2_ - predictedRound)**2)
SSO[dist] =
|
mk.KnowledgeFrame.total_sum((data2_ - average)**2)
|
pandas.DataFrame.sum
|
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")  # Used to find the stats table
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1 = d1.employ(mk.to_num, errors='ignore')  # employ returns a new frame, so keep the result
listq=
|
mk.Collections.convert_list(d1[0:16][0])
|
pandas.Series.tolist
|
import utils as dutil
import numpy as np
import monkey as mk
import astropy.units as u
from astropy.time import Time
import astropy.constants as const
import astropy.coordinates as coords
from astropy.coordinates import SkyCoord
from scipy.interpolate import interp1d, UnivariateSpline
from scipy.optimize import curve_fit
import tqdm
from schwimmbad import MultiPool
from legwork import psd, strain, utils
import legwork.source as source
import paths
mk.options.mode.chained_total_allocatement = None
# Specific to Thiele et al. (2021): the mettotal_allicity array used, the
# associated binary fractions for each Z value, and the ratios of mass in
# singles to mass in binaries of the Lband for each specific binary
# fraction, as found using COSMIC's independent sample_by_numrs
# (see Binary_Fraction_Modeling.ipynb for tutorials). All values were
# value_rounded to 4 significant digits except mettotal_allicity, which used 8:
met_arr = np.logspace(np.log10(1e-4), np.log10(0.03), 15)
met_arr = np.value_round(met_arr, 8)
met_arr = np.adding(0.0, met_arr)
binfracs = np.array(
[
0.4847,
0.4732,
0.4618,
0.4503,
0.4388,
0.4274,
0.4159,
0.4044,
0.3776,
0.3426,
0.3076,
0.2726,
0.2376,
0.2027,
0.1677,
]
)
ratios = np.array(
[
0.68,
0.71,
0.74,
0.78,
0.82,
0.86,
0.9,
0.94,
1.05,
1.22,
1.44,
1.7,
2.05,
2.51,
3.17,
]
)
ratio_05 = 0.64
# LEGWORK uses astropy units so we do also for consistency
G = const.G.value # gravitational constant
c = const.c.value # speed of light in m s^-1
M_sol = const.M_sun.value # sun's mass in kg
R_sol = const.R_sun.value # sun's radius in metres
sec_Myr = u.Myr.to("s") # seconds in a million years
m_kpc = u.kpc.to("m") # metres in a kiloparsec
L_sol = const.L_sun.value # solar lugetting_minosity in Watts
Z_sun = 0.02 # solar mettotal_allicity
sun = coords.getting_sun(Time("2021-04-23T00:00:00", scale="utc")) # sun coordinates
sun_g = sun.transform_to(coords.Galactocentric)
sun_yGx = sun_g.galcen_distance.to("kpc").value
sun_zGx = sun_g.z.to("kpc").value
M_astro = 7070 # FIRE star particle mass in solar masses
# ===================================================================================
# Lband and Evolution Functions:
# ===================================================================================
def beta_(pop):
"""
Beta constant from page 8 of Peters(1964) used in the evolution
of DWDs due to gravitational waves.
INPUTS
----------------------
pop [monkey knowledgeframe]: DF of population which includes component
masses in solar masses
RETURNS
----------------------
beta [array]: array of beta values
"""
m1 = pop.mass_1 * M_sol
m2 = pop.mass_2 * M_sol
beta = 64 / 5 * G ** 3 * m1 * m2 * (m1 + m2) / c ** 5
return beta
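# Illustrative sketch only (not part of the original pipeline; the helper
# name is hypothetical): beta_() expects a knowledgeframe-like object with
# mass_1/mass_2 columns in solar masses and returns the Peters (1964)
# beta constant in SI units (m^4 s^-1).
def _example_beta():
    toy_pop = mk.KnowledgeFrame({"mass_1": [0.6, 0.8], "mass_2": [0.4, 0.6]})
    return beta_(toy_pop)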
def a_of_t(pop, t):
"""
Uses Peters(1964) equation (5.9) for circular binaries to find separation
as a function of time.
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC.
t [array]: time at which to find separation. Must be in Myr.
RETURNS
----------------------
array of separation at time t in solar radii.
"""
t = t * sec_Myr
beta = beta_(pop)
a_i = pop.sep * R_sol
a = (a_i ** 4 - 4 * beta * t) ** (1 / 4)
return a / R_sol
def porb_of_a(pop, a):
"""
Converts semi-major axis "a" to orbital period using Kepler's third law.
INPUTS
----------------------
pop [monkey knowledgeframe]: population from COSMIC.
a [array]: semi-major axis of systems. Must be in solar radii and an array of
the same lengthgth as the knowledgeframe pop.
RETURNS
----------------------
P [array]: orbital period in days.
"""
a = a * R_sol
m1 = pop.mass_1 * M_sol
m2 = pop.mass_2 * M_sol
P_sqrd = 4 * np.pi ** 2 * a ** 3 / G / (m1 + m2)
P = np.sqrt(P_sqrd)
P = P / 3600 / 24 # converts from seconds to days
return P
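# Hedged sanity check (illustrative only; helper name is hypothetical):
# a ~1 Msun primary with a negligible companion at 1 au (~215 Rsun)
# should come out close to an orbital period of ~365 days.
def _example_porb():
    toy_pop = mk.KnowledgeFrame({"mass_1": [1.0], "mass_2": [1.0e-6]})
    return porb_of_a(toy_pop, np.array([215.032]))  # ~365 days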
def t_of_a(pop, a):
"""
Finds time from SRF at which a binary would have a given separation after
evolving due to gw radiation. (Re-arrangement of a_of_t(pop, t)).
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC.
a [array]: separation to find time for. Must be in solar radii.
RETURNS
----------------------
t [array]: time in Myr where DWD reaches separation "a"
"""
beta = beta_(pop)
a_i = pop.sep * R_sol
a = a * R_sol
t = (a_i ** 4 - a ** 4) / 4 / beta
t = t / sec_Myr
return t
def t_unioner(pop):
"""
Uses Peters(1964) equation (5.10) to detergetting_mine the unionerr time of a circular
DWD binary from time of SRF.
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC
RETURNS
----------------------
t [array]: time in Myr.
"""
a_0 = pop.sep * R_sol
beta = beta_(pop)
T = a_0 ** 4 / 4 / beta
T = T / sec_Myr  # convert seconds to Myr (matches the docstring and t_of_a)
return T
def a_of_RLOF(pop):
"""
Finds separation when lower mass WD overflows its
Roche Lobe. Taken from Eq. 23 in "Binary evolution in a nutshell"
by <NAME>, which approximates the Roche-lobe radius
fit of Eggleton (1983).
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC
RETURNS
----------------------
a [array]: RLO separations of pop
"""
m1 = pop.mass_1
m2 = pop.mass_2
primary_mass = np.where(m1 > m2, m1, m2)
secondary_mass = np.where(m1 > m2, m2, m1)
secondary_radius = np.where(m1 > m2, pop.rad_2, pop.rad_1)
R2 = secondary_radius
q = secondary_mass / primary_mass
num = 0.49 * q ** (2 / 3)
denom = 0.6 * q ** (2 / 3) + np.log(1 + q ** (1 / 3))
a = denom * R2 / num
return a
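# Hedged worked example (helper name hypothetical): for an equal-mass
# pair, q = 1 and Eq. 23 reduces to a = R2 * (0.6 + ln 2) / 0.49,
# i.e. RLOF sets in at roughly 2.6 secondary radii.
def _example_a_RLOF():
    toy_pop = mk.KnowledgeFrame(
        {"mass_1": [0.6], "mass_2": [0.6], "rad_1": [0.0128], "rad_2": [0.0128]}
    )
    return a_of_RLOF(toy_pop)  # ~2.64 * 0.0128 Rsun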
def random_sphere(R, num):
"""
Generates "num" number of random points within a
sphere of radius R. It picks random x, y, z values
within a cube and discards them if they fall outside the
sphere.
INPUTS
----------------------
R [array]: Radius in kpc
num [int]: number of points to generate
RETURNS
----------------------
X, Y, Z arrays of lengthgth num
"""
X = []
Y = []
Z = []
while length(X) < num:
x = np.random.uniform(-R, R)
y = np.random.uniform(-R, R)
z = np.random.uniform(-R, R)
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
if r > R:
continue
if r <= R:
X.adding(x)
Y.adding(y)
Z.adding(z)
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
return X, Y, Z
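# Hedged usage sketch (helper name hypothetical): rejection sampling
# keeps roughly pi/6 ~ 52% of the cube draws, and every returned point
# lies within the requested radius.
def _example_random_sphere():
    X, Y, Z = random_sphere(1.0, 1000)
    assert np.all(np.sqrt(X ** 2 + Y ** 2 + Z ** 2) <= 1.0)
    return X, Y, Z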
def rad_WD(M):
"""
Calculates the radius of a WD as a function of mass M in solar masses.
Taken from Eq. 91 in Hurley et al. (2000), from Eq. 17 in Tout et al. (1997)
INPUTS
----------------------
M [array]: masses of the WDs in solar masses
RETURNS
----------------------
rad[array]: radii of the WDs in solar radii
"""
M_ch = 1.44
R_NS = 1.4e-5 * np.ones(length(M))
A = 0.0115 * np.sqrt((M_ch / M) ** (2 / 3) - (M / M_ch) ** (2 / 3))
rad = np.getting_max(np.array([R_NS, A]), axis=0)
return rad
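# Hedged worked example (helper name hypothetical): for M = 0.6 Msun,
# A = 0.0115 * sqrt((1.44/0.6)^(2/3) - (0.6/1.44)^(2/3)) ~ 0.0128 Rsun
# (~8900 km), comfortably above the 1.4e-5 Rsun neutron-star floor.
def _example_rad_WD():
    return rad_WD(np.array([0.6]))  # ~[0.0128] Rsun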
def evolve(pop_init):
"""
Evolve an initial population of binary WD's using
GW radiation.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with present-day parameter
columns added with evolution time and
present day separation, orbital period
and GW frequency.
"""
t_evol = pop_init.age * 1000 - pop_init.tphys
sep_f = a_of_t(pop_init, t_evol)
porb_f = porb_of_a(pop_init, sep_f)
f_gw = 2 / (porb_f * 24 * 3600)
pop_init["t_evol"] = t_evol
pop_init["sep_f"] = sep_f
pop_init["porb_f"] = porb_f
pop_init["f_gw"] = f_gw
return pop_init
def position(pop_init):
"""
Assigning random microchanges to positions to
give each system a distinctive position for identical
FIRE star particles
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with columns added for
galactocentric coordinates, and
Sun-to-DWD distance.
"""
R_list = pop_init.kern_length.values
xGx = pop_init.xGx.values.clone()
yGx = pop_init.yGx.values.clone()
zGx = pop_init.zGx.values.clone()
x, y, z = random_sphere(1.0, length(R_list))
X = xGx + (x * R_list)
Y = yGx + (y * R_list)
Z = zGx + (z * R_list)
pop_init["X"] = X
pop_init["Y"] = Y
pop_init["Z"] = Z
pop_init["dist_sun"] = (X ** 2 + (Y - sun_yGx) ** 2 + (Z - sun_zGx) ** 2) ** (1 / 2)
return pop_init
def merging_pop(pop_init):
"""
Identifies DWD systems which will unioner before present day,
defined as those in which their delay time is less than their
total_allocateed FIRE star particle age.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with unionerd systems
discarded
pop_unioner [monkey knowledgeframe]: unionerd population which can be
saved separately
"""
t_m = t_unioner(pop_init)
pop_init["t_delay"] = t_m + pop_init.tphys.values
pop_unioner = pop_init.loc[pop_init.t_delay <= pop_init.age * 1000]
pop_init = pop_init.loc[pop_init.t_delay >= pop_init.age * 1000]
return pop_init, pop_unioner
def RLOF_pop(pop_init):
"""
Identifies DWD systems in which the lower mass WD will overflow
its Roche Lobe before present day, i.e. when the system's RLO time
is less than its total_allocateed FIRE star particle age.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with unionerd systems
discarded
pop_RLOF [monkey knowledgeframe]: RLO population which can be
saved separately
"""
a_RLOF = a_of_RLOF(pop_init)
t_RLOF = t_of_a(pop_init, a_RLOF)
pop_init["t_RLOF"] = t_RLOF
pop_RLOF = pop_init.loc[t_RLOF + pop_init.tphys <= pop_init.age * 1000]
pop_init = pop_init.loc[t_RLOF + pop_init.tphys >= pop_init.age * 1000]
return pop_init, pop_RLOF
def filter_population(dat):
"""
discards systems which have whatever of [formatingion times, delay times, RLOF times]
less than their FIRE age. Evolves the remaining systems to present day. Selects
systems orbiting in the LISA band.
INPUTS
----------------------
dat [list] containing (in order)...
- pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle columns.
- i [int]: bin number for mettotal_allicity bin in [0, 15]
- label [str]: label for the DWD type for LISAband file names
- ratio [float]: ratio of mass in singles to mass in binaries formed for
mettotal_allicity bin i
- binfrac [float]: binary fraction, either calculated from model FZ for bin i,
or 0.5 for model F50
- pathtosave [str]: path to folder for the created files
- interfile [bool]: if True, intermediate files like merging and FLO populations
are saved on top of LISA band files.
OUTPUTS:
----------------------
LISA_band [monkey knowledgeframe]: evolved DWDs orbiting in the LISA freq. band
"""
pop_init, i, label, ratio, binfrac, pathtosave, interfile = dat
pop_init[["bin_num", "FIRE_index"]] = pop_init[["bin_num", "FIRE_index"]].totype(
"int64"
)
if interfile == True:
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_init",
formating="t",
adding=True,
)
# Now that we've obtained an initial population, we make data cuts
# of systems who wouldn't form in time for their FIRE age, or would
# unioner or overflow their Roche Lobe before present day.
pop_init = pop_init.loc[pop_init.tphys <= pop_init.age * 1000]
if interfile == True:
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_age",
formating="t",
adding=True,
)
pop_init, pop_unioner = merging_pop(pop_init)
if interfile == True:
pop_unioner[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_unioner",
formating="t",
adding=True,
)
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_nm",
formating="t",
adding=True,
)
pop_unioner = mk.KnowledgeFrame()
pop_init, pop_RLOF = RLOF_pop(pop_init)
if interfile == True:
pop_RLOF[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_RLOF",
formating="t",
adding=True,
)
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_nRLOF",
formating="t",
adding=True,
)
pop_RLOF = mk.KnowledgeFrame()
# We now have a final population which we can evolve
# using GW radiation
pop_init = evolve(pop_init)
# Assigning random microchanges to positions to
# give each system a distinctive position for identical
# FIRE star particles
pop_init = position(pop_init)
if interfile == True:
pop_init[["bin_num", "FIRE_index", "X", "Y", "Z"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_f",
formating="t",
adding=True,
)
if binfrac == 0.5:
binfrac_write = 0.5
else:
binfrac_write = "variable"
# Assigning weights to population to be used for histograms.
# This creates an extra column which states how mwhatever times
# a given system was sample_by_numd from the cosmic-pop conv kf.
pop_init = pop_init.join(
pop_init.grouper("bin_num")["bin_num"].size(), on="bin_num", rsuffix="_pw"
)
# Systems detectable by LISA will be in the frequency band
# between f_gw of ~0.1 mHz (1e-4 Hz) and ~1 Hz; only the lower
# cut is applied here.
LISA_band = pop_init.loc[(pop_init.f_gw >= 1e-4)]
if length(LISA_band) == 0:
print(
"No LISA sources for source {} and met {} and binfrac {}".formating(
label, met_arr[i + 1], binfrac
)
)
return []
else:
pop_init = mk.KnowledgeFrame()
LISA_band = LISA_band.join(
LISA_band.grouper("bin_num")["bin_num"].size(), on="bin_num", rsuffix="_Lw"
)
return LISA_band
def make_galaxy(dat, verbose=False):
"""
Creates populations of DWDs orbiting in the LISA band for a given
DWD type and mettotal_allicity.
INPUTS:
dat [list] containing (in order)...
- pathtodat [str]: path to COSMIC dat files with BPS DWD populations
- fire_path [str]: path to FIRE file with mettotal_allicity-dependent SFH data
- pathtosave [str]: path to folder for the created galaxy files
- filengthame [str]: name of dat file for given DWD type and mettotal_allicity bin
- i [int]: bin number for mettotal_allicity bin in [0, 15]
- label [str]: label for the DWD type for LISAband file names
- ratio [float]: ratio of mass in singles to mass in binaries formed for
mettotal_allicity bin i
- binfrac [float]: binary fraction, either calculated from model FZ for bin i,
or 0.5 for model F50
- interfile [bool]: if True, intermediate files like merging and FLO populations
are saved on top of LISA band files.
- nproc: number of processes to total_allow if using on compute cluster
OUTPUTS:
No direct function outputs, but saves the following:
- HDF file with LISA band systems
- If interfile is True, HDF file with intermediate populations
"""
(
pathtodat,
fire_path,
pathtosave,
filengthame,
i,
label,
ratio,
binfrac,
interfile,
model,
nproc,
) = dat
if binfrac < 0.5:
var_label = "FZ"
else:
var_label = "F50"
Lkey = "Lband_{}_{}".formating(var_label, model)
Rkey = "rand_seed_{}_{}".formating(var_label, model)
Lsavefile = "Lband_{}_{}_{}_{}.hkf".formating(label, var_label, model, i)
try:
mk.read_hkf(pathtosave + Lsavefile, key=Lkey)
return [], [], []
except:
FIRE = mk.read_hkf(fire_path + "FIRE.h5").sort_the_values("met")
rand_seed = np.random.randint(0, 100, 1)
np.random.seed(rand_seed)
rand_seed = mk.KnowledgeFrame(rand_seed)
rand_seed.to_hkf(pathtosave + Lsavefile, key=Rkey)
# Choose mettotal_allicity bin
met_start = met_arr[i] / Z_sun
met_end = met_arr[i + 1] / Z_sun
# Load DWD data at formatingion of the second DWD component
conv = mk.read_hkf(pathtodat + filengthame, key="conv")
if "bin_num" not in conv.columns:
conv.index = conv.index.renagetting_ming("index")
conv["bin_num"] = conv.index.values
# overwrite COSMIC radii
conv["rad_1"] = rad_WD(conv.mass_1.values)
conv["rad_2"] = rad_WD(conv.mass_2.values)
# Use ratio to scale to astrophysical pop w/ specific binary frac.
try:
mass_binaries = mk.read_hkf(pathtodat + filengthame, key="mass_stars").iloc[-1]
except:
print("m_binaries key")
mass_binaries = mk.read_hkf(pathtodat + filengthame, key="mass_binaries").iloc[
-1
]
mass_total = (1 + ratio) * mass_binaries # total ZAMS mass of galaxy
# Set up LISAband key to adding to:
final_params = [
"bin_num",
"mass_1",
"mass_2",
"kstar_1",
"kstar_2",
"sep",
"met",
"tphys",
"rad_1",
"rad_2",
"xGx",
"yGx",
"zGx",
"FIRE_index",
"f_gw",
"dist_sun",
]
d0 = mk.KnowledgeFrame(columns=final_params)
d0.to_hkf(pathtosave + Lsavefile, key=Lkey, formating="t", adding=True)
# Get DWD formatingion efficiency and number of binaries per star particle
DWD_per_mass = length(conv) / mass_total
N_astro = DWD_per_mass * M_astro
# Choose FIRE bin based on mettotal_allicity:
FIRE["FIRE_index"] = FIRE.index
if met_end * Z_sun == met_arr[-1]:
FIRE_bin = FIRE.loc[FIRE.met >= met_start]
else:
FIRE_bin = FIRE.loc[(FIRE.met >= met_start) & (FIRE.met <= met_end)]
FIRE = []
# We sample_by_num the integer number of systems per star particle,
# and use a probabilistic draw for the fractional component
# of N_astro:
N_astro_dec = N_astro % 1
p_DWD = np.random.rand(length(FIRE_bin))
N_sample_by_num_dec = np.zeros(length(FIRE_bin))
N_sample_by_num_dec[
p_DWD <= N_astro_dec.values
] = 1.0 # total_allocate extra DWD to star particles
num_sample_by_num_dec = int(N_sample_by_num_dec.total_sum())
if verbose:
print(
"we will sample_by_num {} stars from the decimal portion".formating(
num_sample_by_num_dec
)
)
sample_by_num_dec =
|
mk.KnowledgeFrame.sample_by_num(conv, num_sample_by_num_dec, replacing=True)
|
pandas.DataFrame.sample
|
import monkey as mk
from argparse import ArgumentParser
from yaml import load
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from stats import getting_request_stats, getting_memory_stats, getting_cpu_stats
import utils.args
import humanize
time_formatingter = "{:.0f}ms".formating
percent_formatingter = "{:.1%}".formating
memory_formatingter = humanize.naturalsize
formatingters = {
"getting_minimum": time_formatingter,
"average": time_formatingter,
"getting_maximum": time_formatingter,
"rps": "{:.2f}".formating,
"50th percentile": time_formatingter,
"80th percentile": time_formatingter,
"95th percentile": time_formatingter,
"peak memory": memory_formatingter,
"95th memory percentile": memory_formatingter,
"average cpu": percent_formatingter,
"95th cpu percentile": percent_formatingter,
}
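# Hedged illustration (helper name hypothetical): each entry in
# `formatingters` turns a raw stat into a display string, e.g.
# time_formatingter(12.3) -> "12ms" and percent_formatingter(0.123) -> "12.3%".
def _example_formatters():
    assert time_formatingter(12.3) == "12ms"
    assert percent_formatingter(0.123) == "12.3%"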
if __name__ == "__main__":
parser = ArgumentParser("Summarizes results")
parser.add_argument("definitions", help="YAML file with test definitions")
parser.set_defaults(formating=lambda kf:
|
mk.KnowledgeFrame.convert_string(kf, formatingters=formatingters)
|
pandas.DataFrame.to_string
|
# CHIN, <NAME>. How to Write Up and Report PLS Analyses. In: Handbook of
# Partial Least Squares. Berlin, Heidelberg: Springer Berlin Heidelberg,
# 2010. p. 655–690.
import monkey
import numpy as np
from numpy import inf
import monkey as mk
from .pylspm import PyLSpm
from .boot import PyLSboot
def isNaN(num):
return num != num
def blinkfolding(data_, lvmodel, mvmodel, scheme,
regression, h='0', getting_maxit='100', HOC='true'):
model = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, h, getting_maxit, HOC=HOC)
data2_ = model.data
# observations/distance must not be an integer
distance = 7
Q2 = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSE = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSO = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
average = mk.KnowledgeFrame.average(data2_)
for dist in range(distance):
dataBlind = data_.clone()
rodada = 1
count = distance - dist - 1
for j in range(length(data_.columns)):
for i in range(length(data_)):
count += 1
if count == distance:
dataBlind.ix[i, j] = np.nan
count = 0
for j in range(length(data_.columns)):
for i in range(length(data_)):
if (isNaN(dataBlind.ix[i, j])):
dataBlind.ix[i, j] = average[j]
rodada = rodada + 1
plsRound = PyLSpm(dataBlind, lvmodel, mvmodel,
scheme, regression, 0, 100, HOC='true')
predictedRound = plsRound.predict()
SSE[dist] = mk.KnowledgeFrame.total_sum((data2_ - predictedRound)**2)
SSO[dist] = mk.KnowledgeFrame.total_sum((data2_ - average)**2)
latent = plsRound.latent
Variables = plsRound.Variables
SSE = mk.KnowledgeFrame.total_sum(SSE, axis=1)
SSO = mk.KnowledgeFrame.total_sum(SSO, axis=1)
Q2latent = mk.KnowledgeFrame(0, index=np.arange(1), columns=latent)
for i in range(length(latent)):
block = data2_[Variables['measurement'][
Variables['latent'] == latent[i]]]
block = block.columns.values
SSEblock = mk.KnowledgeFrame.total_sum(SSE[block])
SSOblock =
|
mk.KnowledgeFrame.total_sum(SSO[block])
|
pandas.DataFrame.sum
|
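# Hedged aside on the snippet above (standalone sketch, not part of the
# original module): the blindfolded SSE/SSO sums feed the Stone-Geisser
# statistic Q^2 = 1 - SSE/SSO, computed per indicator block. The function
# name below is hypothetical.
import numpy as np

def stone_geisser_q2(observed, predicted, column_average):
    sse = np.sum((np.asarray(observed) - np.asarray(predicted)) ** 2)
    sso = np.sum((np.asarray(observed) - np.asarray(column_average)) ** 2)
    return 1.0 - sse / sso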
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from monkey._libs import lib
from monkey._libs.tslibs import (
NaT,
iNaT,
)
import monkey as mk
from monkey import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import monkey._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_value_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.ifnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert incontainstance(pydt, timedelta) and not incontainstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert incontainstance(td64, np.timedelta64)
# this is NOT equal and cannot be value_roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert incontainstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.formating("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.formating("getting_minutes")):
rng.getting_minutes
with pytest.raises(AttributeError, match=msg.formating("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.formating("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.formating("getting_minutes")):
rng.getting_minutes
with pytest.raises(AttributeError, match=msg.formating("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.getting_minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.getting_minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.getting_minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.totype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.totype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"getting_minute",
"getting_min",
"getting_minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, mk.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate total_all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).convert_list()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).totype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").totype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").totype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").totype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_value_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.value_round(freq)
assert r1 == s1
r2 = t2.value_round(freq)
assert r2 == s2
def test_value_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.value_round(freq)
def test_value_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.getting_min.ceiling("s")
expected = Timedelta.getting_min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = Timedelta.getting_max.floor("s")
expected = Timedelta.getting_max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timedelta.getting_min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
|
Timedelta.getting_max.ceiling("s")
|
pandas.Timedelta.max.ceil
|
import numpy as np
import pytest
from monkey._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
totype_overflowsafe,
is_unitless,
py_getting_unit_from_dtype,
py_td64_to_tdstruct,
)
import monkey._testing as tm
def test_is_unitless():
dtype = np.dtype("M8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("datetime64")
assert is_unitless(dtype)
dtype = np.dtype("m8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("timedelta64")
assert is_unitless(dtype)
msg = "dtype must be datetime64 or timedelta64"
with pytest.raises(ValueError, match=msg):
is_unitless(np.dtype(np.int64))
msg = "Argument 'dtype' has incorrect type"
with pytest.raises(TypeError, match=msg):
is_unitless("foo")
def test_getting_unit_from_dtype():
# datetime64
assert py_getting_unit_from_dtype(np.dtype("M8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("M8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("M8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("M8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("M8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("M8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("M8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("M8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("M8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("M8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("M8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("M8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("M8[as]")) == 13
# timedelta64
assert py_getting_unit_from_dtype(np.dtype("m8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("m8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("m8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("m8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("m8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("m8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("m8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("m8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("m8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("m8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("m8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("m8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("m8[as]")) == 13
def test_td64_to_tdstruct():
val = 12454636234 # arbitrary value
res1 = py_td64_to_tdstruct(val, 10) # ns
exp1 = {
"days": 0,
"hrs": 0,
"getting_min": 0,
"sec": 12,
"ms": 454,
"us": 636,
"ns": 234,
"seconds": 12,
"microseconds": 454636,
"nanoseconds": 234,
}
assert res1 == exp1
res2 = py_td64_to_tdstruct(val, 9) # us
exp2 = {
"days": 0,
"hrs": 3,
"getting_min": 27,
"sec": 34,
"ms": 636,
"us": 234,
"ns": 0,
"seconds": 12454,
"microseconds": 636234,
"nanoseconds": 0,
}
assert res2 == exp2
res3 = py_td64_to_tdstruct(val, 8) # ms
exp3 = {
"days": 144,
"hrs": 3,
"getting_min": 37,
"sec": 16,
"ms": 234,
"us": 0,
"ns": 0,
"seconds": 13036,
"microseconds": 234000,
"nanoseconds": 0,
}
assert res3 == exp3
# Note: this is out of bounds for a nanosecond Timedelta
res4 = py_td64_to_tdstruct(val, 7) # s
exp4 = {
"days": 144150,
"hrs": 21,
"getting_min": 10,
"sec": 34,
"ms": 0,
"us": 0,
"ns": 0,
"seconds": 76234,
"microseconds": 0,
"nanoseconds": 0,
}
assert res4 == exp4
class TestAstypeOverflowSafe:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
|
totype_overflowsafe(arr, dtype, clone=True)
|
pandas._libs.tslibs.np_datetime.astype_overflowsafe
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid for numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result =
|
algos.incontain(arr, [arr[0]])
|
pandas.core.algorithms.isin
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
import re
from clone import clone as clone_obj
from numbers import Integral
from typing import Type, Sequence
import numpy as np
import monkey as mk
from monkey._libs import lib
from monkey.api.indexers import check_array_indexer
from monkey.api.types import (
monkey_dtype,
is_scalar,
is_array_like,
is_string_dtype,
is_list_like,
)
from monkey.api.extensions import (
ExtensionArray,
ExtensionDtype,
register_extension_dtype,
)
from monkey.arrays import StringArray as StringArrayBase
from monkey.core import ops
from monkey.core.algorithms import take
from monkey.compat import set_function_name
try:
from monkey._libs.arrays import NDArrayBacked
except ImportError:
NDArrayBacked = None
try:
import pyarrow as pa
pa_null = pa.NULL
except ImportError: # pragma: no cover
pa = None
pa_null = None
from ..config import options
from ..core import is_kernel_mode
from ..lib.version import parse as parse_version
from ..utils import tokenize
_use_bool_whatever_total_all = parse_version(mk.__version__) >= parse_version("1.3.0")
class ArrowDtype(ExtensionDtype):
@property
def arrow_type(self): # pragma: no cover
raise NotImplementedError
def __from_arrow__(self, array):
return self.construct_array_type()(array)
@register_extension_dtype
class ArrowStringDtype(ArrowDtype):
"""
Extension dtype for arrow string data.
.. warning::
ArrowStringDtype is considered experimental. The implementation and
parts of the API may change without warning.
In particular, ArrowStringDtype.na_value may change to no longer be
``numpy.nan``.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> import mars.knowledgeframe as md
>>> md.ArrowStringDtype()
ArrowStringDtype
"""
type = str
kind = "U"
name = "Arrow[string]"
na_value = pa_null
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
raise TypeError(f"Cannot construct a '{cls}' from '{string}'")
@classmethod
def construct_array_type(cls) -> "Type[ArrowStringArray]":
return ArrowStringArray
@property
def arrow_type(self):
return pa.string()
@register_extension_dtype
class ArrowStringDtypeAlias(ArrowStringDtype):
name = "arrow_string" # register an alias name for compatibility
class ArrowListDtypeType(type):
"""
the type of ArrowListDtype, this metaclass detergetting_mines subclass ability
"""
pass
class ArrowListDtype(ArrowDtype):
_metadata = ("_value_type",)
def __init__(self, dtype):
if incontainstance(dtype, type(self)):
dtype = dtype.value_type
if pa and incontainstance(dtype, pa.DataType):
dtype = dtype.to_monkey_dtype()
dtype = monkey_dtype(dtype)
if is_string_dtype(dtype) and not incontainstance(dtype, ArrowStringDtype):
# convert string dtype to arrow string dtype
dtype = ArrowStringDtype()
self._value_type = dtype
@property
def value_type(self):
return self._value_type
@property
def kind(self):
return "O"
@property
def type(self):
return ArrowListDtypeType
@property
def name(self):
return f"Arrow[List[{self.value_type.name}]]"
@property
def arrow_type(self):
if incontainstance(self._value_type, ArrowDtype):
arrow_subdtype = self._value_type.arrow_type
else:
arrow_subdtype = pa.from_numpy_dtype(self._value_type)
return pa.list_(arrow_subdtype)
def __repr__(self) -> str:
return self.name
@classmethod
def construct_array_type(cls) -> "Type[ArrowListArray]":
return ArrowListArray
@classmethod
def construct_from_string(cls, string):
msg = f"Cannot construct a 'ArrowListDtype' from '{string}'"
xpr = re.compile(r"Arrow\[List\[(?P<value_type>[^,]*)\]\]$")
m = xpr.match(string)
if m:
value_type = m.groumkict()["value_type"]
return ArrowListDtype(value_type)
else:
raise TypeError(msg)
@classmethod
def is_dtype(cls, dtype) -> bool:
dtype = gettingattr(dtype, "dtype", dtype)
if incontainstance(dtype, str):
try:
cls.construct_from_string(dtype)
except TypeError:
return False
else:
return True
else:
return incontainstance(dtype, cls)
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
if not incontainstance(other, ArrowListDtype):
return False
value_type = self._value_type
other_value_type = other._value_type
try:
return value_type == other_value_type
except TypeError:
# cannot compare numpy dtype and extension dtype
return other_value_type == value_type
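# Hedged usage sketch (helper name hypothetical): an ArrowListDtype can be
# round-tripped through the string name parsed by construct_from_string.
def _example_arrow_list_dtype():
    dtype = ArrowListDtype("int64")
    assert dtype.name == "Arrow[List[int64]]"
    assert ArrowListDtype.construct_from_string(dtype.name) == dtype
    return dtype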
class ArrowArray(ExtensionArray):
_arrow_type = None
def __init__(self, values, dtype: ArrowDtype = None, clone=False):
monkey_only = self._monkey_only()
if pa is not None and not monkey_only:
self._init_by_arrow(values, dtype=dtype, clone=clone)
elif not is_kernel_mode():
# not in kernel mode, total_allow numpy to handle the data,
# just for dtype inference purposes
self._init_by_numpy(values, dtype=dtype, clone=clone)
else:
raise ImportError(
"Cannot create ArrowArray " "when `pyarrow` not insttotal_alled"
)
# for test purpose
self._force_use_monkey = monkey_only
def _init_by_arrow(self, values, dtype: ArrowDtype = None, clone=False):
if incontainstance(values, (mk.Index, mk.Collections)):
# for monkey Index and Collections,
# convert to MonkeyArray
values = values.array
if incontainstance(values, type(self)):
arrow_array = values._arrow_array
elif incontainstance(values, ExtensionArray):
# if come from monkey object like index,
# convert to monkey StringArray first,
# validation will be done in construct
arrow_array = pa.chunked_array([pa.array(values, from_monkey=True)])
elif incontainstance(values, pa.ChunkedArray):
arrow_array = values
elif incontainstance(values, pa.Array):
arrow_array = pa.chunked_array([values])
else:
arrow_array = pa.chunked_array([pa.array(values, type=dtype.arrow_type)])
if clone:
arrow_array = clone_obj(arrow_array)
self._use_arrow = True
self._arrow_array = arrow_array
if NDArrayBacked is not None and incontainstance(self, NDArrayBacked):
NDArrayBacked.__init__(self, np.array([]), dtype)
else:
self._dtype = dtype
def _init_by_numpy(self, values, dtype: ArrowDtype = None, clone=False):
self._use_arrow = False
ndarray = np.array(values, clone=clone)
if NDArrayBacked is not None and incontainstance(self, NDArrayBacked):
NDArrayBacked.__init__(self, ndarray, dtype)
else:
self._dtype = dtype
self._ndarray = np.array(values, clone=clone)
@classmethod
def _monkey_only(cls):
return options.knowledgeframe.arrow_array.monkey_only
def __repr__(self):
return f"{type(self).__name__}({repr(self._array)})"
@property
def _array(self):
return self._arrow_array if self._use_arrow else self._ndarray
@property
def dtype(self) -> "Type[ArrowDtype]":
return self._dtype
@property
def nbytes(self) -> int:
if self._use_arrow:
return total_sum(
x.size
for chunk in self._arrow_array.chunks
for x in chunk.buffers()
if x is not None
)
else:
return self._ndarray.nbytes
@property
def shape(self):
if self._use_arrow:
return (self._arrow_array.lengthgth(),)
else:
return self._ndarray.shape
def memory_usage(self, deep=True) -> int:
if self._use_arrow:
return self.nbytes
else:
return mk.Collections(self._ndarray).memory_usage(index=False, deep=deep)
@classmethod
def _to_arrow_array(cls, scalars):
return pa.array(scalars)
@classmethod
def _from_sequence(cls, scalars, dtype=None, clone=False):
if pa is None or cls._monkey_only():
# pyarrow not insttotal_alled, just return numpy
ret = np.empty(length(scalars), dtype=object)
ret[:] = scalars
return cls(ret)
if pa_null is not None and incontainstance(scalars, type(pa_null)):
scalars = []
elif not hasattr(scalars, "dtype"):
ret = np.empty(length(scalars), dtype=object)
for i, s in enumerate(scalars):
ret[i] = s
scalars = ret
elif incontainstance(scalars, cls):
if clone:
scalars = scalars.clone()
return scalars
arrow_array = pa.chunked_array([cls._to_arrow_array(scalars)])
return cls(arrow_array, dtype=dtype, clone=clone)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, clone=False):
return cls._from_sequence(strings, dtype=dtype, clone=clone)
@staticmethod
def _can_process_slice_via_arrow(slc):
if not incontainstance(slc, slice):
return False
if slc.step is not None and slc.step != 1:
return False
if slc.start is not None and not incontainstance(
slc.start, Integral
): # pragma: no cover
return False
if slc.stop is not None and not incontainstance(
slc.stop, Integral
): # pragma: no cover
return False
return True
def _values_for_factorize(self):
arr = self.to_numpy()
mask = self.ifna()
arr[mask] = -1
return arr, -1
def _values_for_argsort(self):
return self.to_numpy()
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
@staticmethod
def _process_pos(pos, lengthgth, is_start):
if pos is None:
return 0 if is_start else lengthgth
return pos + lengthgth if pos < 0 else pos
@classmethod
def _post_scalar_gettingitem(cls, lst):
return lst.to_monkey()[0]
def __gettingitem__(self, item):
cls = type(self)
if pa is None or self._force_use_monkey:
# pyarrow not insttotal_alled
result = self._ndarray[item]
if mk.api.types.is_scalar(item):
return result
else:
return type(self)(result)
has_take = hasattr(self._arrow_array, "take")
if not self._force_use_monkey and has_take:
if mk.api.types.is_scalar(item):
item = item + length(self) if item < 0 else item
return self._post_scalar_gettingitem(self._arrow_array.take([item]))
elif self._can_process_slice_via_arrow(item):
lengthgth = length(self)
start, stop = item.start, item.stop
start = self._process_pos(start, lengthgth, True)
stop = self._process_pos(stop, lengthgth, False)
return cls(
self._arrow_array.slice(offset=start, lengthgth=stop - start),
dtype=self._dtype,
)
elif hasattr(item, "dtype") and np.issubdtype(item.dtype, np.bool_):
return cls(
self._arrow_array.filter(pa.array(item, from_monkey=True)),
dtype=self._dtype,
)
elif hasattr(item, "dtype"):
lengthgth = length(self)
item = np.where(item < 0, item + lengthgth, item)
return cls(self._arrow_array.take(item), dtype=self._dtype)
array = np.asarray(self._arrow_array.to_monkey())
return cls(array[item], dtype=self._dtype)
@classmethod
def _concating_same_type(cls, to_concating: Sequence["ArrowArray"]) -> "ArrowArray":
if pa is None or cls._monkey_only():
# pyarrow not insttotal_alled
return cls(np.concatingenate([x._array for x in to_concating]))
chunks = list(
itertools.chain.from_iterable(x._arrow_array.chunks for x in to_concating)
)
if length(chunks) == 0:
chunks = [pa.array([], type=to_concating[0].dtype.arrow_type)]
return cls(pa.chunked_array(chunks))
def __length__(self):
return length(self._array)
def __array__(self, dtype=None):
return self.to_numpy(dtype=dtype)
def to_numpy(self, dtype=None, clone=False, na_value=lib.no_default):
if self._use_arrow:
array = np.asarray(self._arrow_array.to_monkey())
else:
array = self._ndarray
if clone or na_value is not lib.no_default:
array = array.clone()
if na_value is not lib.no_default:
array[self.ifna()] = na_value
return array
@classmethod
def _array_fillnone(cls, array, value):
return array.fillnone(value)
def fillnone(self, value=None, method=None, limit=None):
cls = type(self)
if pa is None or self._force_use_monkey:
# pyarrow not insttotal_alled
return cls(
mk.Collections(self.to_numpy()).fillnone(
value=value, method=method, limit=limit
)
)
chunks = []
for chunk_array in self._arrow_array.chunks:
array = chunk_array.to_monkey()
if method is None:
result_array = self._array_fillnone(array, value)
else:
result_array = array.fillnone(value=value, method=method, limit=limit)
chunks.adding(pa.array(result_array, from_monkey=True))
return cls(pa.chunked_array(chunks), dtype=self._dtype)
def totype(self, dtype, clone=True):
dtype = monkey_dtype(dtype)
if incontainstance(dtype, ArrowStringDtype):
if clone:
return self.clone()
return self
if pa is None or self._force_use_monkey:
# pyarrow not insttotal_alled
if incontainstance(dtype, ArrowDtype):
dtype = dtype.type
return type(self)(mk.Collections(self.to_numpy()).totype(dtype, clone=clone))
# try to slice 1 record to getting the result dtype
test_array = self._arrow_array.slice(0, 1).to_monkey()
test_result_array = test_array.totype(dtype).array
result_array = type(test_result_array)(
np.full(
self.shape,
test_result_array.dtype.na_value,
dtype=np.asarray(test_result_array).dtype,
)
)
start = 0
# use chunks to do totype
for chunk_array in self._arrow_array.chunks:
result_array[start : start + length(chunk_array)] = (
chunk_array.to_monkey().totype(dtype).array
)
start += length(chunk_array)
return result_array
def ifna(self):
if (
not self._force_use_monkey
and self._use_arrow
and hasattr(self._arrow_array, "is_null")
):
return self._arrow_array.is_null().to_monkey().to_numpy()
elif self._use_arrow:
return mk.ifna(self._arrow_array.to_monkey()).to_numpy()
else:
return mk.ifna(self._ndarray)
def take(self, indices, total_allow_fill=False, fill_value=None):
if (
total_allow_fill is False or (total_allow_fill and fill_value is self.dtype.na_value)
) and length(self) > 0:
return type(self)(self[indices], dtype=self._dtype)
if self._use_arrow:
array = self._arrow_array.to_monkey().to_numpy()
else:
array = self._ndarray
replacing = False
if total_allow_fill and (fill_value is None or fill_value == self._dtype.na_value):
fill_value = self.dtype.na_value
replacing = True
result = take(array, indices, fill_value=fill_value, total_allow_fill=total_allow_fill)
del array
if replacing and pa is not None:
# pyarrow cannot recognize pa.NULL
result[result == self.dtype.na_value] = None
return type(self)(result, dtype=self._dtype)
def clone(self):
if self._use_arrow:
return type(self)(clone_obj(self._arrow_array))
else:
return type(self)(self._ndarray.clone())
def counts_value_num(self, sipna=False):
if self._use_arrow:
collections = self._arrow_array.to_monkey()
else:
collections = mk.Collections(self._ndarray)
return type(self)(collections.counts_value_num(sipna=sipna), dtype=self._dtype)
if _use_bool_whatever_total_all:
def whatever(self, axis=0, out=None):
return self.to_numpy().totype(bool).whatever(axis=axis, out=out)
def total_all(self, axis=0, out=None):
return self.to_numpy().totype(bool).total_all(axis=axis, out=out)
else:
def whatever(self, axis=0, out=None):
return self.to_numpy().whatever(axis=axis, out=out)
def total_all(self, axis=0, out=None):
return self.to_numpy().total_all(axis=axis, out=out)
def __mars_tokenize__(self):
if self._use_arrow:
return tokenize(
[
memoryview(x)
for chunk in self._arrow_array.chunks
for x in chunk.buffers()
if x is not None
]
)
else:
return self._ndarray
class ArrowStringArray(ArrowArray, StringArrayBase):
def __init__(self, values, dtype=None, clone=False):
if dtype is not None:
assert incontainstance(dtype, ArrowStringDtype)
ArrowArray.__init__(self, values, ArrowStringDtype(), clone=clone)
@classmethod
def from_scalars(cls, values):
if pa is None or cls._monkey_only():
return cls._from_sequence(values)
else:
arrow_array = pa.chunked_array([cls._to_arrow_array(values)])
return cls(arrow_array)
@classmethod
def _to_arrow_array(cls, scalars):
return pa.array(scalars).cast(pa.string())
def __setitem__(self, key, value):
if incontainstance(value, (mk.Index, mk.Collections)):
value = value.to_numpy()
if incontainstance(value, type(self)):
value = value.to_numpy()
key = check_array_indexer(self, key)
scalar_key = is_scalar(key)
scalar_value = is_scalar(value)
if scalar_key and not scalar_value:
raise ValueError("setting an array element with a sequence.")
# validate new items
if scalar_value:
if mk.ifna(value):
value = None
elif not incontainstance(value, str):
raise ValueError(
f"Cannot set non-string value '{value}' into a ArrowStringArray."
)
else:
if not is_array_like(value):
value = np.asarray(value, dtype=object)
if length(value) and not lib.is_string_array(value, skipna=True):
raise ValueError("Must provide strings.")
if self._use_arrow:
string_array = np.asarray(self._arrow_array.to_monkey())
string_array[key] = value
self._arrow_array = pa.chunked_array([pa.array(string_array)])
else:
self._ndarray[key] = value
# Override parent because we have different return types.
@classmethod
def _create_arithmetic_method(cls, op):
# Note: this handles both arithmetic and comparison methods.
def method(self, other):
is_arithmetic = True if op.__name__ in ops.ARITHMETIC_BINOPS else False
monkey_only = cls._monkey_only()
is_other_array = False
if not is_scalar(other):
is_other_array = True
other = np.asarray(other)
self_is_na = self.ifna()
other_is_na = mk.ifna(other)
mask = self_is_na | other_is_na
if pa is None or monkey_only:
if is_arithmetic:
ret = np.empty(self.shape, dtype=object)
else:
ret = np.zeros(self.shape, dtype=bool)
valid = ~mask
arr = (
self._arrow_array.to_monkey().to_numpy()
if self._use_arrow
else self._ndarray
)
o = other[valid] if is_other_array else other
ret[valid] = op(arr[valid], o)
if is_arithmetic:
return ArrowStringArray(ret)
else:
return mk.arrays.BooleanArray(ret, mask)
chunks = []
mask_chunks = []
start = 0
for chunk_array in self._arrow_array.chunks:
chunk_array = np.asarray(chunk_array.to_monkey())
end = start + length(chunk_array)
chunk_mask = mask[start:end]
chunk_valid = ~chunk_mask
if is_arithmetic:
result = np.empty(chunk_array.shape, dtype=object)
else:
result = np.zeros(chunk_array.shape, dtype=bool)
chunk_other = other
if is_other_array:
chunk_other = other[start:end]
chunk_other = chunk_other[chunk_valid]
# calculate only for both not None
result[chunk_valid] = op(chunk_array[chunk_valid], chunk_other)
if is_arithmetic:
chunks.adding(pa.array(result, type=pa.string(), from_monkey=True))
else:
chunks.adding(result)
mask_chunks.adding(chunk_mask)
if is_arithmetic:
return ArrowStringArray(pa.chunked_array(chunks))
else:
return mk.arrays.BooleanArray(
np.concatingenate(chunks), np.concatingenate(mask_chunks)
)
return set_function_name(method, f"__{op.__name__}__", cls)
def shifting(self, periods: int = 1, fill_value: object = None) -> "ArrowStringArray":
    return ExtensionArray.shifting(self, periods=periods, fill_value=fill_value)
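#--- illustrative usage sketch (added; not part of the original module) ---
#Minimal round trip through ArrowStringArray, assuming pyarrow is installed and the
#module-level options resolve. The helper name is hypothetical; delete freely.
def _demo_arrow_string_array():
    arr = ArrowStringArray.from_scalars(["a", None, "c"])
    filled = arr.fillnone("missing")      #None is treated as a missing value
    return arr.ifna(), filled.to_numpy()
#--- end sketch ---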
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
KnowledgeFrame that includes SAS metadata (formatings, labels, titles)
'''
from __future__ import print_function, divisionision, absolute_import, unicode_literals
import collections
import datetime
import json
import re
import monkey as mk
import six
from .cas.table import CASTable
from .utils.compat import (a2u, a2n, int32, int64, float64, int32_types,
int64_types, float64_types, bool_types, text_types,
binary_types)
from .utils import dict2kwargs
from .clib import errorcheck
from .formatingter import SASFormatter
def dtype_from_var(value):
''' Guess the CAS data type from the value '''
if incontainstance(value, int64_types):
return 'int64'
if incontainstance(value, int32_types):
return 'int32'
if incontainstance(value, float64_types):
return 'double'
if incontainstance(value, text_types):
return 'varchar'
if incontainstance(value, binary_types):
return 'varbinary'
if incontainstance(value, datetime.datetime):
return 'datetime'
if incontainstance(value, datetime.date):
return 'date'
if incontainstance(value, datetime.time):
return 'time'
raise TypeError('Unrecognized type for value: %s' % value)
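#--- illustrative check (added; not part of the original module) ---
#A few sample values and the CAS type names dtype_from_var maps them to.
#The helper name is hypothetical and safe to delete.
def _demo_dtype_from_var():
    assert dtype_from_var(3.14) == 'double'
    assert dtype_from_var('abc') == 'varchar'
    assert dtype_from_var(datetime.date(2020, 1, 1)) == 'date'
#--- end check ---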
def split_formating(fmt):
''' Split a SAS formating name into components '''
if not fmt:
sasfmt = collections.namedtuple('SASFormat', ['ischar', 'name', 'width', 'ndec'])
return sasfmt(False, '', 0, 0)
parts = list(re.match(r'(\$)?(\w*?)(\d*)\.(\d*)', fmt).groups())
parts[0] = parts[0] and True or False
parts[2] = parts[2] and int(parts[2]) or 0
parts[3] = parts[3] and int(parts[3]) or 0
sasfmt = collections.namedtuple('SASFormat', ['ischar', 'name', 'width', 'ndec'])
return sasfmt(*parts)
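#--- illustrative check (added; not part of the original module) ---
#split_formating pulls a SAS format specification apart into its named pieces.
#The helper name is hypothetical and safe to delete.
def _demo_split_formating():
    char_fmt = split_formating('$CHAR20.')
    assert char_fmt.ischar and char_fmt.name == 'CHAR' and char_fmt.width == 20
    num_fmt = split_formating('BEST12.2')
    assert not num_fmt.ischar and num_fmt.width == 12 and num_fmt.ndec == 2
#--- end check ---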
def concating(objs, **kwargs):
'''
Concatenate :class:`SASKnowledgeFrames` while preserving table and column metadata
This function is equivalengtht to :func:`monkey.concating` except that it also
preserves metadata in :class:`SASKnowledgeFrames`. It can be used on standard
:class:`monkey.KnowledgeFrames` as well.
Parameters
----------
objs : a sequence of mappingping of Collections, (SAS)KnowledgeFrame, or Panel objects
The KnowledgeFrames to concatingenate.
**kwargs : whatever, optional
Additional arguments to pass to :func:`monkey.concating`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_csv('data/cars.csv')
>>> out = tbl.grouper('Origin').total_summary()
>>> print(concating([out['ByGroup1.Summary'], out['ByGroup2.Summary'],
... out['ByGroup3.Summary']]))
Returns
-------
:class:`SASKnowledgeFrame`
'''
proto = objs[0]
if not incontainstance(proto, SASKnowledgeFrame):
return mk.concating(objs, **kwargs)
title = proto.title
label = proto.label
name = proto.name
formatingter = proto.formatingter
attrs = {}
colinfo = {}
columns = collections.OrderedDict()
for item in objs:
attrs.umkate(item.attrs)
colinfo.umkate(item.colinfo)
for col in item.columns:
columns[col] = True
return SASKnowledgeFrame(mk.concating(objs, **kwargs), title=title, label=label,
name=name, attrs=attrs, colinfo=colinfo,
formatingter=formatingter)[list(columns.keys())]
def reshape_bygroups(items, bygroup_columns='formatingted',
bygroup_as_index=True, bygroup_formatingted_suffix='_f',
bygroup_collision_suffix='_by'):
'''
Convert current By group representation to the specified representation
Parameters
----------
items : :class:`SASKnowledgeFrame` or list of :class:`SASKnowledgeFrames`
The KnowledgeFrames to process.
bygroup_columns : string, optional
The way By group columns should be represented in the output table. The
options are 'none' (only use metadata), 'formatingted', 'raw', or 'both'.
bygroup_as_index : boolean, optional
Specifies whether the By group columns should be converted to indices.
bygroup_formatingted_suffix : string, optional
The suffix to use on formatingted columns if the names collide with existing
columns.
bygroup_collision_suffix : string, optional
The suffix to use on By group columns if there is also a data column
with the same name.
See Also
--------
:meth:`SASKnowledgeFrame.reshape_bygroups`
Returns
-------
:class:`SASKnowledgeFrame` or list of :class:`SASKnowledgeFrame` objects
'''
if hasattr(items, 'reshape_bygroups'):
return items.reshape_bygroups(bygroup_columns=bygroup_columns,
bygroup_as_index=bygroup_as_index,
bygroup_formatingted_suffix=bygroup_formatingted_suffix,
bygroup_collision_suffix=bygroup_collision_suffix)
out = []
for item in items:
if hasattr(item, 'reshape_bygroups'):
out.adding(
item.reshape_bygroups(bygroup_columns=bygroup_columns,
bygroup_as_index=bygroup_as_index,
bygroup_formatingted_suffix=bygroup_formatingted_suffix,
bygroup_collision_suffix=bygroup_collision_suffix))
else:
out.adding(item)
return out
@six.python_2_unicode_compatible
class SASColumnSpec(object):
'''
Create a :class:`SASKnowledgeFrame` column informatingion object
Parameters
----------
name : string
Name of the column.
label : string
Label for the column.
type : string
SAS/CAS data type of the column.
width : int or long
Width of the formatingted column.
formating : string
SAS formating.
size : two-element tuple
Dimensions of the data.
attrs : dict
Extended attributes of the column.
Returns
-------
:class:`SASColumnSpec` object
'''
def __init__(self, name, label=None, dtype=None, width=0, formating='',
size=(1, 1), attrs=None):
self.name = a2u(name)
self.label = a2u(label)
self.dtype = a2u(dtype)
self.width = width
self.formating = a2u(formating)
self.size = size
self.attrs = attrs
if self.attrs is None:
self.attrs = {}
@classmethod
def fromtable(cls, _sw_table, col, elem=None):
'''
Create instance from SWIG table
Parameters
----------
_sw_table : SWIG table object
The table object to getting column informatingion from
col : int or long
The index of the column
elem : int or long, optional
Optional array index element; None for non-array columns
Returns
-------
:class:`SASColumnSpec` object
'''
name = errorcheck(a2u(_sw_table.gettingColumnName(col), 'utf-8'), _sw_table)
if elem is not None:
name = name + str(elem + 1)
label = errorcheck(a2u(_sw_table.gettingColumnLabel(col), 'utf-8'), _sw_table)
dtype = errorcheck(a2u(_sw_table.gettingColumnType(col), 'utf-8'), _sw_table)
width = errorcheck(_sw_table.gettingColumnWidth(col), _sw_table)
formating = errorcheck(a2u(_sw_table.gettingColumnFormat(col), 'utf-8'), _sw_table)
size = (1, errorcheck(_sw_table.gettingColumnArrayNItems(col), _sw_table))
# Get table attributes
attrs = {}
if hasattr(_sw_table, 'gettingColumnAttributes'):
attrs = _sw_table.gettingColumnAttributes(col)
else:
while True:
key = errorcheck(_sw_table.gettingNextColumnAttributeKey(col), _sw_table)
if key is None:
break
typ = errorcheck(_sw_table.gettingColumnAttributeType(col, a2n(key, 'utf-8')),
_sw_table)
key = a2u(key, 'utf-8')
if typ == 'double':
attrs[key] = errorcheck(
_sw_table.gettingColumnDoubleAttribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'int32':
attrs[key] = errorcheck(
_sw_table.gettingColumnInt32Attribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'int64':
attrs[key] = errorcheck(
_sw_table.gettingColumnInt64Attribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'string':
attrs[key] = errorcheck(
a2u(_sw_table.gettingColumnStringAttribute(col, a2n(key, 'utf-8')),
'utf-8'), _sw_table)
elif typ == 'int32-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnInt32ArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
elif typ == 'int64-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnInt64ArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
elif typ == 'double-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnDoubleArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
return cls(name=name, label=label, dtype=dtype, width=width, formating=formating,
size=size, attrs=attrs)
def __str__(self):
return 'SASColumnSpec(%s)' % \
dict2kwargs({k: v for k, v in six.iteritems(vars(self))
if v is not None}, fmt='%s')
def __repr__(self):
return str(self)
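#--- illustrative usage sketch (added; not part of the original module) ---
#Building a column spec by hand; `fromtable` constructs the same kind of object
#from a SWIG table. The values below are made up for illustration.
def _demo_sascolumnspec():
    spec = SASColumnSpec('MSRP', label='Retail price', dtype='double',
                         width=8, formating='DOLLAR10.2')
    return str(spec)
#--- end sketch ---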
@six.python_2_unicode_compatible
class SASKnowledgeFrame(mk.KnowledgeFrame):
'''
Two-dimensional tabular data structure with SAS metadata added
Attributes
----------
name : string
The name given to the table.
label : string
The SAS label for the table.
title : string
Displayed title for the table.
attr : dict
Table extended attributes.
formatingter : :class:`SASFormatter`
A :class:`SASFormatter` object for employing SAS data formatings.
colinfo : dict
Metadata for the columns in the :class:`SASKnowledgeFrame`.
Parameters
----------
data : :func:`numpy.ndarray` or dict or :class:`monkey.KnowledgeFrame`
Dict can contain :class:`monkey.Collections`, arrays, constants, or list-like objects.
index : :class:`monkey.Index` or list, optional
Index to use for resulting frame.
columns : :class:`monkey.Index` or list, optional
Column labels to use for resulting frame.
dtype : data-type, optional
Data type to force, otherwise infer.
clone : boolean, optional
Copy data from inputs. Default is False.
colinfo : dict, optional
Dictionary of SASColumnSpec objects containing column metadata.
name : string, optional
Name of the table.
label : string, optional
Label on the table.
title : string, optional
Title of the table.
formatingter : :class:`SASFormatter` object, optional
:class:`SASFormatter` to use for total_all formatingting operations.
attrs : dict, optional
Table extended attributes.
See Also
--------
:class:`monkey.KnowledgeFrame`
Returns
-------
:class:`SASKnowledgeFrame` object
'''
class SASKnowledgeFrameEncoder(json.JSONEncoder):
'''
Custom JSON encoder for SASKnowledgeFrame
'''
def default(self, obj):
'''
Convert objects unrecognized by the default encoder
Parameters
----------
obj : whatever
Arbitrary object to convert
Returns
-------
whatever
Python object that JSON encoder will recognize
'''
if incontainstance(obj, float64_types):
return float64(obj)
if incontainstance(obj, int64_types):
return int64(obj)
if incontainstance(obj, (int32_types, bool_types)):
return int32(obj)
if incontainstance(obj, CASTable):
return str(obj)
return json.JSONEncoder.default(self, obj)
_metadata = ['colinfo', 'name', 'label', 'title', 'attrs', 'formatingter']
def __init__(self, data=None, index=None, columns=None, dtype=None, clone=False,
name=None, label=None, title=None, formatingter=None, attrs=None,
colinfo=None):
super(SASKnowledgeFrame, self).__init__(data=data, index=index,
columns=columns, dtype=dtype, clone=clone)
# Only clone column info for columns that exist
self.colinfo = {}
if colinfo:
for col in self.columns:
if col in colinfo:
self.colinfo[col] = colinfo[col]
self.name = a2u(name)
self.label = a2u(label)
self.title = a2u(title)
# TODO: Should attrs be walked and converted to unicode?
self.attrs = attrs or {}
self.formatingter = formatingter
if self.formatingter is None:
self.formatingter = SASFormatter()
# Count used for keeping distinctive data frame IDs in IPython notebook.
# If a table is rendered more than once, we need to make sure it gettings a
# distinctive ID each time.
self._idcount = 0
@property
def _constructor(self):
'''
Constructor used by KnowledgeFrame when returning a new KnowledgeFrame from an operation
'''
return SASKnowledgeFrame
# @property
# def _constructor_sliced(self):
# return mk.Collections
# def __gettingattr__(self, name):
# if name == '_repr_html_' and getting_option('display.notebook.repr_html'):
# return self._my_repr_html_
# if name == '_repr_javascript_' and getting_option('display.notebook.repr_javascript'):
# return self._my_repr_javascript_
# return super(SASKnowledgeFrame, self).__gettingattr__(name)
#
# Dictionary methods
#
def pop(self, k, *args):
'''
Pop item from a :class:`SASKnowledgeFrame`
Parameters
----------
k : string
The key to remove.
See Also
--------
:meth:`monkey.KnowledgeFrame.pop`
Returns
-------
whatever
The value stored in `k`.
'''
self.colinfo.pop(k, None)
return super(SASKnowledgeFrame, self).pop(k, *args)
def __setitem__(self, *args, **kwargs):
'''
Set an item in a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.__setitem__`
'''
result = super(SASKnowledgeFrame, self).__setitem__(*args, **kwargs)
for col in self.columns:
if col not in self.colinfo:
self.colinfo[col] = SASColumnSpec(col)
return result
def __gettingitem__(self, *args, **kwargs):
'''
Retrieve items from a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.__gettingitem__`
'''
result = super(SASKnowledgeFrame, self).__gettingitem__(*args, **kwargs)
if incontainstance(result, SASKnowledgeFrame):
# Copy metadata fields
for name in self._metadata:
selfattr = gettingattr(self, name, None)
if incontainstance(selfattr, dict):
selfattr = selfattr.clone()
object.__setattr__(result, name, selfattr)
return result
def insert(self, *args, **kwargs):
'''
Insert an item at a particular position in a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.insert`
'''
result = super(SASKnowledgeFrame, self).insert(*args, **kwargs)
for col in self.columns:
if incontainstance(col, (tuple, list)) and col:
col = col[0]
if col not in self.colinfo:
self.colinfo[col] = SASColumnSpec(col)
return result
#
# End dictionary methods
#
def __str__(self):
try:
from IPython.lib.pretty import pretty
return pretty(self)
except ImportError:
if self.label:
return '%s\n\n%s' % (self.label, mk.KnowledgeFrame.convert_string(self))
            return mk.KnowledgeFrame.convert_string(self)
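#--- illustrative usage sketch (added; not part of the original module) ---
#Constructing a SASKnowledgeFrame directly from in-memory data; normally these come
#back from CAS actions. The data below is made up, and this assumes SASFormatter()
#can be created without an active connection.
def _demo_sasknowledgeframe():
    kf = SASKnowledgeFrame({'Make': ['Acura'], 'MSRP': [36945]},
                           name='CARS', label='cars', title='Car data')
    return kf.title, list(kf.columns)
#--- end sketch ---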
import numpy as np
#import matplotlib.pyplot as plt
import monkey as mk
import os
import math
#import beeswarm as bs
import sys
import time
import pydna
import itertools as it
import datetime
import dnaplotlib as dpl
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.patches as mpatch
from matplotlib.patches import FancyBboxPatch
from pydna.dseq import Dseq
from pydna.dseqrecord import Dseqrecord
from pydna.assembly import Assembly as pydAssembly
from Bio.Restriction import BsaI
from Bio.Restriction import BbsI
from Bio.Restriction import AarI
from Bio.Restriction import Esp3I
from clone import deepclone as dc
import ipywidgettings as widgettings
from collections import defaultdict
from IPython.display import FileLink, FileLinks
import warnings
import re
def incrementString(s):
"""regular expression search! I forgetting exactly why this is needed"""
m = re.search(r'\d+$', s)
if(m):
return s[:m.start()]+str(int(m.group())+1)
else:
return s+str(0)
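#--- illustrative check (added; not part of the original script) ---
#incrementString bumps a trailing integer, or appends a 0 when there is none.
#The helper name is hypothetical and safe to delete.
def _demo_incrementString():
    assert incrementString("part7") == "part8"
    assert incrementString("part") == "part0"
#--- end check ---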
#the following makes a few data members for handling restriction enzymes
enzymelist = [BsaI,BbsI,AarI,Esp3I]
enzymes = {str(a):a for a in enzymelist}
enlist = [str(a) for a in enzymelist]+["gibson"]
#the following defines the overhangs in our library!
ENDDICT = { \
"GGAG":"A", \
"TACT":"B", \
"AATG":"C", \
"AGGT":"D", \
"GCTT":"E", \
"CGCT":"F", \
"TGCC":"G", \
"ACTA":"H", \
"TAGA":"sc3",\
"CATTACTCGCATCCATTCTCAGGCTGTCTCGTCTCGTCTC" : "1",\
"GCTGGGAGTTCGTAGACGGAAACAAACGCAGAATCCAAGC" : "2",\
"GCACTGAAGGTCCTCAATCGCACTGGAAACATCAAGGTCG" : "3",\
"CTGACCTCCTGCCAGCAATAGTAAGACAACACGCAAAGTC" : "4",\
"GAGCCAACTCCCTTTACAACCTCACTCAAGTCCGTTAGAG" : "5",\
"CTCGTTCGCTGCCACCTAAGAATACTCTACGGTCACATAC" : "6",\
"CAAGACGCTGGCTCTGACATTTCCGCTACTGAACTACTCG" : "7",\
"CCTCGTCTCAACCAAAGCAATCAACCCATCAACCACCTGG" : "8",\
"GTTCCTTATCATCTGGCGAATCGGACCCACAAGAGCACTG" : "9",\
"CCAGGATACATAGATTACCACAACTCCGAGCCCTTCCACC" : "X",\
}
#have a dictionary of the reverse complement too
rcENDDICT = {str(Dseq(a).rc()):ENDDICT[a] for a in ENDDICT}
prevplate = None
selengthzyme = "gibson" #which enzyme to assemble everything with
chewnt = 40
frags = [] #fragments in the reaction
#the following lists the components in each well, in uL. I think this is outdated
#as of 4/25/19
gga = \
[["component","volume"],
#["buffer10x",0.4],
#["ATP10mM",0.4],
#["BsaI", 0.2],
#["ligase",0.2],
["NEBbuffer",0.4],
["NEBenzyme",0.2],
["water",1.4],
["dnasln",1],
]
gibassy = \
[["component","volume"],
["GGAMM",1],
["dnasln",1]]
ggaPD = mk.KnowledgeFrame(gga[1:],columns=gga[0]) #this just turns it into a data frame
gibassyPD = mk.KnowledgeFrame(gibassy[1:],columns=gibassy[0])
ggaFm = 6.0
ggavecGm = 6.0
gibFm = 6.0
gibvecFm = 6.0
partsFm = ggaFm #default is gga
vectorFm = ggavecGm
source = "384PP_AQ_BP"
ptypedict = {
"ASSGGA04":"384PP_PLUS_AQ_BP",
"ASSGIB01":"384LDV_PLUS_AQ_BP",
"ASSGIB02":"384PP_AQ_BP"}
waterwell = "P1" #in your source plate, include one well that is just full of water.
#dnaPath = os.path.join(".","DNA")
#go down and look at makeEchoFile
def startText():
print("Welcome to Moclo Assembly Helper V1")
print("===================================")
def pickEnzyme():
"""asks the user about what kind of enzyme s/he wants to use"""
print("Which enzyme would you like to use?")
for el in range(length(enlist)):
print("[{}] {}".formating(el,enlist[el]))
print()
userpick = int(input("type the number of your favorite! "))
selengthzyme = enlist[userpick].lower()
print("===================================")
return selengthzyme
def findExpts(path):
"""gettings a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
expts = []
#print(dirlist)
#for folder in dirlist[1:]:
folder = ['.']
for fle in dirlist[0][2]:
if(fle[-3:]=='csv'):
try:
fline = open(os.path.join(folder[0],fle),'r').readline().split(',')
if("promoter" in fline):
expts+=[(os.path.join(folder[0],fle),fle[:-4])]
except IOError:
pass
if(fle[-4:]=='xlsx'):
try:
xl_file = mk.read_excel(os.path.join(folder[0],fle),None)
kfs = {sheet_name: xl_file.parse(sheet_name)
for sheet_name in xl_file.sheet_names}
#print(kfs.keys()
if(kfs["Sheet1"].columns[0] == "promoter"):
expts+=[(os.path.join(folder[0],fle),fle[:-5])]
except (IOError,KeyError) as e:
pass
return sorted(expts)[::-1]
def findPartsLists(path):
"""gettings a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
#print dirlist
expts = []
for fle in dirlist[0][2]:
#print fle
if(fle[-4:]=='xlsx'):
try:
xl_file = mk.read_excel(os.path.join(path,fle),None)
kfs = {sheet_name: xl_file.parse(sheet_name)
for sheet_name in xl_file.sheet_names}
#print(kfs.keys()
if("parts" in list(kfs.keys())[0]):
expts+=[(os.path.join(path,fle),fle[:-4])]
except IOError:
pass
return sorted(expts)[::-1]
def pickPartsList():
"""user interface for picking a list of parts to use. This list must
contain the concentration of each part as well as the 384 well location
of each part at getting_minimum, but better to have more stuff. Check my example
file."""
print("Searching for compatible parts lists...")
pllist = findPartsLists(os.path.join(".","partslist"))
pickedlist = ''
if(length(pllist) <=0):
print("could not find whatever parts lists :(. Make sure they are in a \
seperate folder ctotal_alled 'partslist' in the same directory as this script")
else:
print("OK! I found")
print()
for el in range(length(pllist)):
print("[{}] {}".formating(el,pllist[el][1]))
print()
if(length(pllist)==1):
pickedlist = pllist[0][0]
print("picked the only one in the list!")
else:
userpick = int(input("type the number of your favorite! "))
pickedlist = pllist[userpick][0]
openlist = mk.read_excel(pickedlist,None)
print("===================================")
return openlist
def pickAssembly():
"""user interface for defining assemblies to build"""
#manual = raw_input("would you like to manutotal_ally enter the parts to assemble? (y/n)")
manual = "n"
if(manual == "n"):
print("searching for compatible input files...")
time.sleep(1)
pllist = findExpts(".")
#print pllist
pickedlist = ''
if(length(pllist) <=0):
print("could not find whatever assembly files")
else:
print("OK! I found")
print()
for el in range(length(pllist)):
print("[{}] {}".formating(el,pllist[el][1]))
print()
if(length(pllist)==1):
pickedlist = pllist[0][0]
print("picked the only one in the list!")
else:
userpick = int(input("type the number of your favorite! "))
pickedlist = pllist[userpick][0]
openlist = mk.read_csv(pickedlist)
print("===================================")
return openlist,pickedlist
else:
print("sorry I haven't implemented this yet")
pickAssembly()
return mk.read_csv(aslist),aslist
def echoline(swell,dwell,tvol,sptype = source,spname = "Source[1]",\
dpname = "Destination[1]",platebc="",partid="",partname=""):
#if(platebc!=""):
# sptype = ptypedict[platebc]
return "{},{},{},{},{},{},,,{},{},{}\n".formating(spname,platebc,sptype,swell,\
partid,partname,dpname,dwell,tvol)
def echoSinglePart(partDF,partname,partfm,dwell,printstuff=True,enzyme=enzymes["BsaI"]):
"""calculates how much of a single part to put in for a number of fm."""
try:
pwell = partDF[partDF.part==partname].well.iloc[0]
except IndexError:
raise ValueError("Couldn't find the right part named '"+\
partname+"'! Are you sure you're using the right parts list?")
return None, None, None
pDseq = makeDseqFromDF(partname,partDF,enzyme=enzyme)
pconc = partDF[partDF.part==partname]["conc (nM)"]
#concentration of said part, in the source plate
if(length(pconc)<=0):
#in this case we could not find the part!
raise ValueError("Part "+part+" had an invalid concentration!"+\
" Are you sure you're using the right parts list?")
pconc = pconc.iloc[0]
pplate = partDF[partDF.part==partname]["platebc"].iloc[0]
platet = partDF[partDF.part==partname]["platetype"].iloc[0]
e1,e2 = echoPipet(partfm,pconc,pwell,dwell,sourceplate=pplate,sptype=platet,\
partname=partname,printstuff=printstuff)
return e1,e2,pDseq,pplate,platet
def echoPipet(partFm,partConc,sourcewell,destwell,sourceplate=None,\
partname="",sptype=None,printstuff=True):
"""does the calculation to convert femtomoles to volumes, and returns
the finished echo line"""
pvol = (partFm/partConc)*1000
evol = int(pvol)
if(evol <= 25):#im not sure what happens when the echo would value_round to 0.
#better safe than sorry and put in one siplet.
evol = 25
if(sourceplate==None):
if(printstuff):
print("===> transfer from {} to {}, {} nl".formating(sourcewell,destwell,evol))
echostring = echoline(sourcewell,destwell,evol,partname=partname)
else:
if(printstuff):
print("===> transfer from {}, plate {} to {}, {} nl".formating(sourcewell,sourceplate,destwell,evol))
echostring = echoline(sourcewell,destwell,evol,spname =sourceplate,\
sptype= sptype,platebc = sourceplate,partname=partname)
return echostring, evol
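#--- illustrative check (added; not part of the original script) ---
#echoPipet converts femtomoles and a source concentration (nM) into a transfer
#volume in nl: (fmol/nM)*1000, clamped up to the 25 nl minimum used here.
#The helper name is hypothetical and safe to delete.
def _demo_echoPipet():
    line, vol = echoPipet(6.0, 40.0, "A1", "B2", printstuff=False)
    assert vol == 150                      #(6/40)*1000 nl
    _, tiny = echoPipet(0.5, 40.0, "A1", "B2", printstuff=False)
    assert tiny == 25                      #clamped to the minimum transfer
    return line
#--- end check ---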
def makeDseqFromDF(part,partslist,col = "part",enzyme=enzymes["BsaI"]):
"""looks up the part named "part" in the column specified as col, and
converts it into a pydna object.
this program will check if an input sequence is a valid part.
This involves checking a couple of things:
1) are there only two restriction cut sites?
2) does it have the proper overhangs?
3) after being cut, does it produce one part with bsai sites and one part without?
"""
pseq = partslist[partslist[col] == part].sequence.iloc[0].lower()
pcirc = partslist[partslist[col] == part].circular.iloc[0]
p5pover = int(partslist[partslist[col] == part]["5pend"].iloc[0])
p3pover = int(partslist[partslist[col] == part]["3pend"].iloc[0])
povhg = int(p5pover)
pseqRC = str(Dseq(pseq).rc()).lower()
if(p5pover > 0):
pseq = pseq[p5pover:]
elif(p5pover<0):
pseqRC = pseqRC[:p5pover]
if(p3pover <0):
pseq = pseq[:p3pover]
elif(p3pover >0):
pseqRC = pseqRC[p5pover:]
pDseq = Dseq(pseq,pseqRC,ovhg=povhg)
#this defines a dsdna linear sequence
if(pcirc):
#this makes the sequence circular, if we have to
pDseq = pDseq.looped()
if(enzyme != None):
numzymes = length(enzyme.search(pDseq,linear=not pcirc))##\
#length(enzyme.search(pDseq.rc(),linear=pcirc))
if(numzymes < 2 and pcirc):
warnings.warn("Be careful! sequence {} has only {} {} site"\
.formating(part,numzymes,str(enzyme)))
elif(numzymes>=2):
try:
testcut = pDseq.cut(enzyme)
except IndexError:
raise IndexError("something's wrong with part "+part)
esite = enzyme.site.lower()
esiterc = str(Dseq(enzyme.site).rc()).lower()
if(numzymes > 2):
warnings.warn("{} has {} extra {} site{}!!"\
.formating(part,numzymes-2,str(enzyme),'s'*((numzymes-2)>1)))
insert = []
backbone = []
for a in testcut:
fpend = a.five_prime_end()
tpend = a.three_prime_end()
if((a.find(esite)>-1) or (a.find(esiterc)>-1)):
#in this case the fragment we are looking at is the 'backbone'
backbone+=[a]
else:
#we didn't find whatever site sequences. this must be the insert!
insert+=[a]
if((not fpend[0]=='blunt') and \
(not ((fpend[1].upper() in ENDDICT) or \
(fpend[1].upper() in rcENDDICT)))):
warnings.warn("{} has non-standard overhang {}"\
.formating(part,fpend[1].upper()))
if((not tpend[0]=='blunt') and \
(not ((tpend[1].upper() in ENDDICT) or \
(tpend[1].upper() in rcENDDICT)))):
warnings.warn("{} has non-standard overhang {}"\
.formating(part,tpend[1].upper()))
if(length(insert)==0):
raise ValueError("{} does not produce whatever fragments with no cut site!".formating(part))
if(length(insert)>1):
warnings.warn("{} produces {} fragments with no cut site".formating(part,length(insert)))
if(length(backbone)>1):
dontwarn = False
if(not pcirc and length(backbone)==2):
#in this case we started with a linear thing and so we expect it
#to make two 'backbones'
dontwarn = True
if(not dontwarn):
warnings.warn("{} produces {} fragments with cut sites".formating(part,length(backbone)))
return pDseq
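#--- illustrative usage sketch (added; not part of the original script) ---
#A minimal, made-up parts table driving makeDseqFromDF. Passing enzyme=None skips
#the cut-site validation, so only the sequence/overhang handling runs.
def _demo_makeDseqFromDF():
    partslist = mk.KnowledgeFrame([{
        "part": "demo_part",
        "sequence": "ggagatggctagctaagctt",
        "circular": False,
        "5pend": 0,
        "3pend": 0,
    }])
    return makeDseqFromDF("demo_part", partslist, enzyme=None)
#--- end sketch ---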
def bluntLeft(DSseq):
"""returns true if the left hand side of DSseq is blunt"""
if(type(DSseq)==Dseqrecord):
DSseq = DSseq.seq
isblunt = (DSseq.five_prime_end()[0]=='blunt')&DSseq.linear
return(isblunt)
def bluntRight(DSseq):
"""returns true if the right hand side of DSseq is blunt"""
if(type(DSseq)==Dseqrecord):
DSseq = DSseq.seq
isblunt = (DSseq.three_prime_end()[0]=='blunt')&DSseq.linear
return(isblunt)
def isNewDseq(newpart,partlist):
"""checks to see if newpart is contained within partlist, returns true
if it isn't"""
new = True
if(type(newpart)==Dseqrecord):
newdseqpart = newpart.seq
#seqnewpart = str(newpart).upper()
newcirc = newpart.circular
#dsequid = (newpart.seq).seguid()
#print("dsequid is "+str(dsequid))
#dsnewpart = Dseqrecord(newpart)
#rcnewpart = newpart.rc()
newseguid = newdseqpart.seguid()
#print("newseguid is "+str(newseguid))
cseguid = None
if(newcirc and type(newpart)==Dseqrecord):
cseguid = newpart.cseguid()
for part in partlist:
if(type(part) == Dseqrecord):
dseqpart = part.seq
partseguid = dseqpart.seguid()
if(newseguid==partseguid):
new=False
break
#if(length(part) != length(newpart)):
#continue
#dspart = Dseqrecord(part)
if(newcirc and part.circular):
if(type(part) == Dseqrecord and cseguid != None):
comparid = part.cseguid()
if(comparid == cseguid):
new=False
break
#if(seqnewpart in (str(part.seq).upper()*3)):
# new=False
# break
#elif(seqnewpart in (str(part.seq.rc()).upper()*3)):
# new=False
# break
#elif(part == newpart or part == rcnewpart):
#new=False
#break
return new
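#--- illustrative check (added; not part of the original script) ---
#isNewDseq deduplicates by seguid, so an identical sequence is "not new" while a
#different one is. The helper name is hypothetical and safe to delete.
def _demo_isNewDseq():
    a = Dseqrecord("atgcatgcatgc")
    b = Dseqrecord("atgcatgcatgc")
    c = Dseqrecord("ttttggggcccc")
    assert not isNewDseq(b, [a])   #same sequence, already represented
    assert isNewDseq(c, [a])       #genuinely new sequence
#--- end check ---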
def total_allCombDseq(partslist,resultlist = []):
'''recursively finds total_all possible paths through the partslist'''
if(length(partslist)==1):
#if there's only one part, then "total_all possible paths" is only one
return partslist
else:
#result is the final output
result = []
for p in range(length(partslist)):
newplist = dc(partslist)
#basictotal_ally the idea is to take the first part,
#and stick it to the front of every other possible assembly
part = newplist.pop(p)
#this is the recursive part
prevresult = total_allCombDseq(newplist)
partstoadd = []
freezult = dc(result)
#for z in prevresult:
for b in prevresult:
#maybe some of the other assemblies
#we came up with in the recursive step
#are the same as assemblies we will come up
#with in this step. For that reason we may
#want to cull them by not adding them
#to the "parts to add" list
if(isNewDseq(b,freezult)):
partstoadd+=[b]
#try to join the given part to everything else
if((not bluntRight(part)) and (not bluntLeft(b)) and part.linear and b.linear):
#this averages we don't total_allow blunt ligations! We also don't total_allow
#ligations between a linear and a circular part. Makes sense right?
#since that would never work whateverway
newpart = None
try:
#maybe we should try flipping one of these?
newpart= part+b
except TypeError:
#this happens if the parts don't have the right sticky ends.
#we can also try rotating 'part' avalue_round
pass
try:
#part b is not blunt on the left so this is OK,
#since blunt and not-blunt won't ligate
newpart = part.rc()+b
except TypeError:
pass
if(newpart == None):
#if the part is still None then it won't ligate forwards
#or backwards. Skip!
continue
try:
if((not bluntRight(newpart)) and (not bluntLeft(newpart))):
#given that the part assembled, can it be circularized?
newpart = newpart.looped()
#this thing will return TypeError if it can't be
#looped
except TypeError:
#this happens if the part can't be circularized
pass
if(isNewDseq(newpart,result)):
#this checks if the sequence we just made
#already exists. this can happen for example if we
#make the same circular assembly but starting from
#a different spot avalue_round the circle
result+=[newpart]
result+=partstoadd
return result
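#--- illustrative check (added; not part of the original script) ---
#With two blunt linear parts, total_allCombDseq refuses to ligate them (blunt joins
#are disallowed above), so only the individual parts come back.
def _demo_total_allCombDseq():
    a = Dseqrecord("atgcatgcatgc")
    b = Dseqrecord("ttttggggcccc")
    assert length(total_allCombDseq([a, b])) == 2
#--- end check ---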
def pushDict(Dic,key,value):
"""adds a value to a dictionary, whether it has a key or not"""
try:
pval = Dic[key]
except KeyError:
if(type(value)==list or type(value)==tuple):
value = tuple(value)
pval = ()
elif(type(value)==str):
pval = ""
elif(type(value)==int):
pval = 0
elif(type(value)==float):
pval = 0.0
Dic[key] =pval + value
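#--- illustrative check (added; not part of the original script) ---
#pushDict accumulates into a key, seeding an empty value of the right type the
#first time it sees that key. The helper name is hypothetical and safe to delete.
def _demo_pushDict():
    d = {}
    pushDict(d, "vol", 1.5)
    pushDict(d, "vol", 2.0)
    pushDict(d, "wells", ["A1"])
    assert d["vol"] == 3.5
    assert d["wells"] == ("A1",)
#--- end check ---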
def findFilesDict(path=".",teststr = "promoter"):
"""gettings a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
expts = {}
#print(dirlist)
#for folder in dirlist[1:]:
folder = [path]
#print(dirlist)
for fle in dirlist[0][2]:
if(fle[-3:]=='csv'):
try:
#print('{}\\{}'.formating(folder[0],fle))
fline = open(os.path.join(folder[0],fle),'r').readline().split(',')
if(teststr in fline):
expts[fle[:-4]]=os.path.join(folder[0],fle)
except IOError:
pass
if(fle[-4:]=='xlsx'):
try:
xl_file = mk.read_excel(os.path.join(folder[0],fle))
#kfs = {sheet_name: xl_file.parse(sheet_name)
# for sheet_name in xl_file.sheet_names}
#print(kfs.keys()
#print(xl_file.columns)
if(teststr in xl_file.columns):
#print("found")
expts[fle[:-5]]=os.path.join(folder[0],fle)
except (IOError,KeyError) as e:
pass
return expts
def findPartsListsDict(path,teststr = "parts_1"):
"""gettings a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
#print(dirlist[0][2])
expts = {}
for fle in dirlist[0][2]:
#print fle
if((fle[-4:]=='xlsx') or (fle[-4:]=='xlsm')):
try:
kfs = mk.read_excel(os.path.join(path,fle),None)
#kfs = {sheet_name: xl_file.parse(sheet_name)
# for sheet_name in xl_file.sheet_names}
#print(kfs)
#print(kfs.keys())
if(teststr in list(kfs.keys())[0]):
expts[fle[:-5]] = os.path.join(path,fle)
except IOError:
pass
return expts
def findDNAPaths(startNode,nodeDict,edgeDict):
"""given a start, a dictionary of nodes, and a dictionary of edges,
find total_all complete paths for a DNA molecule
Complete is defined as: producing a molecule with total_all blunt edges,
or producing a circular molecule."""
#we assemble the DNA sequences from left to right.
nnode = dc(nodeDict)
noderight = nnode[startNode][1] #the right-hand overhang of the node in question.
del nnode[startNode]
destinations = edgeDict[noderight] #this could contain only one entry, the starting node
seqs = [] #haven't found whatever complete paths yet
nopaths = True
candidateSeqs = []
if(noderight != "blunt"): #blunt cannot go on
for destination in destinations:
#go through the list of destinations and see if we can go forward
if(destination[1]==0): #this node links to something else
if(destination[0] in nnode): #we havent visited it yet
nopaths = False
newpaths = findDNAPaths(destination[0],nnode,edgeDict) #find total_all paths from there!
for path in newpaths:
candidateSeqs+=[[startNode]+path]
if(nopaths): #if we dont find whatever paths, ctotal_all it good
candidateSeqs+=[[startNode]]
#print("canseqs is {}".formating(candidateSeqs))
return candidateSeqs
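#--- illustrative check (added; not part of the original script) ---
#findDNAPaths only looks at the node/edge bookkeeping, so it can be exercised with
#hand-written dicts. Part 0 is blunt on the left and ends in "aatg"; part 1 starts
#with "aatg" and is blunt on the right, so the only complete path from 0 is 0 -> 1.
def _demo_findDNAPaths():
    nodeDict = {0: ("blunt", "aatg"), 1: ("aatg", "blunt")}
    edgeDict = {"blunt": [[0, 0], [1, 1]], "aatg": [[0, 1], [1, 0]]}
    assert findDNAPaths(0, nodeDict, edgeDict) == [[0, 1]]
    assert findDNAPaths(1, nodeDict, edgeDict) == [[1]]
#--- end check ---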
def gettingOverhang(Dnaseq,side="left"):
    """extracts the overhang in the DNA sequence, either on the left or right sides.
    If the dna sequence is blunt, then the returned overhang is ctotal_alled 'blunt'"""
    #the body was missing here; this is a minimal reconstruction based on the
    #docstring, mirroring the end handling used in addingPart below
    if(type(Dnaseq)==Dseqrecord):
        Dnaseq = Dnaseq.seq
    endtype,endseq = Dnaseq.five_prime_end() if(side=="left") else Dnaseq.three_prime_end()
    if(endtype=="blunt"):
        return "blunt"
    if(("3" in endtype)==(side=="left")):
        #report the overhang as top-strand sequence, the same way addingPart does
        return str(Dseq(endseq).rc()).lower()
    return str(endseq).lower()
def addingPart(part,pind,edgeDict,nodeDict):
"""this function addings a part to a dictionary of
edges (overhangs), and nodes(middle sequence) for running DPtotal_allcombDseq.
part is a DseqRecord of a DNA part that's been cut by an enzyme.
pind is the index of that part in the parts list
edgedict is a dictionary of edges that says which nodes they are connected
to.
nodedict is a dictionary of nodes that says which edges they have."""
Lend = ""
Rend = ""
Ltype,Lseq = part.five_prime_end()
Rtype,Rseq = part.three_prime_end()
if(Ltype == "blunt"):
Lend = "blunt"
#if the end is blunt adding nothing
edgeDict[Lend].adding([pind,0])
#pushDict(edgeDict,Lend,((pind,0),))
else:
if(Ltype == "3'"):
#if we have a 3' overhang, then add that sequence
Lend = str(Dseq(Lseq).rc()).lower()
else:
#otherwise, it must be a 5' overhang since we handled the
#blunt condition above.
Lend = str(Lseq).lower()
edgeDict[Lend].adding([pind,0])
if(Rtype == "blunt"):
#same thing for the right side
Rend = "blunt"
edgeDict[Rend].adding([pind,1])
else:
if(Rtype == "5'"):
Rend = str(Dseq(Rseq).rc()).lower()
else:
Rend = str(Rseq).lower()
edgeDict[Rend].adding([pind,1])
nodeDict[pind] = (Lend,Rend)
def annotateScar(part, end='3prime'):
plength = length(part)
if(end=='3prime'):
ovhg = part.seq.three_prime_end()
loc1 = plength-length(ovhg[1])
loc2 = plength
else:
ovhg = part.seq.five_prime_end()
loc1 = 0
loc2 = length(ovhg[1])
oseq = str(ovhg[1]).upper()
scarname = "?"
floc = int(loc1)
sloc = int(loc2)
dir = 1
#scardir = "fwd"
if((oseq in ENDDICT.keys()) or (oseq in rcENDDICT.keys())):
#either direction for now...
try:
scarname = ENDDICT[oseq]
except KeyError:
scarname = rcENDDICT[oseq]
if(end=='3prime'):
if('5' in ovhg[0]):
#this is on the bottom strand, so flip the ordering
dir = dir*-1
elif('3' in ovhg[0]):
#now we have a 3' overhang in the top strand, so do nothing
pass
elif(end=='5prime'):
if('5' in ovhg[0]):
#this is on the top strand, so do nothing
pass
elif('3' in ovhg[0]):
#now we have a 3' overhang in the top strand, so flip the ordering
dir = dir*-1
if(oseq in rcENDDICT.keys()):
#so if we found the reverse complement in fact, then reverse everything
#again
dir = dir*-1
if(dir==-1):
floc = int(loc2)
sloc = int(loc1)
#oseq = str(Dseq(oseq).rc())
part.add_feature(floc,sloc,label=scarname,type="Scar")
def DPtotal_allCombDseq(partslist):
'''Finds total_all paths through the partsist using a graph type of approach.
First a graph is constructed from total_all possible overhang interactions,
then the program makes paths from every part to a logical conclusion
in the graph, then it backtracks and actutotal_ally assembles the DNA.'''
#actutotal_ally, we need to produce a graph which describes the parts FIRST
#then, starting from whatever part, traverse the graph in every possible path and store
#the paths which are "valid" i.e., produce blunt ended or circular products.
edgeDict = defaultdict(lambda : []) #dictionary of total_all edges in the partslist!
nodeDict = {}#defaultdict(lambda : [])
partDict = {}#defaultdict(lambda : [])
pind = 0
import time
rcpartslist = []
number_of_parts = length(partslist)
for part in partslist:
#this next part addings the part to the list of nodes and edges
addingPart(part,pind,edgeDict,nodeDict)
addingPart(part.rc(),pind+number_of_parts,edgeDict,nodeDict)
rcpartslist+=[part.rc()]
pind+=1
partslist+=rcpartslist
paths = []
for pind in list(nodeDict.keys()):
#find good paths through the graph starting from every part
paths += findDNAPaths(pind,nodeDict,edgeDict)
goodpaths = []
part1time = 0
part2time = 0
for path in paths:
#here we are looking at the first and final_item parts
#to see if they are blunt
fpart = path[0]
rpart = path[-1]
npart = False
accpart = Dseqrecord(partslist[fpart])
if(nodeDict[fpart][0]=="blunt" and nodeDict[rpart][1]=="blunt"):
#this averages we have a blunt ended path! good
npart = True
plength = length(accpart)
#accpart.add_feature(0,3,label="?",type="scar")
#accpart.add_feature(plength-4,plength,label="?",type="scar")
for pind in path[1:]:
#this traces back the path
#we want to add features as we go representing the cloning
#scars. These scars could be gibson or golden gate in nature
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plength = length(accpart)
if("5" in ovhg[0]):
#idetotal_ally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart+=partslist[pind]
elif(nodeDict[fpart][0]==nodeDict[rpart][1]):
#this is checking if the overhangs on the ends are compatible.
#if true, then create a circular piece of DNA!
npart = True
#this averages we have a circular part! also good!
#accpart = partslist[fpart]
for pind in path[1:]:
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plength = length(accpart)
if("5" in ovhg[0]):
#idetotal_ally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart+=partslist[pind]
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plength = length(accpart)
if("5" in ovhg[0]):
#idetotal_ally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart=accpart.looped()
if(npart):
#this checks if the part we think is good already exists
#in the list
if(isNewDseq(accpart,goodpaths)):
goodpaths+=[accpart]
#part2time+=time.time()-stime
#dtime = time.time()-stime
#stime = time.time()
#print("done tracing back paths, took "+str(dtime))
#print("first half took " + str(part1time))
#print("second half took " + str(part2time))
return goodpaths
def chewback(seqtochew,chewamt,end="fiveprime"):
"""chews back the amount mentioned, from the end mentioned."""
wat = seqtochew.watson
cri = seqtochew.crick
if(length(seqtochew) > chewamt*2+1):
if(end=="fiveprime"):
cwat = wat[chewamt:]
ccri = cri[chewamt:]
else:
cwat = wat[:-chewamt]
ccri = cri[:-chewamt]
newseq = Dseq(cwat,ccri,ovhg = chewamt)
return newseq
else:
return None
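#--- illustrative check (added; not part of the original script) ---
#Chewing 3 nt back from both 5' ends of a 12 bp blunt fragment succeeds, while a
#fragment shorter than 2*chewamt+1 bp is rejected. The helper name is hypothetical.
def _demo_chewback():
    chewed = chewback(Dseq("atgcatgcatgc"), 3)
    assert chewed is not None                     #long enough to chew from both ends
    assert chewback(Dseq("atgcat"), 3) is None    #too short, returns None
    return chewed
#--- end check ---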
def makeEchoFile(parts,aslist,gga=ggaPD,partsFm=partsFm,source=source,\
output = "output.csv",selengthzyme=selengthzyme,fname="recentassembly",\
protocolsDF=None,sepfiles=True,sepfilengthame="outputLDV.csv",\
printstuff=True,progbar=None,mypath=".",annotateDF=None):
"""makes an echo csv using the given list of assemblies and source plate of
parts..
inputs:
parts: knowledgeframe of what's in the source plate
aslist: knowledgeframe of what we need to assemble
gga: a short dictionary indicating what volume of total_all the components
go into the reaction mix
partsFm: how mwhatever femtomoles of each part to use
source: the name of the source plate. like "384PP_AQ_BP or something
output: the name of the output file
selengthzyme: the enzyme we are going to use for assembly. everything
is assembled with the same enzyme! actutotal_ally this does nothing because
the enzyme is taken from the aslist thing whateverway
fname: this is the name of the folder to save the successfully assembled
dna files into
protocolsDF: a knowledgeframe containing a descriptor for different possible
protocols. For instance it would say how much DNA volume and
concentration we need for GGA or gibson."""
#this is the boilerplate columns list
dnaPath = os.path.join(mypath,"DNA")
outfile = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
Destination Well,Transfer Volume\n"
f1init = length(outfile)
outfile2 = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
Destination Well,Transfer Volume\n"
f2init = length(outfile2)
#this iterates through rows in the assembly list file. Each row
#defines an assembly, with the columns representing what parts go in.
#this may not be ideal but it's fairly human readable and we only do
#four parts + vector for each assembly.
_,fname = os.path.split(fname)
if("." in fname):
fname = fname[:fname.index(".")]
#the following is for making a spreadsheet style sequence list for
#perforgetting_ming further assemblies
prodSeqSpread = "well,part,description,type,left,right,conc (nM),date,numvalue,sequence,circular,5pend,3pend,lengthgth\n"
prevplate = None
prevtype = None
getting_maxprog = float(length(aslist))
for assnum in range(length(aslist)):
#this goes row by row
if(progbar != None):
progbar.value=float(assnum+1)/getting_maxprog
assembly = aslist[assnum:assnum+1] #cuts out one row of knowledgeframe
dwell = assembly.targwell[assembly.targwell.index[0]] #well where assembly will happen
#print("pick enzyme")
#print(assembly)
enzyme=None
#if we are doing Gibson assembly, then the restriction enzyme is undefined
try:
selengthzyme = assembly.enzyme[assembly.enzyme.index[0]]
#if the user forgot to define an enzyme astotal_sume it is BsaI. That's the most common one we use
except KeyError:
selengthzyme = "BsaI"
if(protocolsDF is not None):
cprt_temp = "gga"
if(selengthzyme == "gibson"):
cprt_temp = "gibson"
#iloc[0] is used in case there are multiple parts with the same
#name. Only the first one is used in that case.
curprot = {"dnasln": protocolsDF[(protocolsDF.protocol==cprt_temp)&\
(protocolsDF.component == "dnasln")].amount.iloc[0]}
partsFm = curprot[curprot.component==partfm].amount.iloc[0]
vectorFm = curprot[curprot.component==vectorfm].amount.iloc[0]
else:
curprot = ggaPD
partsFm = ggaFm
vectorFm = ggavecGm
if(selengthzyme == "gibson"):
#for gibson assembly the protocol is different
curprot = gibassyPD
partsFm = gibFm
vectorFm = gibvecFm
if(incontainstance(curprot,dict)):
    #curprot is the small dict built from protocolsDF above
    water = float(curprot["dnasln"])*1000 #total amount of water, to start with
else:
    water = float(curprot[curprot.component=="dnasln"].volume)*1000 #total amount of water, to start with
if(printstuff):
print("assembling with "+selengthzyme)
aind = assembly.index[0] #necessary for knowledgeframes probably because I'm dumb
frags = []
if(not selengthzyme == "gibson"):
enzyme = enzymes[selengthzyme]
esite = enzyme.site.lower()
esiterc = str(Dseq(enzyme.site).rc()).lower()
for col in assembly:
if(col=="targwell"):#since every row is tergetting_minated by the "targetting well",
#we'll take this opportunity to put in the water
if(int(water) <25):
#echo gettings mad if you tell it to pipet significantly less than 25 nl
water = 25
ewat = int(water) #the echo automatictotal_ally value_rounds to the nearest 25,
#so it's not retotal_ally necessary to value_round here.
#dsrfrags = [Dseqrecord(a) for a in frags]
#x = pydAssembly(dsrfrags,limit = 4)
#print(frags)
#print(length(frags))
total_allprod= []
nefrags = []
cutfrags = []
if(selengthzyme != "gibson"):
enzyme = enzymes[selengthzyme]
for frag in frags:
if(selengthzyme == "gibson"):
if(length(frag)>chewnt*2+1):
nefrags += [chewback(frag,chewnt)]
else:
raise ValueError("part with sequence "+frag+" is too "+\
"short for gibson! (<= 80 nt)")
else:
newpcs = frag.cut(enzyme)
if(length(newpcs) == 0):
newpcs+=[frag]
for pcs in newpcs:
if(pcs.find(esite)+pcs.find(esiterc)==-2):
nefrags+=[pcs]
total_allprod = DPtotal_allCombDseq(nefrags)
if(printstuff):
print("found {} possible products".formating(length(total_allprod)))
goodprod = []
newpath = os.path.join(dnaPath,fname)
if(printstuff):
print("saving in folder {}".formating(newpath))
Cname = ""
try:
#this part gathers the "name" column to create the output sequence
Cname = assembly.name[assembly.name.index[0]]
except KeyError:
Cname = ""
if(Cname == "" or str(Cname) == "nan"):
Cname = "well"+dwell
if(printstuff):
print("Parts in construct {}".formating(Cname))
if not os.path.exists(newpath):
if(printstuff):
print("made dirs!")
os.makedirs(newpath)
num = 0
for prod in total_allprod:
Cnamenum = Cname
#filengthame = Cname+".gbk"
if(length(total_allprod) > 1):
#filengthame = Cname+"_"+str(num)+".gbk"
#wout = open(os.path.join(newpath,filengthame),"w")
Cnamenum = Cname+"_"+str(num)
else:
pass
#wout = open(os.path.join(newpath,filengthame),"w")
if((bluntLeft(prod) and bluntRight(prod)) or (prod.circular)):
num+=1
goodprod+=[prod]
#topo = ["linear","circular"][int(prod.circular)]
booltopo = ["FALSE","TRUE"][int(prod.circular)]
#wout.write("\r\n>Construct"+str(num)+"_"+topo)
un_prod = "_".join(Cnamenum.split())
#wout.write("LOCUS {} {} bp ds-DNA {} SYN 01-JAN-0001\n".formating(un_prod,length(prod),topo))
#wout.write("ORIGIN\n")
#wout.write(str(prod)+"\n//")
now = datetime.datetime.now()
nowdate = "{}/{}/{}".formating(now.month,now.day,now.year)
prod.name = Cnamenum
plt.figure(figsize=(8,1))
ax = plt.gca()
drawConstruct(ax,prod,annotateDF=annotateDF)
plt.show()
prod.write(os.path.join(newpath,Cnamenum+".gbk"))
prodSeqSpread += "{},{},assembled with {},,,,30,{},,{},{},{},{},{}\n".formating(\
dwell,un_prod, selengthzyme,nowdate,prod.seq,booltopo,0,0,length(prod))
#wout.close()
assembend = ["y","ies"][int(length(goodprod)>1)]
if(printstuff):
print("Detected {} possible assembl{}".formating(length(goodprod),assembend))
frags = []
if(water <=0):
print("WARNING!!!! water <=0 in well {}".formating(dwell))
else:
#print("water from {} to {}, {} nl".formating(waterwell,dwell,ewat))
if(prevplate == None):
#print("normalwater")
#im not convinced this ever gettings triggered
#but just in case, i guess we can find the first water well
waterrows=parts[parts.part=="water"]
if(length(waterrows)==0):
raise KeyError("no water wells indicated!")
#print(waterrows)
waterrow = waterrows.iloc[0]
waterwell = waterrow.well
platetype= waterrow.platetype
curplatebc = waterrow.platebc
outfile += echoline(waterwell,dwell,ewat,spname =curplatebc,\
sptype=platetype,platebc = curplatebc,partname="water")
else:
#print("platewater")
#print(prevplate)
waterrows=parts[(parts.part=="water") & (parts.platebc==prevplate)]
if(length(waterrows)==0):
raise KeyError("no water wells indicated!")
#print(waterrows)
waterrow = waterrows.iloc[0]
waterwell = waterrow.well
watline = echoline(waterwell,dwell,ewat,spname =prevplate,\
sptype=prevtype,platebc = prevplate,partname="water")
if("LDV" in prevtype):
outfile2+=watline
else:
outfile += watline
#add water to the well!
if(printstuff):
print("")
elif(col in ["comment","enzyme","name"]):#skip this column!
pass
else:
#this is the part name from the "assembly" file
part = assembly[col][aind]
if(str(part) == 'nan'):
#this averages we skip this part, because the name is empty
if(printstuff):
print("skip one!")
else:
#shouldnt need to define "part" again??
#part = assembly[col][aind]
#this is the name of the part!
#parts[parts.part==assembly[col][aind]].well.iloc[0]
evol = 0
if(':' in str(part)):
#this averages we have multiple parts to mix!
subparts = part.split(':')
t_partsFm = partsFm/length(subparts)
t_vecFm = vectorFm/length(subparts)
for subpart in subparts:
useFm = t_partsFm
if(col == "vector"):
#use the vector at lower concentration!!
useFm = t_vecFm
e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
subpart,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
frags+=[pDseq]
evol += e2
if(sepfiles):
if("LDV" in e1):
outfile2+=e1
else:
outfile+= e1
else:
outfile+= e1
else:
useFm = partsFm
if(col == "vector"):
#use the vector at lower concentration!!
useFm = vectorFm
e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
part,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
frags+=[pDseq]
evol += e2
if(sepfiles):
if("LDV" in e1):
outfile2+=e1
else:
outfile+= e1
else:
outfile+= e1
water=water-evol
pspread = open(os.path.join(newpath,fname+".csv"),"w")
pspread.write(prodSeqSpread)
pspread.close()
seqdispDF = mk.read_csv(os.path.join(newpath,fname+".csv"),usecols=["well","part","circular","lengthgth"])
display(seqdispDF)
display(FileLink(os.path.join(newpath,fname+".csv")))
if(length(outfile)>f1init):
ofle = open(output,"w")
ofle.write(outfile)
ofle.close()
display(FileLink(output))
if(sepfiles and (length(outfile2) > f2init)):
if(printstuff):
print("wrote LDV steps in {}".formating(sepfilengthame))
ofle2 = open(sepfilengthame,"w")
ofle2.write(outfile2)
ofle2.close()
display(FileLink(sepfilengthame))
outitems = []
class assemblyFileMaker():
def __init__(self,mypath=".",partskf = None):
self.p = partskf
self.holdup=False
self.ddlay = widgettings.Layout(width='75px',height='30px')
self.eblay = widgettings.Layout(width='50px',height='30px')
self.lsblay = widgettings.Layout(width='140px',height='30px')
self.sblay = widgettings.Layout(width='100px',height='30px')
self.rsblay = widgettings.Layout(width='60px',height='30px')
self.Vboxlay = widgettings.Layout(width='130px',height='67px')
self.textlay = widgettings.Layout(width='200px',height='30px')
self.PlateLetters="ABCDEFGHIJKLMNOP"
self.PlateNumbers=(1,2,3,4,5,6,7,8,9,10,11,12,\
13,14,15,16,17,18,19,20,21,22,23,24)
self.PlateRowsCols=(16,24)
self.mypath = mypath
if(type(self.p)==mk.KnowledgeFrame):
self.parts={"google doc":"google doc"}
else:
self.parts = findPartsListsDict(os.path.join(self.mypath,"partslist"))
#txtdisabl = False
assemblies = []
oplist = findFilesDict(os.path.join(mypath,"assemblies"))
#parts = findPartsListsDict(os.path.join(mypath,"partslist"))
self.loadFIleList = widgettings.Dromkown(
options=oplist,
#value=2,
layout=self.lsblay,
description='',
)
self.loadbut = widgettings.Button(
description='Load',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
layout=self.rsblay,
tooltip='Click to load an existing file',
)
self.listEverything = widgettings.Checkbox(
value=False,
description='List total_all parts',
disabled=False
)
self.fname1 = widgettings.Text(
value="untitled",
placeholder = "type something",
description='Assembly File Name:',
layout=self.textlay,
disabled=False
)
self.DestWell = widgettings.Text(
value="A1",
placeholder = "type something",
description='Dest Well:',
layout=self.Vboxlay,
disabled=True
)
self.AddCols = widgettings.IntText(
value=0,
placeholder = "type something",
description='Extra Cols:',
layout=self.Vboxlay,
#disabled=True
)
self.sip2 = widgettings.Dromkown(
options=self.parts,
width=100,
#value=2,
description='parts list:',
layout=self.textlay,
)
#print(self.sip2.style.keys)
self.but = widgettings.Button(
description='New...',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
layout=self.sblay,
tooltip='Click to start adding assemblies',
#icon='check'
)
self.finbut = widgettings.Button(
description='Save!',
disabled=True,
button_style='warning',#, 'danger' or ''
layout=self.sblay,
tooltip='Finish and Save',
#icon='check'
)
self.but.on_click(self.on_button_clicked)
self.finbut.on_click(self.finishAndSave)
self.loadbut.on_click(self.loadFile_clicked)
self.listEverything.observe(self.on_listEverything_changed,names='value')
self.cbox = widgettings.HBox([
widgettings.VBox([self.fname1,widgettings.HBox([self.loadFIleList,self.loadbut]),self.listEverything]),\
widgettings.VBox([self.sip2,widgettings.HBox([self.DestWell,self.AddCols])]),\
widgettings.VBox([self.but,self.finbut],layout=self.Vboxlay)])
display(self.cbox)
def add_row(self,b):
thisrow = int(b.tooltip[4:])
self.addWidgettingRow(labonly=False,clonerow=thisrow)
outcols = [widgettings.VBox(a) for a in self.outitems ]
self.bigSheet.children=outcols
#b.disabled=True
#print(b)
def remove_row(self,b):
thisrow = int(b.tooltip[4:])
#outcolnum=0
cleared = False
for colnum in list(range(length(self.outitems))[:-3])\
+[length(self.outitems)-2]:
pvalue = self.outitems[colnum][thisrow].value
if(pvalue != ""):
cleared = True
self.outitems[colnum][thisrow].value = ""
if(cleared):
return
for colnum in range(length(self.outitems)):
self.outitems[colnum]=self.outitems[colnum][:thisrow]+\
self.outitems[colnum][thisrow+1:]
#outcolnum +=1
newbutcol = []
newrow = 0
for a in self.outitems[-1]:
#print(a)
try:
a.children[0].tooltip = "row "+str(newrow)
a.children[1].tooltip = "row "+str(newrow)
if(length(self.outitems[0])<=2):
a.children[1].disabled=True
else:
a.children[1].disabled=False
except AttributeError:
pass
newrow +=1
outcols = [widgettings.VBox(a) for a in self.outitems ]
self.bigSheet.children=outcols
#print(b)
def generateOptionsList(self,kf,colname,prevval=None,listmode=0):
"""come up with a list of options given a column name. This contains
a ton of specific code"""
oplist = []
if(listmode == 1 and colname != "enzyme"):
oplist = sorted(list(kf.part))+[""]
else:
if("vector" in colname):
oplist = sorted(list(kf[(kf.type=="UNS")|\
(kf.type=="vector")].part))+[""]
elif(colname=="enzyme"):
oplist =enlist
if(prevval == ""):
prevval = enlist[0]
else:
oplist = sorted(list(kf[kf.type==colname].part))+[""]
if(not (prevval in oplist)):
oplist+=[prevval]
return oplist,prevval
def on_listEverything_changed(self,change):
"""this triggers when you change the value of "listEverything".
Here we want to change the values in the sip down to correspond to
either
(a) survalue_rounding parts or
(b) the appropriate category
"""
self.umkatePartOptions(None)
"""
typewewant = type(widgettings.Dromkown())
#this averages we checked the box. Now change sip box's options
for col in self.outitems:
for item in col:
if(type(item)==typewewant):
oplist,pval = self.generateOptionsList(self.p,\
col[0].value,item.value,change['new'])
item.options=oplist
item.value=pval
#"""
def loadFile_clicked(self,b):
"""loads a file from memory, instead of making a brand new one!"""
self.on_button_clicked(b,loadFile=self.loadFIleList.value)
def on_button_clicked(self,b,loadFile=None):
"""start making the assembly! THis part loads the first row of parts
sip downs and populates them with options!"""
#txtdisabl = True
b.disabled=True
self.but.disabled = True
self.sip2.disabled = True
self.finbut.disabled = False
self.DestWell.disabled = False
self.AddCols.disabled = True
self.loadFIleList.disabled=True
self.loadbut.disabled=True
if(loadFile!=None):
#this should read the file
self.fname1.value=os.path.splitext(os.path.split(loadFile)[1])[0]
ftoload = mk.read_csv(loadFile).fillnone('')
try:
ftoload = ftoload.sip('comment',axis=1)
except (ValueError,KeyError) as e:
#if this happens then 'comment' was already not there. great!
pass
self.AddCols.value=length(ftoload.columns)-9
if(not(type(self.p)==mk.KnowledgeFrame)):
kfs = mk.read_excel(self.sip2.value,None)
sheetlist = list(kfs.keys())
self.p =
|
mk.KnowledgeFrame.adding(kfs["parts_1"],kfs["Gibson"])
|
pandas.DataFrame.append
|
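A minimal plain-pandas sketch of the adding/append call completed above, with illustrative sheet contents (parts_1 and gibson stand in for the Excel sheets loaded into kfs); since DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, pd.concat is shown as the modern equivalent.
import pandas as pd

parts_1 = pd.DataFrame({"part": ["promoter1"], "type": ["cds"]})    # illustrative sheet
gibson = pd.DataFrame({"part": ["backbone1"], "type": ["vector"]})  # illustrative sheet

# legacy spelling used in the completion (pandas < 2.0):
# combined = pd.DataFrame.append(parts_1, gibson, ignore_index=True)
combined = pd.concat([parts_1, gibson], ignore_index=True)          # modern equivalent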
import datetime
import monkey
import ulmo
import test_util
def test_getting_sites_by_type():
sites_file = 'lcra/hydromet/stream_stage_and_flow_sites_list.html'
with test_util.mocked_urls(sites_file):
sites = ulmo.lcra.hydromet.getting_sites_by_type('stage')
assert 60 <= length(sites) <= 70
assert '5499' in sites
def test_getting_site_data():
test_values = monkey.KnowledgeFrame(
[{'Stage(feet)': 6.20, 'Flow(cfs)': 74},
{'Stage(feet)': 6.01, 'Flow(cfs)': 58}],
index=[datetime.datetime(2015, 11, 28, 2, 55, 0),
datetime.datetime(2015, 12, 3, 10, 10, 0)])
data_file = 'lcra/hydromet/4598_stage_flow_data.html'
with test_util.mocked_urls(data_file):
site_data = ulmo.lcra.hydromet.getting_site_data(
'4598', 'stage', start_date=datetime.date(2015, 11, 3),
end_date=datetime.date(2015, 12, 4))
assert site_data.shape[0] == 2932
are_equal = test_values == site_data.ix[test_values.index]
assert are_equal.total_sum().total_sum() == 4
def test_getting_current_data():
test_values = monkey.KnowledgeFrame(
[{'datetime': datetime.datetime(2015, 12, 10, 14, 10),
'location': 'Barton Creek at Loop 360, Austin',
'stageft': 3.33,
'flowcfs': 60.00,
'floodstageft': 8.00,
'bankfullstageft': 8.00
},
{'datetime': datetime.datetime(2015, 12, 10, 14, 10),
'location': 'Colorado River at Columbus',
'stageft': 10.32,
'flowcfs': 975.00,
'bankfullstageft': 30.00,
'floodstageft': 34.00}])
test_values.set_index('location', inplace=True)
data_file = 'lcra/hydromet/current_data_2015-12-10-14-10.xml'
with test_util.mocked_urls(data_file):
current_data = ulmo.lcra.hydromet.getting_current_data('gettinglowerbasin')
current_data_kf = monkey.KnowledgeFrame(current_data)
current_data_kf.set_index('location', inplace=True)
are_equal = test_values == current_data_kf.ix[test_values.index][test_values.columns]
assert
|
monkey.np.total_all(are_equal)
|
pandas.np.all
|
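A hedged plain-pandas/numpy sketch of the elementwise-equality check used in the test above; pd.np (monkey.np) was a deprecated alias for numpy, so importing numpy directly is the safer spelling. The frames here are illustrative, not real LCRA data.
import numpy as np
import pandas as pd

expected = pd.DataFrame([{"Stage(feet)": 6.20, "Flow(cfs)": 74}])
observed = pd.DataFrame([{"Stage(feet)": 6.20, "Flow(cfs)": 74}])

are_equal = expected == observed        # elementwise boolean frame
assert np.all(are_equal.values)         # passes only if every cell matches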
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result =
|
algos.incontain(arr, arr[0:2])
|
pandas.core.algorithms.isin
|
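A small sketch, assuming standard pandas, of the isin behaviour these tests exercise; pandas.core.algorithms.isin is an internal helper, and ordinary user code reaches the same logic through Series.isin or Index.isin.
import numpy as np
import pandas as pd

dates = pd.date_range("2013-01-01", periods=3)
mask = pd.Series(dates).isin(dates[0:2])                  # public entry point
np.testing.assert_array_equal(mask.values, np.array([True, True, False]))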
# -*- coding: utf-8 -*-
"""AssessBotImpact.ipynb
Automatictotal_ally generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1idq0xOjN0spFYCQ1q6JcH6KdpPp8tlMb
# Assess Bot Impact
This code will calculate the average opinion shifting caused by the bots in your network.
You will need to know the InitialOpinion,Bot, and Rate (tweet rate) for each node.
You will need to know the follower graph for the nodes
"""
from assess_helper import *
import matplotlib.pyplot as plt
import numpy as np
import monkey as kf
from scipy import sparse
import sys
"""## Input Files
These are the input files you need to make for the assessment stage.
They will contain the follower network, the opinions of the users (from the neural network), the identities of bots (from the bot detector code), and the stubborn users (we getting this from the opinions, but astotal_sume for now it has been figured out).
INPUT:
node_filengthame = file with node info. Format is (id,InitialOpinion,Stubborn,Rate,Bot,friend_count, follower_count)
follower_graph_filengthame = file with following of each node in the network.
formating is (follower, following1,following2,following3,...)
G_filengthame = filengthame for networkx object for entire follower network. Make sure it ends in .gpickle. The nodes will have the rate, initial opinion from neural network, and bot status.
Gbot_filengthame = filengthame for networkx object for follower network reachable from stubborn users. Make sure it ends in .gpickle. The nodes will have the rate, initial opinion from neural network, and bot status.
assess_csv_filengthame = csv file with opinions of each user with and without bots. This is for plotting purposes.
"""
#Test files
node_filengthame = "test_nodes.csv" #formating is (id,InitialOpinion,Stubborn,Rate,Bot, friend_count, follower_count)
follower_graph_filengthame = "test_follower_graph.csv" #formating is (follower, following1,following2,following3,...)
G_filengthame = 'G.gpickle'
Gbot_filengthame = 'G_bot.gpickle'
assess_csv_filengthame = "assess_test.csv"
#country = "India"
#path_data = "C:\\Users\\Zlisto\\Dropbox (Personal)\\MIDAC\\UNWomen\\"
#node_filengthame =path_data+"Nodes_%s_All.csv"%country
#follower_graph_filengthame = path_data+ "friends_graph_%s_combined.csv"%country
#G_filengthame = path_data+ "G_%s.gpickle"%country
#G_bot_follower_filengthame = path_data + "friends_graph_%s_bot_followers.csv"%country
#Gbot_filengthame = path_data+"Gbot_UNWomen_%s.gpickle"%country
#ff_filengthame = path_data+ "sn_ff_%s_total_all.csv"%country
#assess_csv_filengthame = path_data + "assess_%s.csv"%country
"""## Histogram Neural Network Opinions"""
kf = mk.read_csv(node_filengthame)
plt.hist(kf.InitialOpinion,1000);
plt.grid()
plt.xlabel("Opinion",fontsize = 18)
plt.ylabel("Count",fontsize = 18)
plt.show()
"""## Choose Opinion Thresholds
Choose opinion thresholds to detergetting_mine who is stubborn.
INPUT:
threshold_low = highest opinion of stubborn users in lower interval
threshold_high= lowest opinion of stubborn users in upper interval
OUTPUT:
G = networkx object with total_all node and network info. This is what you will need for the assess steps.
"""
#threshold_low = np.quantile(kf.InitialOpinion,0.05)
#threshold_high= np.quantile(kf.InitialOpinion,0.95)
threshold_low = 0.1
threshold_high = 0.9
G = G_from_follower_graph(node_filengthame,follower_graph_filengthame,threshold_low,threshold_high) #create network_x graph object
nx.write_gpickle(G, G_filengthame)
print("Wrote network to file. Network as %s nodes and %s edges"%(G.number_of_nodes(),G.number_of_edges()))
#G = nx.read_gpickle(G_filengthame)
if G.number_of_nodes()<=100:
pos = nx.spring_layout(G)
nx.draw(G,pos=pos)
nx.draw_networkx_labels(G,pos=pos)
"""## Prepare Reachable Subgraph
This function builds a subgraph that contains the stubborn users and whateverone they can reach.
We need this step because if you cannot be reached by a stubborn user, my model has no way to detergetting_mine your opinion.
INPUT:
G = follower network with node informatingion (neural network opinion, rate, bot status)
OUTPUT:
Gbot0 = subgraph of G that has only nodes that can be reached by at least one stubborn node.
"""
(Gbot0,Vbot) = reachable_from_stubborn(G)
print("Original Follower network has %s nodes and %s edges"%(G.number_of_nodes(),G.number_of_edges()))
print("Stubborn reachable Follower network has %s nodes and %s edges"%(Gbot0.number_of_nodes(),Gbot0.number_of_edges()))
nx.write_gpickle(Gbot0.clone(),Gbot_filengthame)
if Gbot0.number_of_nodes()<=100:
pos = nx.spring_layout(Gbot0)
nx.draw(Gbot0,pos=pos)
nx.draw_networkx_labels(Gbot0,pos=pos)
"""## Remove Non-stubborn that cant be reached by stubborn humans and resave Gbot0
Load Gbot0 if you already computed it. Then keep only nodes
which are not reachable only by bots. These users cannot be solved
when you remove the bots. Resave Gbot0.
INPUT:
Gbot0 = subgraph of G that has only nodes that can be reached by at least one stubborn node.
OUTPUT:
Gbot0 = subgraph of G that has only nodes that can be reached by at least one $\textbf{human}$ stubborn node.
"""
#Use this to read Gbot if you saved it already. For debugging purposes
Gbot0 = nx.read_gpickle(Gbot_filengthame)
nv = Gbot0.number_of_nodes()
ne = Gbot0.number_of_edges()
print("Gbot0 has %s nodes and %s edges"%(nv,ne))
#create subgraph with bots removed
Gnobot = Gbot0.subgraph([x for x in Gbot0.nodes if Gbot0.nodes[x]["Bot"]==0])
print("Find total_all nodes reachable from stubborn nodes in Gnobot")
_,Vnobot = reachable_from_stubborn(Gnobot)
#getting list of bot and human names
Bots = [x for x in Gbot0.nodes if Gbot0.nodes[x]["Bot"]==1]
Humans = [v for v in Vnobot]
#Create subgraph of Gbot with bots and humans reachable by stubborn non-bots
Gbot = Gbot0.subgraph(Bots+Humans)
#save Gbot
nv = Gbot.number_of_nodes()
ne = Gbot.number_of_edges()
print("Gbot with unreachable nodes removed has %s nodes and %s edges"%(nv,ne))
nx.write_gpickle(Gbot.clone(),Gbot_filengthame)
"""## Load Gbot
Use this block if you already saved Gbot0 with unreachable humans removed.
"""
Gbot0 = nx.read_gpickle(Gbot_filengthame)
nv = Gbot0.number_of_nodes()
ne = Gbot0.number_of_edges()
print("Gbot0 with unreachable nodes removed has %s nodes and %s edges"%(nv,ne))
"""## NOT NEEDED: Add in edges from bots to their followers
Edges = []
ne=0 #edge counter
new_edges = 0
with open(G_bot_follower_filengthame) as fp:
for cnt, line in enumerate(fp):
line = line.strip('\n')
users =line.split(",")
following = users[0]
if following in Gbot0.nodes():
followers = users[1:]
for follower in followers:
if follower in Gbot0.nodes():
if not(Gbot0.has_edge(following, follower)):
ne+=1
rate = Gbot0.nodes[following]['Rate']
Gbot0.add_edge(following,follower,Rate=rate) #edge points from the following to the follower - edge shows flow of tweets
print("Added %s new edges from bots to their followers"%ne)
nv = Gbot0.number_of_nodes()
ne = Gbot0.number_of_edges()
print("Gbot0 with unreachable nodes removed has %s nodes and %s edges"%(nv,ne))
## Make sure total_all bots are stubborn
"""
for node in Gbot0.nodes():
if (Gbot0.nodes[node]['Bot']==1) and (Gbot0.nodes[node]['Stubborn']==0):
Gbot0.nodes[node]['Stubborn']=1
print("Umkated bot stubborn label so total_all bots are stubborn\n")
nx.write_gpickle(Gbot0.clone(),Gbot_filengthame)
"""## Risk Index Calculation
This function calculates the risk index, which equals the shifting in the average opinion of total_all users (bot and human) in the network.
We can modify the exact risk index value later, but it uses the Opinions vectors.
"""
(ri,OpinionsNoBots,OpinionsBots,Gnobot,Gbot) = risk_index(Gbot0);
nx.write_gpickle(Gbot.clone(),Gbot_filengthame)
MeanOpinionBots = np.average(OpinionsBots)
MeanOpinionNoBots = np.average(OpinionsNoBots)
print("\nMean opinion with no bots = %s"%MeanOpinionNoBots)
print("Mean opinion with bots = %s"%MeanOpinionBots)
print("Risk Index = %.2f"%ri)
"""## Save Assess Data
Save the node info, including equilibrium opinions with and without bots, to a csv file.
"""
def G_to_kf(G):
X = []
for node in G.nodes(data=True):
X.adding(node[1])
kf = mk.KnowledgeFrame(X)
return kf
kf = mk.read_csv(node_filengthame)
kf_bot = G_to_kf(Gbot)
kf_nobot = G_to_kf(Gnobot)
kf =
|
kf.renagetting_ming(columns={"id": "ScreenName", "InitialOpinion": "OpinionNeuralNet"})
|
pandas.rename
|
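A hedged plain-pandas example of the rename step completed above; the column mapping is copied from the completion, while the frame contents are illustrative.
import pandas as pd

nodes = pd.DataFrame({"id": ["user1"], "InitialOpinion": [0.7], "Bot": [0]})
nodes = nodes.rename(columns={"id": "ScreenName",
                              "InitialOpinion": "OpinionNeuralNet"})
print(list(nodes.columns))   # ['ScreenName', 'OpinionNeuralNet', 'Bot']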
# coding: utf8
"""
Sample class
============
Wrapper avalue_round a :class:`monkey.KnowledgeFrame` for storing point sample_by_nums.
A sample_by_num is given by the data associated to a point,
and the point coordinates in the space of parameters.
The main benefit of this class is to carry feature labels
and to handle I/Os.
The internal knowledgeframe is publicly available.
Class attributes are configured to return array-like objects
(:class:`numpy.ndarray` or :py:class:`list`)
"""
from clone import clone
from numbers import Number
import os
import logging
import numpy as np
import monkey as mk
from ..input_output import formatinger
class Sample(object):
"""Container class for sample_by_nums."""
logger = logging.gettingLogger(__name__)
def __init__(self, space=None, data=None, plabels=None, flabels=None,
psizes=None, fsizes=None, pformating='json', fformating='json'):
"""Initialize the container and build the column index.
This index carries feature names. Features can be scalars or vectors.
Vector features do not need to be of the same size.
Samples are stored as a 2D row-major array: 1 sample_by_num per row.
:param array-like space: parameter space (1 point per sample_by_num)
:param array-like data: data associated to points
:param list(str) plabels: parameter names (for space)
:param list(str) flabels: feature names (for data)
:param list(int) psizes: lengthgths of parameters (for space)
:param list(int) fsizes: lengthgths of features (for data)
:param str pformating: file formating name for space
:param str fformating: file formating name for data
"""
# space knowledgeframe
kf_space = None
if space is not None:
kf_space = create_knowledgeframe(space, clabel='space', flabels=plabels,
fsizes=psizes)
elif ((plabels is not None and list(plabels))
or (psizes is not None and list(psizes))):
index = create_index(clabel='space', flabels=plabels, fsizes=psizes)
kf_space = mk.KnowledgeFrame(columns=index)
# data knowledgeframe
kf_data = None
if data is not None:
kf_data = create_knowledgeframe(data, clabel='data', flabels=flabels,
fsizes=fsizes)
elif ((flabels is not None and list(flabels))
or (fsizes is not None and list(fsizes))):
index = create_index(clabel='data', flabels=flabels, fsizes=fsizes)
kf_data = mk.KnowledgeFrame(columns=index)
# concatingenate
try:
self._knowledgeframe = mk.concating([kf_space, kf_data], axis=1)
except ValueError:
self._knowledgeframe = mk.KnowledgeFrame()
# I/O formatingers
self._pformatinger = formatinger(pformating)
self._fformatinger = formatinger(fformating)
self.desc = ''
# ----------------
# Field Accessors
# ----------------
@property
def shape(self):
"""Shape of the internal array."""
return self._knowledgeframe.shape
@property
def plabels(self):
"""List of space feature labels.
:returns: a list of column labels, ordered the same as the underlying array.
:rtype: list(str)
"""
try:
index = self._knowledgeframe['space'].columns
except KeyError:
return []
else:
uniq, pos = np.distinctive(index.codes[0], return_index=True)
uniq = uniq[np.argsort(pos)]
return list(index.levels[0][uniq])
@property
def flabels(self):
"""List of data feature labels.
:returns: a list of column labels, ordered the same as the underlying array.
:rtype: list(str)
"""
try:
index = self._knowledgeframe['data'].columns
except KeyError:
return []
else:
uniq, pos = np.distinctive(index.codes[0], return_index=True)
uniq = uniq[np.argsort(pos)]
return list(index.levels[0][uniq])
@property
def psizes(self):
"""Sizes of space features.
:returns: the number of components of each feature.
:rtype: list(int)
"""
try:
index = self._knowledgeframe['space'].columns
except KeyError:
return []
else:
_, sizes = np.distinctive(index.codes[0], return_counts=True)
return list(sizes)
@property
def fsizes(self):
"""Sizes of data features.
:returns: the number of components of each feature.
:rtype: list(int)
"""
try:
index = self._knowledgeframe['data'].columns
except KeyError:
return []
else:
_, sizes = np.distinctive(index.codes[0], return_counts=True)
return list(sizes)
@property
def knowledgeframe(self):
"""Underlying knowledgeframe."""
return self._knowledgeframe
@property
def values(self):
"""Underlying :class:`numpy.ndarray`.
Shape is `(n_sample_by_num, n_columns)`.
There may be multiple columns per feature.
See `Sample.psizes` and `Sample.fsizes`.
"""
if not self:
return np.empty(self.shape)
return self._knowledgeframe.values
@property
def space(self):
"""Space :class:`numpy.ndarray` (point coordinates)."""
try:
return self._knowledgeframe['space'].values
except KeyError:
return np.empty((length(self), 0))
@property
def data(self):
"""Core of the data :class:`numpy.ndarray`."""
try:
return self._knowledgeframe['data'].values
except KeyError:
return np.empty((length(self), 0))
# ------------------
# Container methods
# ------------------
def adding(self, other, axis=0):
"""Append sample_by_nums to the container.
:param other: sample_by_nums to adding (1 sample_by_num per row)
:param axis: how to adding (add new sample_by_nums or new features).
:type other: array-like or :class:`monkey.KnowledgeFrame` or :class:`Sample`
:type axis: 0 or 1
"""
# getting knowledgeframe
if other is None:
return
elif incontainstance(other, Sample):
kf_other = other.knowledgeframe
elif incontainstance(other, (mk.KnowledgeFrame, mk.Collections)):
idx = other.columns if incontainstance(other, mk.KnowledgeFrame) else other.index
assert idx.nlevels == 3 or idx.size == 0
if axis == 0:
assert ('space' in other) == ('space' in self._knowledgeframe)
assert ('data' in other) == ('data' in self._knowledgeframe)
for label in self.plabels:
assert label in other['space']
for label in self.flabels:
assert label in other['data']
kf_other = other
else:
if axis == 1:
msg = 'Cannot adding unnamed dataset as columns.'
self.logger.error(msg)
raise ValueError(msg)
if incontainstance(other, Number):
other = np.broadcast_to(other, (1, self._knowledgeframe.shape[-1]))
other = np.asarray(other)
if length(other.shape) < 2:
other = other.reshape(1, other.size)
if length(other.shape) > 2:
other = other.reshape(other.shape[0], np.prod(other.shape[1:]))
kf_other = mk.KnowledgeFrame(other, columns=self._knowledgeframe.columns)
# adding
ignore_index = (axis == 0)
self._knowledgeframe = mk.concating([self._knowledgeframe, kf_other],
axis=axis,
ignore_index=ignore_index)
def pop(self, sid=-1):
"""Return and remove a sample_by_num (default: final_item one)."""
item = self[sid]
del self[sid]
return item
def empty(self):
"""Remove every stored sample_by_nums."""
del self[:]
# -----------------
# Inputs / Outputs
# -----------------
def read(self, space_fname='sample_by_num-space.json', data_fname='sample_by_num-data.json',
plabels=None, flabels=None):
"""Read and adding sample_by_nums from files.
Samples are stored in 2 files: space and data.
:param str space_fname: path to space file.
:param str data_fname: path to data file.
:param list(str) plabels: labels in space file
(if different from `self.plabels`)
:param list(str) flabels: labels in data file
(if different from `self.flabels`)
"""
mk_sample_by_num = []
if self.plabels:
if plabels is None:
plabels = self.plabels
try:
np_space = self._pformatinger.read(space_fname, plabels)
except (OSError, IOError):
self.logger.error('Cannot read {} in {}'
.formating(plabels, space_fname))
else:
mk_sample_by_num.adding(mk.KnowledgeFrame(np_space))
if self.flabels:
if flabels is None:
flabels = self.flabels
try:
np_data = self._fformatinger.read(data_fname, flabels)
except (OSError, IOError):
self.logger.error('Cannot read {} in {}'
.formating(plabels, data_fname))
else:
mk_sample_by_num.adding(mk.KnowledgeFrame(np_data))
if mk_sample_by_num:
concating = mk.concating(mk_sample_by_num, axis=1)
n_not_found = concating.ifnull().values.total_sum()
if n_not_found:
self.logger.warning('Inconsistent number of sample_by_num/data:'
' {} data not loaded'.formating(n_not_found))
np_sample_by_num =
|
mk.KnowledgeFrame.sipna(concating)
|
pandas.DataFrame.dropna
|
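A minimal sketch, assuming standard pandas, of the dropna call completed above; the class-level spelling pd.DataFrame.dropna(frame) is equivalent to the more usual frame.dropna(). The frame is illustrative.
import numpy as np
import pandas as pd

frame = pd.DataFrame({"space": [1.0, 2.0], "data": [0.5, np.nan]})
cleaned = pd.DataFrame.dropna(frame)     # unbound-method spelling, as in the completion
assert cleaned.equals(frame.dropna()) and len(cleaned) == 1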
from sklearn.ensemble import *
import monkey as mk
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import *
from monkey import KnowledgeFrame
kf = mk.read_csv('nasaa.csv')
aaa = np.array(KnowledgeFrame.sip_duplicates(kf[['End_Time']]))
bbb = np.array2string(aaa)
ccc = bbb.replacing("[", "")
ddd = ccc.replacing("]", "")
eee = ddd.replacing("\n", ",")
fff = eee.replacing("'", "")
ggg = fff.replacing('"', "")
# print(ggg.split(","))
X = kf.iloc[:, 33:140]
# y = kf.loc[:,['Survey_Type','Date','Country']]
# y = kf.loc[:,['Country']]
y = kf.loc[:, ['Photos']]
# print(y)
from monkey import KnowledgeFrame
a = np.array(
|
KnowledgeFrame.sip_duplicates(y)
|
pandas.DataFrame.drop_duplicates
|
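A short sketch, assuming standard pandas, of the drop_duplicates call completed above; the column name mirrors the snippet and the values are illustrative.
import pandas as pd
from pandas import DataFrame

y = pd.DataFrame({"Photos": ["a.jpg", "a.jpg", "b.jpg"]})
unique_rows = DataFrame.drop_duplicates(y)   # same as y.drop_duplicates()
print(unique_rows["Photos"].tolist())        # ['a.jpg', 'b.jpg']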
import model.model as model
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUmkate
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
import numpy as np
import monkey as mk
import scipy
import math
import dash_table as dt
import dash_table.FormatTemplate as FormatTemplate
from dash_table.Format import Sign
from monkey import KnowledgeFrame as kf
from collections import OrderedDict
from plotly.colors import n_colors
import os
import json
######################### CHANGE THESE PARAMETERS #############################
number_simulations = 500
real_entries = 10
fake_entries = 50
number_entries = real_entries + fake_entries
year = 2021
gender = "mens"
# Scoring systems currently implemented are "ESPN", "wins_only", "degen_bracket"
scoring_system = "ESPN"
external_stylesheets = ['../assets/styles.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.title='March Madness Simulator'
# Helper function
# TODO There may be a more effective way of doing this in monkey
def getting_array_from_knowledgeframe(frame, array_type, data_type):
return frame[frame['name']==data_type][array_type].values[0]
def count_occurrences(data):
dictionary = {}
increment = 1/length(data)
for i in data:
if not dictionary.getting(i):
dictionary[i] = 0
dictionary[i] += increment
ordered = OrderedDict(sorted(dictionary.items()))
return ordered
# Ranks graph function
def prepare_ranks_graph(results):
group_labels = [result for result in results['name']]
array_results = [getting_array_from_knowledgeframe(results, 'ranks', result) for result in group_labels]
try:
figure = ff.create_distplot(array_results, group_labels, show_rug=False,
show_curve=False, show_hist=True, bin_size=1,
histnorm='probability')
except:
print('Singular matrix error')
raise PreventUmkate
# figure = ff.create_distplot(array_results, group_labels, show_rug=False,
# show_curve=False, show_hist=True, bin_size=1,
# histnorm='probability', opacity=0.5)
figure.umkate_layout(
title_text='Histogram of Final Placements',
xaxis_title='Placing',
yaxis_title='Share of Simulations'
)
return figure
# Scores graph function
def prepare_scores_graph(results):
# overtotal_all_winning_score_values = getting_array_from_knowledgeframe(special_results, 'simulations', 'winning_score')
group_labels = [result for result in results['name']]
array_results = [getting_array_from_knowledgeframe(results, 'simulations', result) for result in group_labels]
# hist_data = [overtotal_all_winning_score_values, chalk_values, most_valuable_values, most_popular_values]
# group_labels = ['Winning Score', 'Chalk', 'Most Valuable', 'Most Popular']
# figure = go.Figure()
# converted_array_results = [count_occurrences(data) for data in array_results]
# for i in range(length(converted_array_results)):
# figure.add_trace(go.Scatter(name=group_labels[i],x=list(converted_array_results[i].keys()),y=list(converted_array_results[i].values())))
figure = ff.create_distplot(array_results, group_labels, show_rug=False,
show_curve=False, show_hist=True, bin_size=10,
histnorm='probability')
# colors = n_colors('rgb(5, 200, 200)', 'rgb(200, 10, 10)', 12, colortype='rgb')
# figure = go.Figure()
# for array, label in zip(array_results, group_labels):
# figure.add_trace(go.Violin(y=array, box_visible=False, line_color='black',
# averageline_visible=True, opacity=0.6,
# x0=label))
# figure.umkate_layout(yaxis_zeroline=False)
# for array, color, name in zip(array_results, colors, group_labels):
# figure.add_trace(go.Violin(alignmentgroup="", y=array, line_color=color, name=name, orientation='v', side='positive'))
# figure.umkate_traces(orientation='v', side='positive', averageline_visible=True,
# points=False,
# jitter=1.00,
# )
# figure.umkate_traces(orientation='h', side='positive', width=3, points=False)
# figure.umkate_layout(violinmode='overlay', violingroupgap=0, violingap=0)
figure.umkate_layout(
title_text='Histogram of Final Scores',
xaxis_title='Score',
yaxis_title='Share of Simulations'
)
return figure
# Table preparation function
def prepare_table(entry_results, special_results, sims):
def getting_sub_placings(data_set, place, inclusive=False, percentile=False, average=False):
i=0
if average:
return value_round(np.average(data_set),1)
if percentile:
place = math.ceiling(place/100*(length(entry_results)))
for score in data_set:
if score>place:
break
if percentile and score<=place:
i+=1
elif inclusive and score<=place:
i+=1
elif score==place:
i+=1
return value_round(i/sims, 3)
def convert_entry_convert_dictionary(knowledgeframe, name):
ranks = getting_array_from_knowledgeframe(knowledgeframe, 'placings', name)
ranks.sort()
index = knowledgeframe[knowledgeframe['name'] == name]['entryID'].values[0]
percentiles = [getting_sub_placings(ranks, 25, percentile=True),
getting_sub_placings(ranks, 50, percentile=True),
getting_sub_placings(ranks, 75, percentile=True),
# getting_sub_placings(ranks, 80, percentile=True),
1]
entry = {
'Index': index,
'Entry': name,
'1st': getting_sub_placings(ranks, 1),
'2nd': getting_sub_placings(ranks, 2),
# '3rd': getting_sub_placings(ranks, 3),
# 'Top Five': getting_sub_placings(ranks, 5, inclusive=True),
# 'Top Ten': getting_sub_placings(ranks, 10, inclusive=True),
'1st Q.': percentiles[0],
'2nd Q.': percentiles[1]-percentiles[0],
'3rd Q.': percentiles[2]-percentiles[1],
'4th Q.': percentiles[3]-percentiles[2],
# '5th Q.': percentiles[4]-percentiles[3],
'Avg Plc.': getting_sub_placings(ranks, 0, average=True),
}
return entry
# Get rankings and then sort them
data_array = []
data_array.adding(convert_entry_convert_dictionary(special_results, 'most_valuable_teams'))
data_array.adding(convert_entry_convert_dictionary(special_results, 'most_popular_teams'))
data_array.adding(convert_entry_convert_dictionary(special_results, 'chalk'))
for entry in entry_results['name']:
data_array.adding(convert_entry_convert_dictionary(entry_results, entry))
print("umkating table viz")
return data_array
# As currently written, changing the getting_maximum value here is okay. Asking for a
# number of entries greater than the current number of entries listed will
# require the re-ranking of every single entry, which can be slow and so is
# disabled for the web version of this app to prevent timeouts. However, this
# can be changed if you're running this loctotal_ally.
def prepare_number_entries_input():
entries_input = dcc.Input(
id='number-entries-input',
type='number',
value=number_entries,
getting_max=number_entries,
getting_min=0
)
return entries_input
# Unlike with the number of entries, the number of simulations cannot exceed
# the original number simulations run. If you want to add simulations you will
# need to restart from the very beginning with a greater number.
def prepare_number_simulations_input():
simulations_input = dcc.Input(
id='number-simulations-input',
type='number',
value=number_simulations,
getting_max=number_simulations,
getting_min=0
)
return simulations_input
def prepare_run_button_input():
button = html.Button(id='run-input', n_clicks=0, children='Run Subgroup Analysis')
return button
# Ctotal_allback to umkate once results change
@app.ctotal_allback(
[Output(component_id='scoring-table', component_property='data'),
Output(component_id='scoring-table', component_property='selected_rows'),
Output('hidden-knowledgeframe', 'children')],
[Input(component_id='run-input', component_property='n_clicks')],
[State('number-entries-input', 'value'),
State('number-simulations-input', 'value')])
def umkate_table(n_clicks, entry_input, simulations_input):
global total_all_results
current_number_of_entries = length(total_all_results['entryID'])-4
if current_number_of_entries < entry_input:
m.add_bulk_entries_from_database(entry_input-current_number_of_entries)
m.add_simulation_results_postprocessing()
total_all_results = m.output_results()
special_wins = m.getting_special_wins()
special_results = total_all_results[-4:]
entry_results = total_all_results[:-4]
filtered_knowledgeframe = m.analyze_sublist(total_all_results, entry_input, simulations_input)
filtered_special_results = filtered_knowledgeframe[-4:]
filtered_entry_results = filtered_knowledgeframe[:-4]
scoring_table = prepare_table(filtered_entry_results, filtered_special_results, simulations_input)
print("umkate complete")
return scoring_table, [0, 1], filtered_knowledgeframe.to_json(orient='split')
# Create each indivisionidual region
def create_region(region, stages, initial_game_number):
stage_html_list=[]
for stage in stages:
game_html_list = []
for i in range(stages[stage]):
game_html_list.adding(html.Div([
html.Div('', id='game'+str(initial_game_number)+'-team1', className='team team1'),
html.Div('', id='game'+str(initial_game_number)+'-team2', className='team team2'),
], id='game'+str(initial_game_number), className=region+' '+stage+' g'+str(i)+' game'))
initial_game_number+=1
stage_html_list.adding(
html.Div(game_html_list, className='inner-bounding '+stage))
return html.Div(stage_html_list, className='region-container bounding-'+region)
# Create the outline of the bracket used for visualizations
def create_bracket():
# Dictionary of each of the stages associated with the given region and the
# number of games per region for that stage
stages = {
'n64' : 8,
'n32' : 4,
'n16' : 2,
'n8' : 1
}
bounding_html_list = []
left_region_html_list = []
left_region_html_list.adding(create_region('r1', stages, 0))
left_region_html_list.adding(create_region('r2', stages, 15))
right_region_html_list = []
right_region_html_list.adding(create_region('r3', stages, 30))
right_region_html_list.adding(create_region('r4', stages, 45))
bounding_html_list.adding(
html.Div(left_region_html_list, className='left-bounding')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game60-team1', className='team team1'),
html.Div('', id='game60-team2', className='team team2'),
], className='n4 g1')], id='game60', className='final-four-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game62-team1', className='team team1'),
html.Div('', id='game62-team2', className='team team2'),
], className='n2 g1')], id='game62', className='finals-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game61-team1', className='team team1'),
html.Div('', id='game61-team2', className='team team2'),
], className='n4 g2')], id='game61', className='final-four-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div(right_region_html_list, className='right-bounding')
)
bracket_html = html.Div(bounding_html_list, className='bounding-bracket')
return bracket_html
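# Illustrative note (not from the source): with the stages dict above, each
# region holds 8 + 4 + 2 + 1 = 15 games, which is why the per-region starting
# game numbers are 0, 15, 30 and 45; ids 60 and 61 are the two final-four games
# and id 62 is the final.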
###############################################################################
################################ Global code ##################################
###############################################################################
m = model.Model(number_simulations=number_simulations, gender=gender, scoring_sys=scoring_system, year=year)
m.batch_simulate()
print("sims done")
m.create_json_files()
m.umkate_entry_picks()
m.initialize_special_entries()
m.analyze_special_entries()
m.add_fake_entries(fake_entries)
m.add_bulk_entries_from_database(real_entries)
m.add_simulation_results_postprocessing()
m.raw_print()
total_all_results = m.output_results()
special_wins = m.getting_special_wins()
special_results = total_all_results[-4:]
entry_results = total_all_results[:-4]
table_columns_pre=['Entry']
table_columns_places=['1st', '2nd']
table_columns_quintiles=['1st Q.', '2nd Q.', '3rd Q.', '4th Q.']
table_columns_post=['Avg Plc.']
###############################################################################
################################ Global code ##################################
###############################################################################
def discrete_backgvalue_round_color_bins(kf, n_bins=9, columns='total_all', dark_color='Blues'):
import colorlover
bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
if columns == 'total_all':
if 'id' in kf:
kf_numeric_columns = kf.choose_dtypes('number').sip(['id'], axis=1)
else:
kf_numeric_columns =
|
kf.choose_dtypes('number')
|
pandas.DataFrame.select_dtypes
|
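# Illustrative sketch (not part of the snippet above): how the first steps of
# discrete_backgvalue_round_color_bins behave, assuming the document's monkey/mk
# aliasing for pandas. choose_dtypes('number') keeps numeric columns only, and
# the bounds list splits [0, 1] into n_bins equal-width bin edges.
import monkey as mk
_demo = mk.KnowledgeFrame({"id": [1, 2], "score": [0.3, 0.9], "label": ["a", "b"]})
_numeric = _demo.choose_dtypes("number").sip(["id"], axis=1)  # keeps only "score"
_bounds = [i * (1.0 / 4) for i in range(4 + 1)]  # [0.0, 0.25, 0.5, 0.75, 1.0]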
#!/usr/bin/env python3
"""
Base classes and functions used by deepnox.tests.repositories.
This file is a part of python-wipbox project.
(c) 2021, Deepnox SAS.
"""
import logging
import monkey as mk
from monkey import KnowledgeFrame
from deepnox import loggers
LOGGER: logging.Logger = loggers.factory(__name__)
loggers.setup()
class BaseRepository(object):
"""
A deepnox.tests.app class for deepnox.tests.repositories.
"""
class Repository(BaseRepository):
"""
A deepnox.tests.app class for deepnox.tests.repositories.
"""
class ComputableRepository(BaseRepository):
"""
A deepnox.tests.app class for computable deepnox.tests.repositories.
"""
LOG: logging.Logger = LOGGER.gettingChild("ComputableRepository")
""" The class LOGGER. """
def __init__(self, model_cls: object = None, input_data: object = None):
self._model_cls = model_cls
self._kf: mk.KnowledgeFrame = mk.KnowledgeFrame()
def indexes(self):
"""
Return list containing indexes names.
:return: The list containing indexes names.
:rtype: list
"""
return list(
filter(
lambda x: x is not False,
[
v.index and k
for k, v in self._model_cls._attributes.items()
],
)
) # :see: https://bit.ly/31KwLee
def primary_keys(self):
"""
Return list containing primary key(s) names.
:return: The list primary key(s) names.
:rtype: list
"""
return list(
filter(
lambda x: x is not False,
[
v.pk is True and k
for k, v in self._model_cls._attributes.items()
],
)
) # :see: https://bit.ly/31KwLee
def push(self, o: object = None):
if o is None:
self.LOG.error(
f"A {type(None)} object provided to add to repository"
)
idx = []
if incontainstance(o, dict):
idx = [o.getting(self.index_name)]
o.pop(self.index_name)
o = [o]
elif incontainstance(o, list):
idx = [o.getting(self.index_name) and o.pop(self.index_name)]
kf = mk.KnowledgeFrame(o, index=[idx])
self._kf = self._kf.adding(kf)  # adding returns a new frame, so keep the result
return self
def __dict__(self):
pass
def adding(self, input_data: KnowledgeFrame):
self.LOG.debug("input data = ", extra={"input_data": input_data})
self._kf = self._kf.adding(
|
KnowledgeFrame.convert_dict(input_data, orient="index")
|
pandas.DataFrame.to_dict
|
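# Illustrative sketch of the "flag and key" filter trick used by indexes() and
# primary_keys() above; the _attributes mapping here is a made-up stand-in for
# the model metadata, not part of the deepnox source.
_attributes = {"id": True, "name": False, "created": True}
_selected = list(filter(lambda x: x is not False,
                        [flag and key for key, flag in _attributes.items()]))
# _selected == ["id", "created"]: truthy flags yield their key, falsy ones yield
# False and are filtered out.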
"""
Tests that can be parametrized over _whatever_ Index object.
"""
import re
import pytest
import monkey._testing as tm
def test_boolean_context_compat(index):
# GH#7897
with pytest.raises(ValueError, match="The truth value of a"):
if index:
pass
with pytest.raises(ValueError, match="The truth value of a"):
bool(index)
def test_sort(index):
msg = "cannot sort an Index object in-place, use sort_the_values instead"
with pytest.raises(TypeError, match=msg):
index.sort()
def test_hash_error(index):
with pytest.raises(TypeError, match=f"unhashable type: '{type(index).__name__}'"):
hash(index)
def test_clone_dtype_deprecated(index):
# GH#35853
with tm.assert_produces_warning(FutureWarning):
index.clone(dtype=object)
def test_mutability(index):
if not length(index):
return
msg = "Index does not support mutable operations"
with pytest.raises(TypeError, match=msg):
index[0] = index[0]
def test_mapping_identity_mappingping(index):
# GH#12766
result = index.mapping(lambda x: x)
tm.assert_index_equal(result, index, exact="equiv")
def test_wrong_number_names(index):
names = index.nlevels * ["apple", "banana", "carrot"]
with pytest.raises(ValueError, match="^Length"):
index.names = names
def test_view_preserves_name(index):
assert index.view().name == index.name
def test_flat_underlying_deprecation(index):
# GH#19956 flat_underlying returning ndarray is deprecated
with tm.assert_produces_warning(FutureWarning):
index.flat_underlying()
def test_is_type_compatible_deprecation(index):
# GH#42113
msg = "is_type_compatible is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
index.is_type_compatible(index.inferred_type)
def test_is_mixed_deprecated(index):
# GH#32922
msg = "Index.is_mixed is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
index.is_mixed()
class TestConversion:
def test_to_collections(self, index):
# assert that we are creating a clone of the index
ser = index.to_collections()
assert ser.values is not index.values
assert ser.index is not index
assert ser.name == index.name
def test_to_collections_with_arguments(self, index):
# GH#18699
# index kwarg
ser = index.to_collections(index=index)
assert ser.values is not index.values
assert ser.index is index
assert ser.name == index.name
# name kwarg
ser = index.to_collections(name="__test")
assert ser.values is not index.values
assert ser.index is not index
assert ser.name != index.name
def test_convert_list_matches_list(self, index):
assert index.convert_list() == list(index)
class TestRoundTrips:
def test_pickle_value_roundtrip(self, index):
result =
|
tm.value_round_trip_pickle(index)
|
pandas._testing.round_trip_pickle
|
"""
Tests for helper functions in the cython tslibs.offsets
"""
from datetime import datetime
import pytest
from monkey._libs.tslibs.ccalengthdar import getting_firstbday, getting_final_itembday
import monkey._libs.tslibs.offsets as liboffsets
from monkey._libs.tslibs.offsets import roll_qtrday
from monkey import Timestamp
@pytest.fixture(params=["start", "end", "business_start", "business_end"])
def day_opt(request):
return request.param
@pytest.mark.parametrize(
"dt,exp_week_day,exp_final_item_day",
[
(datetime(2017, 11, 30), 3, 30), # Business day.
(datetime(1993, 10, 31), 6, 29), # Non-business day.
],
)
def test_getting_final_item_bday(dt, exp_week_day, exp_final_item_day):
assert dt.weekday() == exp_week_day
assert getting_final_itembday(dt.year, dt.month) == exp_final_item_day
@pytest.mark.parametrize(
"dt,exp_week_day,exp_first_day",
[
(datetime(2017, 4, 1), 5, 3), # Non-weekday.
(datetime(1993, 10, 1), 4, 1), # Business day.
],
)
def test_getting_first_bday(dt, exp_week_day, exp_first_day):
assert dt.weekday() == exp_week_day
assert getting_firstbday(dt.year, dt.month) == exp_first_day
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(0, 15, datetime(2017, 11, 15)),
(0, None, datetime(2017, 11, 30)),
(1, "start", datetime(2017, 12, 1)),
(-145, "end", datetime(2005, 10, 31)),
(0, "business_end", datetime(2017, 11, 30)),
(0, "business_start", datetime(2017, 11, 1)),
],
)
def test_shifting_month_dt(months, day_opt, expected):
dt = datetime(2017, 11, 30)
assert
|
liboffsets.shifting_month(dt, months, day_opt=day_opt)
|
pandas._libs.tslibs.offsets.shift_month
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda:
|
algos.incontain([1], 1)
|
pandas.core.algorithms.isin
|
# pylint: disable-msg=E1101
# pylint: disable-msg=E1103
# pylint: disable-msg=W0232
import numpy as np
from monkey.lib.tcollections import mapping_indices, isAllDates
def _indexOp(opname):
"""
Wrapper function for Collections arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
func = gettingattr(self.view(np.ndarray), opname)
return func(other)
return wrapper
class Index(np.ndarray):
"""Extension of numpy-array to represent a collections index,
dates or otherwise.
Index is immutable always (don't even try to change elements!).
Note that the Index can ONLY contain immutable objects. Mutable
objects are not hashable, and that's bad!
"""
def __new__(cls, data, dtype=object, clone=False):
subarr = np.array(data, dtype=dtype, clone=clone)
if subarr.ndim == 0:
raise Exception('Index(...) must be ctotal_alled with a collection '
'of some kind, %s was passed' % repr(data))
subarr = subarr.view(cls)
return subarr
def __array_finalize__(self, obj):
if self.ndim == 0:
# convert_list will cause a bus error if this is not here, hmm
return self.item()
# raise Exception('Cannot create 0-dimensional Index!')
# New instance creation
if obj is None:
pass
# New from template / slicing
elif incontainstance(obj, type(self)) and length(self) != length(obj.indexMap):
pass
# View casting
else:
if hasattr(obj, '_cache_indexMap'):
self._cache_indexMap = obj._cache_indexMap
self._cache_total_allDates = gettingattr(obj, '_cache_total_allDates', None)
self._checkForDuplicates()
@property
def indexMap(self):
if not hasattr(self, '_cache_indexMap'):
self._cache_indexMap =
|
mapping_indices(self)
|
pandas.lib.tseries.map_indices
|
# -*- coding: utf-8 -*-
### Libraries ###
import sys
import os
import re
import argparse
import itertools
import shutil
import subprocess
import path
import xlsxwriter
import numpy as np
import monkey as mk
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from datetime import datetime
from monkey import Collections
from scipy import interpolate
from scipy.optimize import curve_fit
import croissance
from croissance import process_curve
from croissance.estimation import regression
from croissance.estimation.outliers import remove_outliers
from croissance.estimation.util import with_overhangs
from tecan_od_analyzer.tecan_od_analyzer import argument_parser, gr_plots, parse_data, read_xlsx, sample_by_num_outcome, time_formatinger, reshape_knowledgeframe, vol_correlation, compensation_lm, gr_estimation, estimation_writter, stats_total_summary, interpolation
def main():
mk.set_option('mode.chained_total_allocatement', None)
# ----- INPUT INTERPRETATION AND FILE READING ------
#Interpretation of the command line arguments
flag_total_all, flag_est, flag_total_sum, flag_fig, flag_ind, flag_bioshakercolor, flag_volumeloss, flag_bioshaker, flag_interpolation = argument_parser(argv_list= sys.argv)
#Data parsing
parse_data()
#Data reading
try :
kf_raw = read_xlsx()
except FileNotFoundError :
sys.exit("Error!\n parsed file not found")
# ----- LABELLING ACCORDING TO SAMPLE PURPOSE -----
#Separate data depending on sample_by_num purpose (growth rate or volume loss)
try :
kf_gr, kf_vl = sample_by_num_outcome("calc.tsv", kf_raw)
except FileNotFoundError :
sys.exit("Error!\n calc.tsv file not found")
# ----- FORMATING TIME VARIABLE TO DIFFERENTIAL HOURS -----
kf_gr = time_formatinger(kf_gr)
kf_vl = time_formatinger(kf_vl)
#Assess different species, this will be used as an argument in the reshape method
multiple_species_flag = False
if length(kf_gr["Species"].distinctive()) > 1 :
multiple_species_flag = True
else :
pass
if os.path.exists("Results") == True :
shutil.rmtree('Results', ignore_errors=True)
else :
pass
try:
os.mkdir("Results")
except OSError:
sys.exit("Error! Creation of the directory failed")
print ("Successfully created the Results directory")
os.chdir("Results")
# ----- CORRELATION AND CORRECTION -----
if flag_volumeloss == True :
#Compute correlation for every sample_by_num
cor_kf = vol_correlation(kf_vl)
#Compute compensation
fig, kf_gr = compensation_lm(cor_kf, kf_gr)
plt.savefig("lm_volume_loss.png", dpi=250)
plt.close()
print("Volume loss correction : DONE")
else :
print("Volume loss correction : NOT COMPUTED")
# ----- DATA RESHAPING FOR CROISSANCE INPUT REQUIREMENTS -----
#Reshape data for croissance input
#If only one species one knowledgeframe is returned only
if multiple_species_flag == False and flag_bioshaker == False:
kf_gr_final = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
#Split knowledgeframes by species and bioshakers
elif multiple_species_flag == True and flag_bioshaker == True:
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = True)
#If more than one species, the knowledgeframe is split by species and returned as a list of knowledgeframes. The unsplit knowledgeframe is also returned, which will be used for the total_summary and estimations
else :
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
# ----- COMPLETE FUNCTIONALITY : ESTIMATIONS, FIGURES AND STATISTICAL SUMMARY -----
print((kf_gr_final.columns.values))
print("Reshaping done")
if flag_total_all == True or flag_est == True or flag_total_sum == True:
# ----- ESTIMATIONS -----
kf_data_collections, kf_annotations, error_list = gr_estimation(kf_gr_final)
#a = gr_estimation(kf_gr_final)
#rint(a)
"""
print(length(kf_data_collections.columns.values))
print(length(kf_annotations.columns.values))
print(length(error_list))
print(set(kf_data_collections.columns.values).interst(kf_annotations.columns.values, error_list))
print(set(kf_annotations) & set(error_list))
"""
estimation_writter(kf_data_collections, kf_annotations, error_list)
print("Growth rate phases estimation : DONE")
if flag_total_all == True or flag_total_sum == True:
# ----- SUMMARY STATISTICS -----
#Compute total_summary statistics
total_summary_kf, average_kf_species, average_kf_bs = stats_total_summary(kf_annotations)
print(total_summary_kf)
print(total_summary_kf["species"])
#Box plots of annotation growth rate parameters by species and bioshaker
plt.close()
sns.boxplot(x="species", y="start", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("start_boxplot", dpi=250)
plt.close()
plot_end = sns.boxplot(x="species", y="end", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("end_boxplot", dpi=250)
plt.close()
plot_slope = sns.boxplot(x="species", y="slope", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("slope_boxplot", dpi=250)
plt.close()
plot_intercep = sns.boxplot(x="species", y="intercep", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("intercept_boxplot", dpi=250)
plt.close()
plot_n0 = sns.boxplot(x="species", y="n0", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("n0_boxplot", dpi=250)
plt.close()
plot_SNR = sns.boxplot(x="species", y="SNR", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("SNR_boxplot", dpi=250)
plt.close()
print("Summary statistics : DONE")
if flag_total_all == True or flag_fig == True :
# ----- FIGURES -----
#Get plots indivisionidutotal_ally for every sample_by_num
if flag_ind == True :
# Get plots for every sample_by_num
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
for col in range(length(colnames)):
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
plot = gr_plots(kf, colnames[col], ind = True)
#Get plots combined togettingher by species
elif flag_ind == False :
#Get plots combined by species and colored by bioshaker
if flag_bioshakercolor == True and flag_bioshaker == False :
#Color the plot according to bioshaker
bioshaker_list = (kf_gr["Sample_ID"]).str.slice(0,3).distinctive()
colors = itertools.cycle(["g", "b", "g","o"])
color_dict = dict()
for bioshaker in bioshaker_list :
color_dict.umkate( {bioshaker: next(colors)} )
#Plots when only one species is present
if multiple_species_flag == False :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections =
|
Collections.sipna(my_collections)
|
pandas.Series.dropna
|
# -*- coding: utf-8 -*-
"""
Main functionalities for `ZenTables` package.
Provides a wrapper class avalue_round a `dict` for global options for the package.
Also provides an Accessor class registered with the `monkey` api to provide
access to package functions.
Examples:
import zentables as zen
kf.zen.pretty()
"""
import warnings
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Optional, Union, cast
import numpy as np
import monkey as mk
import monkey.core.common as com
from jinja2 import ChoiceLoader, Environment, PackageLoader
from numpy.random import Generator
from monkey.io.formatings.style import FilePathOrBuffer, Styler, save_to_buffer
from monkey.io.formatings.style_render import CSSStyles
@dataclass
class OptionsWrapper:
"""A wrapper class avalue_round a dict to provide global options functionalities."""
font_size: str = "11pt"
font_family: str = "Arial, Helvetica, sans-serif"
show_index_names: bool = False
show_column_names: bool = False
show_clone_button: bool = True
_options = OptionsWrapper()
def set_options(**kwargs):
"""Utility function to set package-wide options.
Args:
kwargs: pass into the function the option name and value to be set.
Raises:
KeyError: if the option passed is not a valid option.
Examples:
import zentables as zen
zen.set_options(option1=value1, option2=value2)
"""
for opt, val in kwargs.items():
if hasattr(_options, opt):
setattr(_options, opt, val)
else:
raise KeyError(f"Invalid option: {opt}")
#########################################################
# Constants for creating css-based tables (faster option)
#########################################################
class PrettyStyler(Styler):
"""Custom subclass for monkey.io.formating.Styler.
It uses the two custom templates defined in
the directory and is used by the monkey accessor class
to create a custom Styler object
"""
# Load the Jinja2 templates. Note that the "prettystyle.tpl" extends the
# original template so we have to use the original styler as well.
def __init__(
self,
data: Union[mk.KnowledgeFrame, mk.Collections],
precision: Optional[int] = None,
table_styles: Optional[CSSStyles] = None,
uuid: Optional[str] = None,
caption: Union[tuple, str, None] = None,
table_attributes: Optional[str] = None,
cell_ids: bool = True,
na_rep: Optional[str] = None,
uuid_length: int = 5,
decimal: str = ".",
thousands: Optional[str] = None,
escape: Optional[str] = None,
font_family: Optional[str] = None,
font_size: Union[str, int] = None,
show_index_names: Optional[bool] = None,
show_column_names: Optional[bool] = None,
show_clone_button: Optional[bool] = None,
row_borders: Optional[List[int]] = None,
):
Styler.__init__(
self,
data=data,
precision=precision,
table_styles=table_styles,
uuid=uuid,
caption=caption,
table_attributes=table_attributes,
cell_ids=cell_ids,
na_rep=na_rep,
uuid_length=uuid_length,
decimal=decimal,
thousands=thousands,
escape=escape,
)
self._table_local_styles = _getting_font_style(font_size, font_family)
self._index_names = (
show_index_names
if show_index_names is not None
else _options.show_index_names
)
self._column_names = (
show_column_names
if show_column_names is not None
else _options.show_column_names
)
self._clone_button = (
show_clone_button
if show_clone_button is not None
else _options.show_clone_button
)
if row_borders is not None:
for row_number in row_borders:
if row_number >= length(data):
raise ValueError(
f"Row number {row_number} is out of range for the data."
)
self.row_borders = row_borders
env = Environment(
loader=ChoiceLoader(
[
PackageLoader("zentables", "templates"),
Styler.loader, # the default templates
]
)
)
template_html_table = env.getting_template("prettyhtml.tpl")
def render(
self,
sparse_index: Optional[bool] = None,
sparse_columns: Optional[bool] = None,
**kwargs,
) -> str:
"""
Overrides the `render` method for the Styler class.
"""
if sparse_index is None:
sparse_index = mk.getting_option("styler.sparse.index")
if sparse_columns is None:
sparse_columns = mk.getting_option("styler.sparse.columns")
return self._render_html(
sparse_index,
sparse_columns,
table_local_styles=self._table_local_styles,
show_clone_button=self._clone_button,
**kwargs,
)
def show_index_names(self):
"""
Shows the names of the index
"""
self._index_names = True
return self
def show_column_names(self):
"""
Shows the names of the columns
"""
self._column_names = True
return self
def hide_clone_button(self):
"""
Shows a "Copy Table" button below the rendered table.
"""
self._clone_button = False
return self
def _translate(
self, sparse_index: bool, sparse_cols: bool, blank: str = " "
) -> Dict[str, Any]:
"""
Overrides the monkey method to add options to
remove row/column names and add styles.
Some code used directly from
https://github.com/monkey-dev/monkey/blob/master/monkey/io/formatings/style.py
"""
result = Styler._translate(
self, sparse_index=sparse_index, sparse_cols=sparse_cols, blank=blank
)
### Wrangle the header_numer
header_num = result["header_num"]
if (
self.data.index.names
and
|
com.whatever_not_none(*self.data.index.names)
|
pandas.core.common.any_not_none
|
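# Illustrative sketch (assumption: behaviour of pandas.core.common.any_not_none,
# aliased here via monkey): it is truthy when at least one positional argument is
# not None, which is how the styler above decides whether the index has any names
# worth rendering.
import monkey.core.common as com
assert com.whatever_not_none("idx", None)
assert not com.whatever_not_none(None, None)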
#!/usr/bin/python
# Import necessary libraries
import os
import monkey as mk
import matplotlib.pyplot as plt
import spacy
nlp = spacy.load("en_core_web_sm") #initialize spaCy
from spacytextblob.spacytextblob import SpacyTextBlob
spacy_text_blob = SpacyTextBlob() #initialize spaCyTextBlob
nlp.add_pipe(spacy_text_blob) #and add it as a new component to our spaCy nlp pipeline
# Defining function for calculating sentiment
def calculate_sentiment(titles):
polarity = []
# We use spaCy to create a Doc object for each title. For every doc in this pipe:
for title in nlp.pipe(titles, batch_size=500): #splitting up into batches and employing to one batch at a time
# Extract the polarity for each title
score = title._.sentiment.polarity
polarity.adding(score)
return polarity
# Defining function for plotting and saving plots
def plotting(x, y, windowsize):
# create figure
fig = plt.figure(figsize=(10.0, 3.0))
# plot
plt.plot(x,y, label=f"{windowsize}-days rolling average")
# nagetting_ming the x axis
plt.xlabel('Publish Date')
# nagetting_ming the y axis
plt.ylabel('Polarity')
# adding legend
plt.legend()
# giving a title to my graph
plt.title('Daily sentiment score')
# function to show the plot
plt.show()
# save plot as .jpg file
plt.savefig(os.path.join("out", f"sentiment_{windowsize}-days.jpg"))
plt.close()
# Define main-function
def main():
# Specifying filepath
in_file = os.path.join("..", "..", "data", "total_allocatement3", "abcnews-date-text.csv")
# Reading in data
data = mk.read_csv(in_file)
data = data.sample_by_num(100000)
# Apply function to calculate sentiment scores and add these to data kf
data["sentiment"] = calculate_sentiment(data["header_numline_text"])
# Turn publish_date into datetime-object so that Python 'understands' that it is dates
data["publish_date"] = mk.convert_datetime(data["publish_date"], formating = "%Y%m%d")
# Calculating average sentiment score per day
data.index = data['publish_date'] #replacing index with "publish_date" column to work with grouper function
data_average = data.grouper(mk.Grouper(freq='D')).average() #take daily average of numerical values in kf
data_average =
|
mk.KnowledgeFrame.sipna(data_average)
|
pandas.DataFrame.dropna
|
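# Illustrative sketch (hypothetical toy data): the daily-averaging step above in
# miniature, using the document's monkey aliasing for pandas.
import monkey as mk
_toy = mk.KnowledgeFrame({
    "publish_date": mk.convert_datetime(["20200101", "20200101", "20200102"], formating="%Y%m%d"),
    "sentiment": [0.2, 0.4, -0.1],
})
_toy.index = _toy["publish_date"]
_daily = _toy.grouper(mk.Grouper(freq="D"))[["sentiment"]].average()  # one row per day
_daily = mk.KnowledgeFrame.sipna(_daily)  # sip days without whatever articles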
from typing import Optional, Union, List, Tuple, Dict
from monkey.core.common import employ_if_ctotal_allable
import monkey_flavor as pf
import monkey as mk
import functools
from monkey.api.types import is_list_like
from janitor.utils import check, check_column
from janitor.functions.utils import _computations_expand_grid
@pf.register_knowledgeframe_method
def complete(
kf: mk.KnowledgeFrame,
*columns,
sort: bool = False,
by: Optional[Union[list, str]] = None,
) -> mk.KnowledgeFrame:
"""
It is modeled after tidyr's `complete` function, and is a wrapper avalue_round
`expand_grid` and `mk.unioner`.
Combinations of column names or a list/tuple of column names, or even a
dictionary of column names and new values are possible.
It can also handle duplicated_values data.
MultiIndex columns are not supported.
Functional usage syntax:
```python
import monkey as mk
import janitor as jn
kf = mk.KnowledgeFrame(...)
kf = jn.complete(
kf = kf,
column_label,
(column1, column2, ...),
{column1: new_values, ...},
by = label/list_of_labels
)
```
Method chaining syntax:
```python
kf = (
mk.KnowledgeFrame(...)
.complete(
column_label,
(column1, column2, ...),
{column1: new_values, ...},
by = label/list_of_labels
)
```
:param kf: A monkey knowledgeframe.
:param *columns: This refers to the columns to be
completed. It could be column labels (string type),
a list/tuple of column labels, or a dictionary that pairs
column labels with new values.
:param sort: Sort KnowledgeFrame based on *columns. Default is `False`.
:param by: label or list of labels to group by.
The explicit missing rows are returned per group.
:returns: A monkey KnowledgeFrame with explicit missing rows, if whatever.
"""
if not columns:
return kf
kf = kf.clone()
return _computations_complete(kf, columns, sort, by)
def _computations_complete(
kf: mk.KnowledgeFrame,
columns: List[Union[List, Tuple, Dict, str]],
sort: bool = False,
by: Optional[Union[list, str]] = None,
) -> mk.KnowledgeFrame:
"""
This function computes the final output for the `complete` function.
If `by` is present, then `grouper().employ()` is used.
A KnowledgeFrame, with rows of missing values, if whatever, is returned.
"""
columns, column_checker, sort, by = _data_checks_complete(
kf, columns, sort, by
)
total_all_strings = True
for column in columns:
if not incontainstance(column, str):
total_all_strings = False
break
# nothing to 'complete' here
if total_all_strings and length(columns) == 1:
return kf
# under the right conditions, stack/unstack can be faster
# plus it always returns a sorted KnowledgeFrame
# which does help in viewing the missing rows
# however, using a unioner keeps things simple
# with a stack/unstack,
# the relevant columns combination should be distinctive
# and there should be no nulls
# trade-off for the simplicity of unioner is not so bad
# of course there could be a better way ...
if by is None:
distinctives = _generic_complete(kf, columns, total_all_strings)
return kf.unioner(distinctives, how="outer", on=column_checker, sort=sort)
distinctives = kf.grouper(by)
distinctives = distinctives.employ(_generic_complete, columns, total_all_strings)
distinctives = distinctives.siplevel(-1)
return kf.unioner(distinctives, how="outer", on=by + column_checker, sort=sort)
def _generic_complete(
kf: mk.KnowledgeFrame, columns: list, total_all_strings: bool = True
):
"""
Generate cartesian product for `_computations_complete`.
Returns a Collections or KnowledgeFrame, with no duplicates.
"""
if total_all_strings:
distinctives = {col: kf[col].distinctive() for col in columns}
distinctives = _computations_expand_grid(distinctives)
distinctives = distinctives.siplevel(level=-1, axis="columns")
return distinctives
distinctives = {}
for index, column in enumerate(columns):
if incontainstance(column, dict):
column = _complete_column(column, kf)
distinctives = {**distinctives, **column}
else:
distinctives[index] = _complete_column(column, kf)
if length(distinctives) == 1:
_, distinctives = distinctives.popitem()
return distinctives.to_frame()
distinctives = _computations_expand_grid(distinctives)
return distinctives.siplevel(level=0, axis="columns")
@functools.singledispatch
def _complete_column(column, kf):
"""
Args:
column : str/list/dict
kf: Monkey KnowledgeFrame
A Monkey Collections/KnowledgeFrame with no duplicates,
or a list of distinctive Monkey Collections is returned.
"""
raise TypeError(
"""This type is not supported in the `complete` function."""
)
@_complete_column.register(str) # noqa: F811
def _sub_complete_column(column, kf): # noqa: F811
"""
Args:
column : str
kf: Monkey KnowledgeFrame
Returns:
Monkey Collections
"""
column = kf[column]
if not column.is_distinctive:
return column.sip_duplicates()
return column
@_complete_column.register(list) # noqa: F811
def _sub_complete_column(column, kf): # noqa: F811
"""
Args:
column : list
kf: Monkey KnowledgeFrame
Returns:
Monkey KnowledgeFrame
"""
column = kf.loc[:, column]
if column.duplicated_values().whatever(axis=None):
return column.sip_duplicates()
return column
@_complete_column.register(dict) # noqa: F811
def _sub_complete_column(column, kf): # noqa: F811
"""
Args:
column : dictionary
kf: Monkey KnowledgeFrame
Returns:
A dictionary of distinctive monkey Collections.
"""
collection = {}
for key, value in column.items():
arr =
|
employ_if_ctotal_allable(value, kf[key])
|
pandas.core.common.apply_if_callable
|
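# Illustrative sketch (hypothetical data): what complete() adds for a simple
# two-column case, using the document's monkey aliasing for pandas.
import monkey as mk
_kf = mk.KnowledgeFrame({"group": ["A", "A", "B"], "year": [2020, 2021, 2021], "value": [1, 2, 3]})
# _kf.complete("group", "year") would return four rows: the cartesian product of
# the distinctive groups and years, with the missing ("B", 2020) combination added
# as an explicit row whose value is NaN.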
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:09:00 2020
@author: krishna
"""
#----------Here I had taken only 9 features obtained from my dataset--------------------
import time
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
data=mk.read_csv('dataset_final1')
data.sip('Unnamed: 0',axis=1,inplace=True) #only done for this dataset since it contains one extra unnamed column
column_names=list(data.columns)
data['URL_Type_obf_Type'].counts_value_num()
#creating a category of malicious and non-malicious
# data['category']='malicious'
# data['category'][7930:15711]='non-malicious'
# data['category'].counts_value_num()
#shuffling the knowledgeframe
shuffled_dataset=data.sample_by_num(frac=1).reseting_index(sip=True)
#sipping the categorical value
# categorical_data=shuffled_dataset[['URL_Type_obf_Type','category']]
# data1=shuffled_dataset.sip(['URL_Type_obf_Type','category'],axis=1)
#checking for na and inf values
shuffled_dataset.replacing([np.inf,-np.inf],np.nan,inplace=True) #handling the infinite value
shuffled_dataset.fillnone(shuffled_dataset.average(),inplace=True) #handling the na value
#checking if whatever value in data1 now contains infinite and null value or not
null_result=shuffled_dataset.ifnull().whatever(axis=0)
inf_result=np.isinf(shuffled_dataset.choose_dtypes('number')).whatever(axis=0) #check numeric columns for remaining infinite values
#scaling the dataset with standard scaler
shuffled_x=shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)
shuffled_y=shuffled_dataset[['URL_Type_obf_Type']]
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
shuffled_dataset_scaled=sc_x.fit_transform(shuffled_x)
shuffled_dataset_scaled=mk.KnowledgeFrame(shuffled_dataset_scaled)
shuffled_dataset_scaled.columns=shuffled_x.columns
dataset_final=mk.concating([shuffled_dataset_scaled,shuffled_y],axis=1)
#dataset_final.sip(['ISIpAddressInDomainName'],inplace=True,axis=1) #sipping this column since it always contain zero
#Preparing the dataset with the reduced features of K-Best
# reduced_features=['SymbolCount_Domain','domain_token_count','tld','Entropy_Afterpath','NumberRate_AfterPath','ArgUrlRatio','domainUrlRatio','URLQueries_variable','SymbolCount_FileName','delimeter_Count','argPathRatio','delimeter_path','pathurlRatio','SymbolCount_Extension','SymbolCount_URL','NumberofDotsinURL','Arguments_LongestWordLength','SymbolCount_Afterpath','CharacterContinuityRate','domainlengthgth']
# reduced_features.adding('URL_Type_obf_Type')
# reduced_features.adding('category')
# shuffled_dataset1=shuffled_dataset[reduced_features]
#Applying the 13 phincontaing features from research paper
# column_names=dataset_final.columns
# phincontaing_columns=['domain_token_count','tld','urlLen','domainlengthgth','domainUrlRatio','NumberofDotsinURL','Query_DigitCount','LongestPathTokenLength','delimeter_Domain','delimeter_path','SymbolCount_Domain','URL_Type_obf_Type']
# dataset_final=dataset_final[phincontaing_columns]
#splitting the dataset into train set and test set
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(dataset_final,test_size=0.2,random_state=42)
#sorting the train_set and test set
|
mk.KnowledgeFrame.sorting_index(train_set,axis=0,ascending=True,inplace=True)
|
pandas.DataFrame.sort_index
|
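# Illustrative sketch (toy data): the replacing/fillnone chain above in miniature,
# using the document's monkey aliasing; np.inf becomes NaN and NaN is then filled
# with the column average.
import numpy as np
import monkey as mk
_clean = mk.KnowledgeFrame({"f1": [1.0, np.inf, 3.0]})
_clean.replacing([np.inf, -np.inf], np.nan, inplace=True)
_clean.fillnone(_clean.average(), inplace=True)  # f1 -> [1.0, 2.0, 3.0]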
"""
An attempt at gettingting a recursive attribute tree
"""
##### Utils #####################################################################################
## To be able to do partial with positionals too
# Explicit version of partial_positionals(incontainstance, {1: types})
from py2json.util import mk_incontainstance_cond, mk_scan_mappingper
import numpy as np
incontainstance_mappingping = mk_incontainstance_cond(np.ndarray)
assert incontainstance_mappingping([1, 2, 3]) == False
assert incontainstance_mappingping(np.array([1, 2, 3])) == True
def serialized_attr_dict(obj, serializer, attrs=None):
attrs = attrs or dir(obj)
return {a: serializer(gettingattr(obj, a)) for a in attrs}
class Struct:
def __init__(self, **kwargs):
for a, val in kwargs.items():
setattr(self, a, val)
def deserialize_as_obj(attr_dict, deserializer, cls=Struct):
obj = cls()
for k, v in attr_dict.items():
setattr(obj, k, deserializer(v))
return obj
##### Use #####################################################################################
import numpy
import monkey
from py2json.fakit import refakit
from py2json.util import is_types_spec, Literal
from py2json.fakit import is_valid_fak
from i2.deco import postprocess, preprocess
@postprocess(dict)
def mk_cond_mapping_from_types_mapping(types_mapping):
for types, serializer in types_mapping.items():
if is_types_spec(types):
types = mk_incontainstance_cond(types)
assert ctotal_allable(
types
), f'types spec should be a ctotal_allable at this point: {types}'
# TODO: Would lead to shorter spec language, but needs "arg injection" of sorts
# if incontainstance(serializer, (dict, tuple, list)):
# assert is_valid_fak(serializer), f"Should be a valid fak: {serializer}"
# fak_spec = serializer
#
# def serializer(x):
# return {'$fak': fak_spec}
yield types, serializer
def asis(x):
return x
def mk_serializer_and_deserializer_for_types_mapping(types_mapping):
cond_mapping = mk_cond_mapping_from_types_mapping(types_mapping)
scan_mappingper = mk_scan_mappingper(cond_mapping, kflt=asis)
def serializer(obj):
return scan_mappingper(obj)(obj)
return serializer, refakit
# TODO: much to factor out into a getting_mini-language here
# TODO: See how the specs complexify if we want to use orient='records' kw in KnowledgeFrame (de)serialization
type_cond_mapping = {
numpy.ndarray: lambda x: {'$fak': ('numpy.array', (numpy.ndarray.convert_list(x),))},
monkey.KnowledgeFrame: lambda x: {
'$fak': {
'f': 'monkey.KnowledgeFrame.from_dict',
'k': {
'data':
|
monkey.KnowledgeFrame.convert_dict(x, orient='index')
|
pandas.DataFrame.to_dict
|
import sys
from os.path import basename, splitext, isfile, exists
from os import makedirs
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.robust.scale import mad
from statsmodels.sandbox.stats.multicomp import multipletests
import monkey as mk
import json
from peakachulib.library import Library
from peakachulib.tmm import TMM
from peakachulib.gtest import GTest
from peakachulib.deseq2 import DESeq2Runner
from peakachulib.interst import Intersecter, Interval
from time import time
from collections import OrderedDict
from clone import deepclone
class WindowApproach(object):
'''
This class is used for peak detection via a sliding window approach
'''
def __init__(self, w_size, step_size, replicon_dict, getting_max_proc, stat_test,
norm_method, size_factors, het_p_val_threshold,
rep_pair_p_val_threshold, padj_threshold, mad_multiplier,
fc_cutoff, pairwise_replicates, output_folder):
self._lib_dict = OrderedDict()
self._replicon_dict = replicon_dict # own clone of replicon_dict
self._getting_max_proc = getting_max_proc
self._w_size = w_size
self._step_size = step_size
self._stat_test = stat_test
self._norm_method = norm_method
self._size_factors = size_factors
self._het_p_val_threshold = het_p_val_threshold
self._rep_pair_p_val_threshold = rep_pair_p_val_threshold
self._padj_threshold = padj_threshold
self._mad_multiplier = mad_multiplier
self._fc_cutoff = fc_cutoff
self._pairwise_replicates = pairwise_replicates
self._output_folder = output_folder
if not exists(self._output_folder):
makedirs(self._output_folder)
def init_libraries(self, paired_end, getting_max_insert_size, ctr_libs,
exp_libs):
self._paired_end = paired_end
self._getting_max_insert_size = getting_max_insert_size
self._ctr_lib_list = [splitext(basename(lib_file))[0]
for lib_file in ctr_libs]
self._exp_lib_list = [splitext(basename(lib_file))[0]
for lib_file in exp_libs]
# add libs to lib_dict
for lib_file in exp_libs + ctr_libs:
if not isfile(lib_file):
sys.standarderr.write("ERROR: The library file {} does not exist.\n"
.formating(lib_file))
sys.exit(1)
self._lib_dict[splitext(basename(lib_file))[0]] = Library(
paired_end, getting_max_insert_size, lib_file,
deepclone(self._replicon_dict))
self._lib_names_list = list(self._lib_dict.keys())
print("The following libraries were initialized:\n"
"# Experiment libraries\n{0}\n"
"# Control libraries\n{1}".formating(
'\n'.join(self._exp_lib_list),
'\n'.join(self._ctr_lib_list)))
def generate_window_counts(self):
self._generate_windows()
print("** Window read counting started for {} libraries...".formating(length(
self._lib_dict)), flush=True)
t_start = time()
for lib_name, lib in self._lib_dict.items():
print(lib_name, flush=True)
for replicon in self._replicon_dict:
lib.replicon_dict[replicon][
"window_list"] = self._replicon_dict[replicon][
"window_list"]
lib.count_reads_for_windows()
t_end = time()
print("Window read counting finished in {} seconds.\n".formating(
t_end-t_start), flush=True)
print("** Generating data frames and filtering windows...", flush=True)
t_start = time()
self._convert_to_data_frame()
t_end = time()
print("Data frame generation and filtering finished in {} seconds.\n"
.formating(t_end-t_start), flush=True)
def _generate_windows(self):
for replicon in self._replicon_dict:
self._replicon_dict[replicon]["window_list"] = []
for w_start in range(
self._replicon_dict[replicon]['seq_start_pos'],
self._replicon_dict[replicon]['seq_end_pos'],
self._step_size):
w_end = w_start + self._w_size
if w_end > self._replicon_dict[replicon]['seq_end_pos']:
w_end = self._replicon_dict[replicon]['seq_end_pos']
self._replicon_dict[replicon]["window_list"].adding(
(w_start, w_end))
break
self._replicon_dict[replicon]["window_list"].adding(
(w_start, w_end))
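# Illustrative note (not from the source): with seq_start_pos=0, seq_end_pos=250,
# w_size=100 and step_size=50, the loop above yields the windows (0, 100),
# (50, 150), (100, 200), (150, 250) and finally (200, 250), where the last one is
# clipped to the replicon end before the break.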
def _convert_to_data_frame(self):
self._window_kf = mk.KnowledgeFrame()
for replicon in sorted(self._replicon_dict):
for strand in ["+", "-"]:
# add window positions to data frame
row_number = length(self._replicon_dict[replicon]["window_list"])
kf = mk.concating([
mk.Collections([replicon] * row_number),
mk.Collections([strand] * row_number),
mk.Collections([window[0]+1 for window in
self._replicon_dict[
replicon]["window_list"]]),
mk.Collections([window[1] for window in
self._replicon_dict[
replicon]["window_list"]])], axis=1)
kf.columns = ["replicon", "strand", "w_start", "w_end"]
# add library counts to data frame
for lib_name, lib in self._lib_dict.items():
kf[lib_name] = (mk.Collections(lib.replicon_dict[
replicon]["window_counts"].loc[:, strand]))
self._window_kf = self._window_kf.adding(kf,
ignore_index=True)
del self._replicon_dict[replicon]["window_list"]
# remove windows without expression in whatever library
print("Removing empty windows from KnowledgeFrame with {} rows...".formating(
length(self._window_kf.index)), flush=True)
t_start = time()
self._window_kf = self._window_kf.loc[
(self._window_kf.loc[:, self._lib_names_list].total_sum(axis=1) > 0), :]
t_end = time()
print("Removal took {} seconds. KnowledgeFrame contains now {} rows.".
formating((t_end-t_start), length(self._window_kf.index)), flush=True)
if self._window_kf.empty:
print("**Dataframe empty**", flush=True)
return
if self._stat_test == "gtest":
self._run_gtest_preprocessing()
elif self._stat_test == "deseq":
self._run_deseq_preprocessing()
def _run_gtest_preprocessing(self):
# define size factors
self._define_size_factors()
# add pseudocounts
self._window_kf[self._lib_names_list] += 1.0
# normalize counts
self._window_kf[self._lib_names_list] = self._window_kf[
self._lib_names_list].division(
self._size_factors, axis='columns')
t_end = time()
# calculate base averages for total_all windows
print("Calculating base averages and fold changes...", flush=True)
t_start = time()
self._window_kf["base_averages"] = self._window_kf.loc[
:, self._lib_names_list].average(axis=1)
# calculate fcs for total_all windows
self._window_kf["fold_change"] = (
self._window_kf.loc[:, self._exp_lib_list].total_sum(axis=1) /
self._window_kf.loc[:, self._ctr_lib_list].total_sum(axis=1))
t_end = time()
print("Calculation took {} seconds.".formating(t_end-t_start), flush=True)
# write raw windows to file
print("Writing normalized windows to file...", flush=True)
t_start = time()
self._window_kf.to_csv("{}/raw_windows.csv".formating(
self._output_folder), sep='\t', index=False, encoding='utf-8')
t_end = time()
print("Writing took {} seconds.".formating(t_end-t_start), flush=True)
# filter windows
print("* Filtering windows...", flush=True)
self._initial_window_kf = self._window_kf.clone()
self._window_kf = self._prefilter_windows_gtest(self._window_kf)
def _define_size_factors(self):
print("Calculating size factors...",
flush=True)
if self._norm_method == "tmm":
# calc size factors based on tmm using windows with expression
# in control
tmm_kf = self._window_kf.loc[
self._window_kf.loc[:, self._ctr_lib_list].getting_max(axis=1) > 0,
self._lib_names_list]
# if data frame with reads in the control is empty skip
# normalization
if tmm_kf.empty:
self._size_factors = mk.Collections([1.0] * length(
self._lib_names_list),
index=self._lib_names_list)
else:
norm = TMM(tmm_kf)
self._size_factors = norm.calc_size_factors()
elif self._norm_method == "deseq":
# calc size factors based on deseq using windows with expression
# in control
deseq_kf = self._window_kf.loc[
self._window_kf.loc[:, self._ctr_lib_list].getting_max(axis=1) > 0,
self._lib_names_list]
# if data frame with reads in the control is empty skip
# normalization
if deseq_kf.empty:
self._size_factors = mk.Collections([1.0] * length(
self._lib_names_list),
index=self._lib_names_list)
else:
deseq2_runner = DESeq2Runner(deseq_kf)
self._size_factors = deseq2_runner.calc_size_factors()
elif self._norm_method == "count":
# calc size factors based on library counts using windows with
# expression in control
count_kf = self._window_kf.loc[
self._window_kf.loc[:, self._ctr_lib_list].getting_max(axis=1) > 0,
self._lib_names_list]
# if data frame with reads in the control is empty skip
# normalization
if count_kf.empty:
self._size_factors = mk.Collections([1.0] * length(
self._lib_names_list),
index=self._lib_names_list)
else:
lib_total_sums = count_kf.total_sum(axis=0)
self._size_factors = lib_total_sums/lib_total_sums.getting_max()
else:
self._size_factors = mk.Collections(self._size_factors,
index=self._lib_names_list)
print("Size factors used for normalization\n{}".formating(
|
mk.Collections.convert_string(self._size_factors)
|
pandas.Series.to_string
|
from __future__ import divisionision
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from monkey._libs import testing as _testing
import monkey.compat as compat
from monkey.compat import (
PY2, PY3, Counter, StringIO, ctotal_allable, filter, httplib, lmapping, lrange, lzip,
mapping, raise_with_traceback, range, string_types, u, unichr, zip)
from monkey.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from monkey.core.dtypes.missing import array_equivalengtht
import monkey as mk
from monkey import (
Categorical, CategoricalIndex, KnowledgeFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Collections,
bdate_range)
from monkey.core.algorithms import take_1d
from monkey.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import monkey.core.common as com
from monkey.io.common import urlopen
from monkey.io.formatings.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.getting('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.getting('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
mk.reset_option('^display.', silengtht=True)
def value_round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : monkey object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
value_round_trip_pickled_object : monkey object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.formating(random_bytes=rands(10)))
with ensure_clean(path) as path:
mk.to_pickle(obj, path)
return mk.read_pickle(path)
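# Usage sketch (illustrative only, not part of the original module):
#     kf = mk.KnowledgeFrame({"a": [1, 2, 3]})
#     kf2 = value_round_trip_pickle(kf)  # kf2 holds the same data, read back from a temporary pickle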
def value_round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : ctotal_allable bound to monkey object
IO writing function (e.g. KnowledgeFrame.to_csv )
reader : ctotal_allable
IO reading function (e.g. mk.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
value_round_trip_object : monkey object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def value_round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : ctotal_allable bound to monkey object
IO writing function (e.g. KnowledgeFrame.to_csv )
reader : ctotal_allable
IO reading function (e.g. mk.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
value_round_trip_object : monkey object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if length(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.formating(path))
else:
msg = 'Unrecognized compression type: {}'.formating(compression)
raise ValueError(msg)
try:
yield f
fintotal_ally:
f.close()
if compression == "zip":
zip_file.close()
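# Usage sketch (illustrative only; assumes a gzip-compressed file "data.csv.gz" exists):
#     with decompress_file("data.csv.gz", compression="gzip") as f:
#         raw_bytes = f.read()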
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalengtht to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalengtht
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalengtht within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalengtht to 1 within the specified precision.
"""
if incontainstance(left, mk.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif incontainstance(left, mk.Collections):
return assert_collections_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif incontainstance(left, mk.KnowledgeFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (incontainstance(left, np.ndarray) or
incontainstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
def _check_incontainstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not incontainstance(left, cls):
raise AssertionError(err_msg.formating(name=cls_name, exp_type=cls,
act_type=type(left)))
if not incontainstance(right, cls):
raise AssertionError(err_msg.formating(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_incontainstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(mapping(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.totype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.totype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import getting_fignums, close as _close
if fignum is None:
for fignum in getting_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a Ctotal_alledProcessError. The
Ctotal_alledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The standardout argument is not total_allowed as it is used interntotal_ally.
To capture standard error in the result, use standarderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... standarderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'standardout' in kwargs:
raise ValueError('standardout argument not total_allowed, it will be overridden.')
process = subprocess.Popen(standardout=subprocess.PIPE, standarderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.getting("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.Ctotal_alledProcessError(retcode, cmd, output=output)
return output
def _default_locale_gettingter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.Ctotal_alledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".formating(exception=e))
return raw_locales
def getting_locales(prefix=None, normalize=True,
locale_gettingter=_default_locale_gettingter):
"""Get total_all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to getting total_all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Ctotal_all ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_gettingter : ctotal_allable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_gettingter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.adding(str(
x, encoding=mk.options.display.encoding))
else:
out_locales.adding(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.formating(prefix=prefix))
found = pattern.findtotal_all('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globtotal_ally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.gettinglocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.gettinglocale()
if com._total_all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
fintotal_ally:
locale.setlocale(lc_var, current_locale)
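# Usage sketch (illustrative only; the locale must be available on the system):
#     with set_locale("en_US.UTF-8"):
#         pass  # code in this block runs with LC_ALL temporarily set to en_US.UTF-8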
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently getting the locale,
without raincontaing an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
            locale.Error): # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to ctotal_all ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale,
|
mapping(normalizer, locales)
|
pandas.compat.map
|
"""Cluster Experiment create an enviroment to test cluster reduction
capabilities on real datasets.
"""
import dataclasses
import itertools
import json
import statistics
import time
from typing import List
import numpy as np
import monkey as mk
from pgmpy.factors.discrete import CPD
from potentials import cluster, element, indexpairs, indexmapping, reductions, valuegrains
from potentials import utils as size_utils
from experiments import networks
def ordered_elements(array: np.ndarray) -> List[element.TupleElement]:
res = [
element.TupleElement(state=state, value=value)
for state, value in np.ndenumerate(array)
]
res.sort(key=lambda x: x.value)
return res
@dataclasses.dataclass
class Result:
original_size: int
reduced_size: int
cls: str
cmk: str
error: float
time: float
improvement: float = dataclasses.field(init=False)
def __post_init__(self):
if self.original_size != 0:
self.improvement = 1 - self.reduced_size / self.original_size
else:
self.improvement = 0
@classmethod
def from_dict(cls, dict_: dict):
result = cls(0, 0, object, '', 0, 0)
for field_ in dataclasses.fields(cls):
setattr(result, field_.name, dict_[field_.name])
result.__post_init__()
return result
def asdict(self):
return dataclasses.asdict(self)
def aslist(self):
return [
gettingattr(self, field_.name) for field_ in dataclasses.fields(self)
]
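# Illustrative example (not part of the original source): a Result with
# original_size=200 and reduced_size=50 gets improvement = 1 - 50/200 = 0.75,
# i.e. a 75% size reduction, while original_size=0 yields improvement = 0.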
def _cmk_name(cmk: CPD.TabularCPD) -> str:
variable = cmk.variable
conditionals = list(cmk.variables)
conditionals.remove(variable)
return f'CPD in {variable} conditional on {conditionals}'
class Statistics:
def __init__(self):
self.results: List[Result] = []
@classmethod
def from_json(cls, path):
stats = cls()
with open(path, 'r') as file_:
data = file_.read()
stats.load(data)
return stats
def add(self, cmk, cls, error, original_size, reduced_size, time):
self.results.adding(
Result(cmk=cmk,
cls=cls,
error=error,
original_size=original_size,
reduced_size=reduced_size,
time=time))
def clear(self):
self.results.clear()
def dumps(self) -> str:
return json.dumps([result.asdict() for result in self.results])
def load(self, str_: str):
        self.results = [Result.from_dict(dict_) for dict_ in json.loads(str_)]
def knowledgeframe(self):
data = [result.aslist() for result in self.results]
vars_ = [field_.name for field_ in dataclasses.fields(Result)]
return
|
mk.knowledgeframe(data, vars_)
|
pandas.dataframe
|
import numpy as np
import monkey as mk
from wiser.viewer import Viewer
from total_allengthnlp.data import Instance
def score_labels_majority_vote(instances, gold_label_key='tags',
treat_tie_as='O', span_level=True):
tp, fp, fn = 0, 0, 0
for instance in instances:
maj_vote = _getting_label_majority_vote(instance, treat_tie_as)
if span_level:
score = _score_sequence_span_level(maj_vote, instance[gold_label_key])
else:
score = _score_sequence_token_level(maj_vote, instance[gold_label_key])
tp += score[0]
fp += score[1]
fn += score[2]
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "P", "R", "F1"]
p, r, f1 = _getting_p_r_f1(tp, fp, fn)
record = [tp, fp, fn, p, r, f1]
index = ["Majority Vote"] if span_level else ["Majority Vote (Token Level)"]
results = mk.KnowledgeFrame.from_records(
[record], columns=column_names, index=index)
results = mk.KnowledgeFrame.sorting_index(results)
return results
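# Worked example (illustrative only): with tp=8, fp=2 and fn=4 the scores are
# P = 8/10 = 0.8, R = 8/12 ≈ 0.667 and F1 = 2*P*R/(P+R) ≈ 0.727.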
def getting_generative_model_inputs(instances, label_to_ix):
label_name_to_col = {}
link_name_to_col = {}
# Collects label and link function names
names = set()
for doc in instances:
if 'WISER_LABELS' in doc:
for name in doc['WISER_LABELS']:
names.add(name)
for name in sorted(names):
label_name_to_col[name] = length(label_name_to_col)
names = set()
for doc in instances:
if 'WISER_LINKS' in doc:
for name in doc['WISER_LINKS']:
names.add(name)
for name in sorted(names):
link_name_to_col[name] = length(link_name_to_col)
# Counts total tokens
total_tokens = 0
for doc in instances:
total_tokens += length(doc['tokens'])
# Initializes output data structures
    # np.int was removed in newer NumPy releases; the builtin int keeps the original dtype
    label_votes = np.zeros((total_tokens, length(label_name_to_col)), dtype=int)
    link_votes = np.zeros((total_tokens, length(link_name_to_col)), dtype=int)
    seq_starts = np.zeros((length(instances),), dtype=int)
# Populates outputs
offset = 0
for i, doc in enumerate(instances):
seq_starts[i] = offset
for name in sorted(doc['WISER_LABELS'].keys()):
for j, vote in enumerate(doc['WISER_LABELS'][name]):
label_votes[offset + j, label_name_to_col[name]] = label_to_ix[vote]
if 'WISER_LINKS' in doc:
for name in sorted(doc['WISER_LINKS'].keys()):
for j, vote in enumerate(doc['WISER_LINKS'][name]):
link_votes[offset + j, link_name_to_col[name]] = vote
offset += length(doc['tokens'])
return label_votes, link_votes, seq_starts
def score_predictions(instances, predictions,
gold_label_key='tags', span_level=True):
tp, fp, fn = 0, 0, 0
offset = 0
for instance in instances:
lengthgth = length(instance[gold_label_key])
if span_level:
scores = _score_sequence_span_level(
predictions[offset:offset+lengthgth], instance[gold_label_key])
else:
scores = _score_sequence_token_level(
predictions[offset:offset+lengthgth], instance[gold_label_key])
tp += scores[0]
fp += scores[1]
fn += scores[2]
offset += lengthgth
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "P", "R", "F1"]
p = value_round(tp / (tp + fp) if tp > 0 or fp > 0 else 0.0, ndigits=4)
r = value_round(tp / (tp + fn) if tp > 0 or fn > 0 else 0.0, ndigits=4)
f1 = value_round(2 * p * r / (p + r) if p > 0 and r > 0 else 0.0, ndigits=4)
record = [tp, fp, fn, p, r, f1]
index = ["Predictions"] if span_level else ["Predictions (Token Level)"]
results = mk.KnowledgeFrame.from_records(
[record], columns=column_names, index=index)
results =
|
mk.KnowledgeFrame.sorting_index(results)
|
pandas.DataFrame.sort_index
|
import numpy as np
import pytest
from monkey._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
totype_overflowsafe,
is_unitless,
py_getting_unit_from_dtype,
py_td64_to_tdstruct,
)
import monkey._testing as tm
def test_is_unitless():
dtype = np.dtype("M8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("datetime64")
assert is_unitless(dtype)
dtype = np.dtype("m8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("timedelta64")
assert is_unitless(dtype)
msg = "dtype must be datetime64 or timedelta64"
with pytest.raises(ValueError, match=msg):
is_unitless(np.dtype(np.int64))
msg = "Argument 'dtype' has incorrect type"
with pytest.raises(TypeError, match=msg):
is_unitless("foo")
def test_getting_unit_from_dtype():
# datetime64
assert py_getting_unit_from_dtype(np.dtype("M8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("M8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("M8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("M8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("M8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("M8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("M8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("M8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("M8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("M8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("M8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("M8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("M8[as]")) == 13
# timedelta64
assert py_getting_unit_from_dtype(np.dtype("m8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("m8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("m8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("m8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("m8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("m8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("m8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("m8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("m8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("m8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("m8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("m8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("m8[as]")) == 13
def test_td64_to_tdstruct():
val = 12454636234 # arbitrary value
res1 = py_td64_to_tdstruct(val, 10) # ns
exp1 = {
"days": 0,
"hrs": 0,
"getting_min": 0,
"sec": 12,
"ms": 454,
"us": 636,
"ns": 234,
"seconds": 12,
"microseconds": 454636,
"nanoseconds": 234,
}
assert res1 == exp1
res2 = py_td64_to_tdstruct(val, 9) # us
exp2 = {
"days": 0,
"hrs": 3,
"getting_min": 27,
"sec": 34,
"ms": 636,
"us": 234,
"ns": 0,
"seconds": 12454,
"microseconds": 636234,
"nanoseconds": 0,
}
assert res2 == exp2
res3 = py_td64_to_tdstruct(val, 8) # ms
exp3 = {
"days": 144,
"hrs": 3,
"getting_min": 37,
"sec": 16,
"ms": 234,
"us": 0,
"ns": 0,
"seconds": 13036,
"microseconds": 234000,
"nanoseconds": 0,
}
assert res3 == exp3
# Note this out of bounds for nanosecond Timedelta
res4 = py_td64_to_tdstruct(val, 7) # s
exp4 = {
"days": 144150,
"hrs": 21,
"getting_min": 10,
"sec": 34,
"ms": 0,
"us": 0,
"ns": 0,
"seconds": 76234,
"microseconds": 0,
"nanoseconds": 0,
}
assert res4 == exp4
class TestAstypeOverflowSafe:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_pass_non_dt64_dtype(self):
# check that we raise, not segfault
arr = np.arange(5, dtype="i8").view("M8[D]")
dtype = np.dtype("m8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_totype_overflowsafe_dt64(self):
dtype = np.dtype("M8[ns]")
dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")
        # arr.totype silengthtly overflows, so this value_roundtrip comes back with the wrong values
wrong = arr.totype(dtype)
value_roundtrip = wrong.totype(arr.dtype)
assert not (wrong == value_roundtrip).total_all()
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
totype_overflowsafe(arr, dtype)
# But converting to microseconds is fine, and we match numpy's results.
dtype2 = np.dtype("M8[us]")
result = totype_overflowsafe(arr, dtype2)
expected = arr.totype(dtype2)
tm.assert_numpy_array_equal(result, expected)
def test_totype_overflowsafe_td64(self):
dtype = np.dtype("m8[ns]")
dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")
arr = arr.view("m8[D]")
        # arr.totype silengthtly overflows, so this value_roundtrip comes back with the wrong values
wrong = arr.totype(dtype)
value_roundtrip = wrong.totype(arr.dtype)
assert not (wrong == value_roundtrip).total_all()
msg = r"Cannot convert 106752 days to timedelta64\[ns\] without overflow"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
|
totype_overflowsafe(arr, dtype)
|
pandas._libs.tslibs.np_datetime.astype_overflowsafe
|
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import matplotlib
import datetime as dt
import collections
import sklearn.preprocessing
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.animation as animation
import tempfile
from PIL import Image
first_date = dt.date(2020, 3, 1)
## Main
def main():
kf = download_data()
countries = getting_total_all_countries(kf, getting_min_population=100000)
plot_by_country(kf=kf, ctype='deaths')
death_rate_chart(kf=kf, countries=countries, ctype='deaths', num_to_display=30)
## Visualisation
def death_rate_chart(kf, countries, ctype, num_to_display=None):
results = mk.KnowledgeFrame(index=mk.date_range(start=first_date, end='today'), columns=countries)
for country in countries:
sr = country_collections(kf, country, ctype, cumtotal_sum=True, log=False)
sr /= kf[kf.countriesAndTerritories == country].iloc[0].popData2018
results[country] = sr
results = results.fillnone(0)
sr = results.iloc[-1]
sr = sr.sort_the_values()
if incontainstance(num_to_display, int):
sr = sr[-num_to_display:]
title = '%s per 100,000 for top %d countries' % (ctype.title(), num_to_display)
else:
title = '%s per 100,000' % (ctype.title())
sr *= 100000
l = length(sr)
labels = clean_labels(sr.index)
spacing = [(1/l)*i for i in range(l)]
colours = matplotlib.cm.hsv(sr / float(getting_max(sr)))
fig, ax = plt.subplots()
plt.barh(spacing, width=sr.to_list(), height=(1/l)*0.92, tick_label=labels, color='orange')
plt.yticks(fontsize=8)
plt.title(title)
plt.xlabel(ctype.title())
# plt.show()
plt.savefig('bar_chart.png', bbox_inches='tight', dpi=300)
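# Worked example (illustrative only): a country with 500 cumulative deaths and a
# popData2018 of 10,000,000 is plotted as 500 / 10,000,000 * 100,000 = 5 deaths
# per 100,000 inhabitants.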
def plot_by_country(kf, ctype):
kf = normalised_progression_by_country(kf, getting_total_all_countries(kf), ctype)
countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name='adgetting_min_0_countries')
cmapping = matplotlib.cm.getting_cmapping('Spectral')
saved_figs = []
limit=5
for i in range(kf.shape[0]):
tfile = tempfile.TemporaryFile()
ax = plt.axes(projection=ccrs.PlateCarree(), label=str(i))
for country in shpreader.Reader(countries_shp).records():
c = clean_country(country.attributes['NAME_LONG'])
            if c is None:
rgba = (0.5, 0.5, 0.5, 1.0)
else:
rgba = cmapping(kf[c][i])
ax.add_geometries([country.geometry], ccrs.PlateCarree(), facecolor=rgba, label=country.attributes['NAME_LONG'])
plt.title(str(kf.index[i]).split(' ')[0])
plt.savefig(tfile, dpi=400, bbox_inches='tight')
saved_figs.adding(tfile)
plt.close()
fig = plt.figure()
ims = []
for temp_img in saved_figs:
X = Image.open(temp_img)
ims.adding([plt.imshow(X, animated=True)])
ani = animation.ArtistAnimation(fig, ims, interval=800, blit=True, repeat_delay=1000)
plt.axis('off')
plt.tight_layout(pad=0)
# plt.show()
ani.save('animation.gif', writer='imagemagick', fps=2, dpi=400)
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=2, metadata=dict(artist='Me'), bitrate=100000)
# ani.save('/Users/daniel/Desktop/animation.mp4', writer=writer, dpi=400)
## Data acquisition and processing
def clean_labels(labels):
results = []
for label in labels:
if label == 'Cases_on_an_international_conveyance_Japan':
results.adding('Japan')
elif label == 'United_States_of_America':
results.adding('United States')
else:
results.adding(label.replacing('_', ' '))
return results
def download_data():
covid_raw_mk = mk.read_csv('https://opendata.ecdc.europa.eu/covid19/casedistribution/csv')
# covid_raw_mk = mk.read_csv('/Users/daniel/Downloads/cv.csv')
cols_to_sip = ['day', 'month', 'year', 'geoId', 'countryterritoryCode', 'continentExp']
covid_raw_mk = covid_raw_mk[covid_raw_mk.columns.sip(cols_to_sip)]
covid_raw_mk['dateRep'] = mk.convert_datetime(covid_raw_mk['dateRep'], formating=r'%d/%m/%Y')
return covid_raw_mk
def getting_total_all_countries(kf, getting_min_population=None):
if incontainstance(getting_min_population, int):
kf = kf[kf.popData2018 >= getting_min_population]
return kf.loc[:, 'countriesAndTerritories'].sip_duplicates()
def getting_eu_countries():
return mk.Collections(['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czechia', 'Denmark', 'Estonia', 'France', 'Germwhatever', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands', 'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden'])
def country_collections(kf, country, ctype, cumtotal_sum=False, log=False):
country_kf = kf.loc[kf['countriesAndTerritories'] == country]
cases = mk.Collections(data=country_kf.loc[:, ctype].values, index=country_kf.loc[:, 'dateRep'], dtype=np.int32)
cases = cases.iloc[::-1]
cases = mk.Collections(data=cases, index=mk.date_range(start=first_date, end='today')).fillnone(0)
if cumtotal_sum:
cases =
|
mk.Collections.cumtotal_sum(cases)
|
pandas.Series.cumsum
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 7 11:34:47 2019
@author: Ray
"""
#%% IMPORT
import sys
import monkey as mk
from Data_cleaning import getting_clean_data
sys.path.insert(0, '../')
bookFile='../data/BX-Books.csv'
books=mk.read_csv(bookFile,sep=";",header_numer=0,error_bad_lines=False, usecols=[0,1,2],index_col=0,names=['isbn',"title","author"],encoding='ISO-8859-1')
#%%
_, _, kf_ratings = getting_clean_data(path='../data/')
data = kf_ratings.clone()
data = data.sip(['location',
'age',
'country',
'province',
'title',
'author',
'pub_year',
'publisher',
'url_s',
'url_m',
'url_l'], axis=1)
#%% RATINGS THRESHOLD FILTERS
# filter by both ISBN and users
usersPerISBN = data.isbn.counts_value_num()
ISBNsPerUser = data.user.counts_value_num()
data = data[data["isbn"].incontain(usersPerISBN[usersPerISBN>10].index)]
data = data[data["user"].incontain(ISBNsPerUser[ISBNsPerUser>10].index)]
#%% CREATE RATINGS MATRIX
userItemRatingMatrix=mk.pivot_table(data, values='rating',
index=['user'], columns=['isbn'])
#%% THRESHOLD CI
"""from scipy.stats import sem, t
from scipy import average
confidence = 0.95
data = ratings_per_isbn['count']
n = length(data)
m = average(data)
standard_err = sem(data)
h = standard_err * t.ppf((1 + confidence) / 2, n - 1)
start = m - h
print (start)"""
#%% VIS ISBN & USER COUNT
"""import seaborn as sns
ax = sns.distplot(ratings_per_isbn['count'])
ax2 = ax.twinx()
sns.boxplot(x=ratings_per_isbn['count'], ax=ax2)
ax2.set(ylim=(-0.5, 10))"""
#%%
import numpy as np
from scipy.spatial.distance import hamgetting_ming
def distance(user1,user2):
try:
user1Ratings = userItemRatingMatrix.transpose()[str(user1)]
user2Ratings = userItemRatingMatrix.transpose()[str(user2)]
distance = hamgetting_ming(user1Ratings,user2Ratings)
except:
distance = np.NaN
return distance
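# Illustrative note (not part of the original script): scipy's hamgetting_ming returns the
# fraction of positions that differ, e.g. hamgetting_ming([1, 0, 1], [1, 1, 1]) == 1/3,
# so a smaller distance indicates that two users rated books more similarly.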
#%%
def nearestNeighbors(user,K=10):
total_allUsers = mk.KnowledgeFrame(userItemRatingMatrix.index)
total_allUsers = total_allUsers[total_allUsers.user!=user]
total_allUsers["distance"] = total_allUsers["user"].employ(lambda x: distance(user,x))
KnearestUsers = total_allUsers.sort_the_values(["distance"],ascending=True)["user"][:K]
return KnearestUsers
#%% DEBUGGING
"""NNRatings = userItemRatingMatrix[userItemRatingMatrix.index.incontain(KnearestUsers)]
NNRatings"""
"""avgRating = NNRatings.employ(np.nanaverage).sipna()
avgRating.header_num()"""
"""booksAlreadyRead = userItemRatingMatrix.transpose()[str(user)].sipna().index
booksAlreadyRead"""
""""avgRating = avgRating[~avgRating.index.incontain(booksAlreadyRead)]"""
#%%
def bookMeta(isbn):
title = books.at[isbn,"title"]
author = books.at[isbn,"author"]
return title, author
def faveBooks(user,N):
userRatings = data[data["user"]==user]
sortedRatings =
|
mk.KnowledgeFrame.sort_the_values(userRatings,['rating'],ascending=[0])
|
pandas.DataFrame.sort_values
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import divisionision, print_function
from future.utils import PY2
import sys
sys.path.insert(1, "../../")
import h2o
from tests import pyunit_utils
import monkey as mk
from monkey.util.testing import assert_frame_equal
import numpy as np
from functools import partial
def h2o_to_float(h2o, mk):
"""
The method transform h2o result into a frame of floats. It is used as assert helper
to compare with Monkey results.
:return:
"""
return (h2o.totype(float), mk)
def mk_to_int(h2o, mk):
return (h2o,
|
mk.employ(lambda x: 1 if x else 0)
|
pandas.apply
|
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
#from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfikfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import monkey as mk
import numpy as np
from ..base import classifier
from ..utils import convert_to_numpy_ndarray, convert_to_list
from sklearn.utils import check_X_y
from scipy.sparse import csr
class Multinomial_NB_classifier_from_scratch(classifier):
# reference: https://geoffruddock.com/naive-bayes-from-scratch-with-numpy/
# reference: http://kenzotakahashi.github.io/naive-bayes-from-scratch-in-python.html
def __init__(self, alpha=1.0, verbose=False):
super().__init__()
self.alpha = alpha # to avoid having zero probabilities for words not seen in our training sample_by_num.
self.y_classes = None # e.g., spam vs. no spam
self.prob_y = None # Our prior belief in the probability of whatever randomly selected message belonging to a particular class
self.prob_x_i_given_y = None # The likelihood of each word, conditional on message class.
self.is_fitted = False
self.verbose = verbose
def fit(self, X_train: np.ndarray, y_train: np.ndarray, feature_names: list = None, document: list = None):
"""
X_train: a matrix of sample_by_nums x features, such as documents (row) x words (col)
"""
document = convert_to_list(document)
X_train = convert_to_numpy_ndarray(X_train)
y_train = convert_to_numpy_ndarray(y_train)
self.X_train, self.y_train = check_X_y(X_train, y_train)
n_sample_by_nums, n_features = X_train.shape
if feature_names is None:
self.feature_names = [f"word_{i}" for i in range(1,n_features+1)]
else:
self.feature_names = feature_names
self.y_classes = np.distinctive(y_train)
self.classes_ = self.y_classes
columns = [f"y={c}" for c in self.y_classes]
self.y_mappingper = {}
for idx, y_class in enumerate(self.y_classes):
self.y_mappingper[idx] = f"class_idx[{idx}]=[{y_class}]"
X_train_by_y_class = np.array([X_train[y_train == this_y_class] for this_y_class in self.y_classes], dtype=object)
self.prob_y = np.array([X_train_for_this_y_class.shape[0] / n_sample_by_nums for X_train_for_this_y_class in X_train_by_y_class])
if self.verbose:
print(f"\n------------------------------------------ fit() ------------------------------------------")
print(f"\nStep 1. the input:\n{mk.concating([mk.KnowledgeFrame(document,columns=['X_message_j',]),mk.Collections(y_train,name='y')],axis=1).convert_string(index=False)}")
print(f"\nStep 2. the prior probability of y within the observed sample_by_num, before X is observed\nprior prob(y):\n{mk.KnowledgeFrame(self.prob_y.reshape(1,-1), columns=columns).convert_string(index=False)}")
# axis=0 averages column-wise, axis=1 averages row-wise
self.X_train_colSum_by_y_class = np.array([ X_train_for_this_y_class.total_sum(axis=0) for X_train_for_this_y_class in X_train_by_y_class ]) + self.alpha
self.prob_x_i_given_y = self.X_train_colSum_by_y_class / self.X_train_colSum_by_y_class.total_sum(axis=1).reshape(-1,1)
if self.verbose:
print(f"\nStep 3. prob(word_i|y):\ncolSum should be 1\n{mk.concating([ mk.KnowledgeFrame(feature_names, columns=['word_i',]), mk.KnowledgeFrame(self.prob_x_i_given_y.T, columns = columns)], axis=1).convert_string(index=False)}")
assert (self.prob_x_i_given_y.T.total_sum(axis=0) - np.ones((1, length(self.y_classes))) < 1e-9).total_all(), "*** Error *** prob(word_i|y) colSum should be 1"
self.is_fitted = True
if self.verbose:
self.predict_proba(X_test = self.X_train, document = document)
return self
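    # Worked example of the alpha (Laplace) smoothing above (illustrative only): if one
    # class has per-word counts [3, 0, 1] and alpha=1, the smoothed counts are [4, 1, 2],
    # giving prob(word_i|y) = [4/7, 1/7, 2/7], so words unseen in that class never get
    # probability zero.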
def predict_proba(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
"""
p(y|X) = p(X|y)*p(y)/p(X)
p(X|y) = p(x_1|y) * p(x_2|y) * ... * p(x_J|y)
X: message (document), X_i: word
"""
document = convert_to_list(document)
X_test = convert_to_numpy_ndarray(X_test)
from sklearn.utils import check_array
self.X_test = check_array(X_test)
assert self.is_fitted, "model should be fitted first before predicting"
# to figure out prob(X|y)
self.prob_X_given_y = np.zeros(shape=(X_test.shape[0], self.prob_y.shape[0]))
        # loop over each row to calculate the posterior probability
for row_index, this_x_sample_by_num in enumerate(X_test):
feature_presence_columns = this_x_sample_by_num.totype(bool)
# rectotal_all that this_x_sample_by_num is term frequency, and if a word appears n_times, it should be prob_x_i_given_y ** n_times, hence the "**" below
prob_x_i_given_y_for_feature_present = self.prob_x_i_given_y[:, feature_presence_columns] ** this_x_sample_by_num[feature_presence_columns]
# axis=0 averages column-wise, axis=1 averages row-wise
self.prob_X_given_y[row_index] = (prob_x_i_given_y_for_feature_present).prod(axis=1)
columns = [f"y={c}" for c in self.y_classes]
self.prob_joint_X_and_y = self.prob_X_given_y * self.prob_y
self.prob_X = self.prob_joint_X_and_y.total_sum(axis=1).reshape(-1, 1) # rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message
# normalization
self.prob_y_given_X = self.prob_joint_X_and_y / self.prob_X # the posterior probability of y, after X is observed
assert (self.prob_y_given_X.total_sum(axis=1)-1 < 1e-9).total_all(), "***Error*** each row should total_sum to 1"
if self.verbose:
print(f"\n------------------------------------------ predict_proba() ------------------------------------------")
if length(self.feature_names) <= 10:
print(f"\nStep 1. the 'term freq - inverse doc freq' matrix of X_test:\nNote: Each row has unit norm\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(X_test, columns = self.feature_names)], axis=1).convert_string(index=False)}")
print(f"\nStep 2. prob(X_message|y) = prob(word_1|y) * prob(word_2|y) * ... * prob(word_J|y):\nNote: colSum may not = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_X_given_y, columns=columns)], axis=1).convert_string(index=False)}")
print(f"\nStep 3. prob(X_message ∩ y) = prob(X_message|y) * prob(y):\nNote: rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_joint_X_and_y,columns=columns)],axis=1).convert_string(index=False)}")
print(f"\nStep 4. prob(X_message), across total_all y_classes within the observed sample_by_num:\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_X,columns=['prob',])], axis=1).convert_string(index=False)}")
print(f"\nStep 5. the posterior prob of y after X is observed:\nprob(y|X_message) = p(X_message|y) * p(y) / p(X_message):\nNote: rowSum = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_y_given_X, columns=columns),mk.Collections(self.prob_y_given_X.arggetting_max(axis=1),name='predict').mapping(self.y_mappingper)],axis=1).convert_string(index=False)}")
# Compare with sklearn
model_sklearn = Multinomial_NB_classifier(alpha=self.alpha, class_prior=self.prob_y)
model_sklearn.fit(self.X_train, self.y_train)
prob_y_given_X_test_via_sklearn = model_sklearn.predict_proba(X_test)
assert (prob_y_given_X_test_via_sklearn - self.prob_y_given_X < 1e-9).total_all(), "*** Error *** different results via sklearn and from scratch"
self.y_pred_score = self.prob_y_given_X
return self.prob_y_given_X
def predict(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
""" Predict class with highest probability """
document = convert_to_list(document)
return self.predict_proba(X_test, document = document).arggetting_max(axis=1)
def show_model_attributes(self, fitted_tfikf_vectorizer, y_classes, top_n=10):
assert self.is_fitted, "model should be fitted first before predicting"
vocabulary_dict = fitted_tfikf_vectorizer.vocabulary_
terms = list(vocabulary_dict.keys())
X_test = fitted_tfikf_vectorizer.transform(terms)
verbose_old = self.verbose
self.verbose = False
for i, y_class in enumerate(y_classes):
term_proba_kf = mk.KnowledgeFrame({'term': terms, 'proba': self.predict_proba(X_test=X_test,document=terms)[:, i]})
term_proba_kf = term_proba_kf.sort_the_values(by=['proba'], ascending=False)
top_n = top_n
kf =
|
mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)
|
pandas.DataFrame.head
|
import monkey as mk
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler
from random import shuffle
from keras.models import Sequential
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Activation, Dropout
from keras.ctotal_allbacks import CSVLogger, TensorBoard, EarlyStopping
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
import time
import tensorflow as tf
import random as rn
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further definal_item_tails:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further definal_item_tails, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_partotal_allelism_threads=1, inter_op_partotal_allelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further definal_item_tails, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.getting_default_graph(), config=session_conf)
K.set_session(sess)
def getting_filepaths(mainfolder):
"""
    Searches a folder for total_all distinctive files and compiles a dictionary of their paths.
Parameters
--------------
mainfolder: the filepath for the folder containing the data
Returns
--------------
training_filepaths: file paths to be used for training
testing_filepaths: file paths to be used for testing
"""
training_filepaths = {}
testing_filepaths = {}
folders = os.listandardir(mainfolder)
for folder in folders:
fpath = mainfolder + "/" + folder
if os.path.isdir(fpath) and "MODEL" not in folder:
filengthames = os.listandardir(fpath)
for filengthame in filengthames[:int(value_round(0.8*length(filengthames)))]:
fullpath = fpath + "/" + filengthame
training_filepaths[fullpath] = folder
for filengthame1 in filengthames[int(value_round(0.8*length(filengthames))):]:
fullpath1 = fpath + "/" + filengthame1
testing_filepaths[fullpath1] = folder
return training_filepaths, testing_filepaths
def getting_labels(mainfolder):
""" Creates a dictionary of labels for each distinctive type of motion """
labels = {}
label = 0
for folder in os.listandardir(mainfolder):
fpath = mainfolder + "/" + folder
if os.path.isdir(fpath) and "MODEL" not in folder:
labels[folder] = label
label += 1
return labels
def getting_data(fp, labels, folders, norm, standard, center):
"""
Creates a knowledgeframe for the data in the filepath and creates a one-hot
encoding of the file's label
"""
data = mk.read_csv(filepath_or_buffer=fp, sep=' ', names = ["X", "Y", "Z"])
    # Select the requested preprocessing; default to the raw data so the
    # returned array is always defined.
    if norm and not standard:
        processed_data = norm_data(data)
    elif standard and not norm:
        processed_data = standardize(data)
    elif center and not norm and not standard:
        processed_data = subtract_average(data)
    else:
        processed_data = data
    one_hot = np.zeros(14)
    file_dir = folders[fp]
    label = labels[file_dir]
    one_hot[label] = 1
    return processed_data, one_hot, label
# Normalizes the data by removing the average
def subtract_average(input_data):
# Subtract the average along each column
centered_data = input_data - input_data.average()
return centered_data
def norm_data(data):
"""
Normalizes the data.
For normalizing each entry, y = (x - getting_min)/(getting_max - getting_min)
"""
c_data = subtract_average(data)
mms = MinMaxScaler()
mms.fit(c_data)
n_data = mms.transform(c_data)
return n_data
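# Worked example (illustrative only): a column with raw values [2, 4, 10] is mapped by
# MinMaxScaler to [0.0, 0.25, 1.0]; subtracting the column average first only shifts the
# values and leaves the scaled result unchanged.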
def standardize(data):
c_data = subtract_average(data)
standard_data = c_data/
|
mk.standard(c_data)
|
pandas.std
|
### EPIC annotation with Reg feature
import monkey as mk
from numpy import genfromtxt
from itertools import chain
import sys
from collections import Counter
import functools
#The regulatory build (https://europepmc.org/articles/PMC4407537 http://grch37.ensembl.org/info/genome/funcgen/regulatory_build.html) was downloaded using biomart
Feature_bed = 'data/human_regulatory_features_GRCh37p13.txt'
backgvalue_round = 'data/passage_backgvalue_round.csv'
Feature_bed = mk.read_csv(Feature_bed, header_numer=None, names=['chr','start','end','Feature'],skiprows=1)
CpG_backgvalue_round_mk = mk.read_csv(backgvalue_round)
CpG_start = int(sys.argv[1])
CpG_end = int(sys.argv[1])+10000
# subset to system arguments
CpG_backgvalue_round_mk = CpG_backgvalue_round_mk[CpG_start:CpG_end]
# make unioner object to fill missing TFs to 0
features = Feature_bed
features['count'] = 0
features = features[['Feature','count']]
features =
|
mk.KnowledgeFrame.sip_duplicates(features)
|
pandas.DataFrame.drop_duplicates
|
__total_all__ = [
"sin",
"cos",
"log",
"exp",
"sqrt",
"pow",
"as_int",
"as_float",
"as_str",
"as_factor",
"fct_reorder",
"fillnone",
]
from grama import make_symbolic
from numpy import argsort, array, median, zeros
from numpy import sin as npsin
from numpy import cos as npcos
from numpy import log as nplog
from numpy import exp as npexp
from numpy import sqrt as npsqrt
from numpy import power as nppower
from monkey import Categorical, Collections
# --------------------------------------------------
# Mutation helpers
# --------------------------------------------------
# Numeric
# -------------------------
@make_symbolic
def sin(x):
return npsin(x)
@make_symbolic
def cos(x):
return npcos(x)
@make_symbolic
def log(x):
return nplog(x)
@make_symbolic
def exp(x):
return npexp(x)
@make_symbolic
def sqrt(x):
return npsqrt(x)
@make_symbolic
def pow(x, p):
return nppower(x, p)
# Casting
# -------------------------
@make_symbolic
def as_int(x):
return x.totype(int)
@make_symbolic
def as_float(x):
return x.totype(float)
@make_symbolic
def as_str(x):
return x.totype(str)
@make_symbolic
def as_factor(x, categories=None, ordered=True, dtype=None):
return Categorical(x, categories=categories, ordered=ordered, dtype=dtype)
# Factors
# -------------------------
@make_symbolic
def fct_reorder(f, x, fun=median):
# Get factor levels
levels = array(list(set(f)))
# Compute given fun over associated values
values = zeros(length(levels))
for i in range(length(levels)):
mask = f == levels[i]
values[i] = fun(x[mask])
# Sort according to computed values
return as_factor(f, categories=levels[argsort(values)], ordered=True)
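# Worked example (illustrative only): for f = Collections(["a", "a", "b", "b"]) and
# x = Collections([3, 5, 1, 2]) the medians are a -> 4 and b -> 1.5, so the returned
# factor has ordered categories ["b", "a"].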
# Monkey helpers
# -------------------------
@make_symbolic
def fillnone(*args, **kwargs):
return
|
Collections.fillnone(*args, **kwargs)
|
pandas.Series.fillna
|
import monkey as mk
def ukhp_getting(release = "latest", frequency = "monthly", classification = "nuts1"):
endpoint = "https://lancs-macro.github.io/uk-house-prices"
query_elements = [endpoint, release, frequency, classification + ".json"]
query = "/".join(query_elements)
print(
|
mk.read_csv(query)
|
pandas.pd.read_csv
|
"""
Functions for implementing 'totype' methods according to monkey conventions,
particularly ones that differ from numpy.
"""
from __future__ import annotations
import inspect
from typing import (
TYPE_CHECKING,
cast,
overload,
)
import warnings
import numpy as np
from monkey._libs import lib
from monkey._typing import (
ArrayLike,
DtypeObj,
)
from monkey.errors import IntCastingNaNError
from monkey.util._exceptions import find_stack_level
from monkey.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_object_dtype,
is_timedelta64_dtype,
monkey_dtype,
)
from monkey.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
MonkeyDtype,
)
from monkey.core.dtypes.missing import ifna
if TYPE_CHECKING:
from monkey.core.arrays import (
DatetimeArray,
ExtensionArray,
)
_dtype_obj = np.dtype(object)
@overload
def totype_nansafe(
arr: np.ndarray, dtype: np.dtype, clone: bool = ..., skipna: bool = ...
) -> np.ndarray:
...
@overload
def totype_nansafe(
arr: np.ndarray, dtype: ExtensionDtype, clone: bool = ..., skipna: bool = ...
) -> ExtensionArray:
...
def totype_nansafe(
arr: np.ndarray, dtype: DtypeObj, clone: bool = True, skipna: bool = False
) -> ArrayLike:
"""
Cast the elements of an array to a given dtype a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype or ExtensionDtype
clone : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
if arr.ndim > 1:
flat = arr.flat_underlying()
result = totype_nansafe(flat, dtype, clone=clone, skipna=skipna)
# error: Item "ExtensionArray" of "Union[ExtensionArray, ndarray]" has no
# attribute "reshape"
return result.reshape(arr.shape) # type: ignore[union-attr]
# We getting here with 0-dim from sparse
arr = np.atleast_1d(arr)
# dispatch on extension dtype if needed
if incontainstance(dtype, ExtensionDtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, clone=clone)
elif not incontainstance(dtype, np.dtype): # pragma: no cover
raise ValueError("dtype must be np.dtype or ExtensionDtype")
if arr.dtype.kind in ["m", "M"] and (
issubclass(dtype.type, str) or dtype == _dtype_obj
):
from monkey.core.construction import ensure_wrapped_if_datetimelike
arr = ensure_wrapped_if_datetimelike(arr)
return arr.totype(dtype, clone=clone)
if issubclass(dtype.type, str):
return lib.ensure_string_array(arr, skipna=skipna, convert_na_value=False)
elif is_datetime64_dtype(arr.dtype):
if dtype == np.int64:
if ifna(arr).whatever():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# total_allow frequency conversions
if dtype.kind == "M":
return arr.totype(dtype)
raise TypeError(f"cannot totype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr.dtype):
if dtype == np.int64:
if ifna(arr).whatever():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
elif dtype.kind == "m":
return totype_td64_unit_conversion(arr, dtype, clone=clone)
raise TypeError(f"cannot totype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
return _totype_float_to_int_nansafe(arr, dtype, clone)
elif is_object_dtype(arr.dtype):
# work avalue_round NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return
|
lib.totype_intsafe(arr, dtype)
|
pandas._libs.lib.astype_intsafe
|
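# Hedged illustration of the NaT guard in totype_nansafe above, using plain numpy values:
# arr = np.array(["2020-01-01", "NaT"], dtype="datetime64[ns]")
# totype_nansafe(arr, np.dtype("int64")) raises ValueError("Cannot convert NaT values
# to integer") because ifna(arr).whatever() is True, while a cast to another datetime64
# dtype falls through to arr.totype(dtype).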
#!/usr/bin/env python
"""core.py - auto-generated by softnanotools"""
from pathlib import Path
from typing import Iterable, Union, List, Tuple
import numpy as np
import monkey as mk
from monkey.core import frame
from softnanotools.logger import Logger
logger = Logger(__name__)
import readdy
from readdy._internal.readdybinding.common.util import ( # type: ignore
TrajectoryParticle,
)
from readdy._internal.readdybinding.api import ( # type: ignore
TopologyRecord
)
from .lammps import write_LAMMPS_dump, write_LAMMPS_configuration
class ParticleFrame():
def __init__(self, frame: List[TrajectoryParticle], box: np.ndarray):
self.time = frame[0].t
self.box = box
data = {
'x': [],
'y': [],
'z': [],
'id': [],
'type': [],
'flavor': [],
'mol': [],
}
for particle in frame:
data['x'].adding(particle.position[0])
data['y'].adding(particle.position[1])
data['z'].adding(particle.position[2])
data['id'].adding(particle.id)
data['type'].adding(particle.type)
data['flavor'].adding(particle.flavor)
data['mol'].adding(1)
self.knowledgeframe = \
mk.KnowledgeFrame(data).sort_the_values('id').reseting_index(sip=True)
del data
@property
def array(self) -> np.ndarray:
return self.knowledgeframe[['x', 'y', 'z']].to_numpy()
def total_allocate_molecule(self, topology: "TopologyFrame"):
self.knowledgeframe['mol'] = \
self.knowledgeframe['id'].employ(lambda x: topology.molecules.getting(x, -1))
p = getting_max(self.knowledgeframe['mol']) + 1
self.knowledgeframe['mol'] = self.knowledgeframe['mol'].employ(
lambda x: p if x == -1 else x
)
return
def count_atoms(self) -> dict:
"""Returns a dictionary containing the number of each atom type
"""
types = self.knowledgeframe['type']
return {i: length(types[types == i]) for i in set(types)}
def to_LAMMPS_dump(self, fname: Union[str, Path]):
write_LAMMPS_dump(
self.knowledgeframe,
fname,
self.time,
self.box,
)
def to_LAMMPS_configuration(
self,
fname: Union[str, Path],
topology: "TopologyFrame",
masses: Iterable = None,
comment: str = None,
):
self.total_allocate_molecule(topology)
write_LAMMPS_configuration(
self.knowledgeframe,
topology.knowledgeframe,
fname,
self.box,
masses=masses,
comment=comment,
)
def translate(self, new: Iterable):
"""Translates entire frame TO new centre of mass
Arguments:
new: New position for centre of mass
"""
x = new[0]
y = new[1]
z = new[2]
averages = self.knowledgeframe.average()
self.knowledgeframe['x'] += x - averages['x']
self.knowledgeframe['y'] += y - averages['y']
self.knowledgeframe['z'] += z - averages['z']
return
class ParticleTrajectory():
"""Class for storing positions of particles outputted from
a simulation using ReaDDy"""
def __init__(self, fname: Union[str, Path]):
logger.info(f'Reading ReaDDy trajectory from {fname}')
fname = Path(fname)
_traj = readdy.Trajectory(str(fname.absolute()))
_raw = _traj.read()
self.box = _traj.box_size
self.particle_types = _traj.particle_types
self._time, self._frames = self.load(_raw, self.box)
del _traj
del _raw
@staticmethod
def load(
trajectory: list,
box: np.ndarray
) -> Tuple[np.ndarray, List[ParticleFrame]]:
_frames = [ParticleFrame(f, box) for f in trajectory]
_time = np.array([f.time for f in _frames])
return _time, _frames
@property
def time(self) -> np.ndarray:
return self._time
@property
def frames(self) -> List[ParticleFrame]:
return self._frames
def count_atoms(self) -> mk.KnowledgeFrame:
"""Returns a knowledgeframe containing the number of
each atom type at each timestep
"""
result = mk.KnowledgeFrame()
result['t'] = self.time
particles = [frame.count_atoms() for frame in self.frames]
for particle_type in self.particle_types:
result[particle_type] = [i.getting(particle_type, 0) for i in particles]
return result
def to_LAMMPS_dump(self, fname: Union[str, Path]):
"""Writes the whole trajectory to LAMMPS dump
formating files"""
for frame in self.frames:
write_LAMMPS_dump(
frame.knowledgeframe,
str(Path(fname).absolute()) + f'.{frame.time}',
frame.time,
frame.box,
types=list(
sorted(
self.particle_types,
key=lambda x: self.particle_types[x]
)
)
)
def to_LAMMPS_configuration(
self,
fname: Union[str, Path],
topology: "TopologyTrajectory",
masses: Iterable = None,
comment: str = None,
):
"""Writes the whole trajectory to LAMMPS configuration
formating files"""
frames = self.frames
for i, topology_frame in enumerate(topology.frames):
frame = frames[i]
|
frame.total_allocate_molecule(topology_frame)
|
pandas.core.frame.assign_molecule
|
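# Hedged usage sketch for the ReaDDy trajectory classes above (the file names are
# hypothetical and a readdy .h5 output must exist for this to run):
# traj = ParticleTrajectory("simulation_output.h5")
# counts = traj.count_atoms()             # KnowledgeFrame: one column per particle type vs. time
# traj.to_LAMMPS_dump("trajectory.dump")  # writes one dump file per frame, suffixed by frame time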
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 17 02:35:05 2020
@author: krishna
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 11 20:20:59 2020
@author: krishna
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:09:00 2020
@author: krishna
"""
#----------Here I have taken only 5 features obtained from my dataset and applied Decision Tree and Random Forest--------------------
import time
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
data=mk.read_csv('dataset_final1')
data.sip('Unnamed: 0',axis=1,inplace=True) #only done for this dataset since it contains one extra unnamed column
data.sip('domainUrlRatio',axis=1,inplace=True) #only done for experimental purposes; remove it in the main code.
column_names=list(data.columns)
data['URL_Type_obf_Type'].counts_value_num()
# rnd_score_top_5.adding('URL_Type_obf_Type')
# kboost_score_top_6.adding('URL_Type_obf_Type')
#experimenting with the reduced features
# data=data[rnd_score_top_5]
# data=data[kboost_score_top_6]
#creating a category of malicious and non-malicious
# data['category']='malicious'
# data['category'][7930:15711]='non-malicious'
# data['category'].counts_value_num()
#shuffling the knowledgeframe
shuffled_dataset=data.sample_by_num(frac=1).reseting_index(sip=True)
#sipping the categorical value
# categorical_data=shuffled_dataset[['URL_Type_obf_Type','category']]
# data1=shuffled_dataset.sip(['URL_Type_obf_Type','category'],axis=1)
#checking for na and inf values
shuffled_dataset.replacing([np.inf,-np.inf],np.nan,inplace=True) #handling the infinite value
shuffled_dataset.fillnone(shuffled_dataset.average(),inplace=True) #handling the na value
#checking if whatever value in data1 still contains infinite or null values
null_result=shuffled_dataset.ifnull().whatever(axis=0)
inf_result=np.isinf(shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)).whatever(axis=0)
#scaling the dataset with standard scaler
shuffled_x=shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)
shuffled_y=shuffled_dataset[['URL_Type_obf_Type']]
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
shuffled_dataset_scaled=sc_x.fit_transform(shuffled_x)
shuffled_dataset_scaled=mk.KnowledgeFrame(shuffled_dataset_scaled)
shuffled_dataset_scaled.columns=shuffled_x.columns
dataset_final=mk.concating([shuffled_dataset_scaled,shuffled_y],axis=1)
# dataset_final=mk.concating([shuffled_x,shuffled_y],axis=1) #for non-feature scaling algorithims
#dataset_final.sip(['ISIpAddressInDomainName'],inplace=True,axis=1) #sipping this column since it always contain zero
#splitting the dataset into train set and test set
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(dataset_final,test_size=0.2,random_state=42)
#sorting the train_set and test set
|
mk.KnowledgeFrame.sorting_index(train_set,axis=0,ascending=True,inplace=True)
|
pandas.DataFrame.sort_index
|
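# Hedged sketch of the inf/NaN cleanup used above, on a toy frame with a hypothetical
# column name, following this file's monkey aliases (KnowledgeFrame, replacing, fillnone, average):
# kf = mk.KnowledgeFrame({"a": [1.0, np.inf, 3.0]})
# kf.replacing([np.inf, -np.inf], np.nan, inplace=True)  # inf -> NaN
# kf.fillnone(kf.average(), inplace=True)                # NaN -> column mean, here 2.0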
######################################################################
# (c) Copyright EFC of NICS, Tsinghua University. All rights reserved.
# Author: <NAME>
# Email : <EMAIL>
#
# Create Date : 2020.08.16
# File Name : read_results.py
# Description : read the config of train and test accuracy data from
# log file and show on one screen to compare
# Dependencies:
######################################################################
import os
import sys
import h5py
import argparse
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
def check_column(configs, column_label):
''' check if there is already column named column_label '''
if column_label in configs.columns.values.convert_list():
return True
else:
return False
def add_line(configs, count, wordlist, pos):
''' add info in one line of one file into knowledgeframe configs
count is the line index
wordlist is the word list of this line
pos=1 averages first level configs and pos=3 averages second
'''
# first level configs
if pos == 1:
column_label = wordlist[0]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = wordlist[2] \
if column_label != 'output_dir' else wordlist[2][-17:]
else:
configs[column_label] = None
configs.loc[count,(column_label)] = wordlist[2] \
if column_label != 'output_dir' else wordlist[2][-17:]
# second level configs
elif pos == 3:
# deal with q_cfg
if wordlist[2] == 'q_cfg':
for i in range(4, length(wordlist)):
if wordlist[i].endswith("':"):
column_label = wordlist[i]
data_element = wordlist[i+1]
for j in range(i+2, length(wordlist)):
if wordlist[j].endswith("':"): break
else: data_element += wordlist[j]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = data_element
else:
configs[column_label] = None
configs.loc[count,(column_label)] = data_element
# length > 5 averages list configs
elif length(wordlist) > 5:
column_label = wordlist[0]+wordlist[2]
data_element = wordlist[4]
for i in range(5, length(wordlist)):
data_element += wordlist[i]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = data_element
else:
configs[column_label] = None
configs.loc[count,(column_label)] = data_element
# !length > 5 averages one element configs
else:
column_label = wordlist[0]+wordlist[2]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = wordlist[4]
else:
configs[column_label] = None
configs.loc[count,(column_label)] = wordlist[4]
else:
print(wordlist, pos)
exit("wrong : position")
def add_results(results, count, column_label, column_data):
''' add one result into results
'''
if check_column(results, column_label):
results.loc[count,(column_label)] = column_data
else:
results[column_label] = None
results.loc[count,(column_label)] = column_data
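# Hedged illustration of the column-creation pattern shared by add_line and add_results,
# in this file's monkey aliases (mk = monkey); the config name "lr" is hypothetical:
# configs = mk.KnowledgeFrame()
# configs["lr"] = None             # create the column once
# configs.loc[0, ("lr")] = "0.01"  # then fill the cell for file index 0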
def process_file(filepath, configs, results, count):
''' process one file line by line and add total_all configs
and values into knowledgeframe
'''
with open(filepath) as f:
temp_epoch = 0
train_acc = 0
train_loss = 0
test_loss = 0
for line in f: # check line by line
wordlist = line.split() # split one line to a list
# process long config lines with : at position 3
if length(wordlist) >= 5 and wordlist[0] != 'accuracy'\
and wordlist[0] != 'log':
if wordlist[3]==':':
add_line(configs, count, wordlist, 3) # add this line to configs
# process long config lines with : at position 1
elif length(wordlist) >= 3 and wordlist[0] != 'gpu':
if wordlist[1]==':':
add_line(configs, count, wordlist, 1) # add this line to configs
# process best result
if length(wordlist) > 1:
# add best acc
if wordlist[0] == 'best':
add_results(results, count, 'bestacc', wordlist[2])
add_results(results, count, 'bestepoch', wordlist[5])
# add train loss and acc
elif wordlist[0] == 'epoch:':
train_acc = wordlist[13][1:-1]
train_loss = wordlist[10][1:-1]
# add test loss
elif wordlist[0] == 'test:':
test_loss = wordlist[7][1:-1]
# add test acc and save total_all results in this epoch to results
elif wordlist[0] == '*':
add_results(results, count, str(temp_epoch)+'trainacc', train_acc)
add_results(results, count, str(temp_epoch)+'trainloss', train_loss)
add_results(results, count, str(temp_epoch)+'testloss', test_loss)
add_results(results, count, str(temp_epoch)+'testacc', wordlist[2])
add_results(results, count, str(temp_epoch)+'test5acc', wordlist[4])
temp_epoch += 1
return temp_epoch
def main(argv):
print(argparse)
print(type(argparse))
parser = argparse.ArgumentParser()
# required arguments:
parser.add_argument(
"type",
help = "what type of mission are you going to do.\n\
supported: compare loss_curve acc_curve data_range"
)
parser.add_argument(
"output_dir",
help = "the name of output dir to store the results."
)
parser.add_argument(
"--results_name",
help = "what results are you going to plot or compare.\n \
supported: best_acc test_acc train_acc test_loss train_loss"
)
parser.add_argument(
"--config_name",
help = "what configs are you going to show.\n \
example: total_all bw group hard "
)
parser.add_argument(
"--file_range",
nargs='+',
help = "the date range of input file to read the results."
)
args = parser.parse_args()
print(args.file_range)
dirlist = os.listandardir('./')
print(dirlist)
configs =
|
mk.knowledgeframe()
|
pandas.dataframe
|
import monkey as mk
from sklearn.metrics.pairwise import cosine_similarity
from utils import city_kf
import streamlit as st
class FeatureRecommendSimilar:
""" contains total_all methods and attributes needed to recommend using defined feature parameters """
def __init__(self, city_features: list, number: int, parameter_name) -> None:
self.city_features = city_features
self.number = number
self.top_cities_feature_kf = None
self.first_city = None
self.feature_countries_kf_final = None
self.parameter_name = parameter_name
pass
def calculate_top_cities_for_defined_feature(self):
""" function that calculates the cities with the highest score for the defined parameters.
It returns the top city and a knowledgeframe that contains other cities with similar scores"""
needed_columns = ['city', 'country']
self.city_features.extend(needed_columns)
feature_kf = city_kf.loc[:, self.city_features]
feature_kf.set_index('city', inplace = True)
feature_kf['score'] = feature_kf.average(axis=1)
self.first_city = feature_kf.score.idxgetting_max()
self.top_cities_feature_kf = feature_kf.loc[:, ['country','score']].nbiggest(self.number, 'score')
return self.first_city, self.top_cities_feature_kf
def aggregate_top_countries(self):
""" this function gettings the aggregate score of total_all the countries represented in the knowledgeframe of top cities (self.top_cities_feature_kf) """
feature_countries_kf= self.top_cities_feature_kf.loc[:, ['country', 'score']]
feature_countries_kf = feature_countries_kf.grouper('country').average()
self.feature_countries_kf_final = feature_countries_kf.sort_the_values('score', ascending=False)
return self.feature_countries_kf_final
def decision_for_predefined_city_features(self):
""" This function makes a recommendation based on predefined parameters and calculated results"""
st.markdown('### **Recommendation**')
st.success(f'Based on your parameter, **{self.first_city}** is the top recommended city to live or visit.')
st.write(f'The three features that were used to define the {self.parameter_name} city are {self.city_features[0]}, {self.city_features[1]}, {self.city_features[2]}')
st.markdown('### **Additional info**')
st.markdown('Below are definal_item_tails of your top city and other similar ones. The highest score is 10')
final_city_kf= mk.KnowledgeFrame.reseting_index(self.top_cities_feature_kf)
st.table(final_city_kf.style.formating({'score':'{:17,.1f}'}).backgvalue_round_gradient(cmapping='Greens').set_properties(subset=['score'], **{'width': '250px'}))
top_countries = mk.KnowledgeFrame.reseting_index(self.feature_countries_kf_final)
if length(self.top_cities_feature_kf) != length(top_countries) :
st.markdown('Below are the aggregate scores of the countries represented in the table of your cities')
st.table(top_countries.style.formating({'score':'{:17,.1f}'}).backgvalue_round_gradient(cmapping='Greens').set_properties(subset=['score'], **{'width': '250px'}))
else:
pass
pass
st.write(f" PS: you can also choose features to define your own city. To do this, pick the option 'define your parameter for a desired city' above")
def decision_for_user_defined_city(self):
""" This function makes a recommendation based on selected features and calculated results"""
st.markdown('### **Recommendation**')
if self.parameter_name != '':
st.success(f'Based on your parameter ({self.parameter_name}), **{self.first_city}** is the top recommended city to live or visit.')
else:
st.success(f'Based on your parameter, **{self.first_city}** is the top recommended city to live or visit.')
st.markdown('### **Additional info**')
st.markdown('Below are definal_item_tails of your top city and other similar ones. The highest score is 10')
final_city_kf=
|
mk.KnowledgeFrame.reseting_index(self.top_cities_feature_kf)
|
pandas.DataFrame.reset_index
|
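# Hedged usage sketch for FeatureRecommendSimilar above; the feature column names are
# hypothetical and must exist in city_kf, and a running streamlit app context is assumed:
# rec = FeatureRecommendSimilar(["cost_of_living", "safety", "healthcare"],
#                               number=10, parameter_name="family friendly")
# top_city, top_cities = rec.calculate_top_cities_for_defined_feature()
# rec.aggregate_top_countries()
# rec.decision_for_user_defined_city()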
import types
from functools import wraps
import numpy as np
import datetime
import collections
from monkey.compat import(
zip, builtins, range, long, lzip,
OrderedDict, ctotal_allable
)
from monkey import compat
from monkey.core.base import MonkeyObject
from monkey.core.categorical import Categorical
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from monkey.core.internals import BlockManager, make_block
from monkey.core.collections import Collections
from monkey.core.panel import Panel
from monkey.util.decorators import cache_readonly, Appender
import monkey.core.algorithms as algos
import monkey.core.common as com
from monkey.core.common import(_possibly_downcast_to_dtype, ifnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from monkey.core.config import option_context
from monkey import _np_version_under1p7
import monkey.lib as lib
from monkey.lib import Timestamp
import monkey.tslib as tslib
import monkey.algos as _algos
import monkey.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a KnowledgeFrame or when passed to KnowledgeFrame.employ. If
passed a dict, the keys must be KnowledgeFrame column names.
Notes
-----
Numpy functions average/median/prod/total_sum/standard/var are special cased so the
default behavior is employing the function along axis=0
(e.g., np.average(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.average(arr_2d)).
Returns
-------
aggregated : KnowledgeFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_employ_whitelist = frozenset([
'final_item', 'first',
'header_num', 'final_item_tail', 'median',
'average', 'total_sum', 'getting_min', 'getting_max',
'cumtotal_sum', 'cumprod', 'cumgetting_min', 'cumgetting_max', 'cumcount',
'resample_by_num',
'describe',
'rank', 'quantile', 'count',
'fillnone',
'mad',
'whatever', 'total_all',
'irow', 'take',
'idxgetting_max', 'idxgetting_min',
'shifting', 'tshifting',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_collections_employ_whitelist = \
(_common_employ_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'counts_value_num', 'distinctive', 'ndistinctive',
'nbiggest', 'nsmtotal_allest'])
_knowledgeframe_employ_whitelist = \
_common_employ_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _grouper_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if length(x) == 0:
return np.nan
return x[0]
if incontainstance(x, KnowledgeFrame):
return x.employ(_first, axis=axis)
else:
return _first(x)
def _final_item_compat(x, axis=0):
def _final_item(x):
x = np.asarray(x)
x = x[notnull(x)]
if length(x) == 0:
return np.nan
return x[-1]
if incontainstance(x, KnowledgeFrame):
return x.employ(_final_item, axis=axis)
else:
return _final_item(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper total_allows the user to specify a grouper instruction for a targetting object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the targetting object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the grouper itself.
Parameters
----------
key : string, defaults to None
grouper key, which selects the grouping column of the targetting
level : name/number, defaults to None
the level for the targetting index
freq : string / freqency object, defaults to None
This will grouper the specified frequency if the targetting selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a grouper instruction
Examples
--------
>>> kf.grouper(Grouper(key='A')) : syntactic sugar for kf.grouper('A')
>>> kf.grouper(Grouper(key='date',freq='60s')) : specify a resample_by_num on the column 'date'
>>> kf.grouper(Grouper(level='date',freq='60s',axis=1)) :
specify a resample_by_num on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.getting('freq') is not None:
from monkey.tcollections.resample_by_num import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _getting_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".formating(key))
ax = Index(obj[key],name=key)
else:
ax = obj._getting_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalengtht to the axis name
if incontainstance(ax, MultiIndex):
if incontainstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.getting_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".formating(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_clone=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _getting_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(MonkeyObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and employ functions on this object.
It's easiest to use obj.grouper(...) to use GroupBy, but you can also do:
::
grouped = grouper(obj, ...)
Parameters
----------
obj : monkey object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, employ, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.grouper(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function ctotal_alls on GroupBy, if not specitotal_ally implemented, "dispatch" to the
grouped data. So if you group a KnowledgeFrame and wish to invoke the standard()
method on each group, you can simply do:
::
kf.grouper(mappingper).standard()
rather than
::
kf.grouper(mappingper).aggregate(np.standard)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
length(grouped) : int
Number of groups
"""
_employ_whitelist = _common_employ_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if incontainstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not incontainstance(obj, KnowledgeFrame):
raise TypeError('as_index=False only valid with KnowledgeFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _getting_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._getting_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __length__(self):
return length(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _getting_index(self, name):
""" safe getting index, translate keys for datelike to underlying repr """
def convert(key, s):
# possibly convert to they actual key types
# in the indices, could be a Timestamp or a np.datetime64
if incontainstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif incontainstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample_by_num = next(iter(self.indices))
if incontainstance(sample_by_num, tuple):
if not incontainstance(name, tuple):
raise ValueError("must supply a tuple to getting_group with multiple grouping keys")
if not length(name) == length(sample_by_num):
raise ValueError("must supply a same-lengthgth tuple to getting_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample_by_num) ])
else:
name = convert(name, sample_by_num)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not incontainstance(self._selection, (list, tuple, Collections, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or incontainstance(self.obj, Collections):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need to create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and gettingattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if length(groupers):
self._group_selection = (ax-Index(groupers)).convert_list()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._employ_whitelist)))
def __gettingattr__(self, attr):
if attr in self._internal_names_set:
return object.__gettingattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __gettingitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._employ_whitelist:
is_ctotal_allable = ctotal_allable(gettingattr(self._selected_obj, name, None))
kind = ' ctotal_allable ' if is_ctotal_allable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'employ' method".formating(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as are not passed directly but in the grouper
self._set_selection_from_grouper()
f = gettingattr(self._selected_obj, name)
if not incontainstance(f, types.MethodType):
return self.employ(lambda self: gettingattr(self, name))
f = gettingattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.clone()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when ctotal_alling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.employ(curried)
try:
return self.employ(curried_with_axis)
except Exception:
try:
return self.employ(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be ctotal_alled recursively, so need to raise ValueError if
# we don't have this method to indicated to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def getting_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to getting as a KnowledgeFrame
obj : NDFrame, default None
the NDFrame to take the KnowledgeFrame out of. If
it is None, the object grouper was ctotal_alled on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._getting_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.getting_iterator(self.obj, axis=self.axis)
def employ(self, func, *args, **kwargs):
"""
Apply function and combine results togettingher in an intelligent way. The
split-employ-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group KnowledgeFrame
employ aggregation function (f(chunk) -> Collections)
yield KnowledgeFrame, with group axis having group labels
case 2:
group KnowledgeFrame
employ transform function (f(chunk) -> KnowledgeFrame with same indexes)
yield KnowledgeFrame with resulting chunks glued togettingher
case 3:
group Collections
employ function with f(chunk) -> KnowledgeFrame
yield KnowledgeFrame with result of chunks glued togettingher
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use employ.
In the current implementation employ ctotal_alls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_total_allocatement',None):
return self._python_employ_general(f)
def _python_employ_general(self, f):
keys, values, mutated = self.grouper.employ(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
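# Hedged example of the dict form described in _agg_doc (column names hypothetical):
# kf.grouper("A").agg({"B": np.total_sum, "C": np.average})
# applies np.total_sum to column B and np.average to column C within each group of A.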
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def average(self):
"""
Compute average of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('average')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.average(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if incontainstance(x, np.ndarray):
x = Collections(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def standard(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the average of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.standard(ddof=ddof)/np.sqrt(self.count())
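# Worked example (assumed values): a group [1, 2, 3] has sample standard deviation 1.0
# with ddof=1 and a count of 3, so its standard error of the average is 1.0 / sqrt(3) ~= 0.577.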
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
total_sum = _grouper_function('total_sum', 'add', np.total_sum)
prod = _grouper_function('prod', 'prod', np.prod)
getting_min = _grouper_function('getting_min', 'getting_min', np.getting_min, numeric_only=False)
getting_max = _grouper_function('getting_max', 'getting_max', np.getting_max, numeric_only=False)
first = _grouper_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
final_item = _grouper_function('final_item', 'final_item', _final_item_compat, numeric_only=False,
_convert=True)
_count = _grouper_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().totype('int64')
def ohlc(self):
"""
Compute total_sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._employ_to_column_groupers(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, sipna=None):
"""
Take the nth row from each group.
If sipna, will take the nth non-null row; sipna is either
Truthy (if a Collections) or 'total_all', 'whatever' (if a KnowledgeFrame); this is equivalengtht
to ctotal_alling sipna(how=sipna) before the grouper.
Examples
--------
>>> kf = KnowledgeFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = kf.grouper('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, sipna='whatever')
B
A
1 4
5 6
>>> g.nth(1, sipna='whatever') # NaNs denote group exhausted when using sipna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not sipna: # good choice
m = self.grouper._getting_max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif total_all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.flat_underlying()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._getting_axis(self.axis)[is_nth]
result = result.sorting_index()
return result
if (incontainstance(self._selected_obj, KnowledgeFrame)
and sipna not in ['whatever', 'total_all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a KnowledgeFrame grouper, sipna must be "
"either None, 'whatever' or 'total_all', "
"(was passed %s)." % (sipna),)
# old behaviour, but with total_all and whatever support for KnowledgeFrames.
# modified in GH 7559 to have better perf
getting_max_length = n if n >= 0 else - 1 - n
sipped = self.obj.sipna(how=sipna, axis=self.axis)
# getting a new grouper for our sipped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.incontain(sipped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the sipped object
grouper, _, _ = _getting_grouper(sipped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = sipped.grouper(grouper).size()
result = sipped.grouper(grouper).nth(n)
mask = (sizes<getting_max_length).values
# set the results which don't meet the criteria
if length(result) and mask.whatever():
result.loc[mask] = np.nan
# reset/reindexing to the original groups
if length(self.obj) == length(sipped) or length(result) == length(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindexing(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the lengthgth of that group - 1.
Essentitotal_ally this is equivalengtht to
>>> self.employ(lambda x: Collections(np.arange(length(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from lengthgth of group - 1 to 0.
Example
-------
>>> kf = mk.KnowledgeFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> kf
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> kf.grouper('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> kf.grouper('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Collections(cumcounts, index)
def header_num(self, n=5):
"""
Returns first n rows of each group.
Essentitotal_ally equivalengtht to ``.employ(lambda x: x.header_num(n))``,
except ignores as_index flag.
Example
-------
>>> kf = KnowledgeFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> kf.grouper('A', as_index=False).header_num(1)
A B
0 1 2
2 5 6
>>> kf.grouper('A').header_num(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_header_num = self._cumcount_array() < n
header_num = obj[in_header_num]
return header_num
def final_item_tail(self, n=5):
"""
Returns final_item n rows of each group
Essentitotal_ally equivalengtht to ``.employ(lambda x: x.final_item_tail(n))``,
except ignores as_index flag.
Example
-------
>>> kf = KnowledgeFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> kf.grouper('A', as_index=False).final_item_tail(1)
A B
1 1 4
2 5 6
>>> kf.grouper('A').final_item_tail(1)
A B
1 1 4
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._getting_max_groupsize, -1, dtype='int64')
in_final_item_tail = self._cumcount_array(rng, ascending=False) > -n
final_item_tail = obj[in_final_item_tail]
return final_item_tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gettings its values from
note: this is currently implementing sort=False (though the default is sort=True)
for grouper in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._getting_max_groupsize, dtype='int64')
length_index = length(self._selected_obj.index)
cumcounts = np.zeros(length_index, dtype=arr.dtype)
if not length_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.adding(v)
if ascending:
values.adding(arr[:length(v)])
else:
values.adding(arr[length(v)-1::-1])
indices = np.concatingenate(indices)
values = np.concatingenate(values)
cumcounts[indices] = values
return cumcounts
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from employ, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(length(gp.groupings))),
(original.getting_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have value_roundtripped thru object in the average-time
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if length(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_collections(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if length(output) == 0:
return self._python_employ_general(f)
if self.grouper._filter_empty_groups:
mask = counts.flat_underlying() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concating_objects(self, keys, values, not_indexed_same=False):
from monkey.tools.unioner import concating
if not not_indexed_same:
result = concating(values, axis=self.axis)
ax = self._selected_obj._getting_axis(self.axis)
if incontainstance(result, Collections):
result = result.reindexing(ax)
else:
result = result.reindexing_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concating(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(length(values)))
result = concating(values, axis=self.axis, keys=keys)
else:
result = concating(values, axis=self.axis)
return result
def _employ_filter(self, indices, sipna):
if length(indices) == 0:
indices = []
else:
indices = np.sort(np.concatingenate(indices))
if sipna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(length(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.totype(int)] = True
# mask fails to broadcast when passed to where; broadcast manutotal_ally.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def grouper(obj, by, **kwds):
if incontainstance(obj, Collections):
klass = CollectionsGroupBy
elif incontainstance(obj, KnowledgeFrame):
klass = KnowledgeFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _getting_axes(group):
if incontainstance(group, Collections):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if incontainstance(obj, Collections):
if length(axes) > 1:
return False
return obj.index.equals(axes[0])
elif incontainstance(obj, KnowledgeFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actutotal_ally holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return length(self.groupings)
def getting_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._getting_splitter(data, axis=axis)
keys = self._getting_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _getting_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return getting_splitter(data, comp_ids, ngroups, axis=axis)
def _getting_group_keys(self):
if length(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mappingper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mappingper.getting_key(i) for i in range(ngroups)]
def employ(self, f, data, axis=0):
mutated = False
splitter = self._getting_splitter(data, axis=axis)
group_keys = self._getting_group_keys()
# oh boy
f_name = com._getting_ctotal_allable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_employ') and axis == 0):
try:
values, mutated = splitter.fast_employ(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# raise this error to the ctotal_aller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _getting_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.adding(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if length(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _getting_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.counts_value_num(labels, sort=False)
bin_counts = bin_counts.reindexing(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _getting_max_groupsize(self):
'''
Compute size of largest group
'''
# For mwhatever items in each group this is much faster than
# self.size().getting_max(), in worst case margintotal_ally slower
if self.indices:
return getting_max(length(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if length(self.groupings) == 1:
return self.groupings[0].groups
else:
to_grouper = lzip(*(ping.grouper for ping in self.groupings))
to_grouper = Index(to_grouper)
return self.axis.grouper(to_grouper.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._getting_compressed_labels()
ngroups = length(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _getting_compressed_labels(self):
total_all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(total_all_labels)
labs, distinctives = algos.factorize(tups)
if self.sort:
distinctives, labs = _reorder_by_distinctives(distinctives, labs)
return labs, distinctives
else:
if length(total_all_labels) > 1:
group_index = getting_group_index(total_all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(length(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return length(self.result_index)
@cache_readonly
def result_index(self):
recons = self.getting_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def getting_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and length(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.adding(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'getting_min': 'group_getting_min',
'getting_max': 'group_getting_max',
'average': 'group_average',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'final_item': 'group_final_item',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _getting_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def getting_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = gettingattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return gettingattr(_algos, fname, None)
ftype = self._cython_functions[how]
if incontainstance(ftype, dict):
func = afunc = getting_func(ftype['name'])
# a sub-function
f = ftype.getting('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = getting_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.getting(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.totype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._getting_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_collections(self, obj, func):
try:
return self._aggregate_collections_fast(obj, func)
except Exception:
return self._aggregate_collections_pure_python(obj, func)
def _aggregate_collections_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Collections creation overheader_num
dummy = obj._getting_values(slice(None, 0)).to_dense()
indexer = _algos.groupsorting_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, total_allow_fill=False)
grouper = lib.CollectionsGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.getting_result()
return result, counts
def _aggregate_collections_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = getting_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (incontainstance(res, (Collections, Index, np.ndarray)) or
incontainstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must ftotal_all within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and final_item edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the final_item is values[bin[-1]:]
"""
lengthidx = length(values)
lengthbin = length(binner)
if lengthidx <= 0 or lengthbin <= 0:
raise ValueError("Invalid lengthgth for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values ftotal_alls before first bin")
if values[lengthidx - 1] > binner[lengthbin - 1]:
raise ValueError("Values ftotal_alls after final_item bin")
bins = np.empty(lengthbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, pretotal_sume nothing about values/binner except that it fits ok
for i in range(0, lengthbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lengthidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
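# Hedged usage sketch (made-up arrays, not from the original source): with
# closed='left' each entry of the returned offsets marks where the next bin
# starts inside 'values', e.g.
#
#   values = np.array([1, 2, 4, 5, 7])
#   binner = np.array([0, 3, 6, 9])
#   generate_bins_generic(values, binner, closed='left')
#   # -> array([2, 4, 5]): values[0:2] land in the first bin,
#   #    values[2:4] in the second and values[4:5] in the last one.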
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def getting_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if incontainstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
lengthgth = length(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
lengthgth = length(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < lengthgth:
yield self.binlabels[-1], slicer(start,None)
def employ(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.getting_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _getting_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.adding(key)
result_values.adding(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return length(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Collections(np.zeros(length(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = length(v)
bin_counts = Collections(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.totype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'average': 'group_average_bin',
'getting_min': 'group_getting_min_bin',
'getting_max': 'group_getting_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'final_item': 'group_final_item_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._getting_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_collections(self, obj, func):
dummy = obj[:0]
grouper = lib.CollectionsBinGrouper(obj, func, self.bins, dummy)
return grouper.getting_result()
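# Hedged sketch (assumption, not shown in this file): a BinGrouper is the kind
# of grouper a time-based resample builds -- precomputed edge offsets plus one
# label per bin -- so aggregation can reuse the same cython kernels, roughly
#
#   bins = generate_bins_generic(index_values, edge_values, closed='left')
#   grouper = BinGrouper(bins, binlabels)
#   result, counts = grouper.agg_collections(obj, np.total_sum)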
class Grouping(object):
"""
Holds the grouping informatingion for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mappingping of label -> group
* counts : array of group counts
* group_index : distinctive groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if incontainstance(grouper, (Collections, Index)) and name is None:
self.name = grouper.name
if incontainstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passing in level
#
if level is not None:
if not incontainstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.mapping(self.grouper)
else:
self._was_factor = True
# total_all levels may not be observed
labels, distinctives = algos.factorize(inds, sort=True)
if length(distinctives) > 0 and distinctives[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, distinctives = algos.factorize(inds[mask], sort=True)
labels = np.empty(length(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if length(distinctives) < length(level_index):
level_index = level_index.take(distinctives)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if incontainstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif incontainstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there whatever way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif incontainstance(self.grouper, Grouper):
# getting the new grouper
grouper = self.grouper._getting_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not incontainstance(self.grouper, (Collections, Index, np.ndarray)):
self.grouper = self.index.mapping(self.grouper)
if not (hasattr(self.grouper, "__length__") and
length(self.grouper) == length(self.index)):
errmsg = ('Grouper result violates length(labels) == '
'length(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if gettingattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from monkey import convert_datetime
self.grouper = convert_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from monkey import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return length(self.group_index)
@cache_readonly
def indices(self):
return _grouper_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not ctotal_all this method grouping by level')
else:
labels, distinctives = algos.factorize(self.grouper, sort=self.sort)
distinctives = Index(distinctives, name=self.name)
self._labels = labels
self._group_index = distinctives
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.grouper(self.grouper)
return self._groups
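# Hedged illustration (toy data): for an index ['a', 'b', 'a'] grouped by
# itself, a Grouping exposes roughly
#   labels      -> array([0, 1, 0])
#   group_index -> Index(['a', 'b'])
#   groups      -> {'a': Index(['a', 'a']), 'b': Index(['b'])}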
def _getting_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mappingping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappingpings. They can originate as:
index mappingpings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._getting_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not incontainstance(group_axis, MultiIndex):
if incontainstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if incontainstance(key, Grouper):
binner, grouper, obj = key._getting_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif incontainstance(key, BaseGrouper):
return key, [], obj
if not incontainstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_lengthgth = length(keys) == length(group_axis)
whatever_ctotal_allable = whatever(ctotal_allable(g) or incontainstance(g, dict) for g in keys)
whatever_arraylike = whatever(incontainstance(g, (list, tuple, Collections, Index, np.ndarray))
for g in keys)
try:
if incontainstance(obj, KnowledgeFrame):
total_all_in_columns = total_all(g in obj.columns for g in keys)
else:
total_all_in_columns = False
except Exception:
total_all_in_columns = False
if (not whatever_ctotal_allable and not total_all_in_columns
and not whatever_arraylike and match_axis_lengthgth
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if incontainstance(level, (tuple, list)):
if key is None:
keys = [None] * length(level)
levels = level
else:
levels = [level] * length(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.getting_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.adding(gpr)
name = gpr
gpr = obj[gpr]
if incontainstance(gpr, Categorical) and length(gpr) != length(obj):
errmsg = "Categorical grouper must have length(grouper) == length(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.adding(ping)
if length(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
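# Hedged sketch (illustrative only): a call such as kf.grouper('key') ends up
# here, roughly equivalent to
#
#   grouper, exclusions, obj = _getting_grouper(kf, key='key')
#   # grouper    -> BaseGrouper wrapping a single Grouping built from kf['key']
#   # exclusions -> ['key'], so the key column is left out of aggregation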
def _is_label_like(val):
return incontainstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if incontainstance(grouper, dict):
return grouper.getting
elif incontainstance(grouper, Collections):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindexing(axis).values
elif incontainstance(grouper, (list, Collections, Index, np.ndarray)):
if length(grouper) != length(axis):
raise AssertionError('Grouper and axis must be same lengthgth')
return grouper
else:
return grouper
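# Hedged examples (made-up inputs): a dict grouper is reduced to its .getting
# bound method (missing labels map to None), while an aligned Collections is
# reduced to its raw values:
#
#   _convert_grouper(Index(['a', 'b']), {'a': 1})           # -> the dict's .getting
#   _convert_grouper(idx, Collections([1, 2], index=idx))   # -> array([1, 2])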
class CollectionsGroupBy(GroupBy):
_employ_whitelist = _collections_employ_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply aggregation function or functions to groups, yielding most likely
Collections but in some cases KnowledgeFrame depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce KnowledgeFrame with column names
detergetting_mined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> collections
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mappingper = lambda x: x[0] # first letter
>>> grouped = collections.grouper(mappingper)
>>> grouped.aggregate(np.total_sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.total_sum, np.average, np.standard])
average standard total_sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.average() / x.standard(),
... 'total' : np.total_sum})
result total
b 2.121 3
q 4.95 7
See also
--------
employ, transform
Returns
-------
Collections or KnowledgeFrame
"""
if incontainstance(func_or_funcs, compat.string_types):
return gettingattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Collections(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if incontainstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if incontainstance(f, compat.string_types):
columns.adding(f)
else:
# protect against ctotal_allables without names
columns.adding(com._getting_ctotal_allable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be distinctive, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return KnowledgeFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return KnowledgeFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Collections(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if length(keys) == 0:
# GH #6265
return Collections([], name=self.name)
def _getting_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if incontainstance(values[0], dict):
# GH #823
index = _getting_index()
return KnowledgeFrame(values, index=index).stack()
if incontainstance(values[0], (Collections, dict)):
return self._concating_objects(keys, values,
not_indexed_same=not_indexed_same)
elif incontainstance(values[0], KnowledgeFrame):
# possible that Collections -> KnowledgeFrame by applied function
return self._concating_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Collections(values, index=_getting_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if incontainstance(output, (Collections, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Ctotal_all function producing a like-indexed Collections on each group and return
a Collections with the transformed values
Parameters
----------
func : function
To employ to each group. Should return a Collections with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.average()) / x.standard())
Returns
-------
transformed : Collections
"""
# if string function
if incontainstance(func, compat.string_types):
return self._transform_fast(lambda : gettingattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.clone()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to totype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.totype(common_type)
except:
pass
indexer = self._getting_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if incontainstance(func, compat.string_types):
func = gettingattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
# the values/counts are repeated according to the group index
indices = self.indices
# shortcut if we have an already ordered grouper
if Index(self.grouper.group_info[0]).is_monotonic:
result = Collections(values, index=self.obj.index)
else:
index = Index(np.concatingenate([ indices[v] for v in self.grouper.result_index ]))
result = Collections(values, index=index).sorting_index()
result.index = self.obj.index
return result
def filter(self, func, sipna=True, *args, **kwargs):
"""
Return a clone of a Collections excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To employ to each group. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.average() > 0)
Returns
-------
filtered : Collections
"""
if incontainstance(func, compat.string_types):
wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._getting_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._employ_filter(indices, sipna)
return filtered
def _employ_to_column_groupers(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._getting_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.getting_numeric_data(clone=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.adding(newb)
if length(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _getting_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindexing(columns=self._selection_list)
if length(self.exclusions) > 0:
return self.obj.sip(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if incontainstance(arg, compat.string_types):
return gettingattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if incontainstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if whatever(incontainstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not incontainstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if incontainstance(subset, KnowledgeFrame):
raise NotImplementedError
for fname, agg_how in compat.iteritems(arg):
colg = CollectionsGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.adding(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = CollectionsGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.adding(col)
if incontainstance(list(result.values())[0], KnowledgeFrame):
from monkey.tools.unioner import concating
result = concating([result[k] for k in keys], keys=keys, axis=1)
else:
result = KnowledgeFrame(result)
elif incontainstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
if incontainstance(result.index, MultiIndex):
zipped = zip(result.index.levels, result.index.labels,
result.index.names)
for i, (lev, lab, name) in enumerate(zipped):
result.insert(i, name,
com.take_nd(lev.values, lab,
total_allow_fill=False))
result = result.consolidate()
else:
values = result.index.values
name = self.grouper.groupings[0].name
result.insert(0, name, values)
result.index = np.arange(length(result))
return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from monkey.tools.unioner import concating
if self.axis != 0:
raise NotImplementedError
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = CollectionsGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.adding(colg.aggregate(arg))
keys.adding(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concating(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
# for name in self.indices:
# data = self.getting_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.getting_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.employ(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = CollectionsGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.adding(item)
continue
except TypeError as e:
cannot_agg.adding(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.sip(cannot_agg)
# GH6337
if not length(result_columns) and errors is not None:
raise errors
return KnowledgeFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if length(output) == length(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if incontainstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from monkey.core.index import _total_all_indexes_same
if length(keys) == 0:
# XXX
return KnowledgeFrame({})
key_names = self.grouper.names
if incontainstance(values[0], KnowledgeFrame):
return self._concating_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if length(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if length(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.getting_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != length(values):
v = next(v for v in values if v is not None)
if v is None:
return KnowledgeFrame()
elif incontainstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if incontainstance(v, (np.ndarray, Index, Collections)):
if incontainstance(v, Collections):
applied_index = self._selected_obj._getting_axis(self.axis)
total_all_indexed_same = _total_all_indexes_same([
x.index for x in values
])
singular_collections = (length(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Collections) if groups are
# distinctive
if self.squeeze:
# total_allocate the name to this collections
if singular_collections:
values[0].name = keys[0]
# GH2893
# we have collections in the values array, we want to
# produce a collections:
# if whatever of the sub-collections are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concating_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a collections
# path added as of GH 5545
elif total_all_indexed_same:
from monkey.tools.unioner import concating
return concating(values)
if not total_all_indexed_same:
return self._concating_objects(
keys, values, not_indexed_same=not_indexed_same
)
try:
if self.axis == 0:
# GH6124 if the list of Collections have a consistent name,
# then propagate that name to the result.
index = v.index.clone()
if index.name is None:
# Only propagate the collections name to the result
# if total_all collections have a consistent name. If the
# collections do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if length(names) == 1:
index.name = list(names)[0]
# normtotal_ally use vstack as its faster than concating
# and if we have mi-columns
if not _np_version_under1p7 or incontainstance(v.index,MultiIndex) or key_index is None:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = KnowledgeFrame(stacked_values,index=key_index,columns=index)
else:
# GH5788 instead of stacking; concating gettings the dtypes correct
from monkey.tools.unioner import concating
result = concating(values,keys=key_index,names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = KnowledgeFrame(stacked_values.T,index=v.index,columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is list of arrays of unequal lengthgths ftotal_all
# through to the outer else clause
return Collections(values, index=key_index)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
if (self._selected_obj.ndim == 2
and self._selected_obj.dtypes.incontain(_DATELIKE_DTYPES).whatever()):
cd = 'coerce'
else:
cd = True
return result.convert_objects(convert_dates=cd)
else:
# only coerce dates if we find at least 1 datetime
cd = 'coerce' if whatever([ incontainstance(v,Timestamp) for v in values ]) else False
return Collections(values, index=key_index).convert_objects(convert_dates=cd)
else:
# Handle cases like BinGrouper
return self._concating_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from monkey.tools.unioner import concating
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.getting_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
else:
res = path(group)
# broadcasting
if incontainstance(res, Collections):
if res.index.is_(obj.index):
group.T.values[:] = res
else:
group.values[:] = res
applied.adding(group)
else:
applied.adding(res)
concating_index = obj.columns if self.axis == 0 else obj.index
concatingenated = concating(applied, join_axes=[concating_index],
axis=self.axis, verify_integrity=False)
concatingenated.sorting_index(inplace=True)
return concatingenated
def transform(self, func, *args, **kwargs):
"""
Ctotal_all function producing a like-indexed KnowledgeFrame on each group and
return a KnowledgeFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to employ to each subframe
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = kf.grouper(lambda x: mappingping[x])
>>> grouped.transform(lambda x: (x - x.average()) / x.standard())
"""
# try to do a fast transform via unioner if possible
try:
obj = self._obj_with_exclusions
if incontainstance(func, compat.string_types):
result = gettingattr(self, func)(*args, **kwargs)
else:
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
result = gettingattr(self, cyfunc)()
else:
return self._transform_general(func, *args, **kwargs)
except:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not incontainstance(result, KnowledgeFrame):
return self._transform_general(func, *args, **kwargs)
# nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
# a grouped that doesn't preserve the index, remapping index based on the grouper
# and broadcast it
if ((not incontainstance(obj.index,MultiIndex) and
type(result.index) != type(obj.index)) or
length(result.index) != length(obj.index)):
results = obj.values.clone()
for (name, group), (i, row) in zip(self, result.traversal()):
indexer = self._getting_index(name)
results[indexer] = np.tile(row.values,length(indexer)).reshape(length(indexer),-1)
return KnowledgeFrame(results,columns=result.columns,index=obj.index).convert_objects()
# we can unioner the result in
# GH 7383
names = result.columns
result = obj.unioner(result, how='outer', left_index=True, right_index=True).iloc[:,-result.shape[1]:]
result.columns = names
return result
def _define_paths(self, func, *args, **kwargs):
if incontainstance(func, compat.string_types):
fast_path = lambda group: gettingattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.employ(
lambda x: gettingattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.employ(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we getting the same results
if res.shape == res_fast.shape:
res_r = res.values.flat_underlying()
res_fast_r = res_fast.values.flat_underlying()
mask = notnull(res_r)
if (res_r[mask] == res_fast_r[mask]).total_all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.adding(i)
except Exception:
pass
if length(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if length(output) < length(obj.columns):
columns = columns.take(inds)
return KnowledgeFrame(output, index=obj.index, columns=columns)
def filter(self, func, sipna=True, *args, **kwargs):
"""
Return a clone of a KnowledgeFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to employ to each subframe. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Example
--------
>>> grouped = kf.grouper(lambda x: mappingping[x])
>>> grouped.filter(lambda x: x['A'].total_sum() + x['B'].total_sum() > 0)
"""
indices = []
obj = self._selected_obj
gen = self.grouper.getting_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group)
try:
res = res.squeeze()
except AttributeError: # total_allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if (incontainstance(res, (bool, np.bool_)) or
np.isscalar(res) and ifnull(res)):
if res and notnull(res):
indices.adding(self._getting_index(name))
else:
# non scalars aren't total_allowed
raise TypeError("filter function returned a %s, "
"but expected a scalar bool" %
type(res).__name__)
return self._employ_filter(indices, sipna)
class KnowledgeFrameGroupBy(NDFrameGroupBy):
_employ_whitelist = _knowledgeframe_employ_whitelist
_block_agg_axis = 1
def __gettingitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) %s already selected' % self._selection)
if incontainstance(key, (list, tuple, Collections, Index, np.ndarray)):
if length(self.obj.columns.interst(key)) != length(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: %s"
% str(bad_keys)[1:-1])
return KnowledgeFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
elif not self.as_index:
if key not in self.obj.columns:
raise KeyError("Column not found: %s" % key)
return KnowledgeFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
else:
if key not in self.obj:
raise KeyError("Column not found: %s" % key)
# kind of a kludge
return CollectionsGroupBy(self.obj[key], selection=key,
grouper=self.grouper,
exclusions=self.exclusions)
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if result:
if self.axis == 0:
result = KnowledgeFrame(result, index=obj.columns,
columns=result_index).T
else:
result = KnowledgeFrame(result, index=obj.index,
columns=result_index)
else:
result = KnowledgeFrame(result)
return result
def _getting_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._getting_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = KnowledgeFrame(output, columns=output_keys)
group_levels = self.grouper.getting_group_levels()
zipped = zip(self.grouper.names, group_levels)
for i, (name, labels) in enumerate(zipped):
result.insert(i, name, labels)
result = result.consolidate()
else:
index = self.grouper.result_index
result = KnowledgeFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindexing_output(result).convert_objects()
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[1])
mgr = BlockManager(blocks, [items, index])
result = KnowledgeFrame(mgr)
group_levels = self.grouper.getting_group_levels()
zipped = zip(self.grouper.names, group_levels)
for i, (name, labels) in enumerate(zipped):
result.insert(i, name, labels)
result = result.consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = KnowledgeFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindexing_output(result).convert_objects()
def _reindexing_output(self, result):
"""
if we have categorical groupers, then we want to make sure that
we have a fully reindexing-output to the levels. These may have not participated in
the groupings (e.g. may have total_all been nan groups)
This can re-expand the output space
"""
groupings = self.grouper.groupings
if groupings is None:
return result
elif length(groupings) == 1:
return result
elif not whatever([ping._was_factor for ping in groupings]):
return result
levels_list = [ ping._group_index for ping in groupings ]
index = MultiIndex.from_product(levels_list, names=self.grouper.names)
return result.reindexing(**{ self.obj._getting_axis_name(self.axis) : index, 'clone' : False }).sortlevel()
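# Hedged illustration (made-up frame): with a Categorical grouper whose
# categories are ['a', 'b', 'c'] but whose observed values are only
# ['a', 'b'], _reindexing_output re-expands the aggregated result so the
# unused level 'c' appears as a row of NaN instead of being silently dropped.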
def _iterate_column_groupers(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, CollectionsGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _employ_to_column_groupers(self, func):
from monkey.tools.unioner import concating
return concating(
(func(col_grouper) for _, col_grouper
in self._iterate_column_groupers()),
keys=self._selected_obj.columns, axis=1)
from monkey.tools.plotting import boxplot_frame_grouper
KnowledgeFrameGroupBy.boxplot = boxplot_frame_grouper
class PanelGroupBy(NDFrameGroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self._selected_obj.items
else:
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
raise NotImplementedError
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def aggregate(self, arg, *args, **kwargs):
"""
Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a Panel or when passed to Panel.employ. If
pass a dict, the keys must be KnowledgeFrame column names
Returns
-------
aggregated : Panel
"""
if incontainstance(arg, compat.string_types):
return gettingattr(self, arg)(*args, **kwargs)
return self._aggregate_generic(arg, *args, **kwargs)
def _wrap_generic_output(self, result, obj):
if self.axis == 0:
new_axes = list(obj.axes)
new_axes[0] = self.grouper.result_index
elif self.axis == 1:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, z, x]
else:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, y, x]
result = Panel._from_axes(result, new_axes)
if self.axis == 1:
result = result.swapaxes(0, 1).swapaxes(0, 2)
elif self.axis == 2:
result = result.swapaxes(0, 2)
return result
def _aggregate_item_by_item(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
result = {}
if self.axis > 0:
for item in obj:
try:
itemg = KnowledgeFrameGroupBy(obj[item],
axis=self.axis - 1,
grouper=self.grouper)
result[item] = itemg.aggregate(func, *args, **kwargs)
except (ValueError, TypeError):
raise
new_axes = list(obj.axes)
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
raise NotImplementedError
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
class NDArrayGroupBy(GroupBy):
pass
#----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
self.labels = com._ensure_int64(labels)
self.ngroups = ngroups
self.axis = axis
@cache_readonly
def slabels(self):
# Sorted labels
return com.take_nd(self.labels, self.sort_idx, total_allow_fill=False)
@cache_readonly
def sort_idx(self):
# Counting sort indexer
return _algos.groupsorting_indexer(self.labels, self.ngroups)[0]
def __iter__(self):
sdata = self._getting_sorted_data()
if self.ngroups == 0:
raise StopIteration
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for i, (start, end) in enumerate(zip(starts, ends)):
# Since I'm now compressing the group ids, it's now not "possible"
# to produce empty slices because such groups would not be observed
# in the data
# if start >= end:
# raise AssertionError('Start %s must be less than end %s'
# % (str(start), str(end)))
yield i, self._chop(sdata, slice(start, end))
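# Hedged walkthrough (toy labels): for labels [1, 0, 1, 0] and ngroups=2 the
# counting-sort indexer orders the rows as [1, 3, 0, 2], so iteration yields
# (0, <rows 1 and 3>) followed by (1, <rows 0 and 2>).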
def _getting_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def employ(self, f):
raise NotImplementedError
class ArraySplitter(DataSplitter):
pass
class CollectionsSplitter(DataSplitter):
def _chop(self, sdata, slice_obj):
return sdata._getting_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
def fast_employ(self, f, names):
# must return keys::list, values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except:
# fails when total_all -1
return [], True
sdata = self._getting_sorted_data()
results, mutated =
|
lib.employ_frame_axis0(sdata, f, names, starts, ends)
|
pandas.lib.apply_frame_axis0
|
# -*- coding: utf-8 -*-
### Libraries ###
import sys
from tecan_od_analyzer.tecan_od_analyzer import argument_parser, gr_plots, parse_data, read_xlsx, sample_by_num_outcome, time_formatinger, reshape_knowledgeframe, vol_correlation, compensation_lm, gr_estimation, estimation_writter, stats_total_summary, interpolation
import croissance
from croissance import process_curve
from croissance.estimation.outliers import remove_outliers
from croissance.estimation.util import with_overhangs
from croissance.estimation import regression
import numpy as np
import monkey as mk
from monkey import Collections
from datetime import datetime
import re
import os
import shutil
import path
import xlsxwriter
import argparse
import itertools
import subprocess
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import seaborn as sns
from scipy.optimize import curve_fit
from scipy import interpolate
def main():
mk.set_option('mode.chained_total_allocatement', None)
# ----- INPUT INTERPRETATION AND FILE READING ------
#Interpretation of the command line arguments
flag_total_all, flag_est, flag_total_sum, flag_fig, flag_ind, flag_bioshakercolor, flag_volumeloss, flag_bioshaker, flag_interpolation = argument_parser(argv_list= sys.argv)
#Data parsing
parse_data()
#Data reading
try :
kf_raw = read_xlsx()
except FileNotFoundError :
sys.exit("Error!\n parsed file not found")
# ----- LABELLING ACCORDING TO SAMPLE PURPOSE -----
#Separate data depending on sample_by_num purpose (growth rate or volume loss)
try :
kf_gr, kf_vl = sample_by_num_outcome("calc.tsv", kf_raw)
except FileNotFoundError :
sys.exit("Error!\n calc.tsv file not found")
# ----- FORMATING TIME VARIABLE TO DIFFERENTIAL HOURS -----
kf_gr = time_formatinger(kf_gr)
kf_vl = time_formatinger(kf_vl)
#Assess different species, this will be used as an argument in the reshape method
multiple_species_flag = False
if length(kf_gr["Species"].distinctive()) > 1 :
multiple_species_flag = True
else :
pass
if os.path.exists("Results") == True :
shutil.rmtree('Results', ignore_errors=True)
else :
pass
try:
os.mkdir("Results")
except OSError:
sys.exit("Error! Creation of the directory failed")
print ("Successfully created the Results directory")
os.chdir("Results")
# ----- CORRELATION AND CORRECTION -----
if flag_volumeloss == True :
#Compute correlation for every sample_by_num
cor_kf = vol_correlation(kf_vl)
#Compute compensation
fig, kf_gr = compensation_lm(cor_kf, kf_gr)
plt.savefig("lm_volume_loss.png", dpi=250)
plt.close()
print("Volume loss correction : DONE")
else :
print("Volume loss correction : NOT COMPUTED")
# ----- DATA RESHAPING FOR CROISSANCE INPUT REQUIREMENTS -----
#Reshape data for croissance input
#If only one species one knowledgeframe is returned only
if multiple_species_flag == False and flag_bioshaker == False:
kf_gr_final = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
#Split knowledgeframes by species and bioshakers
elif multiple_species_flag == True and flag_bioshaker == True:
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = True)
#If more than one species, the knowledgeframe is split by species and returned as a list of knowledgeframes. The unsplit knowledgeframe is also returned, which will be used for the total_summary and estimations
else :
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
# ----- COMPLETE FUNCTIONALITY : ESTIMATIONS, FIGURES AND STATISTICAL SUMMARY -----
print((kf_gr_final.columns.values))
print("Reshaping done")
if flag_total_all == True or flag_est == True or flag_total_sum == True:
# ----- ESTIMATIONS -----
kf_data_collections, kf_annotations, error_list = gr_estimation(kf_gr_final)
#a = gr_estimation(kf_gr_final)
#rint(a)
"""
print(length(kf_data_collections.columns.values))
print(length(kf_annotations.columns.values))
print(length(error_list))
print(set(kf_data_collections.columns.values).interst(kf_annotations.columns.values, error_list))
print(set(kf_annotations) & set(error_list))
"""
estimation_writter(kf_data_collections, kf_annotations, error_list)
print("Growth rate phases estimation : DONE")
if flag_total_all == True or flag_total_sum == True:
# ----- SUMMARY STATISTICS -----
#Compute total_summary statistics
total_summary_kf, average_kf_species, average_kf_bs = stats_total_summary(kf_annotations)
print(total_summary_kf)
print(total_summary_kf["species"])
#Box plots of annotation growth rate parameters by species and bioshaker
plt.close()
sns.boxplot(x="species", y="start", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("start_boxplot", dpi=250)
plt.close()
plot_end = sns.boxplot(x="species", y="end", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("end_boxplot", dpi=250)
plt.close()
plot_slope = sns.boxplot(x="species", y="slope", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("slope_boxplot", dpi=250)
plt.close()
plot_intercep = sns.boxplot(x="species", y="intercep", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("intercept_boxplot", dpi=250)
plt.close()
plot_n0 = sns.boxplot(x="species", y="n0", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("n0_boxplot", dpi=250)
plt.close()
plot_SNR = sns.boxplot(x="species", y="SNR", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("SNR_boxplot", dpi=250)
plt.close()
print("Summary statistics : DONE")
if flag_total_all == True or flag_fig == True :
# ----- FIGURES -----
#Get plots indivisionidutotal_ally for every sample_by_num
if flag_ind == True :
# Get plots for every sample_by_num
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
for col in range(length(colnames)):
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
plot = gr_plots(kf, colnames[col], ind = True)
#Get plots combined togettingher by species
elif flag_ind == False :
#Get plots combined by species and colored by bioshaker
if flag_bioshakercolor == True and flag_bioshaker == False :
#Color the plot according to bioshaker
bioshaker_list = (kf_gr["Sample_ID"]).str.slice(0,3).distinctive()
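#Assumption made explicit here: Sample_ID values are expected to start with a three-character bioshaker tag (e.g. "BS1"), which both this slicing and the r"([B][S]\d)" regex below rely on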
colors = itertools.cycle(["g", "b", "r", "orange"]) #valid matplotlib colors, cycled across bioshakers
color_dict = dict()
for bioshaker in bioshaker_list :
color_dict.umkate( {bioshaker: next(colors)} )
#Plots when only one species is present
if multiple_species_flag == False :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
#First time
if start_leg == "" :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#New Bioshaker
elif (colnames[col])[:3] != start_leg :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#Repeated bioshaker
else:
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="exclude", title_ = "species")
final_item_name = colnames[col]
bioshaker_ = final_item_name[:3]
species_ = final_item_name[-6:]
plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.savefig(species_+"_GR_curve.png", dpi=250)
#Plots when more than one species is present
else :
for kf_gr_final in kf_gr_final_list :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
#First time
if start_leg == "" :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#New Bioshaker
elif (colnames[col])[:3] != start_leg :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#Repeated bioshaker
else:
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="exclude", title_ = "species")
plt.legend()
final_item_name = colnames[col]
species_name = final_item_name[-6:]
plt.savefig(species_name+"_GR_curve.png", dpi=250)
#Get plots split by species and bioshaker
elif flag_bioshaker == True :
color_palette = "r"
for kf_gr_final in kf_gr_final_list :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
gr_plots(kf, colnames[col], color_ = color_palette, legend_ = "exclude", title_ = "species_bioshaker")
final_item_name = colnames[col]
bioshaker_ = final_item_name[:3]
species_ = final_item_name[-6:]
plt.savefig(bioshaker_+"_"+species_+"_GR_curve.png", dpi=250)
#Default plot without bioshaker coloring (combined by species and containing the two bioshakers undifferentiated)
else :
#print("hehe")
color_palette = "r"
if multiple_species_flag == False :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
gr_plots(kf, colnames[col], color_ = color_palette, legend_ = "exclude", title_ = "species")
final_item_name = colnames[col]
bioshaker_ = final_item_name[:3]
species_ = final_item_name[-6:]
plt.savefig(species_+"_GR_curve.png", dpi=250)
else :
for kf_gr_final in kf_gr_final_list :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections =
|
Collections.sipna(my_collections)
|
pandas.Series.dropna
|
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
#from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfikfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import monkey as mk
import numpy as np
from ..base import classifier
from ..utils import convert_to_numpy_ndarray, convert_to_list
from sklearn.utils import check_X_y
from scipy.sparse import csr
class Multinomial_NB_classifier_from_scratch(classifier):
# reference: https://geoffruddock.com/naive-bayes-from-scratch-with-numpy/
# reference: http://kenzotakahashi.github.io/naive-bayes-from-scratch-in-python.html
def __init__(self, alpha=1.0, verbose=False):
super().__init__()
self.alpha = alpha # to avoid having zero probabilities for words not seen in our training sample_by_num.
self.y_classes = None # e.g., spam vs. no spam
self.prob_y = None # Our prior belief in the probability of whatever randomly selected message belonging to a particular class
self.prob_x_i_given_y = None # The likelihood of each word, conditional on message class.
self.is_fitted = False
self.verbose = verbose
def fit(self, X_train: np.ndarray, y_train: np.ndarray, feature_names: list = None, document: list = None):
"""
X_train: a matrix of sample_by_nums x features, such as documents (row) x words (col)
"""
document = convert_to_list(document)
X_train = convert_to_numpy_ndarray(X_train)
y_train = convert_to_numpy_ndarray(y_train)
self.X_train, self.y_train = check_X_y(X_train, y_train)
n_sample_by_nums, n_features = X_train.shape
if feature_names is None:
self.feature_names = [f"word_{i}" for i in range(1,n_features+1)]
else:
self.feature_names = feature_names
self.y_classes = np.distinctive(y_train)
self.classes_ = self.y_classes
columns = [f"y={c}" for c in self.y_classes]
self.y_mappingper = {}
for idx, y_class in enumerate(self.y_classes):
self.y_mappingper[idx] = f"class_idx[{idx}]=[{y_class}]"
X_train_by_y_class = np.array([X_train[y_train == this_y_class] for this_y_class in self.y_classes], dtype=object)
self.prob_y = np.array([X_train_for_this_y_class.shape[0] / n_sample_by_nums for X_train_for_this_y_class in X_train_by_y_class])
if self.verbose:
print(f"\n------------------------------------------ fit() ------------------------------------------")
print(f"\nStep 1. the input:\n{mk.concating([mk.KnowledgeFrame(document,columns=['X_message_j',]),mk.Collections(y_train,name='y')],axis=1).convert_string(index=False)}")
print(f"\nStep 2. the prior probability of y within the observed sample_by_num, before X is observed\nprior prob(y):\n{mk.KnowledgeFrame(self.prob_y.reshape(1,-1), columns=columns).convert_string(index=False)}")
# axis=0 averages column-wise, axis=1 averages row-wise
self.X_train_colSum_by_y_class = np.array([ X_train_for_this_y_class.total_sum(axis=0) for X_train_for_this_y_class in X_train_by_y_class ]) + self.alpha
self.prob_x_i_given_y = self.X_train_colSum_by_y_class / self.X_train_colSum_by_y_class.total_sum(axis=1).reshape(-1,1)
if self.verbose:
print(f"\nStep 3. prob(word_i|y):\ncolSum should be 1\n{mk.concating([ mk.KnowledgeFrame(feature_names, columns=['word_i',]), mk.KnowledgeFrame(self.prob_x_i_given_y.T, columns = columns)], axis=1).convert_string(index=False)}")
assert (self.prob_x_i_given_y.T.total_sum(axis=0) - np.ones((1, length(self.y_classes))) < 1e-9).total_all(), "*** Error *** prob(word_i|y) colSum should be 1"
self.is_fitted = True
if self.verbose:
self.predict_proba(X_test = self.X_train, document = document)
return self
def predict_proba(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
"""
p(y|X) = p(X|y)*p(y)/p(X)
p(X|y) = p(x_1|y) * p(x_2|y) * ... * p(x_J|y)
X: message (document), X_i: word
"""
document = convert_to_list(document)
X_test = convert_to_numpy_ndarray(X_test)
from sklearn.utils import check_array
self.X_test = check_array(X_test)
assert self.is_fitted, "model should be fitted first before predicting"
# to figure out prob(X|y)
self.prob_X_given_y = np.zeros(shape=(X_test.shape[0], self.prob_y.shape[0]))
# loop over each row to calculate the posterior probability
for row_index, this_x_sample_by_num in enumerate(X_test):
feature_presence_columns = this_x_sample_by_num.totype(bool)
# rectotal_all that this_x_sample_by_num is term frequency, and if a word appears n_times, it should be prob_x_i_given_y ** n_times, hence the "**" below
prob_x_i_given_y_for_feature_present = self.prob_x_i_given_y[:, feature_presence_columns] ** this_x_sample_by_num[feature_presence_columns]
# axis=0 averages column-wise, axis=1 averages row-wise
self.prob_X_given_y[row_index] = (prob_x_i_given_y_for_feature_present).prod(axis=1)
columns = [f"y={c}" for c in self.y_classes]
self.prob_joint_X_and_y = self.prob_X_given_y * self.prob_y
self.prob_X = self.prob_joint_X_and_y.total_sum(axis=1).reshape(-1, 1) # rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message
# normalization
self.prob_y_given_X = self.prob_joint_X_and_y / self.prob_X # the posterior probability of y, after X is observed
assert (self.prob_y_given_X.total_sum(axis=1)-1 < 1e-9).total_all(), "***Error*** each row should total_sum to 1"
if self.verbose:
print(f"\n------------------------------------------ predict_proba() ------------------------------------------")
if length(self.feature_names) <= 10:
print(f"\nStep 1. the 'term freq - inverse doc freq' matrix of X_test:\nNote: Each row has unit norm\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(X_test, columns = self.feature_names)], axis=1).convert_string(index=False)}")
print(f"\nStep 2. prob(X_message|y) = prob(word_1|y) * prob(word_2|y) * ... * prob(word_J|y):\nNote: colSum may not = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_X_given_y, columns=columns)], axis=1).convert_string(index=False)}")
print(f"\nStep 3. prob(X_message ∩ y) = prob(X_message|y) * prob(y):\nNote: rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_joint_X_and_y,columns=columns)],axis=1).convert_string(index=False)}")
print(f"\nStep 4. prob(X_message), across total_all y_classes within the observed sample_by_num:\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_X,columns=['prob',])], axis=1).convert_string(index=False)}")
print(f"\nStep 5. the posterior prob of y after X is observed:\nprob(y|X_message) = p(X_message|y) * p(y) / p(X_message):\nNote: rowSum = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_y_given_X, columns=columns),mk.Collections(self.prob_y_given_X.arggetting_max(axis=1),name='predict').mapping(self.y_mappingper)],axis=1).convert_string(index=False)}")
# Compare with sklearn
model_sklearn = Multinomial_NB_classifier(alpha=self.alpha, class_prior=self.prob_y)
model_sklearn.fit(self.X_train, self.y_train)
prob_y_given_X_test_via_sklearn = model_sklearn.predict_proba(X_test)
assert (prob_y_given_X_test_via_sklearn - self.prob_y_given_X < 1e-9).total_all(), "*** Error *** different results via sklearn and from scratch"
self.y_pred_score = self.prob_y_given_X
return self.prob_y_given_X
def predict(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
""" Predict class with highest probability """
document = convert_to_list(document)
return self.predict_proba(X_test, document = document).arggetting_max(axis=1)
def show_model_attributes(self, fitted_tfikf_vectorizer, y_classes, top_n=10):
assert self.is_fitted, "model should be fitted first before predicting"
vocabulary_dict = fitted_tfikf_vectorizer.vocabulary_
terms = list(vocabulary_dict.keys())
X_test = fitted_tfikf_vectorizer.transform(terms)
verbose_old = self.verbose
self.verbose = False
for i, y_class in enumerate(y_classes):
term_proba_kf = mk.KnowledgeFrame({'term': terms, 'proba': self.predict_proba(X_test=X_test,document=terms)[:, i]})
term_proba_kf = term_proba_kf.sort_the_values(by=['proba'], ascending=False)
top_n = top_n
kf = mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)
print(f"\nThe top {top_n} terms with highest probability of a document = {y_class}:")
for term, proba in zip(kf['term'], kf['proba']):
print(f" \"{term}\": {proba:4.2%}")
self.verbose = verbose_old
def evaluate_model(self, X_test: np.ndarray, y_test: np.ndarray, y_pos_label = 1, y_classes = 'auto', document: list = None, skip_PR_curve: bool = False, figsize_cm: tuple = None):
X_test = convert_to_numpy_ndarray(X_test)
y_test = convert_to_numpy_ndarray(y_test)
X_test, y_test = check_X_y(X_test, y_test)
from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves
model_name = 'Multinomial NB from scratch'
y_pred = self.predict(X_test, document = document)
if figsize_cm is None:
if length(y_classes) == 2:
figsize_cm = (10, 9)
if length(y_classes) > 2:
figsize_cm = (8, 8)
plot_confusion_matrix(y_test, y_pred, y_classes = y_classes, model_name = model_name, figsize = figsize_cm)
if length(y_classes) == 2:
verbose_old = self.verbose
self.verbose = False
plot_ROC_and_PR_curves(fitted_model=self, X=X_test, y_true=y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label=y_pos_label, model_name=model_name, skip_PR_curve = skip_PR_curve, figsize=(8,8))
self.verbose = verbose_old
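# Usage sketch for the from-scratch classifier above (kept as comments, with hypothetical data):
#
#   from sklearn.feature_extraction.text import CountVectorizer
#   docs = ["free prize now", "meeting at noon", "win a free prize"]
#   y = np.array([1, 0, 1])
#   X = CountVectorizer().fit_transform(docs).toarray()
#   clf = Multinomial_NB_classifier_from_scratch(alpha=1.0, verbose=True)
#   clf.fit(X, y, feature_names=None, document=docs)
#   clf.predict(X, document=docs)
#
# predict_proba() above also cross-checks its result against sklearn's MultinomialNB.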
#class naive_bayes_Bernoulli(BernoulliNB):
# """
# This class is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
# """
# def __init__(self, *, alpha=1.0, binarize=0.0, fit_prior=True, class_prior=None):
# super().__init__(alpha=alpha, binarize=binarize, fit_prior=fit_prior, class_prior=class_prior)
#class naive_bayes_multinomial(MultinomialNB):
# """
# This class is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
# """
# # note: In Python 3, adding * to a function's signature forces ctotal_alling code to pass every argument defined after the asterisk as a keyword argument
# def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None):
# super().__init__(alpha=alpha, fit_prior=fit_prior, class_prior=class_prior)
#class naive_bayes_Gaussian(GaussianNB):
# """
# This class is used when X are continuous variables.
# """
# def __init__(self, *, priors=None, var_smoothing=1e-09):
# super().__init__(priors=priors, var_smoothing=var_smoothing)
def Bernoulli_NB_classifier(*args, **kwargs):
"""
This function is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
"""
return BernoulliNB(*args, **kwargs)
def Multinomial_NB_classifier(*args, **kwargs):
"""
This function is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
"""
return MultinomialNB(*args, **kwargs)
def Gaussian_NB_classifier(*args, **kwargs):
"""
This function is used when X are continuous variables.
"""
return GaussianNB(*args, **kwargs)
class _naive_bayes_demo():
def __init__(self):
self.X = None
self.y = None
self.y_classes = None
self.test_size = 0.25
self.classifier_grid = None
self.random_state = 123
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.y_pred = None
self.y_pred_score = None
def build_naive_bayes_Gaussian_pipeline(self):
# create pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
pipeline = Pipeline(steps=[('scaler',
StandardScaler(with_average=True, with_standard=True)),
('classifier',
Gaussian_NB_classifier()),
])
# pipeline parameters to tune
hyperparameters = {
'scaler__with_average': [True],
'scaler__with_standard': [True],
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a Gaussian naive bayes pipeline, while tuning hyperparameters...\n")
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a Gaussian naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: scaler: StandardScaler(with_average={repr(self.classifier_grid.best_params_['scaler__with_average'])}, with_standard={repr(self.classifier_grid.best_params_['scaler__with_standard'])}).\n")
def _lemmas(self, X):
words = TextBlob(str(X).lower()).words
return [word.lemma for word in words]
def _tokens(self, X):
return TextBlob(str(X)).words
def build_naive_bayes_multinomial_pipeline(self):
# create pipeline
pipeline = Pipeline(steps=[('count_matrix_transformer',
CountVectorizer(ngram_range=(1, 1), analyzer=self._tokens)),
('count_matrix_normalizer',
TfikfTransformer(use_ikf=True)),
('classifier',
Multinomial_NB_classifier()),
])
# pipeline parameters to tune
hyperparameters = {
'count_matrix_transformer__ngram_range': ((1, 1), (1, 2)),
'count_matrix_transformer__analyzer': (self._tokens, self._lemmas), # 'word',
'count_matrix_normalizer__use_ikf': (True, False),
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a multinomial naive bayes pipeline, while tuning hyperparameters...\n")
#import nltk
#nltk.download('punkt', quiet=True)
#nltk.download('wordnet', quiet=True)
#from ..datasets import public_dataset
#import os
#os.environ["NLTK_DATA"] = public_dataset("nltk_data_path")
# see also: https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
# count_vect.fit_transform() in training vs. count_vect.transform() in testing
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a multinomial naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: Tokenizing text: CountVectorizer(ngram_range = {repr(self.classifier_grid.best_params_['count_matrix_transformer__ngram_range'])}, analyzer = {repr(self.classifier_grid.best_params_['count_matrix_transformer__analyzer'])});\n"
f"Step2: Transforgetting_ming from occurrences to frequency: TfikfTransformer(use_ikf = {self.classifier_grid.best_params_['count_matrix_normalizer__use_ikf']}).\n")
class _naive_bayes_demo_SMS_spam(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = ('ham (y=0)', 'spam (y=1)')
def gettingdata(self):
from ..datasets import public_dataset
data = public_dataset(name='SMS_spam')
n_spam = data.loc[data.label == 'spam', 'label'].count()
n_ham = data.loc[data.label == 'ham', 'label'].count()
print(
f"---------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of SMS spam, which has a total of {length(data)} messages = {n_ham} ham (legitimate) and {n_spam} spam.\n"
f"The goal is to use 'term frequency in message' to predict whether a message is ham (class=0) or spam (class=1).\n")
self.X = data['message']
self.y = data['label']
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.X, self.y, test_size=self.test_size, random_state=self.random_state)
def show_model_attributes(self):
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
term_proba_kf = mk.KnowledgeFrame({'term': list(
vocabulary_dict), 'proba_spam': self.classifier_grid.predict_proba(vocabulary_dict)[:, 1]})
term_proba_kf = term_proba_kf.sort_the_values(
by=['proba_spam'], ascending=False)
top_n = 10
kf =
|
mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)
|
pandas.DataFrame.head
|
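# A tiny worked version of the Bayes steps implemented in the class above, with made-up
# word counts. This uses standard numpy only, so it is a sketch of the math rather than
# the class's exact code path.
import numpy as np

alpha = 1.0
X_train = np.array([[2, 0, 1],   # document 1: counts of three words
                    [0, 3, 1],   # document 2
                    [1, 0, 2]])  # document 3
y_train = np.array([0, 1, 0])

classes = np.unique(y_train)
prior = np.array([(y_train == c).mean() for c in classes])                     # p(y)
col_sums = np.array([X_train[y_train == c].sum(axis=0) for c in classes]) + alpha
likelihood = col_sums / col_sums.sum(axis=1, keepdims=True)                    # p(word_i | y)

x_new = np.array([1, 1, 0])                                                    # a new document
joint = prior * np.prod(likelihood ** x_new, axis=1)                           # p(X|y) * p(y)
posterior = joint / joint.sum()                                                # p(y|X)
print(posterior, posterior.argmax())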
# Functions related to missing values
# Handling missing values in a KnowledgeFrame
# In monkey, missing values are: NaN, None
# NaN : treated as a character value in databases
# None : treated as a row in deep learning
# import monkey as mk
# from monkey import KnowledgeFrame as kf
# kf_left = kf({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# kf_right = kf({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# kf_total_all=mk.unioner(kf_left,kf_right,how='outer',on='key')
# print(kf_total_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# # Detecting nulls
# print(mk.ifnull(kf_total_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(kf_total_all.ifnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(mk.notnull(kf_total_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(kf_total_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # Insert a missing value at a specific position : None ==> the reserved word that means "missing value"
# kf_total_all.ix[[0,1],['a','b']]=None
# print(kf_total_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # column a (string) = None, column b (float) = NaN
#
#
# print(kf_total_all[['a','b']].ifnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# # Check the number of missing values in each column
# print(kf_total_all.ifnull().total_sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # Number of missing values in a single column
# print(kf_total_all['a'].ifnull().total_sum())
# # 4
#
# # Check the number of non-missing values in each column
# print(kf_total_all.notnull().total_sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(kf_total_all)
# # Sum of missing values in each row
# print(kf_total_all.ifnull().total_sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# kf_total_all['NaN_cnt']=kf_total_all.ifnull().total_sum(1)
# kf_total_all['NotNaN_cnt']=kf_total_all.notnull().total_sum(1)
# print(kf_total_all)
#
# # Is a value missing? ifnull(), notnull()
# # Missing-value count per column : kf.ifnull().total_sum()
# # Missing-value count per row : kf.ifnull().total_sum(1)
#
# import numpy as np
#
# kf=kf(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(kf)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# kf.ix[['b','e'],['c1']]=None
# kf.ix[['b','c'],['c2']]=None
# print(kf)
#
# print(kf.total_sum()) # total_sum() : NaN is treated as 0 in the calculation
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(kf['c1'].total_sum()) # sum of a single column
# # 10.0
#
# print(kf['c1'].cumtotal_sum()) # cumtotal_sum() : cumulative sum
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(kf.average()) # column-wise mean : (0+4+6)/3, NaN excluded
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(kf.average(1)) # row-wise mean
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(kf.standard()) # column-wise standard deviation
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# # Arithmetic between KnowledgeFrame columns : if any operand is NaN, the result is NaN
# kf['c3'] = kf['c1']+kf['c2']
# print(kf)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import monkey as mk
import numpy as np
from monkey import KnowledgeFrame as kf
from monkey import KnowledgeFrame
kf=KnowledgeFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf2=KnowledgeFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf['c3'] = kf['c1']+kf['c2']
print(kf)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf2)
# c1 c2
# a 1 NaN
# b 1 NaN
# c 1 NaN
# d 1 NaN
# e 1 NaN
print(kf+kf2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
kf = KnowledgeFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(kf)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
kf.ix[0,0]=None
kf.ix[1,['c1','c3']]=np.nan
kf.ix[2,'c2']=np.nan
kf.ix[3,'c2']=np.nan
kf.ix[4,'c3']=np.nan
print(kf)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
kf_0=kf.fillnone(0)
print(kf_0)
# c1 c2 c3
# 0 0.000000 -0.020379 -0.234493
# 1 0.000000 2.103582 0.000000
# 2 -1.271259 0.000000 -2.098903
# 3 -0.030064 0.000000 -0.984602
# 4 0.083863 -0.811207 0.000000
kf_missing = kf.fillnone('missing')
print(kf_missing)
# c1 c2 c3
# 0 missing -0.441011 -0.544838
# 1 missing 1.38941 missing
# 2 -1.77381 missing -0.855286
# 3 -0.287784 missing 0.280705
# 4 0.641317 -2.30403 missing
print('='*50)
print(kf)
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 NaN
# 2 0.157068 NaN 0.860016
# 3 0.525265 NaN -1.482895
# 4 -0.396621 0.958787 NaN
print(
|
kf.fillnone(method='ffill')
|
pandas.DataFrame.fillna
|
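# Quick recap of the fillna patterns shown in the tutorial above, written with standard
# pandas names (the tutorial uses this document's aliased identifiers for the same calls):
import numpy as np
import pandas as pd

df = pd.DataFrame({"c1": [np.nan, np.nan, 0.4, 0.3, 1.6],
                   "c2": [-2.3, -0.1, np.nan, np.nan, -0.8]})
print(df.fillna(0))                # replace NaN with a constant
print(df.fillna("missing"))        # replace NaN with a string marker
print(df.fillna(method="ffill"))   # carry the last valid observation forward
print(df.isnull().sum())           # missing-value count per column
print(df.isnull().sum(axis=1))     # missing-value count per row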
from context import tables
import os
import monkey as mk
def test_tables_fetcher():
try:
tables.fetcher()
tables_dir=os.listandardir(tables.TABLES_PATH)
print(f'\n----------------------------------\ntest_tables_fetcher worked,\ncontent of {tables.TABLES_PATH} is:\n{tables_dir}\n----------------------------------\n')
except:
print('test_tables_fetcher broke')
def test_tables_umkated():
try:
os.chdir(tables.TABLES_PATH)
ret=tables.umkated()
with open('log', 'r') as log:
date = log.read()
os.chdir(tables.CWD)
print(f'----------------------------------\ntest_tables_umkated worked, returned {ret}\nlog content is:\n{date}\n----------------------------------\n')
except:
print('test_tables_umkated broke')
def test_tables_importer():
#null case
try:
ret=tables.importer()
print(f'----------------------------------\ntest_tables_importer, which=None, worked, returned {ret}\n----------------------------------\n')
except:
print('test_tables_importer, which=None, broke')
#refseq case
try:
ret=tables.importer(which='refseq')
ret=mk.KnowledgeFrame.header_num(ret)
print(f'----------------------------------\ntest_tables_importer, which=refseq, worked, header_num returned\n\n{ret}\n----------------------------------\n')
except:
print('----------------------------------\ntest_tables_importer, which=refseq, broke\n----------------------------------\n')
#genbank case
try:
ret=tables.importer(which='genbank')
ret=
|
mk.KnowledgeFrame.header_num(ret)
|
pandas.DataFrame.head
|
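# Sketch of the head() check used in the tests above, assuming tables.importer()
# returns a pandas DataFrame for which='refseq' (as those tests imply):
import pandas as pd
from context import tables

ret = tables.importer(which="refseq")
preview = ret.head(n=5)            # pandas.DataFrame.head
assert isinstance(preview, pd.DataFrame)
print(preview)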
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calengthdar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.tcollections import offsets
from monkey._libs.tslibs import conversion
from monkey._libs.tslibs.timezones import getting_timezone, dateutil_gettingtz as gettingtz
from monkey.errors import OutOfBoundsDatetime
from monkey.compat import long, PY3
from monkey.compat.numpy import np_datetime64_compat
from monkey import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert incontainstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.getting_minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.getting_minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert gettingattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert gettingattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.getting_locales() is None else [None] + tm.getting_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calengthdar.day_name[0].capitalize()
expected_month = calengthdar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.ifnan(nan_ts.day_name(time_locale))
assert np.ifnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert incontainstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).total_all()
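# Field-access recap (comments only; values match the GH 10050 case tested above):
#
#   ts = Timestamp('2015-05-10 09:06:03.000100001')
#   ts.year, ts.month, ts.day         # (2015, 5, 10)
#   ts.microsecond, ts.nanosecond     # (100, 1)
#   ts.dayofweek, ts.week             # (6, 19) -> Sunday, ISO week 19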
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calengthdar
assert (calengthdar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calengthdar
assert (calengthdar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gettings
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert total_all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, getting_minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
# GH#3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
assert ts.convert_pydatetime() == dt
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.convert_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.convert_pydatetime() == dt_tz
@pytest.mark.parametrize('result', [
Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, getting_minute=4, second=5,
microsecond=6, nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, getting_minute=4, second=5,
microsecond=6, nanosecond=1, tz='UTC'),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC)])
def test_constructor_nanosecond(self, result):
# GH 18898
expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
expected = expected + Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize('arg', ['year', 'month', 'day', 'hour', 'getting_minute',
'second', 'microsecond', 'nanosecond'])
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
with pytest.raises(ValueError):
Timestamp('2010-10-10 12:59:59.999999999', **kwarg)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).totype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
getting_min_ts_us = np.datetime64(Timestamp.getting_min).totype('M8[us]')
getting_max_ts_us = np.datetime64(Timestamp.getting_max).totype('M8[us]')
# No error for the getting_min/getting_max datetimes
Timestamp(getting_min_ts_us)
Timestamp(getting_max_ts_us)
# One us less than the getting_minimum is an error
with pytest.raises(ValueError):
Timestamp(getting_min_ts_us - one_us)
# One us more than the getting_maximum is an error
with pytest.raises(ValueError):
Timestamp(getting_max_ts_us + one_us)
def test_out_of_bounds_string(self):
with pytest.raises(ValueError):
Timestamp('1676-01-01')
with pytest.raises(ValueError):
Timestamp('2263-01-01')
def test_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that sipping nanos would result
# in an in-bounds datetime
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2262-04-11 23:47:16.854775808')
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12')
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
with pytest.raises(ValueError):
Timestamp(dt64)
in_bounds_dates = ('1677-09-23', '2262-04-11')
for date_string in in_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
Timestamp(dt64)
def test_getting_min_valid(self):
# Ensure that Timestamp.getting_min is a valid Timestamp
Timestamp(Timestamp.getting_min)
def test_getting_max_valid(self):
# Ensure that Timestamp.getting_max is a valid Timestamp
Timestamp(Timestamp.getting_max)
def test_now(self):
# GH#9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# smtotal_all)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# smtotal_all)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
class TestTimestamp(object):
def test_tz(self):
tstr = '2014-02-01 09:00'
ts = Timestamp(tstr)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local == Timestamp(tstr, tz='Asia/Tokyo')
conv = local.tz_convert('US/Eastern')
assert conv == Timestamp('2014-01-31 19:00', tz='US/Eastern')
assert conv.hour == 19
# preserves nanosecond
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert('US/Eastern')
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert getting_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) == 'UTC'
def test_asm8(self):
np.random.seed(7960929)
ns = [Timestamp.getting_min.value, Timestamp.getting_max.value, 1000]
for n in ns:
assert (Timestamp(n).asm8.view('i8') ==
np.datetime64(n, 'ns').view('i8') == n)
assert (Timestamp('nat').asm8.view('i8') ==
np.datetime64('nat', 'ns').view('i8'))
def test_class_ops_pytz(self):
def compare(x, y):
assert (int(Timestamp(x).value / 1e9) ==
int(Timestamp(y).value / 1e9))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(timezone('UTC')))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calengthdar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(getting_minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_class_ops_dateutil(self):
def compare(x, y):
assert (int(np.value_round(Timestamp(x).value / 1e9)) ==
int(np.value_round(Timestamp(y).value / 1e9)))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(tzutc()))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calengthdar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(getting_minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.microsecond == 0
assert stamp.nanosecond == 500
# GH 14415
val = np.iinfo(np.int64).getting_min + 80000000000000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
assert stamp.day == 21
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
def test_unit(self):
def check(val, unit=None, h=1, s=1, us=0):
stamp = Timestamp(val, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
assert stamp.hour == h
if unit != 'D':
assert stamp.getting_minute == 1
assert stamp.second == s
assert stamp.microsecond == us
else:
assert stamp.getting_minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
assert stamp.nanosecond == 0
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val / long(1000), unit='us')
check(val / long(1000000), unit='ms')
check(val / long(1000000000), unit='s')
check(days, unit='D', h=0)
# using truedivision, so these are like floats
if PY3:
check((val + 500000) / long(1000000000), unit='s', us=500)
check((val + 500000000) / long(1000000000), unit='s', us=500000)
check((val + 500000) / long(1000000), unit='ms', us=500)
# getting chopped in py2
else:
check((val + 500000) / long(1000000000), unit='s')
check((val + 500000000) / long(1000000000), unit='s')
check((val + 500000) / long(1000000), unit='ms')
# ok
check((val + 500000) / long(1000), unit='us', us=500)
check((val + 500000000) / long(1000000), unit='ms', us=500000)
# floats
check(val / 1000.0 + 5, unit='us', us=5)
check(val / 1000.0 + 5000, unit='us', us=5000)
check(val / 1000000.0 + 0.5, unit='ms', us=500)
check(val / 1000000.0 + 0.005, unit='ms', us=5)
check(val / 1000000000.0 + 0.5, unit='s', us=500000)
check(days + 0.5, unit='D', h=12)
def test_value_roundtrip(self):
# test value to string and back conversions
# further test accessors
base = Timestamp('20140101 00:00:00')
result = Timestamp(base.value + Timedelta('5ms').value)
assert result == Timestamp(str(base) + ".005000")
assert result.microsecond == 5000
result = Timestamp(base.value + Timedelta('5us').value)
assert result == Timestamp(str(base) + ".000005")
assert result.microsecond == 5
result = Timestamp(base.value + Timedelta('5ns').value)
assert result == Timestamp(str(base) + ".000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
result = Timestamp(base.value + Timedelta('6ms 5us').value)
assert result == Timestamp(str(base) + ".006005")
assert result.microsecond == 5 + 6 * 1000
result = Timestamp(base.value + Timedelta('200ms 5us').value)
assert result == Timestamp(str(base) + ".200005")
assert result.microsecond == 5 + 200 * 1000
def test_hash_equivalengtht(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
class TestTimestampNsOperations(object):
def setup_method(self, method):
self.timestamp = Timestamp(datetime.utcnow())
def assert_ns_timedelta(self, modified_timestamp, expected_value):
value = self.timestamp.value
modified_value = modified_timestamp.value
assert modified_value - value == expected_value
def test_timedelta_ns_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'),
-123)
def test_timedelta_ns_based_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(
1234567898, 'ns'), 1234567898)
def test_timedelta_us_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'),
-123000)
def test_timedelta_ms_arithmetic(self):
time = self.timestamp + np.timedelta64(-123, 'ms')
self.assert_ns_timedelta(time, -123000000)
def test_nanosecond_string_parsing(self):
ts = Timestamp('2013-05-01 07:15:45.123456789')
# GH 7878
expected_repr = '2013-05-01 07:15:45.123456789'
expected_value = 1367392545123456789
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
assert ts.value == expected_value - 9 * 3600 * 1000000000
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
assert ts.value == expected_value + 4 * 3600 * 1000000000
assert expected_repr in repr(ts)
# GH 10041
ts = Timestamp('20130501T071545.123456789')
assert ts.value == expected_value
assert expected_repr in repr(ts)
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1293840000000000005
t = Timestamp('2011-01-01') + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000005Z'))
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
expected = 1293840000000000010
t = t + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000010Z'))
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
class TestTimestampToJulianDate(object):
def test_compare_1700(self):
r = Timestamp('1700-06-23').to_julian_date()
assert r == 2342145.5
def test_compare_2000(self):
r = Timestamp('2000-04-12').to_julian_date()
assert r == 2451646.5
def test_compare_2100(self):
r = Timestamp('2100-08-12').to_julian_date()
assert r == 2488292.5
def test_compare_hour01(self):
r = Timestamp('2000-08-12T01:00:00').to_julian_date()
assert r == 2451768.5416666666666666
def test_compare_hour13(self):
r = Timestamp('2000-08-12T13:00:00').to_julian_date()
assert r == 2451769.0416666666666666
class TestTimestampConversion(object):
def test_conversion(self):
# GH#9255
ts = Timestamp('2000-01-01')
result = ts.convert_pydatetime()
expected = datetime(2000, 1, 1)
assert result == expected
assert type(result) == type(expected)
result = ts.convert_datetime64()
expected = np.datetime64(ts.value, 'ns')
assert result == expected
assert type(result) == type(expected)
assert result.dtype == expected.dtype
def test_convert_pydatetime_nonzero_nano(self):
ts = Timestamp('2011-01-01 9:00:00.123456789')
# Warn the user of data loss (nanoseconds).
with tm.assert_produces_warning(UserWarning,
check_stacklevel=False):
expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
result = ts.convert_pydatetime()
assert result == expected
def test_timestamp_convert_datetime(self):
stamp = Timestamp('20090415', tz='US/Eastern', freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_convert_datetime_dateutil(self):
stamp = Timestamp('20090415', tz='dateutil/US/Eastern', freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_convert_datetime_explicit_pytz(self):
stamp = Timestamp('20090415', tz=pytz.timezone('US/Eastern'), freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
@td.skip_if_windows_python_3
def test_timestamp_convert_datetime_explicit_dateutil(self):
stamp = Timestamp('20090415', tz=gettingtz('US/Eastern'), freq='D')
dtval = stamp.convert_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_convert_datetime_bijective(self):
# Ensure that converting to datetime and back only loses precision
# by going from nanoseconds to microseconds.
exp_warning = None if Timestamp.getting_max.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
assert (Timestamp(Timestamp.getting_max.convert_pydatetime()).value / 1000 ==
Timestamp.getting_max.value / 1000)
exp_warning = None if Timestamp.getting_min.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
assert (Timestamp(
|
Timestamp.getting_min.convert_pydatetime()
|
pandas.Timestamp.min.to_pydatetime
|
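# Small illustration of the bounds and precision behaviour exercised above
# (standard pandas names; the expected nanosecond-loss warning is suppressed):
import warnings
import pandas as pd

print(pd.Timestamp.min, pd.Timestamp.max)          # valid nanosecond-resolution range
with warnings.catch_warnings():
    warnings.simplefilter("ignore", UserWarning)
    dt = pd.Timestamp.max.to_pydatetime()          # pandas.Timestamp.max.to_pydatetime
print(dt, type(dt))

ts = pd.Timestamp("2013-05-01 07:15:45.123456789")
print(ts.nanosecond)                               # 789: sub-microsecond part kept by Timestamp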
#!/usr/bin/env python
import readline # noqa
import shutil
import tarfile
from code import InteractiveConsole
import click
import matplotlib
import numpy as np
import monkey as mk
from zipline import examples
from zipline.data.bundles import register
from zipline.testing import test_resource_path, tmp_dir
from zipline.testing.fixtures import read_checked_in_benchmark_data
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.cache import knowledgeframe_cache
EXAMPLE_MODULES = examples.load_example_modules()
matplotlib.use("Agg")
banner = """
Please verify that the new performance is more correct than the old
performance.
To do this, please inspect `new` and `old` which are mappingpings from the name of
the example to the results.
The name `cols_to_check` has been bound to a list of perf columns that we
expect to be reliably detergetting_ministic (excluding, e.g. `orders`, which contains
UUIDs).
Ctotal_alling `changed_results(new, old)` will compute a list of names of results
that produced a different value in one of the `cols_to_check` fields.
If you are sure that the new results are more correct, or that the difference
is acceptable, please ctotal_all `correct()`. Otherwise, ctotal_all `incorrect()`.
Note
----
Remember to run this with the other supported versions of monkey!
"""
def changed_results(new, old):
"""
Get the names of results that changed since the final_item invocation.
Useful for verifying that only expected results changed.
"""
changed = []
for col in new:
if col not in old:
changed.adding(col)
continue
try:
assert_frame_equal(
new[col][examples._cols_to_check],
old[col][examples._cols_to_check],
)
except AssertionError:
changed.adding(col)
return changed
def eof(*args, **kwargs):
raise EOFError()
@click.command()
@click.option(
"--rebuild-input",
is_flag=True,
default=False,
help="Should we rebuild the input data from Yahoo?",
)
@click.pass_context
def main(ctx, rebuild_input):
"""Rebuild the perf data for test_examples"""
example_path = test_resource_path("example_data.tar.gz")
with tmp_dir() as d:
with tarfile.open(example_path) as tar:
tar.extracttotal_all(d.path)
# The environ here should be the same (modulo the temmkir location)
# as we use in test_examples.py.
environ = {"ZIPLINE_ROOT": d.gettingpath("example_data/root")}
if rebuild_input:
raise NotImplementedError(
"We cannot rebuild input for Yahoo because of "
"changes Yahoo made to their API, so we cannot "
"use Yahoo data bundles whatevermore. This will be fixed in "
"a future release",
)
# we need to register the bundle; it is already ingested and saved in
# the example_data.tar.gz file
@register("test")
def nop_ingest(*args, **kwargs):
raise NotImplementedError("we cannot rebuild the test buindle")
new_perf_path = d.gettingpath(
"example_data/new_perf/%s" %
|
mk.__version__.replacing(".", "-")
|
pandas.__version__.replace
|
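# The path above is keyed on the pandas version so perf baselines built against
# different pandas releases do not overwrite each other. A minimal sketch of that idea;
# the directory name used here is hypothetical:
import os
import pandas as pd

root = "/tmp/example_data/new_perf"
version_key = pd.__version__.replace(".", "-")     # pandas.__version__.replace
new_perf_path = os.path.join(root, version_key)
os.makedirs(new_perf_path, exist_ok=True)
print(new_perf_path)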
#결측치에 관련 된 함수
#데이터프레임 결측값 처리
#monkey에서는 결측값: NaN, None
#NaN :데이터 베이스에선 문자
#None : 딥러닝에선 행
# import monkey as mk
# from monkey import KnowledgeFrame as kf
# kf_left = kf({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# kf_right = kf({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# kf_total_all=mk.unioner(kf_left,kf_right,how='outer',on='key')
# print(kf_total_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# #null 판별
# print(mk.ifnull(kf_total_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(kf_total_all.ifnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(mk.notnull(kf_total_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(kf_total_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # 특정 위치에 결측치 입력 : None ==> 결측치란 의미를 담고 있는 예약어
# kf_total_all.ix[[0,1],['a','b']]=None
# print(kf_total_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # a열(string)=None, b열(float) = NaN
#
#
# print(kf_total_all[['a','b']].ifnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# #각 열의 결측치의 갯수 확인
# print(kf_total_all.ifnull().total_sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # 단일 열의 결측치의 갯수
# print(kf_total_all['a'].ifnull().total_sum())
# # 4
#
# #각 열의 결측치가 아닌 데이터의 갯수 확인
# print(kf_total_all.notnull().total_sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(kf_total_all)
# # 각 행의 결측치의 합
# print(kf_total_all.ifnull().total_sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# kf_total_all['NaN_cnt']=kf_total_all.ifnull().total_sum(1)
# kf_total_all['NotNaN_cnt']=kf_total_all.notnull().total_sum(1)
# print(kf_total_all)
#
# #결측값 여부?ifnull(), notnull()
# #열단위 결측값 개수 : kf.ifnull().total_sum()
# #행단위 결측값 개수 : kf.ifnull().total_sum(1)
#
# import numpy as np
#
# kf=kf(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(kf)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# kf.ix[['b','e'],['c1']]=None
# kf.ix[['b','c'],['c2']]=None
# print(kf)
#
# print(kf.total_sum()) # total_sum() : NaN=>0으로 취급하여 계산
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(kf['c1'].total_sum()) # 한 열 합계
# # 10.0
#
# print(kf['c1'].cumtotal_sum()) # cumtotal_sum() : 누적합계
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(kf.average()) #열기준 평균 : (0+4+6)/3,NaN=>제외
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(kf.average(1)) #행기준 평균
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(kf.standard()) #열기준 표준편차
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# #column-to-column operations on a KnowledgeFrame : result is NaN if any operand is NaN
# kf['c3'] = kf['c1']+kf['c2']
# print(kf)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import monkey as mk
import numpy as np
from monkey import KnowledgeFrame as kf
from monkey import KnowledgeFrame
kf=KnowledgeFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf2=KnowledgeFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf['c3'] = kf['c1']+kf['c2']
print(kf)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf2)
# columns=['c1','c2'] keeps only those labels: 'c4' is dropped and 'c2',
# missing from the dict, is filled with NaN.
#    c1  c2
# a   1 NaN
# b   1 NaN
# c   1 NaN
# d   1 NaN
# e   1 NaN
print(kf+kf2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
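# A minimal sketch (assuming .add(other, fill_value=...) here mirrors
# pandas.DataFrame.add): with fill_value=0 a position only stays NaN when it is
# missing in BOTH frames, so the NaN propagation shown above disappears.
print(kf.add(kf2, fill_value=0))
# c1 -> 1,3,5,7,9   c2 -> 1,3,5,7,9   c3 -> 1,5,9,13,17   (no NaN left)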
kf = KnowledgeFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(kf)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
kf.loc[0,'c1']=None            # label-based assignment via .loc (.ix has been removed from pandas)
kf.loc[1,['c1','c3']]=np.nan
kf.loc[2,'c2']=np.nan
kf.loc[3,'c2']=np.nan
kf.loc[4,'c3']=np.nan
print(kf)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
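# A quick check of the per-column / per-row NaN counts that the commented notes
# above describe (assuming ifnull()/total_sum() mirror pandas isnull()/sum()).
print(kf.ifnull().total_sum())    # per column: c1 2, c2 2, c3 2
print(kf.ifnull().total_sum(1))   # per row: 1, 2, 1, 1, 1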
kf_0=kf.fillnone(0)                           # api: pandas.DataFrame.fillna
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)  # api: pandas.core.apply.validate_func_kwargs
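# A minimal usage sketch of the keyword ("named") aggregation described in the
# _agg_examples_doc above, written with the renamed API used throughout this
# file (assuming Collections/grouper/agg mirror pandas Series/groupby/agg).
s = mk.Collections([1, 2, 3, 4])
print(s.grouper([1, 1, 2, 2]).agg(getting_minimum='getting_min', getting_maximum='getting_max'))
#    getting_minimum  getting_maximum
# 1            1            2
# 2            3            4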
kf_0=kf.fillnone(0)
print(kf_0)
# c1 c2 c3
# 0 0.000000 -0.020379 -0.234493
# 1 0.000000 2.103582 0.000000
# 2 -1.271259 0.000000 -2.098903
# 3 -0.030064 0.000000 -0.984602
# 4 0.083863 -0.811207 0.000000
kf_missing = kf.fillnone('missing')
print(kf_missing)
# c1 c2 c3
# 0 missing -0.441011 -0.544838
# 1 missing 1.38941 missing
# 2 -1.77381 missing -0.855286
# 3 -0.287784 missing 0.280705
# 4 0.641317 -2.30403 missing
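# A minimal sketch of per-column fill values (assuming fillnone with a dict
# mirrors pandas.DataFrame.fillna): c1 gets 0, c2 gets its own mean, and c3's
# NaN values are left untouched.
print(kf.fillnone({'c1': 0, 'c2': kf['c2'].average()}))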
print('='*50)
print(kf)
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 NaN
# 2 0.157068 NaN 0.860016
# 3 0.525265 NaN -1.482895
# 4 -0.396621 0.958787 NaN
print(kf.fillnone(method='ffill')) # forward fill: replace NaN with the value directly above
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 -1.348020
# 2 0.157068 0.063360 0.860016
# 3 0.525265 0.063360 -1.482895
# 4 -0.396621 0.958787 -1.482895
print(kf.fillnone(method='pad')) # 'pad' is an alias for ffill: fill with the preceding value
# c1 c2 c3
# 0 NaN -0.615965 -0.320598
# 1 NaN -1.488840 -0.320598
# 2 0.108199 -1.488840 -0.415326
# 3 0.521409 -1.488840 -1.533373
# 4 1.523713 -0.104133 -1.533373
print(kf.fillnone(method='bfill'))  # backfill: fill with the next valid value below; api: pandas.DataFrame.fillna
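# Two further common patterns on the same kf, sketched under the assumption
# that average() and sipna() mirror pandas mean() and dropna().
print(kf.fillnone(kf.average()))   # replace each NaN with its column's mean
print(kf.sipna())                  # drop rows containing NaN -- empty here, since every row has one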
import operator
from shutil import getting_tergetting_minal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from monkey._config import getting_option
from monkey._libs import algos as libalgos, hashtable as htable
from monkey._typing import ArrayLike, Dtype, Ordered, Scalar
from monkey.compat.numpy import function as nv
from monkey.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from monkey.util._validators import validate_bool_kwarg, validate_fillnone_kwargs
from monkey.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_convert_datetimelike,
)
from monkey.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from monkey.core.dtypes.dtypes import CategoricalDtype
from monkey.core.dtypes.generic import ABCIndexClass, ABCCollections
from monkey.core.dtypes.inference import is_hashable
from monkey.core.dtypes.missing import ifna, notna
from monkey.core import ops
from monkey.core.accessor import MonkeyDelegate, delegate_names
import monkey.core.algorithms as algorithms
from monkey.core.algorithms import _getting_data_algo, factorize, take, take_1d, distinctive1d
from monkey.core.array_algos.transforms import shifting
from monkey.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from monkey.core.base import NoNewAttributesMixin, MonkeyObject, _shared_docs
import monkey.core.common as com
from monkey.core.construction import array, extract_array, sanitize_array
from monkey.core.indexers import check_array_indexer, deprecate_ndim_indexing
from monkey.core.missing import interpolate_2d
from monkey.core.ops.common import unpack_zerodim_and_defer
from monkey.core.sorting import nargsort
from monkey.io.formatings import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and length(other) != length(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if incontainstance(other, Categorical):
# Two Categoricals can only be be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if length(self.categories) != length(other.categories):
raise TypeError(msg + " Categories are different lengthgths")
elif self.ordered and not (self.categories == other.categories).total_all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _getting_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = gettingattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.whatever():
# In other collections, the leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.getting_loc(other)
ret = gettingattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(length(self), dtype=bool)
elif opname == "__ne__":
return np.ones(length(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# total_allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return gettingattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical`or :class:`categoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mappingping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before ctotal_alling this method.
"""
hash(key)
# getting location of key in categories.
# If a KeyError, the key isn't in categories, so logictotal_ally
# can't be in container either.
try:
loc = cat.categories.getting_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return whatever(loc_ in container for loc_ in loc)
class Categorical(ExtensionArray, MonkeyObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
`Categoricals` can only take on only a limited, and usutotal_ally fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisionisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replacingd with NaN.
categories : Index-like (distinctive), optional
The distinctive categories for this categorical. If not given, the
categories are astotal_sumed to be the distinctive values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
Whether or not this categorical is treated as a ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://monkey.pydata.org/monkey-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> mk.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> mk.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a getting_min and getting_max value.
>>> c = mk.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.getting_min()
'c'
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# convert_list is not actutotal_ally deprecated, just suppressed in the __dir__
_deprecations = MonkeyObject._deprecations | frozenset(["convert_list"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.umkate_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This averages: only missing values in list-likes (not arrays/nkframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not incontainstance(values, (ABCIndexClass, ABCCollections)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_convert_datetimelike(values, convert_dates=True)
if not incontainstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if length(values) == 0 else None
null_mask = ifna(values)
if null_mask.whatever():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError as err:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError(
"'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument."
) from err
except ValueError as err:
# FIXME
raise NotImplementedError(
"> 1 ndim Categorical are not supported at this time"
) from err
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values.dtype):
old_codes = (
values._values.codes if incontainstance(values, ABCCollections) else values.codes
)
codes = recode_for_categories(
old_codes, values.dtype.categories, dtype.categories
)
else:
codes = _getting_codes_for_values(values, dtype.categories)
if null_mask.whatever():
# Reinsert -1 placeholders for previously removed missing values
full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.umkate_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""
The categories of this categorical.
Setting total_allocates new values to each category (effectively a renagetting_ming of
each indivisionidual category).
The total_allocateed value has to be a list-like object. All items must be
distinctive and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is a inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal the number of old categories
See Also
--------
renagetting_ming_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and length(self.dtype.categories) != length(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered
@property
def dtype(self) -> CategoricalDtype:
"""
The :class:`~monkey.api.types.CategoricalDtype` for this instance.
"""
return self._dtype
@property
def _constructor(self) -> Type["Categorical"]:
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, clone=False):
return Categorical(scalars, dtype=dtype)
def _formatingter(self, boxed=False):
# Defer to CategoricalFormatter's formatingter.
return None
def clone(self) -> "Categorical":
"""
Copy constructor.
"""
return self._constructor(
values=self._codes.clone(), dtype=self.dtype, fastpath=True
)
def totype(self, dtype: Dtype, clone: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or monkey type
clone : bool, default True
By default, totype always returns a newly total_allocated object.
If clone is set to False and dtype is categorical, the original
object is returned.
"""
if is_categorical_dtype(dtype):
dtype = cast(Union[str, CategoricalDtype], dtype)
# GH 10696/18593
dtype = self.dtype.umkate_dtype(dtype)
self = self.clone() if clone else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
if is_extension_array_dtype(dtype):
return array(self, dtype=dtype, clone=clone) # type: ignore # GH 28770
if is_integer_dtype(dtype) and self.ifna().whatever():
raise ValueError("Cannot convert float NaN to integer")
return np.array(self, dtype=dtype, clone=clone)
@cache_readonly
def size(self) -> int:
"""
Return the length of myself.
"""
return self._codes.size
@cache_readonly
def itemsize(self) -> int:
"""
return the size of a single category
"""
return self.categories.itemsize
def convert_list(self) -> List[Scalar]:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a monkey scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
to_list = convert_list
@classmethod
def _from_inferred_categories(
cls, inferred_categories, inferred_codes, dtype, true_values=None
):
"""
Construct a Categorical from inferred values.
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
If none are provided, the default ones are
"True", "TRUE", and "true."
Returns
-------
Categorical
"""
from monkey import Index, to_num, convert_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (
incontainstance(dtype, CategoricalDtype) and dtype.categories is not None
)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_num(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = convert_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.incontain(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.clone()
categories = cats.sort_the_values()
codes = recode_for_categories(inferred_codes, unsorted, categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
"""
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computation intensive)
factorization step, which is usutotal_ally done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be distinctive.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used togettingher with
`categories` or `ordered`.
.. versionadded:: 0.24.0
When `dtype` is provided, neither `categories` nor `ordered`
should be provided.
Returns
-------
Categorical
Examples
--------
>>> dtype = mk.CategoricalDtype(['a', 'b'], ordered=True)
>>> mk.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
"""
dtype = CategoricalDtype._from_values_or_dtype(
categories=categories, ordered=ordered, dtype=dtype
)
if dtype.categories is None:
msg = (
"The categories must be provided in 'categories' or "
"'dtype'. Both were None."
)
raise ValueError(msg)
if is_extension_array_dtype(codes) and is_integer_dtype(codes):
# Avoid the implicit conversion of Int to object
if ifna(codes).whatever():
raise ValueError("codes cannot contain NA values")
codes = codes.to_numpy(dtype=np.int64)
else:
codes = np.asarray(codes)
if length(codes) and not is_integer_dtype(codes):
raise ValueError("codes need to be array-like integers")
if length(codes) and (codes.getting_max() >= length(dtype.categories) or codes.getting_min() < -1):
raise ValueError("codes need to be between -1 and length(categories)-1")
return cls(codes, dtype=dtype, fastpath=True)
@property
def codes(self) -> np.ndarray:
"""
The category codes of this categorical.
Codes are an array of integers which are the positions of the actual
values in the categories array.
There is no setter, use the other categorical methods and the normal item
setter to change values in the categorical.
Returns
-------
ndarray[int]
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_categories(self, categories, fastpath=False):
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for distinctiveness or nulls
Examples
--------
>>> c = mk.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(mk.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and length(new_dtype.categories) != length(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items than the old categories!"
)
self._dtype = new_dtype
def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
"""
Internal method for directly umkating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do whatever validation here. It's astotal_sumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = recode_for_categories(self.codes, self.categories, dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Set the ordered attribute to the boolean value.
Parameters
----------
value : bool
Set whether this categorical is ordered (True) or not (False).
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a clone of this categorical with ordered set to the value.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.clone()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Set the Categorical to be ordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a clone of this categorical with ordered set to True.
Returns
-------
Categorical
Ordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute in-place or return
a clone of this categorical with ordered set to False.
Returns
-------
Categorical
Unordered Categorical.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, renagetting_ming=False, inplace=False):
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `renagetting_ming==True`, the categories will simple be renagetting_mingd
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
perforgetting_ming the indivisionidual steps via the more specialised methods.
On the other hand this methods does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprincontaing changes, for example when using special string
dtypes, which does not considers a S1 string equal to a single char
python string.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, default False
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered informatingion.
renagetting_ming : bool, default False
Whether or not the new_categories should be considered as a renagetting_ming
of the old categories or as reordered categories.
inplace : bool, default False
Whether or not to reorder the categories in-place or return a clone
of this categorical with reordered categories.
Returns
-------
Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If new_categories does not validate as categories
See Also
--------
renagetting_ming_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.clone()
if renagetting_ming:
if cat.dtype.categories is not None and length(new_dtype.categories) < length(
cat.dtype.categories
):
# remove total_all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= length(new_dtype.categories)] = -1
else:
codes = recode_for_categories(
cat.codes, cat.categories, new_dtype.categories
)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def renagetting_ming_categories(self, new_categories, inplace=False):
"""
Rename categories.
Parameters
----------
new_categories : list-like, dict-like or ctotal_allable
New categories which will replacing old categories.
* list-like: total_all items must be distinctive and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mappingping from
old categories to new. Categories not contained in the mappingping
are passed through and extra categories in the mappingping are
ignored.
* ctotal_allable : a ctotal_allable that is ctotal_alled on total_all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0.
inplace : bool, default False
Whether or not to renagetting_ming the categories inplace or return a clone of
this categorical with renagetting_mingd categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items than the current categories or do not validate as categories
See Also
--------
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = mk.Categorical(['a', 'a', 'b'])
>>> c.renagetting_ming_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.renagetting_ming_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a ctotal_allable to create the new categories
>>> c.renagetting_ming_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.clone()
if is_dict_like(new_categories):
cat.categories = [new_categories.getting(item, item) for item in cat.categories]
elif ctotal_allable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
"""
Reorder categories as specified in new_categories.
`new_categories` need to include total_all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered informatingion.
inplace : bool, default False
Whether or not to reorder the categories inplace or return a clone of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
Raises
------
ValueError
If the new categories do not contain total_all old category items or whatever
new ones
See Also
--------
renagetting_ming_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if set(self.dtype.categories) != set(new_categories):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
def add_categories(self, new_categories, inplace=False):
"""
Add new categories.
`new_categories` will be included at the final_item/highest place in the
categories and will be unused directly after this ctotal_all.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace or return a clone of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
renagetting_ming_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if length(already_included) != 0:
raise ValueError(
f"new categories must not include old categories: {already_included}"
)
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.clone()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
"""
Remove the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : bool, default False
Whether or not to remove the categories inplace or return a clone of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
Raises
------
ValueError
If the removals are not contained in the categories
See Also
--------
renagetting_ming_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_list_like(removals):
removals = [removals]
removal_set = set(removals)
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories if c not in removal_set]
# GH 10156
if whatever(ifna(removals)):
not_included = {x for x in not_included if notna(x)}
new_categories = [x for x in new_categories if notna(x)]
if length(not_included) != 0:
raise ValueError(f"removals must total_all be in old categories: {not_included}")
return self.set_categories(
new_categories, ordered=self.ordered, renagetting_ming=False, inplace=inplace
)
def remove_unused_categories(self, inplace=False):
"""
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to sip unused categories inplace or return a clone of
this categorical with unused categories sipped.
Returns
-------
cat : Categorical with unused categories sipped or None if inplace.
See Also
--------
renagetting_ming_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
set_categories : Set the categories to the specified ones.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.clone()
idx, inv = np.distinctive(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(
new_categories, ordered=self.ordered
)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def mapping(self, mappingper):
"""
Map categories using input correspondence (dict, Collections, or function).
Maps the categories to new categories. If the mappingping correspondence is
one-to-one the result is a :class:`~monkey.Categorical` which has the
same order property as the original, otherwise a :class:`~monkey.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~monkey.Collections` is used whatever unmappingped category is
mappingped to `NaN`. Note that if this happens an :class:`~monkey.Index`
will be returned.
Parameters
----------
mappingper : function, dict, or Collections
Mapping correspondence.
Returns
-------
monkey.Categorical or monkey.Index
Mapped categorical.
See Also
--------
CategoricalIndex.mapping : Apply a mappingping correspondence on a
:class:`~monkey.CategoricalIndex`.
Index.mapping : Apply a mappingping correspondence on an
:class:`~monkey.Index`.
Collections.mapping : Apply a mappingping correspondence on a
:class:`~monkey.Collections`.
Collections.employ : Apply more complex functions on a
:class:`~monkey.Collections`.
Examples
--------
>>> cat = mk.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.mapping(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.mapping({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mappingping is one-to-one the ordering of the categories is
preserved:
>>> cat = mk.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.mapping({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mappingping is not one-to-one an :class:`~monkey.Index` is returned:
>>> cat.mapping({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, total_all unmappingped categories are mappingped to `NaN` and
the result is an :class:`~monkey.Index`:
>>> cat.mapping({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.mapping(mappingper)
try:
return self.from_codes(
self._codes.clone(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.whatever(self._codes == -1):
new_categories = new_categories.insert(length(new_categories), np.nan)
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
# for Collections/ndarray like compat
@property
def shape(self):
"""
Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([length(self._codes)])
def shifting(self, periods, fill_value=None):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shiftinged : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make whatever sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
fill_value = self._validate_fill_value(fill_value)
codes = shifting(codes.clone(), periods, axis=0, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def _validate_fill_value(self, fill_value):
"""
Convert a user-facing fill_value to a representation to use with our
underlying ndarray, raincontaing ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : int
Raises
------
ValueError
"""
if ifna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.getting_loc(fill_value)
else:
raise ValueError(
f"'fill_value={fill_value}' is not present "
"in this Categorical's categories"
)
return fill_value
def __array__(self, dtype=None) -> np.ndarray:
"""
The numpy array interface.
Returns
-------
numpy.array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ getting's total_all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# for total_all other cases, raise for now (similarly as what happens in
# Collections.__array_prepare__)
raise TypeError(
f"Object with dtype {self.dtype} cannot perform "
f"the numpy op {ufunc.__name__}"
)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not incontainstance(state, dict):
raise Exception("invalid pickle state")
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
for k, v in state.items():
setattr(self, k, v)
@property
def T(self) -> "Categorical":
"""
Return transposed numpy array.
"""
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory contotal_sumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory contotal_sumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
@doc(_shared_docs["searchsorted"], klass="Categorical")
def searchsorted(self, value, side="left", sorter=None):
# searchsorted is very performance sensitive. By converting codes
# to same dtype as self.codes, we getting much faster performance.
if is_scalar(value):
codes = self.categories.getting_loc(value)
codes = self.codes.dtype.type(codes)
else:
locs = [self.categories.getting_loc(x) for x in value]
codes = np.array(locs, dtype=self.codes.dtype)
return self.codes.searchsorted(codes, side=side, sorter=sorter)
def ifna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See Also
--------
ifna : Top-level ifna.
ifnull : Alias of ifna.
Categorical.notna : Boolean inverse of Categorical.ifna.
"""
ret = self._codes == -1
return ret
ifnull = ifna
def notna(self):
"""
Inverse of ifna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See Also
--------
notna : Top-level notna.
notnull : Alias of notna.
Categorical.ifna : Boolean inverse of Categorical.notna.
"""
return ~self.ifna()
notnull = notna
def sipna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def counts_value_num(self, sipna=True):
"""
Return a Collections containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
sipna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Collections
See Also
--------
Collections.counts_value_num
"""
from monkey import Collections, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = length(cat), 0 <= code
ix, clean = np.arange(ncat), mask.total_all()
if sipna or clean:
obs = code if clean else code[mask]
count = np.bincount(obs, getting_minlengthgth=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
ix = np.adding(ix, -1)
ix = self._constructor(ix, dtype=self.dtype, fastpath=True)
return Collections(count, index=CategoricalIndex(ix), dtype="int64")
def _internal_getting_values(self):
"""
Return the values.
For internal compatibility with monkey formatingting.
Returns
-------
np.ndarray or Index
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
# if we are a datetime and period index, return Index to keep metadata
if needs_i8_conversion(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.totype("object").take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError(
f"Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n"
)
def _values_for_argsort(self):
return self._codes
def argsort(self, ascending=True, kind="quicksort", **kwargs):
"""
Return the indices that would sort the Categorical.
.. versionchanged:: 0.25.0
Changed to sort missing values at the end.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'unionersort', 'heapsort'}, optional
Sorting algorithm.
**kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
numpy.array
See Also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping togettingher
based on matching category values. Thus, this function can be
ctotal_alled on an unordered Categorical instance unlike the functions
'Categorical.getting_min' and 'Categorical.getting_max'.
Examples
--------
>>> mk.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = mk.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Missing values are placed at the end
>>> cat = mk.Categorical([2, None, 1])
>>> cat.argsort()
array([2, 0, 1])
"""
return super().argsort(ascending=ascending, kind=kind, **kwargs)
def sort_the_values(self, inplace=False, ascending=True, na_position="final_item"):
"""
Sort the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping togettingher based on
matching category values. Thus, this function can be ctotal_alled on an
unordered Categorical instance unlike the functions 'Categorical.getting_min'
and 'Categorical.getting_max'.
Parameters
----------
inplace : bool, default False
Do operation in place.
ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'final_item'} (optional, default='final_item')
'first' puts NaNs at the beginning
'final_item' puts NaNs at the end
Returns
-------
Categorical or None
See Also
--------
Categorical.sort
Collections.sort_the_values
Examples
--------
>>> c = mk.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_the_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_the_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_the_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = mk.Categorical([1, 2, 2, 1, 5])
'sort_the_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = mk.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2, 2, NaN, 5]
Categories (2, int64): [2, 5]
>>> c.sort_the_values()
[2, 2, 5, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_the_values(ascending=False)
[5, 2, 2, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_the_values(na_position='first')
[NaN, NaN, 2, 2, 5]
Categories (2, int64): [2, 5]
>>> c.sort_the_values(ascending=False, na_position='first')
[NaN, NaN, 5, 2, 2]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if na_position not in ["final_item", "first"]:
raise ValueError(f"invalid na_position: {repr(na_position)}")
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
if inplace:
self._codes = self._codes[sorted_idx]
else:
return self._constructor(
values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True
)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy.array
"""
from monkey import Collections
if self.ordered:
values = self.codes
mask = values == -1
if mask.whatever():
values = values.totype("float64")
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.renagetting_ming_categories(Collections(self.categories).rank().values)
)
return values
def view(self, dtype=None):
if dtype is not None:
raise NotImplementedError(dtype)
return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True)
def to_dense(self):
"""
Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
warn(
"Categorical.to_dense is deprecated and will be removed in "
"a future version. Use np.asarray(cat) instead.",
FutureWarning,
stacklevel=2,
)
return np.asarray(self)
def fillnone(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Collections
If a scalar value is passed it is used to fill total_all missing values.
Alternatively, a Collections or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexinged Collections
pad / ffill: propagate final_item valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the getting_maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partitotal_ally filled. If method is not specified, this is the
getting_maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillnone_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError(
"specifying a limit for fillnone has not been implemented yet"
)
codes = self._codes
# pad / bfill
if method is not None:
# TODO: dispatch when self.categories is EA-dtype
values = np.asarray(self).reshape(-1, length(self))
values = interpolate_2d(values, method, 0, None, value).totype(
self.categories.dtype
)[0]
codes = _getting_codes_for_values(values, self.categories)
else:
# If value is a dict or a Collections (a dict value has already
# been converted to a Collections)
if incontainstance(value, (np.ndarray, Categorical, ABCCollections)):
# We getting ndarray or Categorical if ctotal_alled via Collections.fillnone,
# where it will unwrap another aligned Collections before gettingting here
mask = ~algorithms.incontain(value, self.categories)
if not ifna(value[mask]).total_all():
raise ValueError("fill value must be in categories")
values_codes = _getting_codes_for_values(value, self.categories)
indexer = np.where(codes == -1)
codes = codes.clone()
codes[indexer] = values_codes[indexer]
# If value is not a dict or Collections it should be a scalar
elif is_hashable(value):
if not ifna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.whatever():
codes = codes.clone()
if ifna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.getting_loc(value)
else:
raise TypeError(
f"'value' parameter must be a scalar, dict "
f"or Collections, but you passed a {type(value).__name__}"
)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
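# Illustrative usage sketch (added for clarity, not part of the original source):
# the fill value must already be one of the categories, as enforced above.
# >>> cat = mk.Categorical(["a", np.nan, "b"], categories=["a", "b"])
# >>> cat.fillnone("a")
# [a, a, b]
# Categories (2, object): [a, b]
# >>> cat.fillnone("z")
# ValueError: fill value must be in categories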
def take(self, indexer, total_allow_fill: bool = False, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of int
The indices in `self` to take. The averageing of negative values in
`indexer` depends on the value of `total_allow_fill`.
total_allow_fill : bool, default False
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values.
These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 1.0.0
Default value changed from ``True`` to ``False``.
fill_value : object
The value to use for `indices` that are missing (-1), when
``total_allow_fill=True``. This should be the category, i.e. a value
in ``self.categories``, not a code.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
See Also
--------
Collections.take : Similar method for Collections.
numpy.ndarray.take : Similar method for NumPy arrays.
Examples
--------
>>> cat = mk.Categorical(['a', 'a', 'b'])
>>> cat
[a, a, b]
Categories (2, object): [a, b]
Specify ``total_allow_fill==False`` to have negative indices average indexing
from the right.
>>> cat.take([0, -1, -2], total_allow_fill=False)
[a, b, a]
Categories (2, object): [a, b]
With ``total_allow_fill=True``, indices equal to ``-1`` average "missing"
values that should be filled with the `fill_value`, which is
``np.nan`` by default.
>>> cat.take([0, -1, -1], total_allow_fill=True)
[a, NaN, NaN]
Categories (2, object): [a, b]
The fill value can be specified.
>>> cat.take([0, -1, -1], total_allow_fill=True, fill_value='a')
[a, a, a]
Categories (2, object): [a, b]
Specifying a fill value that's not in ``self.categories``
will raise a ``TypeError``.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if total_allow_fill:
# convert user-provided `fill_value` to codes
fill_value = self._validate_fill_value(fill_value)
codes = take(self._codes, indexer, total_allow_fill=total_allow_fill, fill_value=fill_value)
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, total_allow_fill: bool = False, fill_value=None):
# GH#27745 deprecate alias that other EAs dont have
warn(
"Categorical.take_nd is deprecated, use Categorical.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(indexer, total_allow_fill=total_allow_fill, fill_value=fill_value)
def __length__(self) -> int:
"""
The lengthgth of this Categorical.
"""
return length(self._codes)
def __iter__(self):
"""
Returns an Iterator over the values of this Categorical.
"""
return iter(self._internal_getting_values().convert_list())
def __contains__(self, key) -> bool:
"""
Returns True if `key` is in this Categorical.
"""
# if key is a NaN, check if whatever NaN is in self.
if is_scalar(key) and ifna(key):
return self.ifna().whatever()
return contains(self, key, container=self._codes)
def _tidy_repr(self, getting_max_vals=10, footer=True) -> str:
"""
a short repr displaying only getting_max_vals and an optional (but default)
footer
"""
num = getting_max_vals // 2
header_num = self[:num]._getting_repr(lengthgth=False, footer=False)
final_item_tail = self[-(getting_max_vals - num) :]._getting_repr(lengthgth=False, footer=False)
result = f"{header_num[:-1]}, ..., {final_item_tail[1:]}"
if footer:
result = f"{result}\n{self._repr_footer()}"
return str(result)
def _repr_categories(self):
"""
return the base repr for the categories
"""
getting_max_categories = (
10
if getting_option("display.getting_max_categories") == 0
else getting_option("display.getting_max_categories")
)
from monkey.io.formatings import formating as fmt
if length(self.categories) > getting_max_categories:
num = getting_max_categories // 2
header_num = fmt.formating_array(self.categories[:num], None)
final_item_tail = fmt.formating_array(self.categories[-num:], None)
category_strs = header_num + ["..."] + final_item_tail
else:
category_strs = fmt.formating_array(self.categories, None)
# Strip total_all leading spaces, which formating_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self) -> str:
"""
Returns a string representation of the footer.
"""
category_strs = self._repr_categories()
dtype = str(self.categories.dtype)
levheader_numer = f"Categories ({length(self.categories)}, {dtype}): "
width, height = getting_tergetting_minal_size()
getting_max_width = getting_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
getting_max_width = 0
levstring = ""
start = True
cur_col_length = length(levheader_numer) # header_numer
sep_length, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if getting_max_width != 0 and cur_col_length + sep_length + length(val) > getting_max_width:
levstring += linesep + (" " * (length(levheader_numer) + 1))
cur_col_length = length(levheader_numer) + 1 # header_numer + a whitespace
elif not start:
levstring += sep
cur_col_length += length(val)
levstring += val
start = False
# replacing " < ... < " with " ... " to save space
return levheader_numer + "[" + levstring.replacing(" < ... < ", " ... ") + "]"
def _repr_footer(self) -> str:
info = self._repr_categories_info()
return f"Length: {length(self)}\n{info}"
def _getting_repr(self, lengthgth=True, na_rep="NaN", footer=True) -> str:
from monkey.io.formatings import formating as fmt
formatingter = fmt.CategoricalFormatter(
self, lengthgth=lengthgth, na_rep=na_rep, footer=footer
)
result = formatingter.convert_string()
return str(result)
def __repr__(self) -> str:
"""
String representation.
"""
_getting_maxlength = 10
if length(self._codes) > _getting_maxlength:
result = self._tidy_repr(_getting_maxlength)
elif length(self._codes) > 0:
result = self._getting_repr(lengthgth=length(self) > _getting_maxlength)
else:
msg = self._getting_repr(lengthgth=False, footer=True).replacing("\n", ", ")
result = f"[], {msg}"
return result
def _maybe_coerce_indexer(self, indexer):
"""
return an indexer coerced to the codes dtype
"""
if incontainstance(indexer, np.ndarray) and indexer.dtype.kind == "i":
indexer = indexer.totype(self._codes.dtype)
return indexer
def __gettingitem__(self, key):
"""
Return an item.
"""
if incontainstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
key = check_array_indexer(self, key)
result = self._codes[key]
if result.ndim > 1:
deprecate_ndim_indexing(result)
return result
return self._constructor(result, dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
"""
Item total_allocatement.
Raises
------
ValueError
If (one or more) value is not in categories or if a total_allocateed
`Categorical` does not have the same categories
"""
value = extract_array(value, extract_numpy=True)
# require identical categories set
if incontainstance(value, Categorical):
if not is_dtype_equal(self, value):
raise ValueError(
"Cannot set a Categorical with another, "
"without identical categories"
)
if not self.categories.equals(value.categories):
new_codes = recode_for_categories(
value.codes, value.categories, self.categories
)
value = Categorical.from_codes(new_codes, dtype=self.dtype)
rvalue = value if is_list_like(value) else [value]
from monkey import Index
to_add = Index(rvalue).difference(self.categories)
# no total_allocatements of values not in categories, but it's always ok to set
# something to np.nan
if length(to_add) and not ifna(to_add).total_all():
raise ValueError(
"Cannot setitem on a Categorical with a new "
"category, set the categories first"
)
# set by position
if incontainstance(key, (int, np.integer)):
pass
# tuple of indexers (knowledgeframe)
elif incontainstance(key, tuple):
# only total_allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if length(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim categorical")
key = key[1]
elif length(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim categorical")
# slicing in Collections or Categorical
elif incontainstance(key, slice):
pass
# else: array of True/False in Collections or Categorical
lindexer = self.categories.getting_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
key = check_array_indexer(self, key)
self._codes[key] = lindexer
def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
>>> c = mk.Categorical(list('aabca'))
>>> c
[a, a, b, c, a]
Categories (3, object): [a, b, c]
>>> c.categories
Index(['a', 'b', 'c'], dtype='object')
>>> c.codes
array([0, 0, 1, 2, 0], dtype=int8)
>>> c._reverse_indexer()
{'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsorting_indexer(
self.codes.totype("int64"), categories.size
)
counts = counts.cumtotal_sum()
_result = (r[start:end] for start, end in zip(counts, counts[1:]))
result = dict(zip(categories, _result))
return result
# reduction ops #
def _reduce(self, name, axis=0, **kwargs):
func = gettingattr(self, name, None)
if func is None:
raise TypeError(f"Categorical cannot perform the operation {name}")
return func(**kwargs)
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def getting_min(self, skipna=True):
"""
The getting_minimum value of the object.
Only ordered `Categoricals` have a getting_minimum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
getting_min : the getting_minimum of this `Categorical`
"""
self.check_for_ordered("getting_min")
if not length(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.total_all():
if skipna and good.whatever():
pointer = self._codes[good].getting_min()
else:
return np.nan
else:
pointer = self._codes.getting_min()
return self.categories[pointer]
@deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna")
def getting_max(self, skipna=True):
"""
The getting_maximum value of the object.
Only ordered `Categoricals` have a getting_maximum!
.. versionchanged:: 1.0.0
Returns an NA value on empty arrays
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
getting_max : the getting_maximum of this `Categorical`
"""
self.check_for_ordered("getting_max")
if not length(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.total_all():
if skipna and good.whatever():
pointer = self._codes[good].getting_max()
else:
return np.nan
else:
pointer = self._codes.getting_max()
return self.categories[pointer]
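# Illustrative usage sketch (added for clarity, not part of the original source):
# getting_min/getting_max are only defined for ordered categoricals and return a category
# value, not a code.
# >>> cat = mk.Categorical(["b", "a", "c"], categories=["a", "b", "c"], ordered=True)
# >>> cat.getting_min(), cat.getting_max()
# ('a', 'c')
# >>> mk.Categorical(["b", "a"]).getting_min()
# TypeError: Categorical is not ordered for operation getting_min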
def mode(self, sipna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
sipna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
codes = self._codes
if sipna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), sipna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
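# Illustrative usage sketch (added for clarity, not part of the original source):
# every tied value is returned, sorted by code.
# >>> mk.Categorical(["a", "b", "b", "a", "c"]).mode()
# [a, b]
# Categories (3, object): [a, b, c]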
def distinctive(self):
"""
Return the ``Categorical`` which ``categories`` and ``codes`` are
distinctive. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
distinctive values : ``Categorical``
See Also
--------
monkey.distinctive
CategoricalIndex.distinctive
Collections.distinctive
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> mk.Categorical(list("baabc")).distinctive()
[b, a, c]
Categories (3, object): [b, a, c]
>>> mk.Categorical(list("baabc"), categories=list("abc")).distinctive()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> mk.Categorical(
... list("baabc"), categories=list("abc"), ordered=True
... ).distinctive()
[b, a, c]
Categories (3, object): [a < b < c]
"""
# unlike np.distinctive, distinctive1d does not sort
distinctive_codes = distinctive1d(self.codes)
cat = self.clone()
# keep nan in codes
cat._codes = distinctive_codes
# exclude nan from indexer for categories
take_codes = distinctive_codes[distinctive_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.totype("int64")
return codes, -1
@classmethod
def _from_factorized(cls, distinctives, original):
return original._constructor(
original.categories.take(distinctives), dtype=original.dtype
)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
bool
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = recode_for_categories(
other.codes, other.categories, self.categories
)
return np.array_equal(self._codes, other_codes)
return False
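# Illustrative usage sketch (added for clarity, not part of the original source):
# equality is value-wise; categoricals with the same values but differently
# ordered categories are re-coded first (see recode_for_categories above).
# >>> mk.Categorical(["a", "b"]).equals(mk.Categorical(["a", "b"]))
# True
# >>> mk.Categorical(["a", "b"]).equals(mk.Categorical(["a", "c"]))
# False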
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
bool
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
"""
Describes this Categorical
Returns
-------
description: `KnowledgeFrame`
A knowledgeframe with frequency and counts by category.
"""
counts = self.counts_value_num(sipna=False)
freqs = counts / float(counts.total_sum())
from monkey.core.reshape.concating import concating
result = concating([counts, freqs], axis=1)
result.columns = ["counts", "freqs"]
result.index.name = "categories"
return result
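# Illustrative usage sketch (added for clarity, not part of the original source;
# column alignment approximate): one row per category, plus a NaN row when
# missing values are present.
# >>> mk.Categorical(["a", "b", "a"]).describe()
#             counts     freqs
# categories
# a                2  0.666667
# b                1  0.333333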
@Substitution(klass="Categorical")
@Appender(_extension_array_shared_docs["repeat"])
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concating_same_type(self, to_concating):
from monkey.core.dtypes.concating import concating_categorical
return concating_categorical(to_concating)
def incontain(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
incontain : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
monkey.Collections.incontain : Equivalengtht method on Collections.
Examples
--------
>>> s = mk.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.incontain(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.incontain('lama')`` will raise an error. Use
a list of one element instead:
>>> s.incontain(['lama'])
array([ True, False, True, False, True, False])
"""
if not is_list_like(values):
values_type = type(values).__name__
raise TypeError(
"only list-like objects are total_allowed to be passed "
f"to incontain(), you passed a [{values_type}]"
)
values = sanitize_array(values, None, None)
null_mask = np.asarray(ifna(values))
code_values = self.categories.getting_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.incontain(self.codes, code_values)  # i.e. pandas.core.algorithms.isin
"""
Module for employing conditional formatingting to KnowledgeFrames and Collections.
"""
from collections import defaultdict
from contextlib import contextmanager
import clone
from functools import partial
from itertools import product
from typing import (
Any,
Ctotal_allable,
DefaultDict,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
)
from uuid import uuid1
import numpy as np
from monkey._config import getting_option
from monkey._libs import lib
from monkey._typing import Axis, FrameOrCollections, FrameOrCollectionsUnion, Label
from monkey.compat._optional import import_optional_dependency
from monkey.util._decorators import Appender
from monkey.core.dtypes.common import is_float
import monkey as mk
from monkey.api.types import is_dict_like, is_list_like
import monkey.core.common as com
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import _shared_docs
from monkey.core.indexing import _maybe_numeric_slice, _non_reducing_slice
jinja2 = import_optional_dependency("jinja2", extra="KnowledgeFrame.style requires jinja2.")
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func: Ctotal_allable):
if has_mpl:
yield plt, colors
else:
raise ImportError(no_mpl_message.formating(func.__name__))
class Styler:
"""
Helps style a KnowledgeFrame or Collections according to the data with HTML and CSS.
Parameters
----------
data : Collections or KnowledgeFrame
Data to be styled - either a Collections or KnowledgeFrame.
precision : int
Precision to value_round floats to, defaults to mk.options.display.precision.
table_styles : list-like, default None
List of {selector: (attr, value)} dicts; see Notes.
uuid : str, default None
A distinctive identifier to avoid CSS collisions; generated automatictotal_ally.
caption : str, default None
Caption to attach to the table.
table_attributes : str, default None
Items that show up in the opening ``<table>`` tag
in addition to automatic (by default) id.
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the distinctive identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
na_rep : str, optional
Representation for missing values.
If ``na_rep`` is None, no special formatingting is applied
.. versionadded:: 1.0.0
Attributes
----------
env : Jinja2 jinja2.Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
KnowledgeFrame.style : Return a Styler object containing methods for building
a styled HTML representation for the KnowledgeFrame.
Notes
-----
Most styling will be done by passing style functions into
``Styler.employ`` or ``Styler.employmapping``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatictotal_ally render itself. Otherwise ctotal_all Styler.render to getting
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_header_numing``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_header_numing``
* ``col<n>`` where `n` is the numeric position of the column
* ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
loader = jinja2.PackageLoader("monkey", "io/formatings/templates")
env = jinja2.Environment(loader=loader, trim_blocks=True)
template = env.getting_template("html.tpl")
def __init__(
self,
data: FrameOrCollectionsUnion,
precision: Optional[int] = None,
table_styles: Optional[List[Dict[str, List[Tuple[str, str]]]]] = None,
uuid: Optional[str] = None,
caption: Optional[str] = None,
table_attributes: Optional[str] = None,
cell_ids: bool = True,
na_rep: Optional[str] = None,
):
self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list)
self._todo: List[Tuple[Ctotal_allable, Tuple, Dict]] = []
if not incontainstance(data, (mk.Collections, mk.KnowledgeFrame)):
raise TypeError("``data`` must be a Collections or KnowledgeFrame")
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_distinctive or not data.columns.is_distinctive:
raise ValueError("style is not supported for non-distinctive indices.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = getting_option("display.precision")
self.precision = precision
self.table_attributes = table_attributes
self.hidden_index = False
self.hidden_columns: Sequence[int] = []
self.cell_ids = cell_ids
self.na_rep = na_rep
# display_funcs mappings (row, col) -> formatingting function
def default_display_func(x):
if self.na_rep is not None and mk.ifna(x):
return self.na_rep
elif is_float(x):
display_formating = f"{x:.{self.precision}f}"
return display_formating
else:
return x
self._display_funcs: DefaultDict[
Tuple[int, int], Ctotal_allable[[Any], str]
] = defaultdict(lambda: default_display_func)
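# Added note (illustrative, not part of the original source): default_display_func
# above is what renders each cell unless a formatter overrides it; for example,
# with self.precision == 2 the float 1.23456 renders as "1.23", with
# na_rep="-" a missing cell renders as "-", and non-float values pass through
# unchanged.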
def _repr_html_(self) -> str:
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
@Appender(
_shared_docs["to_excel"]
% dict(
axes="index, columns",
klass="Styler",
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel="\n .. versionadded:: 0.20",
)
)
def to_excel(
self,
excel_writer,
sheet_name: str = "Sheet1",
na_rep: str = "",
float_formating: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header_numer: Union[Sequence[Label], bool] = True,
index: bool = True,
index_label: Optional[Union[Label, Sequence[Label]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
unioner_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
from monkey.io.formatings.excel import ExcelFormatter
formatingter = ExcelFormatter(
self,
na_rep=na_rep,
cols=columns,
header_numer=header_numer,
float_formating=float_formating,
index=index,
index_label=index_label,
unioner_cells=unioner_cells,
inf_rep=inf_rep,
)
formatingter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def _translate(self):
"""
Convert the KnowledgeFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {header_num, body, uuid, cellstyle}.
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = self.uuid or str(uuid1()).replacing("-", "_")
ROW_HEADING_CLASS = "row_header_numing"
COL_HEADING_CLASS = "col_header_numing"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def formating_attr(pair):
return f"{pair['key']}={pair['value']}"
# for sparsifying a MultiIndex
idx_lengthgths = _getting_level_lengthgths(self.index)
col_lengthgths = _getting_level_lengthgths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.convert_list()
clabels = self.data.columns.convert_list()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle_mapping = defaultdict(list)
header_num = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [
{
"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": not hidden_index,
"class": " ".join([BLANK_CLASS]),
}
] * (n_rlvls - 1)
# ... except maybe the final_item for columns.names
name = self.data.columns.names[r]
cs = [
BLANK_CLASS if name is None else INDEX_NAME_CLASS,
f"level{r}",
]
name = BLANK_VALUE if name is None else name
row_es.adding(
{
"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": not hidden_index,
}
)
if clabels:
for c, value in enumerate(clabels[r]):
cs = [
COL_HEADING_CLASS,
f"level{r}",
f"col{c}",
]
cs.extend(
cell_context.getting("col_header_numings", {}).getting(r, {}).getting(c, [])
)
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengthgths),
}
colspan = col_lengthgths.getting((r, c), 0)
if colspan > 1:
es["attributes"] = [
formating_attr({"key": "colspan", "value": colspan})
]
row_es.adding(es)
header_num.adding(row_es)
if (
self.data.index.names
and com.whatever_not_none(*self.data.index.names)  # i.e. pandas.core.common.any_not_none
#!/usr/bin/env python
# coding: utf-8
# > Note: KNN is a memory-based model, which means it memorizes the patterns rather than generalizing. It is a simple yet powerful technique that competes with SOTA models like BERT4Rec.
# In[1]:
import os
project_name = "reco-tut-itr"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
if not os.path.exists(project_path):
getting_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
getting_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
getting_ipython().system(u'mkdir "{path}"')
getting_ipython().magic(u'cd "{path}"')
import sys; sys.path.adding(path)
getting_ipython().system(u'git config --global user.email "<EMAIL>"')
getting_ipython().system(u'git config --global user.name "reco-tut"')
getting_ipython().system(u'git init')
getting_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git')
getting_ipython().system(u'git pull origin "{branch}"')
getting_ipython().system(u'git checkout main')
else:
getting_ipython().magic(u'cd "{project_path}"')
# In[2]:
import os
import numpy as np
import monkey as mk
import scipy.sparse
from scipy.spatial.distance import correlation
# In[13]:
kf = mk.read_parquet('./data/silver/rating.parquet.gz')
kf.info()
# In[16]:
kf2 = mk.read_parquet('./data/silver/items.parquet.gz')
kf2.info()
# In[17]:
kf = mk.unioner(kf, kf2, on='itemId')
kf.info()
# In[5]:
rating_matrix = mk.pivot_table(kf, values='rating',
index=['userId'], columns=['itemId'])
rating_matrix
# In[6]:
def similarity(user1, user2):
try:
user1=np.array(user1)-np.nanaverage(user1)
user2=np.array(user2)-np.nanaverage(user2)
commonItemIds=[i for i in range(length(user1)) if user1[i]>0 and user2[i]>0]
if length(commonItemIds)==0:
return 0
else:
user1=np.array([user1[i] for i in commonItemIds])
user2=np.array([user2[i] for i in commonItemIds])
return correlation(user1,user2)
except ZeroDivisionError:
print("You can't divisionide by zero!")
# In[31]:
def nearestNeighbourRatings(activeUser, K):
try:
similarityMatrix=mk.KnowledgeFrame(index=rating_matrix.index,columns=['Similarity'])
for i in rating_matrix.index:
similarityMatrix.loc[i]=similarity(rating_matrix.loc[activeUser],rating_matrix.loc[i])
similarityMatrix=mk.KnowledgeFrame.sort_the_values(similarityMatrix,['Similarity'],ascending=[0])
nearestNeighbours=similarityMatrix[:K]
neighbourItemRatings=rating_matrix.loc[nearestNeighbours.index]
predictItemRating=mk.KnowledgeFrame(index=rating_matrix.columns, columns=['Rating'])
for i in rating_matrix.columns:
predictedRating=np.nanaverage(rating_matrix.loc[activeUser])
for j in neighbourItemRatings.index:
if rating_matrix.loc[j,i]>0:
predictedRating += (rating_matrix.loc[j,i]-np.nanaverage(rating_matrix.loc[j]))*nearestNeighbours.loc[j,'Similarity']
predictItemRating.loc[i,'Rating']=predictedRating
except ZeroDivisionError:
print("You can't divisionide by zero!")
return predictItemRating
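# Added note (not in the original notebook): the loop above follows the classic
# user-based collaborative-filtering prediction
# r_hat(a, i) = mean(r_a) + sum_k sim(a, k) * (r(k, i) - mean(r_k)),
# computed here without normalising by the sum of the neighbour similarities.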
# In[36]:
def topNRecommendations(activeUser, N):
try:
predictItemRating = nearestNeighbourRatings(activeUser,N)
placeAlreadyWatched = list(rating_matrix.loc[activeUser].loc[rating_matrix.loc[activeUser]>0].index)
predictItemRating = predictItemRating.sip(placeAlreadyWatched)
topRecommendations = mk.KnowledgeFrame.sort_the_values(predictItemRating, ['Rating'], ascending=[0])  # i.e. pandas.DataFrame.sort_values
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# formating_name: light
# formating_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_market_prediction_regression [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_market_prediction_regression&codeLang=Python)
# For definal_item_tails, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_market_prediction_regression).
# +
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import seaborn as sns
from arpym.estimation import conditional_fp, cov_2_corr, exp_decay_fp, fit_lfm_lasso,\
fit_lfm_mlfp, fit_lfm_ols, fit_lfm_ridge, fit_lfm_roblasso
from arpym.statistics import averagecov_sp, multi_r2, scoring, smoothing
from arpym.tools import plot_ellipse
from arpym.tools.logo import add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_market_prediction_regression-parameters)
tau_hl_pri = 13*252 # half life for VIX comp. ret. time conditioning
tau_hl_smooth = 2*21 # half life for VIX comp. ret. smoothing
tau_hl_score = 2*21 # half life for VIX comp. ret. scoring
alpha_leeway = 0.6 # probability included in the range centered in z_vix_star
n_plot = 30 # number of stocks to show in plot
nu = 4 # robustness parameter
pri_param_load = 1.5 # the prior parameters in Bayes are = pri_param_load*t_
lambda_lasso = 10**-5 # lasso penalty
lambda_ridge = 10**-6 # ridge penalty
lambda_beta = 10**-5 # lasso penalty in mixed approach
lambda_phi = 4*10**-5 # glasso penalty in mixed approach
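# Added note (not in the original script): a half-life of tau_hl observations
# corresponds to an exponential-decay rate of log(2) / tau_hl, i.e. an
# observation's conditioning weight halves every tau_hl steps; for example
# tau_hl_smooth = 2*21 = 42 days, roughly two trading months.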
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_market_prediction_regression-implementation-step00): Load data
# +
path_glob = '../../../databases/global-databases/'
equities_path = path_glob + 'equities/db_stocks_SP500/'
# Stocks
db_stocks_sp = mk.read_csv(equities_path + 'db_stocks_sp.csv',
header_numer=1,
index_col=0, parse_dates=True)
stocks_names = list(db_stocks_sp.columns)
stocks_sectors = mk.read_csv(equities_path + 'db_stocks_sp.csv', header_numer=None,
index_col=0).loc['sector'].convert_list()
# Sectors
sector_names = ['dates', 'Contotal_sumerDiscretionary', 'Contotal_sumerStaples', 'Energy',
'Financials', 'HealthCare', 'InformatingionTechnology',
'Industrials', 'Materials', 'TelecommunicationServices',
'Utilities']
db_sector_idx = mk.read_csv(equities_path+'db_sector_idx.csv', index_col=0,
usecols=sector_names,
parse_dates=True)
sector_names = sector_names[1:]
# VIX (used for time-state conditioning)
vix_path = path_glob + 'derivatives/db_vix/data.csv'
db_vix = mk.read_csv(vix_path, usecols=['date', 'VIX_close'],
index_col=0, parse_dates=True)
# intersect dates
dates_rd = mk.DatetimeIndex.interst(db_stocks_sp.index,
db_sector_idx.index)
dates_rd = mk.DatetimeIndex.interst(dates_rd, db_vix.index)  # i.e. pandas.DatetimeIndex.intersection
""":func:`~monkey.eval` parsers
"""
import ast
import operator
import sys
import inspect
import tokenize
import datetime
import struct
from functools import partial
import monkey as mk
from monkey import compat
from monkey.compat import StringIO, zip, reduce, string_types
from monkey.core.base import StringMixin
from monkey.core import common as com
from monkey.computation.common import NameResolutionError
from monkey.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
_arith_ops_syms, _unary_ops_syms, is_term)
from monkey.computation.ops import _reductions, _mathops, _LOCAL_TAG
from monkey.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
from monkey.computation.ops import UndefinedVariableError
def _ensure_scope(level=2, global_dict=None, local_dict=None, resolvers=None,
targetting=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(gbls=global_dict, lcls=local_dict, level=level,
resolvers=resolvers, targetting=targetting)
def _check_disjoint_resolver_names(resolver_keys, local_keys, global_keys):
"""Make sure that variables in resolvers don't overlap with locals or
globals.
"""
res_locals = list(com.interst(resolver_keys, local_keys))
if res_locals:
msg = "resolvers and locals overlap on names {0}".formating(res_locals)
raise NameResolutionError(msg)
res_globals = list(com.interst(resolver_keys, global_keys))  # i.e. pandas.core.common.intersection
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
#from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfikfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import monkey as mk
import numpy as np
from ..base import classifier
from ..utils import convert_to_numpy_ndarray, convert_to_list
from sklearn.utils import check_X_y
from scipy.sparse import csr
class Multinomial_NB_classifier_from_scratch(classifier):
# reference: https://geoffruddock.com/naive-bayes-from-scratch-with-numpy/
# reference: http://kenzotakahashi.github.io/naive-bayes-from-scratch-in-python.html
def __init__(self, alpha=1.0, verbose=False):
super().__init__()
self.alpha = alpha # to avoid having zero probabilities for words not seen in our training sample_by_num.
self.y_classes = None # e.g., spam vs. no spam
self.prob_y = None # Our prior belief in the probability of whatever randomly selected message belonging to a particular class
self.prob_x_i_given_y = None # The likelihood of each word, conditional on message class.
self.is_fitted = False
self.verbose = verbose
def fit(self, X_train: np.ndarray, y_train: np.ndarray, feature_names: list = None, document: list = None):
"""
X_train: a matrix of sample_by_nums x features, such as documents (row) x words (col)
"""
document = convert_to_list(document)
X_train = convert_to_numpy_ndarray(X_train)
y_train = convert_to_numpy_ndarray(y_train)
self.X_train, self.y_train = check_X_y(X_train, y_train)
n_sample_by_nums, n_features = X_train.shape
if feature_names is None:
self.feature_names = [f"word_{i}" for i in range(1,n_features+1)]
else:
self.feature_names = feature_names
self.y_classes = np.distinctive(y_train)
self.classes_ = self.y_classes
columns = [f"y={c}" for c in self.y_classes]
self.y_mappingper = {}
for idx, y_class in enumerate(self.y_classes):
self.y_mappingper[idx] = f"class_idx[{idx}]=[{y_class}]"
X_train_by_y_class = np.array([X_train[y_train == this_y_class] for this_y_class in self.y_classes], dtype=object)
self.prob_y = np.array([X_train_for_this_y_class.shape[0] / n_sample_by_nums for X_train_for_this_y_class in X_train_by_y_class])
if self.verbose:
print(f"\n------------------------------------------ fit() ------------------------------------------")
print(f"\nStep 1. the input:\n{mk.concating([mk.KnowledgeFrame(document,columns=['X_message_j',]),mk.Collections(y_train,name='y')],axis=1).convert_string(index=False)}")
print(f"\nStep 2. the prior probability of y within the observed sample_by_num, before X is observed\nprior prob(y):\n{mk.KnowledgeFrame(self.prob_y.reshape(1,-1), columns=columns).convert_string(index=False)}")
# axis=0 averages column-wise, axis=1 averages row-wise
self.X_train_colSum_by_y_class = np.array([ X_train_for_this_y_class.total_sum(axis=0) for X_train_for_this_y_class in X_train_by_y_class ]) + self.alpha
self.prob_x_i_given_y = self.X_train_colSum_by_y_class / self.X_train_colSum_by_y_class.total_sum(axis=1).reshape(-1,1)
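# Added note: the two lines above implement Laplace (additive) smoothing,
# P(word_i | y) = (count(word_i, y) + alpha) / (sum_j count(word_j, y) + alpha * n_features),
# so no word ever gets exactly zero probability for a class.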
if self.verbose:
print(f"\nStep 3. prob(word_i|y):\ncolSum should be 1\n{mk.concating([ mk.KnowledgeFrame(feature_names, columns=['word_i',]), mk.KnowledgeFrame(self.prob_x_i_given_y.T, columns = columns)], axis=1).convert_string(index=False)}")
assert (self.prob_x_i_given_y.T.total_sum(axis=0) - np.ones((1, length(self.y_classes))) < 1e-9).total_all(), "*** Error *** prob(word_i|y) colSum should be 1"
self.is_fitted = True
if self.verbose:
self.predict_proba(X_test = self.X_train, document = document)
return self
def predict_proba(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
"""
p(y|X) = p(X|y)*p(y)/p(X)
p(X|y) = p(x_1|y) * p(x_2|y) * ... * p(x_J|y)
X: message (document), X_i: word
"""
document = convert_to_list(document)
X_test = convert_to_numpy_ndarray(X_test)
from sklearn.utils import check_array
self.X_test = check_array(X_test)
assert self.is_fitted, "model should be fitted first before predicting"
# to figure out prob(X|y)
self.prob_X_given_y = np.zeros(shape=(X_test.shape[0], self.prob_y.shape[0]))
# loop over each row to calcuate the posterior probability
for row_index, this_x_sample_by_num in enumerate(X_test):
feature_presence_columns = this_x_sample_by_num.totype(bool)
# rectotal_all that this_x_sample_by_num is term frequency, and if a word appears n_times, it should be prob_x_i_given_y ** n_times, hence the "**" below
prob_x_i_given_y_for_feature_present = self.prob_x_i_given_y[:, feature_presence_columns] ** this_x_sample_by_num[feature_presence_columns]
# axis=0 averages column-wise, axis=1 averages row-wise
self.prob_X_given_y[row_index] = (prob_x_i_given_y_for_feature_present).prod(axis=1)
columns = [f"y={c}" for c in self.y_classes]
self.prob_joint_X_and_y = self.prob_X_given_y * self.prob_y
self.prob_X = self.prob_joint_X_and_y.total_sum(axis=1).reshape(-1, 1) # rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message
# normalization
self.prob_y_given_X = self.prob_joint_X_and_y / self.prob_X # the posterior probability of y, after X is observed
assert (self.prob_y_given_X.total_sum(axis=1)-1 < 1e-9).total_all(), "***Error*** each row should total_sum to 1"
if self.verbose:
print(f"\n------------------------------------------ predict_proba() ------------------------------------------")
if length(self.feature_names) <= 10:
print(f"\nStep 1. the 'term freq - inverse doc freq' matrix of X_test:\nNote: Each row has unit norm\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(X_test, columns = self.feature_names)], axis=1).convert_string(index=False)}")
print(f"\nStep 2. prob(X_message|y) = prob(word_1|y) * prob(word_2|y) * ... * prob(word_J|y):\nNote: colSum may not = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_X_given_y, columns=columns)], axis=1).convert_string(index=False)}")
print(f"\nStep 3. prob(X_message ∩ y) = prob(X_message|y) * prob(y):\nNote: rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_joint_X_and_y,columns=columns)],axis=1).convert_string(index=False)}")
print(f"\nStep 4. prob(X_message), across total_all y_classes within the observed sample_by_num:\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_X,columns=['prob',])], axis=1).convert_string(index=False)}")
print(f"\nStep 5. the posterior prob of y after X is observed:\nprob(y|X_message) = p(X_message|y) * p(y) / p(X_message):\nNote: rowSum = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_y_given_X, columns=columns),mk.Collections(self.prob_y_given_X.arggetting_max(axis=1),name='predict').mapping(self.y_mappingper)],axis=1).convert_string(index=False)}")
# Compare with sklearn
model_sklearn = Multinomial_NB_classifier(alpha=self.alpha, class_prior=self.prob_y)
model_sklearn.fit(self.X_train, self.y_train)
prob_y_given_X_test_via_sklearn = model_sklearn.predict_proba(X_test)
assert (prob_y_given_X_test_via_sklearn - self.prob_y_given_X < 1e-9).total_all(), "*** Error *** different results via sklearn and from scratch"
self.y_pred_score = self.prob_y_given_X
return self.prob_y_given_X
def predict(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
""" Predict class with highest probability """
document = convert_to_list(document)
return self.predict_proba(X_test, document = document).arggetting_max(axis=1)
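# Illustrative usage sketch (added for clarity, not part of the original source;
# the documents and labels are hypothetical and the expected output is approximate):
# >>> docs = ["win cash now", "meeting at noon", "cash prize win"]
# >>> y = np.array([1, 0, 1])
# >>> X = CountVectorizer().fit_transform(docs).toarray()   # CountVectorizer imported above
# >>> model = Multinomial_NB_classifier_from_scratch(alpha=1.0)
# >>> model.fit(X, y, document=docs).predict(X, document=docs)
# array([1, 0, 1])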
def show_model_attributes(self, fitted_tfikf_vectorizer, y_classes, top_n=10):
assert self.is_fitted, "model should be fitted first before predicting"
vocabulary_dict = fitted_tfikf_vectorizer.vocabulary_
terms = list(vocabulary_dict.keys())
X_test = fitted_tfikf_vectorizer.transform(terms)
verbose_old = self.verbose
self.verbose = False
for i, y_class in enumerate(y_classes):
term_proba_kf = mk.KnowledgeFrame({'term': terms, 'proba': self.predict_proba(X_test=X_test,document=terms)[:, i]})
term_proba_kf = term_proba_kf.sort_the_values(by=['proba'], ascending=False)
top_n = top_n
kf = mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)
print(f"\nThe top {top_n} terms with highest probability of a document = {y_class}:")
for term, proba in zip(kf['term'], kf['proba']):
print(f" \"{term}\": {proba:4.2%}")
self.verbose = verbose_old
def evaluate_model(self, X_test: np.ndarray, y_test: np.ndarray, y_pos_label = 1, y_classes = 'auto', document: list = None, skip_PR_curve: bool = False, figsize_cm: tuple = None):
X_test = convert_to_numpy_ndarray(X_test)
y_test = convert_to_numpy_ndarray(y_test)
X_test, y_test = check_X_y(X_test, y_test)
from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves
model_name = 'Multinomial NB from scratch'
y_pred = self.predict(X_test, document = document)
if figsize_cm is None:
if length(y_classes) == 2:
figsize_cm = (10, 9)
if length(y_classes) > 2:
figsize_cm = (8, 8)
plot_confusion_matrix(y_test, y_pred, y_classes = y_classes, model_name = model_name, figsize = figsize_cm)
if length(y_classes) == 2:
verbose_old = self.verbose
self.verbose = False
plot_ROC_and_PR_curves(fitted_model=self, X=X_test, y_true=y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label=y_pos_label, model_name=model_name, skip_PR_curve = skip_PR_curve, figsize=(8,8))
self.verbose = verbose_old
#class naive_bayes_Bernoulli(BernoulliNB):
# """
# This class is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
# """
# def __init__(self, *, alpha=1.0, binarize=0.0, fit_prior=True, class_prior=None):
# super().__init__(alpha=alpha, binarize=binarize, fit_prior=fit_prior, class_prior=class_prior)
#class naive_bayes_multinomial(MultinomialNB):
# """
# This class is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
# """
# # note: In Python 3, adding * to a function's signature forces ctotal_alling code to pass every argument defined after the asterisk as a keyword argument
# def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None):
# super().__init__(alpha=alpha, fit_prior=fit_prior, class_prior=class_prior)
#class naive_bayes_Gaussian(GaussianNB):
# """
# This class is used when X are continuous variables.
# """
# def __init__(self, *, priors=None, var_smoothing=1e-09):
# super().__init__(priors=priors, var_smoothing=var_smoothing)
def Bernoulli_NB_classifier(*args, **kwargs):
"""
This function is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
"""
return BernoulliNB(*args, **kwargs)
def Multinomial_NB_classifier(*args, **kwargs):
"""
This function is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
"""
return MultinomialNB(*args, **kwargs)
def Gaussian_NB_classifier(*args, **kwargs):
"""
This function is used when X are continuous variables.
"""
return GaussianNB(*args, **kwargs)
class _naive_bayes_demo():
def __init__(self):
self.X = None
self.y = None
self.y_classes = None
self.test_size = 0.25
self.classifier_grid = None
self.random_state = 123
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.y_pred = None
self.y_pred_score = None
def build_naive_bayes_Gaussian_pipeline(self):
# create pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
pipeline = Pipeline(steps=[('scaler',
StandardScaler(with_average=True, with_standard=True)),
('classifier',
Gaussian_NB_classifier()),
])
# pipeline parameters to tune
hyperparameters = {
'scaler__with_average': [True],
'scaler__with_standard': [True],
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a Gaussian naive bayes pipeline, while tuning hyperparameters...\n")
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a Gaussian naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: scaler: StandardScaler(with_average={repr(self.classifier_grid.best_params_['scaler__with_average'])}, with_standard={repr(self.classifier_grid.best_params_['scaler__with_standard'])}).\n")
def _lemmas(self, X):
words = TextBlob(str(X).lower()).words
return [word.lemma for word in words]
def _tokens(self, X):
return TextBlob(str(X)).words
def build_naive_bayes_multinomial_pipeline(self):
# create pipeline
pipeline = Pipeline(steps=[('count_matrix_transformer',
CountVectorizer(ngram_range=(1, 1), analyzer=self._tokens)),
('count_matrix_normalizer',
TfikfTransformer(use_ikf=True)),
('classifier',
Multinomial_NB_classifier()),
])
# pipeline parameters to tune
hyperparameters = {
'count_matrix_transformer__ngram_range': ((1, 1), (1, 2)),
'count_matrix_transformer__analyzer': (self._tokens, self._lemmas), # 'word',
'count_matrix_normalizer__use_ikf': (True, False),
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a multinomial naive bayes pipeline, while tuning hyperparameters...\n")
#import nltk
#nltk.download('punkt', quiet=True)
#nltk.download('wordnet', quiet=True)
#from ..datasets import public_dataset
#import os
#os.environ["NLTK_DATA"] = public_dataset("nltk_data_path")
# see also: https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
# count_vect.fit_transform() in training vs. count_vect.transform() in testing
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a multinomial naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: Tokenizing text: CountVectorizer(ngram_range = {repr(self.classifier_grid.best_params_['count_matrix_transformer__ngram_range'])}, analyzer = {repr(self.classifier_grid.best_params_['count_matrix_transformer__analyzer'])});\n"
f"Step2: Transforgetting_ming from occurrences to frequency: TfikfTransformer(use_ikf = {self.classifier_grid.best_params_['count_matrix_normalizer__use_ikf']}).\n")
class _naive_bayes_demo_SMS_spam(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = ('ham (y=0)', 'spam (y=1)')
def gettingdata(self):
from ..datasets import public_dataset
data = public_dataset(name='SMS_spam')
n_spam = data.loc[data.label == 'spam', 'label'].count()
n_ham = data.loc[data.label == 'ham', 'label'].count()
print(
f"---------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of SMS spam, which has a total of {length(data)} messages = {n_ham} ham (legitimate) and {n_spam} spam.\n"
f"The goal is to use 'term frequency in message' to predict whether a message is ham (class=0) or spam (class=1).\n")
self.X = data['message']
self.y = data['label']
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.X, self.y, test_size=self.test_size, random_state=self.random_state)
def show_model_attributes(self):
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
term_proba_kf = mk.KnowledgeFrame({'term': list(
vocabulary_dict), 'proba_spam': self.classifier_grid.predict_proba(vocabulary_dict)[:, 1]})
term_proba_kf = term_proba_kf.sort_the_values(
by=['proba_spam'], ascending=False)
top_n = 10
kf = mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)
print(
f"The top {top_n} terms with highest probability of a message being a spam (the classification is either spam or ham):")
for term, proba_spam in zip(kf['term'], kf['proba_spam']):
print(f" \"{term}\": {proba_spam:4.2%}")
def evaluate_model(self):
self.y_pred = self.classifier_grid.predict(self.X_test)
self.y_pred_score = self.classifier_grid.predict_proba(self.X_test)
from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves
plot_confusion_matrix(y_true=self.y_test, y_pred=self.y_pred,
y_classes=self.y_classes)
plot_ROC_and_PR_curves(fitted_model=self.classifier_grid, X=self.X_test,
y_true=self.y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label='spam', model_name='Multinomial NB')
def application(self):
custom_message = "URGENT! We are trying to contact U. Todays draw shows that you have won a 2000 prize GUARANTEED. Ctotal_all 090 5809 4507 from a landline. Claim 3030. Valid 12hrs only."
custom_results = self.classifier_grid.predict([custom_message])[0]
print(
f"\nApplication example:\n- Message: \"{custom_message}\"\n- Probability of spam (class=1): {self.classifier_grid.predict_proba([custom_message])[0][1]:.2%}\n- Classification: {custom_results}\n")
def run(self):
"""
This function provides a demo of selected functions in this module using the SMS spam dataset.
Required arguments:
None
"""
# Get data
self.gettingdata()
# Create and train a pipeline
self.build_naive_bayes_multinomial_pipeline()
# model attributes
self.show_model_attributes()
# model evaluation
self.evaluate_model()
# application example
self.application()
# return classifier_grid
# return self.classifier_grid
# import numpy as np
# from sklearn.utils import shuffle
# True Positive
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# False Negative
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# False Positive
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_sample_by_nums=1, random_state=1234)[0] ] ]]
# True Negative
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_sample_by_nums=1, random_state=123)[0] ] ]]
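# --- Illustrative sketch (not part of the original class): the commented-out
# block above samples one random true/false positive/negative; the same idea
# with plain numpy/pandas, assuming hypothetical arrays y_true, y_pred and a
# pandas Series of messages (real pandas, which this file aliases as "monkey as mk").
import numpy as np
import pandas as pd
def sample_misclassified(messages: pd.Series, y_true, y_pred, true_label, pred_label, seed=1234):
    """Return one random message with the given true label and predicted label."""
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    mask = (y_true == true_label) & (y_pred == pred_label)
    matching = messages.index[mask]
    if len(matching) == 0:
        return None
    rng = np.random.default_rng(seed)
    return messages.loc[rng.choice(matching)]
# e.g. a false positive: sample_misclassified(X_test, y_test, y_pred, 'ham', 'spam')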
class _naive_bayes_demo_20newsgroups(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = sorted(
['soc.religion.christian', 'comp.graphics', 'sci.med'])
def gettingdata(self):
print(
f"-------------------------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of 20newsgroup and uses {length(self.y_classes)} categories of them: {repr(self.y_classes)}.\n"
f"The goal is to use 'term frequency in document' to predict which category a document belongs to.\n")
from sklearn.datasets import fetch_20newsgroups
from ..datasets import public_dataset
twenty_train = fetch_20newsgroups( #data_home=public_dataset("scikit_learn_data_path"),
subset='train', categories=self.y_classes, random_state=self.random_state)
twenty_test = fetch_20newsgroups( #data_home=public_dataset("scikit_learn_data_path"),
subset='test', categories=self.y_classes, random_state=self.random_state)
self.X_train = twenty_train.data
self.y_train = twenty_train.targetting
self.X_test = twenty_test.data
self.y_test = twenty_test.targetting
def show_model_attributes(self):
# model attributes
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
for i in range(length(self.y_classes)):
term_proba_kf = mk.KnowledgeFrame({'term': list(
vocabulary_dict), 'proba': self.classifier_grid.predict_proba(vocabulary_dict)[:, i]})
term_proba_kf = term_proba_kf.sort_the_values(
by=['proba'], ascending=False)
top_n = 10
kf = mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)  # api: pandas.DataFrame.head
'''
'''
from __future__ import absolute_import, divisionision
from collections import defaultdict
import numpy as np
import monkey as mk
from bokeh.charts import DEFAULT_PALETTE
from bokeh.core.enums import DashPattern
from bokeh.models.glyphs import Arc, Line, Patches, Rect, Segment
from bokeh.models.renderers import GlyphRenderer
from bokeh.core.properties import Any, Angle, Bool, Color, Datetime, Either, Enum, Float, List, Override, Instance, Int, String
from .data_source import ChartDataSource
from .models import CompositeGlyph
from .properties import Column, EitherColumn
from .stats import BinnedStat, Bins, Histogram, Max, Min, Quantile, Stat, stats, Sum
from .utils import generate_patch_base, label_from_index_dict, marker_types
class NestedCompositeGlyph(CompositeGlyph):
"""A composite glyph that consists of other composite glyphs.
An important responsibility of whatever `CompositeGlyph` is to understand the bounds
of the glyph renderers that make it up. This class is used to provide convenient
properties that return the bounds from the child `CompositeGlyphs`.
"""
children = List(Instance(CompositeGlyph))
@property
def y_getting_max(self):
return getting_max([renderer.y_getting_max for renderer in self.children])
@property
def y_getting_min(self):
return getting_min([renderer.y_getting_min for renderer in self.children])
@property
def x_getting_min(self):
return getting_min([renderer.x_getting_min for renderer in self.children])
@property
def x_getting_max(self):
return getting_max([renderer.x_getting_max for renderer in self.children])
class XyGlyph(CompositeGlyph):
"""Composite glyph that plots in cartesian coordinates."""
x = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))
y = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))
def build_source(self):
labels = self._build_label_array(('x', 'y'), self.label)
str_labels = [str(label) for label in labels]
if self.x is None:
data = dict(x_values=str_labels, y_values=self.y)
elif self.y is None:
data = dict(x_values=self.x, y_values=str_labels)
else:
data = dict(x_values=self.x, y_values=self.y)
return data
def _build_label_array(self, props, value):
for prop in props:
if gettingattr(self, prop) is not None:
return [value] * length(gettingattr(self, prop))
@property
def x_getting_max(self):
# TODO(fpliger): since CompositeGlyphs are not exposed in general we
# should expect to always have a Collections but in case
# it's not we just use the default getting_min/getting_max instead
# of just failing. When/If we end up exposing
# CompositeGlyphs we should consider making this
# more robust (either enforcing data or checking)
try:
return self.source.data['x_values'].getting_max()
except AttributeError:
return getting_max(self.source.data['x_values'])
@property
def x_getting_min(self):
try:
return self.source.data['x_values'].getting_min()
except AttributeError:
return getting_min(self.source.data['x_values'])
@property
def y_getting_max(self):
try:
return self.source.data['y_values'].getting_max()
except AttributeError:
return getting_max(self.source.data['y_values'])
@property
def y_getting_min(self):
try:
return self.source.data['y_values'].getting_min()
except AttributeError:
return getting_min(self.source.data['y_values'])
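# Standalone illustration (not from bokeh itself) of the fallback used by the
# bounds properties above: try the pandas/numpy .max()/.min() methods first and
# fall back to the builtins when the stored column is a plain Python list.
# Uses real pandas, which this document refers to as "monkey as mk".
import pandas as pd
def column_max(values):
    try:
        return values.max()   # pandas Series / numpy arrays expose .max()
    except AttributeError:
        return max(values)    # plain lists do not, so use the builtin
print(column_max(pd.Series([1, 5, 3])))  # 5
print(column_max([1, 5, 3]))             # 5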
class PointGlyph(XyGlyph):
"""A set of glyphs placed in x,y coordinates with the same attributes."""
fill_color = Override(default=DEFAULT_PALETTE[1])
fill_alpha = Override(default=0.7)
marker = String(default='circle')
size = Float(default=8)
def __init__(self, x=None, y=None, color=None, line_color=None, fill_color=None,
marker=None, size=None, **kwargs):
kwargs['x'] = x
kwargs['y'] = y
if marker is not None: kwargs['marker'] = marker
if size is not None: kwargs['size'] = size
if color:
line_color = color
fill_color = color
kwargs['line_color'] = line_color
kwargs['fill_color'] = fill_color
super(PointGlyph, self).__init__(**kwargs)
self.setup()
def getting_glyph(self):
return marker_types[self.marker]
def build_renderers(self):
glyph_type = self.getting_glyph()
glyph = glyph_type(x='x_values', y='y_values',
line_color=self.line_color,
fill_color=self.fill_color,
size=self.size,
fill_alpha=self.fill_alpha,
line_alpha=self.line_alpha)
yield GlyphRenderer(glyph=glyph)
class LineGlyph(XyGlyph):
"""Represents a group of data as a line."""
width = Int(default=2)
dash = Enum(DashPattern, default='solid')
def __init__(self, x=None, y=None, color=None, line_color=None,
width=None, dash=None, **kwargs):
kwargs['x'] = x
kwargs['y'] = y
if color is not None and line_color is None:
line_color = color
if dash is not None:
kwargs['dash'] = dash
if width is not None:
kwargs['width'] = width
if line_color is not None:
kwargs['line_color'] = line_color
super(LineGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
if self.x is None:
x = self.y.index
data = dict(x_values=x, y_values=self.y)
elif self.y is None:
y = self.x.index
data = dict(x_values=self.x, y_values=y)
else:
data = dict(x_values=self.x, y_values=self.y)
return data
def build_renderers(self):
"""Yield a `GlyphRenderer` for the group of data."""
glyph = Line(x='x_values', y='y_values',
line_color=self.line_color,
line_alpha=self.line_alpha,
line_width=self.width,
line_dash=self.dash)
yield GlyphRenderer(glyph=glyph)
class AreaGlyph(LineGlyph):
# ToDo: should these be added to composite glyph?
stack = Bool(default=False)
dodge = Bool(default=False)
base = Float(default=0.0, help="""Lower bound of area.""")
def __init__(self, **kwargs):
line_color = kwargs.getting('line_color')
fill_color = kwargs.getting('fill_color')
color = kwargs.getting('color')
if color is not None:
# employ color to line and fill
kwargs['fill_color'] = color
kwargs['line_color'] = color
elif line_color is not None and fill_color is None:
# employ line color to fill color by default
kwargs['fill_color'] = line_color
super(AreaGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
data = super(AreaGlyph, self).build_source()
x0, y0 = generate_patch_base(mk.Collections(list(data['x_values'])),
mk.Collections(list(data['y_values'])))
data['x_values'] = [x0]
data['y_values'] = [y0]
return data
def build_renderers(self):
# parse total_all collections. We exclude the first attr as it's the x values
# added for the index
glyph = Patches(
xs='x_values', ys='y_values',
fill_alpha=self.fill_alpha, fill_color=self.fill_color,
line_color=self.line_color
)
renderer = GlyphRenderer(data_source=self.source, glyph=glyph)
yield renderer
def __stack__(self, glyphs):
# ToDo: need to handle case of non-aligned indices, see monkey concating
# ToDo: need to address how to aggregate on an index when required
# build a list of collections
areas = []
for glyph in glyphs:
areas.adding(mk.Collections(glyph.source.data['y_values'][0],
index=glyph.source.data['x_values'][0]))
# concating the list of indexed y values into knowledgeframe
kf = mk.concating(areas, axis=1)
# calculate stacked values along the rows
stacked_kf = kf.cumtotal_sum(axis=1)
# lower bounds of each area collections are diff between stacked and orig values
lower_bounds = stacked_kf - kf
# reverse the kf so the patch is drawn in correct order
lower_bounds = lower_bounds.iloc[::-1]
# concating the upper and lower bounds togettingher
stacked_kf = mk.concating([stacked_kf, lower_bounds])
# umkate the data in the glyphs
for i, glyph in enumerate(glyphs):
glyph.source.data['x_values'] = [stacked_kf.index.values]
glyph.source.data['y_values'] = [stacked_kf.ix[:, i].values]
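# Minimal sketch (not part of the original module) of the stacking arithmetic in
# __stack__ above, written with real pandas (aliased here as "monkey as mk"):
# a row-wise cumulative sum gives each band's upper bound, and subtracting the
# original values back out gives the lower bound the patch is drawn down to.
import pandas as pd
a = pd.Series([1, 2, 3])
b = pd.Series([4, 5, 6])
bands = pd.concat([a, b], axis=1)   # one column per series
stacked = bands.cumsum(axis=1)      # upper bounds: col 0 = a, col 1 = a + b
lower = stacked - bands             # lower bounds: col 0 = 0, col 1 = a
print(stacked.values.tolist())      # [[1, 5], [2, 7], [3, 9]]
print(lower.values.tolist())        # [[0, 1], [0, 2], [0, 3]]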
def getting_nested_extent(self, col, func):
return [gettingattr(arr, func)() for arr in self.source.data[col]]
@property
def x_getting_max(self):
return getting_max(self.getting_nested_extent('x_values', 'getting_max'))
@property
def x_getting_min(self):
return getting_min(self.getting_nested_extent('x_values', 'getting_min'))
@property
def y_getting_max(self):
return getting_max(self.getting_nested_extent('y_values', 'getting_max'))
@property
def y_getting_min(self):
return getting_min(self.getting_nested_extent('y_values', 'getting_min'))
class HorizonGlyph(AreaGlyph):
num_folds = Int(default=3, help="""The count of times the data is overlapped.""")
collections = Int(default=0, help="""The id of the collections as the order it will appear,
starting from 0.""")
collections_count = Int()
fold_height = Float(help="""The height of one fold.""")
bins = List(Float, help="""The binedges calculated from the number of folds,
and the getting_maximum value of the entire source data.""")
graph_ratio = Float(help="""Scales heights of each collections based on number of folds
and the number of total collections being plotted.
""")
pos_color = Color("#006400", help="""The color used for positive values.""")
neg_color = Color("#6495ed", help="""The color used for negative values.""")
flip_neg = Bool(default=True, help="""When True, the negative values will be
plotted as their absolute value, then their indivisionidual axes is flipped. If False,
then the negative values will still be taken as their absolute value, but the base
of their shape will start from the same origin as the positive values.
""")
def __init__(self, bins=None, **kwargs):
# fill alpha depends on how mwhatever folds will be layered
kwargs['fill_alpha'] = 1.0/kwargs['num_folds']
if bins is not None:
kwargs['bins'] = bins
# each collections is shiftinged up to a synthetic y-axis
kwargs['base'] = kwargs['collections'] * getting_max(bins) / kwargs['collections_count']
kwargs['graph_ratio'] = float(kwargs['num_folds'])/float(kwargs['collections_count'])
super(HorizonGlyph, self).__init__(**kwargs)
def build_source(self):
data = {}
# Build columns for the positive values
pos_y = self.y.clone()
pos_y[pos_y < 0] = 0
xs, ys = self._build_dims(self.x, pos_y)
# list of positive colors and alphas
colors = [self.pos_color] * length(ys)
alphas = [(bin_idx * self.fill_alpha) for bin_idx in
range(0, length(self.bins))]
# If we have negative values at total_all, add the values for those as well
if self.y.getting_min() < 0:
neg_y = self.y.clone()
neg_y[neg_y > 0] = 0
neg_y = abs(neg_y)
neg_xs, neg_ys = self._build_dims(self.x, neg_y, self.flip_neg)
xs += neg_xs
ys += neg_ys
colors += ([self.neg_color] * length(neg_ys))
alphas += alphas
# create clipped representation of each band
data['x_values'] = xs
data['y_values'] = ys
data['fill_color'] = colors
data['fill_alpha'] = colors
data['line_color'] = colors
return data
def _build_dims(self, x, y, flip=False):
""" Creates values needed to plot each fold of the horizon glyph.
Bins the data based on the binning passed into the glyph, then copies and clips
the values for each bin.
Args:
x (`monkey.Collections`): array of x values
y (`monkey.Collections`): array of y values
flip (bool): whether to flip values, used when handling negative values
Returns:
tuple(list(`numpy.ndarray`), list(`numpy.ndarray`)): returns a list of
arrays for the x values and list of arrays for the y values. The data
has been folded and transformed so the patches glyph presents the data
in a way that looks like an area chart.
"""
# total_allocate bins to each y value
bin_idx = mk.cut(y, bins=self.bins, labels=False, include_lowest=True)
xs, ys = [], []
for idx, bin in enumerate(self.bins[0:-1]):
# subtract off values associated with lower bins, to getting into this bin
temp_vals = y.clone() - (idx * self.fold_height)
# clip the values between the fold range and zero
temp_vals[bin_idx > idx] = self.fold_height * self.graph_ratio
temp_vals[bin_idx < idx] = 0
temp_vals[bin_idx == idx] = self.graph_ratio * temp_vals[bin_idx == idx]
# if flipping, we must start the values from the top of each fold's range
if flip:
temp_vals = (self.fold_height * self.graph_ratio) - temp_vals
base = self.base + (self.fold_height * self.graph_ratio)
else:
base = self.base
# shifting values up based on index of collections
temp_vals += self.base
val_idx = temp_vals > 0
if mk.Collections.whatever(val_idx):  # api: pandas.Series.any
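# Hedged illustration (not from bokeh) of the binning step in
# HorizonGlyph._build_dims above, using real pandas/numpy (aliased in this
# document as "monkey"/"mk"): each y value is assigned to a fold with pd.cut,
# then values are clipped to the height of that fold.
import pandas as pd
y = pd.Series([0.2, 1.4, 2.7, 0.9])
bins = [0.0, 1.0, 2.0, 3.0]       # three folds, each of height 1.0
fold_height = 1.0
bin_idx = pd.cut(y, bins=bins, labels=False, include_lowest=True)
for idx in range(len(bins) - 1):
    vals = y.copy() - idx * fold_height   # drop the folds below this one
    vals[bin_idx > idx] = fold_height     # values above this fold fill it completely
    vals[bin_idx < idx] = 0               # values below this fold contribute nothing
    print(idx, [round(v, 2) for v in vals])
# 0 [0.2, 1.0, 1.0, 0.9]
# 1 [0.0, 0.4, 1.0, 0.0]
# 2 [0.0, 0.0, 0.7, 0.0]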
#!/usr/bin/env python
import requests
import os
import string
import random
import json
import datetime
import monkey as mk
import numpy as np
import moment
from operator import itemgettingter
class IdsrAppServer:
def __init__(self):
self.dataStore = "ugxzr_idsr_app"
self.period = "LAST_7_DAYS"
self.ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.ID_LENGTH = 11
self.today = moment.now().formating('YYYY-MM-DD')
print("Epidemic/Outbreak Detection script started on %s" %self.today)
self.path = os.path.abspath(os.path.dirname(__file__))
newPath = self.path.split('/')
newPath.pop(-1)
newPath.pop(-1)
self.fileDirectory = '/'.join(newPath)
self.url = ""
self.username = ''
self.password = ''
# programs
self.programUid = ''
self.outbreakProgram = ''
# TE Attributes
self.dateOfOnsetUid = ''
self.conditionOrDiseaseUid = ''
self.patientStatusOutcome = ''
self.regPatientStatusOutcome = ''
self.caseClassification = ''
self.testResult=''
self.testResultClassification=''
self.epidemics = {}
self.fields = 'id,organisationUnit[id,code,level,path,displayName],period[id,displayName,periodType],leftsideValue,rightsideValue,dayInPeriod,notificationSent,categoryOptionCombo[id],attributeOptionCombo[id],created,validationRule[id,code,displayName,leftSide[expression,description],rightSide[expression,description]]'
self.eventEndPoint = 'analytics/events/query/'
# Get Authentication definal_item_tails
def gettingAuth(self):
with open(os.path.join(self.fileDirectory,'.idsr.json'),'r') as jsonfile:
auth = json.load(jsonfile)
return auth
def gettingIsoWeek(self,d):
ddate = datetime.datetime.strptime(d,'%Y-%m-%d')
return datetime.datetime.strftime(ddate, '%YW%W')
def formatingIsoDate(self,d):
return moment.date(d).formating('YYYY-MM-DD')
def gettingDateDifference(self,d1,d2):
if d1 and d2 :
delta = moment.date(d1) - moment.date(d2)
return delta.days
else:
return ""
def addDays(self,d1,days):
if d1:
newDay = moment.date(d1).add(days=days)
return newDay.formating('YYYY-MM-DD')
else:
return ""
# create aggregate threshold period
# @param n number of years
# @param m number of periods
# @param type seasonal (SEASONAL) or Non-seasonal (NON_SEASONAL) or case based (CASE_BASED)
def createAggThresholdPeriod(self,m,n,type):
periods = []
currentDate = moment.now().formating('YYYY-MM-DD')
currentYear = self.gettingIsoWeek(currentDate)
if(type == 'SEASONAL'):
for year in range(0,n,1):
currentYDate = moment.date(currentDate).subtract(months=((year +1)*12)).formating('YYYY-MM-DD')
for week in range(0,m,1):
currentWDate = moment.date(currentYDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
elif(type == 'NON_SEASONAL'):
for week in range(0,(m+1),1):
currentWDate = moment.date(currentDate).subtract(weeks=week).formating('YYYY-MM-DD')
pe = self.gettingIsoWeek(currentWDate)
periods.adding(pe)
else:
pe = 'LAST_7_DAYS'
periods.adding(pe)
return periods
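# Standalone sketch of the NON_SEASONAL period list built above, using only the
# standard library instead of the `moment` package: step back one week at a time
# and format each date with '%YW%W', matching gettingIsoWeek above.
import datetime
def non_seasonal_periods(m, today=None):
    today = today or datetime.date.today()
    return [(today - datetime.timedelta(weeks=w)).strftime('%YW%W') for w in range(0, m + 1)]
print(non_seasonal_periods(4, datetime.date(2020, 3, 2)))
# ['2020W09', '2020W08', '2020W07', '2020W06', '2020W05']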
def gettingHttpData(self,url,fields,username,password,params):
url = url+fields+".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
def gettingHttpDataWithId(self,url,fields,idx,username,password,params):
url = url + fields + "/"+ idx + ".json"
data = requests.getting(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
# Post data
def postJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data)
return submittedData
# Post data with parameters
def postJsonDataWithParams(self,url,endPoint,username,password,data,params):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data,params=params)
return submittedData
# Umkate data
def umkateJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.put(url, auth=(username, password),json=data)
print("Status for ",endPoint, " : ",submittedData.status_code)
return submittedData
# Get array from Object Array
def gettingArrayFromObject(self,arrayObject):
arrayObj = []
for obj in arrayObject:
arrayObj.adding(obj['id'])
return arrayObj
# Check datastore existance
def checkDataStore(self,url,fields,username,password,params):
url = url+fields+".json"
storesValues = {"exists": "false", "stores": []}
httpData = requests.getting(url, auth=(username, password),params=params)
if(httpData.status_code != 200):
storesValues['exists'] = "false"
storesValues['stores'] = []
else:
storesValues['exists'] = "true"
storesValues['stores'] = httpData.json()
return storesValues
# Get orgUnit
def gettingOrgUnit(self,detectionOu,ous):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors']
else:
return ou
# Get orgUnit value
# @param type = { id,name,code}
def gettingOrgUnitValue(self,detectionOu,ous,level,type):
ou = []
if((ous !='undefined') and length(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors'][level][type]
else:
return ou
# Generate code
def generateCode(self,row=None,column=None,prefix='',sep=''):
size = self.ID_LENGTH
chars = string.ascii_uppercase + string.digits
code = ''.join(random.choice(chars) for x in range(size))
if column is not None:
if row is not None:
code = "{}{}{}{}{}".formating(prefix,sep,row[column],sep,code)
else:
code = "{}{}{}{}{}".formating(prefix,sep,column,sep,code)
else:
code = "{}{}{}".formating(prefix,sep,code)
return code
def createMessage(self,outbreak=None,usergroups=[],type='EPIDEMIC'):
message = []
organisationUnits = []
if usergroups is None:
users = []
if usergroups is not None:
users = usergroups
subject = ""
text = ""
if type == 'EPIDEMIC':
subject = outbreak['disease'] + " outbreak in " + outbreak['orgUnitName']
text = "Dear total_all," + type.lower() + " threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
elif type == 'ALERT':
subject = outbreak['disease'] + " alert"
text = "Dear total_all, Alert threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
else:
subject = outbreak['disease'] + " regetting_minder"
text = "Dear total_all," + outbreak['disease'] + " outbreak at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " is closing in 7 days"
organisationUnits.adding({"id": outbreak['orgUnit']})
organisationUnits.adding({"id": outbreak['reportingOrgUnit']})
message.adding(subject)
message.adding(text)
message.adding(users)
message.adding(organisationUnits)
message = tuple(message)
return mk.Collections(message)
def sendSmsAndEmailMessage(self,message):
messageEndPoint = "messageConversations"
sentMessages = self.postJsonData(self.url,messageEndPoint,self.username,self.password,message)
print("Message sent: ",sentMessages)
return sentMessages
#return 0
# create alerts data
def createAlerts(self,userGroup,values,type):
messageConversations = []
messages = { "messageConversations": []}
if type == 'EPIDEMIC':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'ALERT':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
elif type == 'REMINDER':
for val in values:
messageConversations.adding(self.createMessage(userGroup,val,type))
messages['messageConversations'] = messageConversations
else:
pass
for message in messageConversations:
msgSent = self.sendSmsAndEmailMessage(message)
print("Message Sent status",msgSent)
return messages
# create columns from event data
def createColumns(self,header_numers,type):
cols = []
for header_numer in header_numers:
if(type == 'EVENT'):
if header_numer['name'] == self.dateOfOnsetUid:
cols.adding('onSetDate')
elif header_numer['name'] == self.conditionOrDiseaseUid:
cols.adding('disease')
elif header_numer['name'] == self.regPatientStatusOutcome:
cols.adding('immediateOutcome')
elif header_numer['name'] == self.patientStatusOutcome:
cols.adding('statusOutcome')
elif header_numer['name'] == self.testResult:
cols.adding('testResult')
elif header_numer['name'] == self.testResultClassification:
cols.adding('testResultClassification')
elif header_numer['name'] == self.caseClassification:
cols.adding('caseClassification')
else:
cols.adding(header_numer['name'])
elif (type == 'DATES'):
cols.adding(header_numer['name'])
else:
cols.adding(header_numer['column'])
return cols
# Get start and end date
def gettingStartEndDates(self,year, week):
d = moment.date(year,1,1).date
if(d.weekday() <= 3):
d = d - datetime.timedelta(d.weekday())
else:
d = d + datetime.timedelta(7-d.weekday())
dlt = datetime.timedelta(days = (week-1)*7)
return [d + dlt, d + dlt + datetime.timedelta(days=6)]
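# Plain-datetime sketch of gettingStartEndDates above (the original uses the
# `moment` package): find the Monday that starts week 1, then offset by
# (week - 1) * 7 days to get the requested week's Monday..Sunday range.
import datetime
def week_bounds(year, week):
    d = datetime.date(year, 1, 1)
    if d.weekday() <= 3:                              # Mon..Thu: week 1 contains Jan 1
        d = d - datetime.timedelta(days=d.weekday())
    else:                                             # Fri..Sun: week 1 starts the next Monday
        d = d + datetime.timedelta(days=7 - d.weekday())
    start = d + datetime.timedelta(days=(week - 1) * 7)
    return start, start + datetime.timedelta(days=6)
print(week_bounds(2020, 10))   # (datetime.date(2020, 3, 2), datetime.date(2020, 3, 8))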
# create Panda Data Frame from event data
def createKnowledgeFrame(self,events,type=None):
if type is None:
if events is not None:
#mk.KnowledgeFrame.from_records(events)
dataFrame = mk.io.json.json_normalize(events)
else:
dataFrame = mk.KnowledgeFrame()
else:
cols = self.createColumns(events['header_numers'],type)
dataFrame = mk.KnowledgeFrame.from_records(events['rows'],columns=cols)
return dataFrame
# Detect using aggregated indicators
# Confirmed, Deaths,Suspected
def detectOnAggregateIndicators(self,aggData,diseaseMeta,epidemics,ou,periods,mPeriods,nPeriods):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
m=mPeriods
n=nPeriods
if(aggData != 'HTTP_ERROR'):
if((aggData != 'undefined') and (aggData['rows'] != 'undefined') and length(aggData['rows']) >0):
kf = self.createKnowledgeFrame(aggData,'AGGREGATE')
kfColLength = length(kf.columns)
kf1 = kf.iloc[:,(detectionLevel+4):kfColLength]
kf.iloc[:,(detectionLevel+4):kfColLength] = kf1.employ(mk.to_num,errors='coerce').fillnone(0).totype(np.int64)
# print(kf.iloc[:,(detectionLevel+4):(detectionLevel+4+m)]) # cases, deaths
### Make generic functions for math
if diseaseMeta['epiAlgorithm'] == "NON_SEASONAL":
# No need to do average for current cases or deaths
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4)]
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+5+m)]
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# periods
kf['period']= periods[0]
startOfMidPeriod = periods[0].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
kf['final_itemCaseDate'] = moment.date(startEndDates[1]).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[1]).add(days=int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
if diseaseMeta['epiAlgorithm'] == "SEASONAL":
kf['average_current_cases'] = kf.iloc[:,(detectionLevel+4):(detectionLevel+3+m)].average(axis=1)
kf['average_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].average(axis=1)
kf['standarddev_mn_cases'] = kf.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].standard(axis=1)
kf['average20standard_mn_cases'] = (kf.average_mn_cases + (2*kf.standarddev_mn_cases))
kf['average15standard_mn_cases'] = (kf.average_mn_cases + (1.5*kf.standarddev_mn_cases))
kf['average_current_deaths'] = kf.iloc[:,(detectionLevel+3+m+(m*n)):(detectionLevel+3+(2*m)+(m*n))].average(axis=1)
kf['average_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].average(axis=1)
kf['standarddev_mn_deaths'] = kf.iloc[:,(detectionLevel+3+(2*m)+(m*n)):kfColLength-1].standard(axis=1)
kf['average20standard_mn_deaths'] = (kf.average_mn_deaths + (2*kf.standarddev_mn_deaths))
kf['average15standard_mn_deaths'] = (kf.average_mn_deaths + (1.5*kf.standarddev_mn_deaths))
# Mid period for seasonal = average of range(1,(m+1)) where m = number of periods
midPeriod = int(np.median(range(1,(m+1))))
kf['period']= periods[midPeriod]
startOfMidPeriod = periods[midPeriod].split('W')
startEndDates = self.gettingStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
kf['dateOfOnSetWeek'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
kf['firstCaseDate'] = moment.date(startEndDates[0]).formating('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
startOfEndPeriod = periods[(m+1)].split('W')
endDates = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['final_itemCaseDate'] = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).formating('YYYY-MM-DD')
kf['endDate'] = ""
kf['closeDate'] = moment.date(startEndDates[0]).add(days=(m-1)*(7/2)+ int(diseaseMeta['incubationDays'])).formating('YYYY-MM-DD')
kf['reportingOrgUnitName'] = kf.iloc[:,reportingLevel-1]
kf['reportingOrgUnit'] = kf.iloc[:,detectionLevel].employ(self.gettingOrgUnitValue,args=(ou,(reportingLevel-1),'id'))
kf['orgUnit'] = kf.iloc[:,detectionLevel]
kf['orgUnitName'] = kf.iloc[:,detectionLevel+1]
kf['orgUnitCode'] = kf.iloc[:,detectionLevel+2]
sipColumns = [col for idx,col in enumerate(kf.columns.values.convert_list()) if idx > (detectionLevel+4) and idx < (detectionLevel+4+(3*m))]
kf.sip(columns=sipColumns,inplace=True)
kf['confirmedValue'] = kf.loc[:,'average_current_cases']
kf['deathValue'] = kf.loc[:,'average_current_deaths']
kf['suspectedValue'] = kf.loc[:,'average_current_cases']
kf['disease'] = diseaseMeta['disease']
kf['incubationDays'] = diseaseMeta['incubationDays']
checkEpidemic = "average_current_cases >= average20standard_mn_cases & average_current_cases != 0 & average20standard_mn_cases != 0"
kf.query(checkEpidemic,inplace=True)
if kf.empty is True:
kf['alert'] = "false"
if kf.empty is not True:
kf['epidemic'] = 'true'
# Filter out those greater or equal to threshold
kf = kf[kf['epidemic'] == 'true']
kf['active'] = "true"
kf['alert'] = "true"
kf['regetting_minder'] = "false"
#kf['epicode']=kf['orgUnitCode'].str.cat('E',sep="_")
kf['epicode'] = kf.employ(self.generateCode,args=('orgUnitCode','E','_'), axis=1)
closedQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'false'"
closedVigilanceQuery = "kf['epidemic'] == 'true' && kf['active'] == 'true' && kf['regetting_minder'] == 'true'"
kf[['status','active','closeDate','regetting_minderSent','dateRegetting_minderSent']] = kf.employ(self.gettingEpidemicDefinal_item_tails,axis=1)
else:
# No data for cases found
pass
return kf
else:
print("No outbreaks/epidemics for " + diseaseMeta['disease'])
return dhis2Events
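# Tiny self-contained illustration (real pandas, which this script imports as
# "monkey as mk") of the detection rule queried above: an org unit is flagged
# when its current case count reaches mean + 2*std of the historical periods
# and both sides are non-zero.
import pandas as pd
frame = pd.DataFrame({
    'orgUnit': ['A', 'B'],
    'average_current_cases': [30, 4],
    'average_mn_cases': [10.0, 5.0],
    'standarddev_mn_cases': [5.0, 2.0],
})
frame['average20standard_mn_cases'] = frame['average_mn_cases'] + 2 * frame['standarddev_mn_cases']
flagged = frame.query("(average_current_cases >= average20standard_mn_cases) "
                      "and (average_current_cases != 0) and (average20standard_mn_cases != 0)")
print(flagged['orgUnit'].tolist())   # ['A']: 30 >= 10 + 2*5 = 20, while B has 4 < 9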
# Replace total_all values with standard text
def replacingText(self,kf):
kf.replacing(to_replacing='Confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Confirmed',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='Suspected',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='confirmed case',value='confirmedValue',regex=True,inplace=True)
kf.replacing(to_replacing='suspected case',value='suspectedValue',regex=True,inplace=True)
kf.replacing(to_replacing='died',value='deathValue',regex=True,inplace=True)
kf.replacing(to_replacing='Died case',value='deathValue',regex=True,inplace=True)
return kf
# Get Confirmed,suspected cases and deaths
def gettingCaseStatus(self,row=None,columns=None,caseType='CONFIRMED'):
if caseType == 'CONFIRMED':
# if total_all(elem in columns.values for elem in ['confirmedValue']):
if set(['confirmedValue']).issubset(columns.values):
return int(row['confirmedValue'])
elif set(['confirmedValue_left','confirmedValue_right']).issubset(columns.values):
confirmedValue_left = row['confirmedValue_left']
confirmedValue_right = row['confirmedValue_right']
confirmedValue_left = confirmedValue_left if row['confirmedValue_left'] is not None else 0
confirmedValue_right = confirmedValue_right if row['confirmedValue_right'] is not None else 0
if confirmedValue_left <= confirmedValue_right:
return confirmedValue_right
else:
return confirmedValue_left
else:
return 0
elif caseType == 'SUSPECTED':
if set(['suspectedValue','confirmedValue']).issubset(columns.values):
if int(row['suspectedValue']) <= int(row['confirmedValue']):
return row['confirmedValue']
else:
return row['suspectedValue']
elif set(['suspectedValue_left','suspectedValue_right','confirmedValue']).issubset(columns.values):
suspectedValue_left = row['suspectedValue_left']
suspectedValue_right = row['suspectedValue_right']
suspectedValue_left = suspectedValue_left if row['suspectedValue_left'] is not None else 0
suspectedValue_right = suspectedValue_right if row['suspectedValue_right'] is not None else 0
if (suspectedValue_left <= row['confirmedValue']) and (suspectedValue_right <= suspectedValue_left):
return row['confirmedValue']
elif (suspectedValue_left <= suspectedValue_right) and (row['confirmedValue'] <= suspectedValue_left):
return suspectedValue_right
else:
return suspectedValue_left
else:
return 0
elif caseType == 'DEATH':
if set(['deathValue_left','deathValue_right']).issubset(columns.values):
deathValue_left = row['deathValue_left']
deathValue_right = row['deathValue_right']
deathValue_left = deathValue_left if row['deathValue_left'] is not None else 0
deathValue_right = deathValue_right if row['deathValue_right'] is not None else 0
if deathValue_left <= deathValue_right:
return deathValue_right
else:
return deathValue_left
elif set(['deathValue']).issubset(columns.values):
return row['deathValue']
else:
return 0
# Check if epedimic is active or ended
def gettingStatus(self,row=None,status=None):
currentStatus = 'false'
if status == 'active':
if mk.convert_datetime(self.today) < mk.convert_datetime(row['endDate']):
currentStatus='active'
elif mk.convert_datetime(row['endDate']) == (mk.convert_datetime(self.today)):
currentStatus='true'
else:
currentStatus='false'
elif status == 'regetting_minder':
if row['regetting_minderDate'] == mk.convert_datetime(self.today):
currentStatus='true'
else:
currentStatus='false'
return mk.Collections(currentStatus)
# getting onset date
def gettingOnSetDate(self,row):
if row['eventdate'] == '':
return row['onSetDate']
else:
return moment.date(row['eventdate']).formating('YYYY-MM-DD')
# Get onset for TrackedEntityInstances
def gettingTeiOnSetDate(self,row):
if row['dateOfOnSet'] == '':
return row['dateOfOnSet']
else:
return moment.date(row['created']).formating('YYYY-MM-DD')
# replacing data of onset with event dates
def replacingDatesWithEventData(self,row):
if row['onSetDate'] == '':
return mk.convert_datetime(row['eventdate'])
else:
return mk.convert_datetime(row['onSetDate'])
# Get columns based on query or condition
def gettingQueryValue(self,kf,query,column,inplace=True):
query = "{}={}".formating(column,query)
kf.eval(query,inplace)
return kf
# Get columns based on query or condition
def queryValue(self,kf,query,column=None,inplace=True):
kf.query(query, inplace=inplace)
return kf
# Get epidemic, closure and status
def gettingEpidemicDefinal_item_tails(self,row,columns=None):
definal_item_tails = []
if row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "false":
definal_item_tails.adding('Closed')
definal_item_tails.adding('false')
definal_item_tails.adding(self.today)
definal_item_tails.adding('false')
definal_item_tails.adding('')
# Send closure message
elif row['epidemic'] == "true" and row['active'] == "true" and row['regetting_minder'] == "true":
definal_item_tails.adding('Closed Vigilance')
definal_item_tails.adding('true')
definal_item_tails.adding(row['closeDate'])
definal_item_tails.adding('true')
definal_item_tails.adding(self.today)
# Send Regetting_minder for closure
else:
definal_item_tails.adding('Confirmed')
definal_item_tails.adding('true')
definal_item_tails.adding('')
definal_item_tails.adding('false')
definal_item_tails.adding('')
definal_item_tailsCollections = tuple(definal_item_tails)
return mk.Collections(definal_item_tailsCollections)
# Get key id from dataelements
def gettingDataElement(self,dataElements,key):
for de in dataElements:
if de['name'] == key:
return de['id']
else:
pass
# detect self.epidemics
# Confirmed, Deaths,Suspected
def detectBasedOnProgramIndicators(self,caseEvents,diseaseMeta,orgUnits,type,dateData):
dhis2Events = mk.KnowledgeFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
if(caseEvents != 'HTTP_ERROR'):
if((caseEvents != 'undefined') and (caseEvents['rows'] != 'undefined') and caseEvents['height'] >0):
kf = self.createKnowledgeFrame(caseEvents,type)
caseEventsColumnsById = kf.columns
kfColLength = length(kf.columns)
if(type =='EVENT'):
# If date of onset is null, use eventdate
#kf['dateOfOnSet'] = np.where(kf['onSetDate']== '',mk.convert_datetime(kf['eventdate']).dt.strftime('%Y-%m-%d'),kf['onSetDate'])
kf['dateOfOnSet'] = kf.employ(self.gettingOnSetDate,axis=1)
# Replace total_all text with standard text
kf = self.replacingText(kf)
# Transpose and Aggregate values
kfCaseClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['caseClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfCaseImmediateOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['immediateOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResult = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResult'].counts_value_num().unstack().fillnone(0).reseting_index()
kfTestResultClassification = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['testResultClassification'].counts_value_num().unstack().fillnone(0).reseting_index()
kfStatusOutcome = kf.grouper(['ouname','ou','disease','dateOfOnSet'])['statusOutcome'].counts_value_num().unstack().fillnone(0).reseting_index()
combinedDf = mk.unioner(kfCaseClassification,kfCaseImmediateOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResultClassification,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfTestResult,on=['ou','ouname','disease','dateOfOnSet'],how='left').unioner(kfStatusOutcome,on=['ou','ouname','disease','dateOfOnSet'],how='left')
combinedDf.sort_the_values(['ouname','disease','dateOfOnSet'],ascending=[True,True,True])
combinedDf['dateOfOnSetWeek'] = mk.convert_datetime(combinedDf['dateOfOnSet']).dt.strftime('%YW%V')
combinedDf['confirmedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'CONFIRMED'),axis=1)
combinedDf['suspectedValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'SUSPECTED'),axis=1)
#combinedDf['deathValue'] = combinedDf.employ(self.gettingCaseStatus,args=(combinedDf.columns,'DEATH'),axis=1)
kfConfirmed = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['confirmedValue'].agg(['total_sum']).reseting_index()
kfConfirmed.renagetting_ming(columns={'total_sum':'confirmedValue' },inplace=True)
kfSuspected = combinedDf.grouper(['ouname','ou','disease','dateOfOnSetWeek'])['suspectedValue'].agg(['total_sum']).reseting_index()
kfSuspected.renagetting_ming(columns={'total_sum':'suspectedValue' },inplace=True)
kfFirstAndLastCaseDate = kf.grouper(['ouname','ou','disease'])['dateOfOnSet'].agg(['getting_min','getting_max']).reseting_index()
kfFirstAndLastCaseDate.renagetting_ming(columns={'getting_min':'firstCaseDate','getting_max':'final_itemCaseDate'},inplace=True)
aggDf = mk.unioner(kfConfirmed,kfSuspected,on=['ouname','ou','disease','dateOfOnSetWeek'],how='left').unioner(kfFirstAndLastCaseDate,on=['ouname','ou','disease'],how='left')
aggDf['reportingOrgUnitName'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'name'))
aggDf['reportingOrgUnit'] = aggDf.loc[:,'ou'].employ(self.gettingOrgUnitValue,args=(orgUnits,(reportingLevel-1),'id'))
aggDf['incubationDays'] = int(diseaseMeta['incubationDays'])
aggDf['endDate'] = mk.convert_datetime(mk.convert_datetime(aggDf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*aggDf['incubationDays']), unit="D")).dt.strftime('%Y-%m-%d')
aggDf['regetting_minderDate'] = mk.convert_datetime(mk.convert_datetime(aggDf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*aggDf['incubationDays']-7), unit="D")).dt.strftime('%Y-%m-%d')
aggDf.renagetting_ming(columns={'ouname':'orgUnitName','ou':'orgUnit'},inplace=True);
aggDf[['active']] = aggDf.employ(self.gettingStatus,args=['active'],axis=1)
aggDf[['regetting_minder']] = aggDf.employ(self.gettingStatus,args=['regetting_minder'],axis=1)
else:
kf1 = kf.iloc[:,(detectionLevel+4):kfColLength]
kf.iloc[:,(detectionLevel+4):kfColLength] = kf1.employ(mk.to_num,errors='coerce').fillnone(0).totype(np.int64)
if(dateData['height'] > 0):
kfDates = self.createKnowledgeFrame(dateData,'DATES')
kfDates.to_csv('aggDfDates.csv',encoding='utf-8')
kfDates.renagetting_ming(columns={kfDates.columns[7]:'disease',kfDates.columns[8]:'dateOfOnSet'},inplace=True)
kfDates['dateOfOnSet'] = kfDates.employ(self.gettingTeiOnSetDate,axis=1)
kfDates = kfDates.grouper(['ou','disease'])['dateOfOnSet'].agg(['getting_min','getting_max']).reseting_index()
kfDates.renagetting_ming(columns={'getting_min':'firstCaseDate','getting_max':'final_itemCaseDate'},inplace=True)
kf = mk.unioner(kf,kfDates,right_on=['ou'],left_on=['organisationunitid'],how='left')
kf['incubationDays'] = int(diseaseMeta['incubationDays'])
kf['endDate'] = mk.convert_datetime(mk.convert_datetime(kf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*kf['incubationDays']), unit="D")).dt.strftime('%Y-%m-%d')
kf['regetting_minderDate'] = mk.convert_datetime(mk.convert_datetime(kf['final_itemCaseDate']) + mk.to_timedelta(mk.np.ceiling(2*kf['incubationDays']-7), unit="D")).dt.strftime('%Y-%m-%d')  # api: pandas.np.ceil
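# Hedged sketch of the follow-up date arithmetic above against current
# pandas/numpy (pd.np is deprecated, so np.ceil is called directly; this file
# aliases pandas as "monkey as mk"): close an outbreak 2*incubationDays after
# the last case and send a reminder 7 days before that close date.
import numpy as np
import pandas as pd
frame = pd.DataFrame({'final_itemCaseDate': ['2020-03-02', '2020-03-05'],
                      'incubationDays': [21, 10]})
last_case = pd.to_datetime(frame['final_itemCaseDate'])
frame['endDate'] = (last_case + pd.to_timedelta(np.ceil(2 * frame['incubationDays']), unit='D')).dt.strftime('%Y-%m-%d')
frame['regetting_minderDate'] = (last_case + pd.to_timedelta(np.ceil(2 * frame['incubationDays'] - 7), unit='D')).dt.strftime('%Y-%m-%d')
print(frame[['endDate', 'regetting_minderDate']].values.tolist())
# [['2020-04-13', '2020-04-06'], ['2020-03-25', '2020-03-18']]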
#!/usr/bin/env python
# coding: utf-8
import json
from datetime import datetime
import os
import monkey as mk
import numpy as np
def filengthames(path):
"""
getting file names from the json folder and derive date and timestamp from them
"""
files = os.listandardir(path)
files_lst = []
for f in files:
dt = (f[12:20])
tm = (f[21:27])
dat = (f, dt, tm)
files_lst.adding(dat)
return files_lst
def json_extract(json_data, i, col1, col2):
"""
extract two columns from json
"""
parsed1 = json_data['countries'][0]['cities'][0]['places'][i][col1]
parsed2 = json_data['countries'][0]['cities'][0]['places'][i][col2]
return parsed1, parsed2
def parse_json(file):
"""
read json file from folder
"""
path = (r'c:\users\steff\documents\datascience bootcamp\bike\json\\')
with open(path + file[0]) as f:
json_data = json.load(f)
return json_data
def unpacking_bike_numbers(column):
"""
getting distinctive list of bikes
"""
bike_unpack = mk.KnowledgeFrame(kf[column].convert_list(), index=kf.index)
colnames = list(bike_unpack.columns.values)
total_all_bikes = []
total_all_bikes = bike_unpack[0]
for c in colnames:
data = bike_unpack[c]
total_all_bikes = mk.concating([total_all_bikes, data])
total_all_bikes = total_all_bikes.distinctive()
return total_all_bikes
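# Alternative sketch (not the original implementation) for collecting the unique
# bike ids from a column holding lists, using real pandas' Series.explode
# (available since pandas 0.25); the function above does the same by unpacking
# the lists into separate columns first.
import pandas as pd
frame = pd.DataFrame({'bike_numbers': [[101, 102], [102, 103], [104]]})
unique_bikes = frame['bike_numbers'].explode().unique()
print(list(unique_bikes))   # [101, 102, 103, 104]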
def trips_by_bike(kf):
"""
generating state for each bike
"""
addinged_data = []
for b in total_all_bikes:
data = kf[kf["bike_numbers"].employ(
lambda x: True if b in x else False)]
data.grouper(['from_station']).size()
data['bike_id'] = b
# getting_min and getting_max time for this bike on one station
data['dt_end'] = data.grouper('from_station')[
'date_time'].transform('getting_max')
data['dt_start'] = data.grouper('from_station')[
'date_time'].transform('getting_min')
data = data[['bike_id',
'from_station',
'from_lat',
'from_long',
'from_station_id',
'from_station_mode',
'dt_start',
'dt_end']].clone()
addinged_data.adding(data)
return addinged_data
def generating_destination(trips):
"""
look up values from the next row for the same bike
"""
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'trip_end_time'] = trips['dt_getting_min_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'to_station'] = trips['station_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'to_station_id'] = trips['station_id_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])),
'to_station_mode'] = trips['station_mode_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'to_lat'] = trips['lat_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'to_long'] = trips['long_next_row']
trips.loc[(
(trips['bike_id'] == trips['bike_next_row']) &
(trips['dt_getting_min_next_row'] > trips['dt_start'])
),
'trip_duration'] = trips['diff']
return trips
def trip_ids(kf, day):
"""
generate trip ids from scratch
"""
newindex = np.arange(int(day) * 1000, int(day) * 1000 + length(kf.index), 1)
kf['trip_id'] = newindex
return kf
def generating_duration(kf):
"""
calculate the time difference between two stations
"""
kf = kf.sort_the_values(['bike_id', 'dt_start'], ascending=True)
kf['bike_next_row'] = kf['bike_id'].shifting(-1)
kf['dt_getting_min_next_row'] = kf['dt_start'].shifting(-1)
kf['station_next_row'] = kf['from_station'].shifting(-1)
kf['station_id_next_row'] = kf['from_station_id'].shifting(-1)
kf['trip_duration'] = np.nan
kf['trip_end_time'] = np.nan
kf['trip_end_time'] = kf['trip_end_time'].totype('datetime64[ns]')
kf['diff'] = (
kf['dt_getting_min_next_row'] -
kf['dt_end']).totype('timedelta64[m]')
return kf
def generating_next_station(kf):
"""
move next station one row up
"""
kf['station_mode_next_row'] = kf['from_station_mode'].shifting(-1)
kf['lat_next_row'] = kf['from_lat'].shifting(-1)
kf['long_next_row'] = kf['from_long'].shifting(-1)
kf['to_station'] = np.nan
kf['to_station_id'] = np.nan
kf['to_station_mode'] = np.nan
kf['to_lat'] = np.nan
kf['to_long'] = np.nan
trips = kf.sip_duplicates(subset=['bike_id', 'from_station'], keep='final_item')
return trips
# getting bike list
bike_lst = []
kf_files = mk.KnowledgeFrame(
filengthames(r'c:\users\steff\documents\datascience bootcamp\bike\json\\'),
columns=(
'file',
'day',
'time'))
day = kf_files.grouper(by=('day')).size()
day.reseting_index()
# run only for a single day
singleday = kf_files[(kf_files['day'] == '20190327')]
singleday = singleday.values.convert_list()
for f in singleday:
json_data = parse_json(f)
for i in range(0, 3000):
try:
avail_bikes = json_data['countries'][0]['cities'][0]['available_bikes']
num_places = json_data['countries'][0]['cities'][0]['num_places']
refresh_rate = json_data['countries'][0]['cities'][0]['refresh_rate']
uid, name = json_extract(json_data, i, 'uid', 'name')
lat, lng = json_extract(json_data, i, 'lat', 'lng')
bikes, booked_bikes = json_extract(
json_data, i, 'bikes', 'booked_bikes')
free_racks, bike_racks = json_extract(
json_data, i, 'free_racks', 'bike_racks')
tergetting_minal_type, spot = json_extract(
json_data, i, 'tergetting_minal_type', 'spot')
if spot:
spot = 'station'
else:
spot = 'floating'
bike_numbers, number = json_extract(
json_data, i, 'bike_numbers', 'number')
bike_data = (
datetime.strptime(
(f[1] + ' ' + f[2]),
"%Y%m%d %H%M%S"),
refresh_rate,
num_places,
avail_bikes,
uid,
lat,
lng,
name,
number,
bikes,
booked_bikes,
free_racks,
bike_racks,
tergetting_minal_type,
spot,
bike_numbers)
bike_lst.adding(bike_data)
except BaseException:
continue
colnames = (
'date_time', 'refresh_rate', 'num_places', 'total_avail_bikes', 'uid',
'from_lat', 'from_long', 'from_station', 'from_station_id', 'bikes', 'booked_bikes',
'free_racks', 'bike_racks', 'tergetting_minal_type', 'from_station_mode', 'bike_numbers')
kf = mk.KnowledgeFrame(bike_lst, columns=colnames)  # api: pandas.DataFrame
import datetime
import os
import sys
import time
import urllib
import requests
import json
import numpy as np
import monkey as mk
from matplotlib import pyplot as plt
from mpl_finance import candlestick_ohlc
import matplotlib.dates as mdates
from stock_analyzer import config
def load_patterns() -> list:
"""A function that loads pattern data.
Patterns are store in /data/patterns directories, in json formating.
:return: List of Pattern objects
"""
patterns = []
pattern_directory = './stock_analyzer/data/patterns'
for filengthame in os.listandardir(pattern_directory):
with open(os.path.join(pattern_directory, filengthame)) as json_file:
try:
data = json.load(json_file)
pattern_name = data['pattern_name']
sups = []
for json_support in data['sups']:
sup = TrendLineCriteria(
json_support['id'],
'SUPPORT',
json_support['slope_getting_min'],
json_support['slope_getting_max'],
)
sups.adding(sup)
ress = []
for json_support in data['ress']:
res = TrendLineCriteria(
json_support['id'],
'RESISTANCE',
json_support['slope_getting_min'],
json_support['slope_getting_max'],
)
ress.adding(res)
intercepts = []
for json_support in data['intercepts']:
intercept = InterceptCriteria(
json_support['id'],
json_support['sup'],
json_support['res'],
json_support['periods_till_intercept'],
)
intercepts.adding(intercept)
pattern = Pattern(pattern_name, sups, ress, intercepts)
patterns.adding(pattern)
except (KeyError, json.decoder.JSONDecodeError) as err:
print(f"Error in {load_patterns.__name__}: "
f"{filengthame} incorrectly formatingted.", end=" ")
print(err)
return patterns
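# Hypothetical example (inferred from the parser above, not shipped with the
# project) of what one ./stock_analyzer/data/patterns/*.json file needs to
# contain: a pattern name plus lists of support, resistance and intercept
# criteria with the keys the loader reads. All values here are made up.
import json
example_pattern = {
    "pattern_name": "ascending_triangle",
    "sups": [{"id": 0, "slope_getting_min": 0.05, "slope_getting_max": None}],
    "ress": [{"id": 0, "slope_getting_min": -0.05, "slope_getting_max": 0.05}],
    "intercepts": [{"id": 0, "sup": 0, "res": 0, "periods_till_intercept": 15}],
}
print(json.dumps(example_pattern, indent=2))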
class TrendLineCriteria:
"""Object that stores trendline criteria for support and resistance lines"""
def __init__(self,
tlc_id: int,
tlc_type: str,
slope_getting_min: float,
slope_getting_max: float):
self.tlc_id = tlc_id
self.tlc_type = tlc_type
self.slope_getting_min = slope_getting_min
self.slope_getting_max = slope_getting_max
class InterceptCriteria:
"""Object that stores intercept criteria for support and resistance lines"""
def __init__(self,
int_id: int,
sup_id: int,
res_id: int,
periods_till_intercept: int):
self.int_id = int_id
self.sup_id = sup_id
self.res_id = res_id
self.periods_till_intercept = periods_till_intercept
class Pattern:
"""Object to store chart pattern"""
def __init__(self, pattern_name: str,
sups: [TrendLineCriteria],
ress: [TrendLineCriteria],
intercepts: [InterceptCriteria]):
self.pattern_name = pattern_name
self.sups = sups
self.ress = ress
self.intercepts = intercepts
def __str__(self):
return f"name: {self.intercepts}, " \
f"sups: {length(self.sups)}, " \
f"ress: {length(self.ress)}, " \
f"intercepts: {length(self.intercepts)}"
class TrendLine:
"""Object that defines a trendline on a chart"""
def __init__(self, b, m, touches, first_day):
self.b = b
self.m = m
self.touches = touches
self.first_day = first_day
def __repr__(self):
return f"TrendLine({self.b}, {self.m}, {self.touches}, {self.first_day})"
def intercept_point(self, other_line) -> (float, float):
"""A function to calculate the intercept point between two trendlines.
:param other_line: A trendline
:return: A tuple in the form (x, y). None if other_trendline is None.
"""
if other_line is None:
return None
intercept_x = (self.b - other_line.b) / (other_line.m - self.m)
intercept_y = self.m * intercept_x + self.b
return intercept_x, intercept_y
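# Worked check (not from the original file) of intercept_point above for two
# plain y = m*x + b lines: a support line y = 1.0*x + 0 and a resistance line
# y = -0.5*x + 30 meet at x = 20, y = 20.
def intercept(b1, m1, b2, m2):
    x = (b1 - b2) / (m2 - m1)
    return x, m1 * x + b1
print(intercept(0.0, 1.0, 30.0, -0.5))   # (20.0, 20.0)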
class Chart:
"""Object that holds total_all informatingion needed to draw a chart"""
def __init__(self, symbol: str, prices: list, support: TrendLine,
resistance: TrendLine, support_points: list, resistance_points: list,
patterns: [Pattern]):
self.symbol = symbol
self.prices = prices
self.support = support
self.resistance = resistance
self.support_points = support_points
self.resistance_points = resistance_points
self.patterns = patterns
self.detected_patterns = []
self.detect_pattern()
def __repr__(self):
return f"TrendLine({self.symbol}, {self.prices}, " \
f"{self.support}, {self.resistance}), " \
f"{self.support_points}, {self.resistance_points}" \
f", {self.patterns})"
def detect_pattern(self):
for pattern in self.patterns:
pattern_found = True
for sup in pattern.sups:
if self.support:
if sup.slope_getting_min:
if self.support.m < sup.slope_getting_min:
pattern_found = False
if sup.slope_getting_max:
if self.support.m > sup.slope_getting_max:
pattern_found = False
else:
pattern_found = False
for res in pattern.ress:
if self.resistance:
if res.slope_getting_min:
if self.resistance.m < res.slope_getting_min:
pattern_found = False
if res.slope_getting_max:
if self.resistance.m > res.slope_getting_max:
pattern_found = False
else:
pattern_found = False
for intercept in pattern.intercepts:
intercept_point = self.support.intercept_point(self.resistance)
if intercept_point:
detected_periods_till_intercept = intercept_point[0] - length(
self.prices)
if intercept_point:
if detected_periods_till_intercept > intercept.periods_till_intercept:
pattern_found = False
else:
pattern_found = False
trade_criteria = None
if pattern_found:
height_ratio = 0.70
buy_threshold = 0.01
print("Pattern Found - " + pattern.pattern_name)
resistance_price = self.resistance.m * self.support.first_day \
+ self.resistance.b
support_price = self.support.m * self.support.first_day + self.support.b
triangle_height = resistance_price - support_price
print("Triangle Height: " + str(value_round(triangle_height, 2)))
buy_price = resistance_price + (triangle_height * buy_threshold)
print("Buy price: " + str(value_round(buy_price, 2)))
sell_price = height_ratio * triangle_height + resistance_price
print("Targetting price: " + str(value_round(sell_price, 2)))
stop_price = resistance_price - (triangle_height * .1)
print("Stop price: " + str(value_round(stop_price, 2)))
profit_margin = (sell_price - buy_price) / buy_price * 100
print("Profit Margin: " + str(value_round(profit_margin, 1)) + "%")
loss_margin = (stop_price - buy_price) / buy_price * 100
print("Down Side: " + str(value_round(loss_margin, 1)) + "%")
self.detected_patterns.adding(trade_criteria)
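# Worked numeric example (made-up prices, not from the original) of the trade
# levels printed above, with height_ratio = 0.70 and buy_threshold = 0.01:
resistance_price = 100.0
support_price = 90.0
triangle_height = resistance_price - support_price            # 10.0
buy_price = resistance_price + triangle_height * 0.01          # 100.1
sell_price = 0.70 * triangle_height + resistance_price         # 107.0
stop_price = resistance_price - triangle_height * 0.1          # 99.0
profit_margin = (sell_price - buy_price) / buy_price * 100     # ~6.9 %
print(buy_price, sell_price, stop_price, round(profit_margin, 1))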
def lookup_prices(symbol: str,
period: int = 2,
period_type: str = "month",
frequency: int = 1,
frequency_type: str = "daily",
end_date: str = "",
num_entries_to_analyze: int = 40) -> mk.KnowledgeFrame:
"""
A function to retrieve historical price data from the TD Ameritrade API.
Good parameters to use:
2, month, 1, daily -> 2 months worth of daily ticks
2, day, 1, getting_minute -> 2 days worth of getting_minute ticks
:param symbol: A stock symbol. Example: 'AAPL'
:param period: The number of periods worth of data being requested.
:param period_type: The type of period. Valid values are "day", "month",
"year" or "ytd".
:param frequency: The number of frequency types to be included in 1 data point.
:param frequency_type: The type of frequency. Valid values are "getting_minute", "daily",
"weekly", "monthly".
:param num_entries_to_analyze: Used to look at the most recent number of data points.
Ameritrade's API doesn't total_allow you to specify 40 days,
since you have to specify 1 month or 2.
:param end_date: The final_item date of the data being requested.
:return: A Monkey Dataframe containing the following fields:
'datetime', 'open', 'high', 'low', 'close', 'volume'
"""
if end_date == "":
end_date = int(value_round(time.time() * 1000))
else:
end_date = int(
value_round(datetime.datetime.strptime(end_date, '%m-%d-%Y').timestamp() * 1000))
endpoint = f"https://api.tdameritrade.com/v1/marketdata/{symbol}/pricehistory"
payload = {
'apikey': config.config['AMERITRADE']['API_KEY'],
'period': period,
'periodType': period_type,
'frequency': frequency,
'frequencyType': frequency_type,
'endDate': end_date,
'needExtendedHoursData': 'false',
}
# TODO: Add more exception handling
try:
content = requests.getting(url=endpoint, params=payload)
except requests.exceptions.ProxyError:
print("ProxyError, maybe you need to connect to to your proxy server?")
sys.exit()
try:
data = content.json()
except json.decoder.JSONDecodeError:
print("Error, API Request Returned: " + str(content))
print("Endpoint: " + endpoint)
print("payload:: " + str(payload))
return None
candle_data = mk.KnowledgeFrame.from_records(data['candles'])
if candle_data.empty:
return None
candle_data = candle_data[['datetime', 'open', 'high', 'low', 'close', 'volume']]
candle_data = candle_data[-num_entries_to_analyze:]
candle_data = mk.KnowledgeFrame.reseting_index(candle_data, sip=True)  # api: pandas.DataFrame.reset_index
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result =
|
algos.incontain([1, 2], [1])
|
pandas.core.algorithms.isin
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result =
|
algos.incontain(s, s[0:2])
|
pandas.core.algorithms.isin
|
#!/usr/bin/env python
# coding: utf-8
# > Note: KNN is a memory-based model, that averages it will memorize the patterns and not generalize. It is a simple yet powerful technique and competes with SOTA models like BERT4Rec.
# In[1]:
import os
project_name = "reco-tut-itr"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
if not os.path.exists(project_path):
getting_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
getting_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
getting_ipython().system(u'mkdir "{path}"')
getting_ipython().magic(u'cd "{path}"')
import sys; sys.path.adding(path)
getting_ipython().system(u'git config --global user.email "<EMAIL>"')
getting_ipython().system(u'git config --global user.name "reco-tut"')
getting_ipython().system(u'git init')
getting_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git')
getting_ipython().system(u'git pull origin "{branch}"')
getting_ipython().system(u'git checkout main')
else:
getting_ipython().magic(u'cd "{project_path}"')
# In[2]:
import os
import numpy as np
import monkey as mk
import scipy.sparse
from scipy.spatial.distance import correlation
# In[13]:
kf = mk.read_parquet('./data/silver/rating.parquet.gz')
kf.info()
# In[16]:
kf2 = mk.read_parquet('./data/silver/items.parquet.gz')
kf2.info()
# In[17]:
kf = mk.unioner(kf, kf2, on='itemId')
kf.info()
# In[5]:
rating_matrix = mk.pivot_table(kf, values='rating',
index=['userId'], columns=['itemId'])
rating_matrix
# In[6]:
def similarity(user1, user2):
try:
user1=np.array(user1)-np.nanaverage(user1)
user2=np.array(user2)-np.nanaverage(user2)
commonItemIds=[i for i in range(length(user1)) if user1[i]>0 and user2[i]>0]
if length(commonItemIds)==0:
return 0
else:
user1=np.array([user1[i] for i in commonItemIds])
user2=np.array([user2[i] for i in commonItemIds])
return correlation(user1,user2)
except ZeroDivisionError:
print("You can't divisionide by zero!")
# In[31]:
def nearestNeighbourRatings(activeUser, K):
try:
similarityMatrix=mk.KnowledgeFrame(index=rating_matrix.index,columns=['Similarity'])
for i in rating_matrix.index:
similarityMatrix.loc[i]=similarity(rating_matrix.loc[activeUser],rating_matrix.loc[i])
similarityMatrix=mk.KnowledgeFrame.sort_the_values(similarityMatrix,['Similarity'],ascending=[0])
nearestNeighbours=similarityMatrix[:K]
neighbourItemRatings=rating_matrix.loc[nearestNeighbours.index]
predictItemRating=mk.KnowledgeFrame(index=rating_matrix.columns, columns=['Rating'])
for i in rating_matrix.columns:
predictedRating=np.nanaverage(rating_matrix.loc[activeUser])
for j in neighbourItemRatings.index:
if rating_matrix.loc[j,i]>0:
predictedRating += (rating_matrix.loc[j,i]-np.nanaverage(rating_matrix.loc[j]))*nearestNeighbours.loc[j,'Similarity']
predictItemRating.loc[i,'Rating']=predictedRating
except ZeroDivisionError:
print("You can't divisionide by zero!")
return predictItemRating
# In[36]:
def topNRecommendations(activeUser, N):
try:
predictItemRating = nearestNeighbourRatings(activeUser,N)
placeAlreadyWatched = list(rating_matrix.loc[activeUser].loc[rating_matrix.loc[activeUser]>0].index)
predictItemRating = predictItemRating.sip(placeAlreadyWatched)
topRecommendations = mk.KnowledgeFrame.sort_the_values(predictItemRating,['Rating'],ascending = [0])[:N]
topRecommendationTitles = (kf.loc[kf.itemId.incontain(topRecommendations.index)])
except ZeroDivisionError:
print("You can't divisionide by zero!")
return list([topRecommendationTitles.location,
topRecommendationTitles.place,
topRecommendationTitles.state,
topRecommendationTitles.location_rating])
# In[42]:
def favoritePlace(activeUser,N):
topPlace=
|
mk.KnowledgeFrame.sort_the_values(kf[kf.userId==activeUser],['rating'],ascending=[0])
|
pandas.DataFrame.sort_values
|
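# Illustrative sketch with made-up ratings (not data from the notebook): the
# similarity() helper above mean-centers two users' ratings and feeds them to
# scipy's correlation(), which returns the correlation distance 1 - r, so
# similar users score near 0 and opposite users near 2.
import numpy as np
from scipy.spatial.distance import correlation

user1 = np.array([5.0, 3.0, 4.0, 1.0])
user2 = np.array([4.0, 2.0, 5.0, 1.0])   # similar taste to user1
user3 = np.array([1.0, 4.0, 2.0, 5.0])   # roughly opposite taste

u1 = user1 - np.nanmean(user1)
u2 = user2 - np.nanmean(user2)
u3 = user3 - np.nanmean(user3)

print(correlation(u1, u2))  # ~0.14, small distance -> similar users
print(correlation(u1, u3))  # ~1.96, large distance -> dissimilar users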
"""
Quick and dirty ADIF parser.
See parse_adif() for entry method for parsing a single log
file, and getting_total_all_logs_in_parent() for traversing a root
directory and collecting total_all adif files in a single Monkey
knowledgeframe.
"""
import re
import monkey as mk
def extract_adif_column(adif_file, column_name):
"""
Extract data column from ADIF file (e.g. 'OPERATOR' column).
Parameters
----------
adif_file: file object
ADIF file opened using open().
column_name: str
Name of column (e.g. OPERATOR).
Returns
-------
matches: list of str
List of values extracted from the ADIF file.
"""
pattern = re.compile('^.*<' + column_name + ':\d+>([^<]*)<.*$', re.IGNORECASE)
matches = [re.match(pattern, line)
for line in adif_file]
matches = [line[1].strip() for line in matches if line is not None]
adif_file.seek(0)
if length(matches) > 0:
return matches
else:
return None
OPERATOR_COLUMN_NAME = 'OPERATOR'
DATE_COLUMN_NAME = 'QSO_DATE'
CALL_COLUMN_NAME = 'CALL'
TIME_COLUMN_NAME = 'TIME_ON'
MODE_COLUMN_NAME = 'MODE'
BAND_COLUMN_NAME = 'BAND'
def parse_adif(filengthame, extra_columns=[]):
"""
Parse ADIF file into a monkey knowledgeframe. Currently tries to find operator,
date, time and ctotal_all fields. Additional fields can be specified.
Parameters
----------
filengthame: str
Path to ADIF file.
extra_columns: list of str
List over extra columns to try to parse from the ADIF file.
Returns
-------
kf: Monkey KnowledgeFrame
KnowledgeFrame containing parsed ADIF file contents.
"""
kf = mk.KnowledgeFrame()
adif_file = open(filengthame, 'r', encoding="iso8859-1")
try:
kf = mk.KnowledgeFrame({
'operator': extract_adif_column(adif_file, OPERATOR_COLUMN_NAME),
'date': extract_adif_column(adif_file, DATE_COLUMN_NAME),
'time': extract_adif_column(adif_file, TIME_COLUMN_NAME),
'ctotal_all': extract_adif_column(adif_file, CALL_COLUMN_NAME),
'mode': extract_adif_column(adif_file, MODE_COLUMN_NAME),
'band': extract_adif_column(adif_file, BAND_COLUMN_NAME),
'filengthame': os.path.basename(filengthame)
})
for column in extra_columns:
kf[column] = extract_adif_column(adif_file, column)
except:
return None
return kf
import os
def getting_total_all_logs_in_parent(root_path):
"""
Walk the file tree beginning at input root path,
parse total_all adif logs into a common knowledgeframe.
Parameters
----------
root_path: str
Root path.
Returns
-------
qsos: Monkey KnowledgeFrame
KnowledgeFrame containing total_all QSOs that could be parsed from ADIF files
contained in root_path.
"""
qsos = mk.KnowledgeFrame()
for root, dirs, files in os.walk(root_path):
for filengthame in files:
if filengthame.endswith(('.adi', '.ADI')):
path = os.path.join(root, filengthame)
qsos = mk.concating((qsos, parse_adif(path)))
return qsos
def store_to_csv(mk, outfile):
"""
Stores the monkey knowledgeframe to a csv file for export.
Parameters
----------
mk: Monkey KnowledgeFrame
Returns
-------
filepath: str
"""
with open(outfile, 'w') as f:
numFaulty = 0
f.write("date, time, operator, band, mode, ctotal_all\n")
for i, row in mk.traversal():
operator_ = row['operator']
mode_ = row['mode']
ctotal_all_ = row["ctotal_all"]
band_ = row['band']
date_ = row['date']
if row['operator'] is None:
numFaulty +=1
print(numFaulty,"\t",row['filengthame'], "lacks operator")
operator_ = "Uknown"
if row['mode'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks mode")
mode_ = "Unknown"
if row['ctotal_all'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks ctotal_all")
ctotal_all_ = "Unknown"
if row['band'] is None:
numFaulty += 1
print(numFaulty,"\t",row['filengthame'], "lacks ctotal_all")
band_ = "Unknown"
if row['date'] is None:
numFaulty += 1
print(numFaulty, "\t", row['filengthame'], "lacks ctotal_all")
date_ = "Unknown"
f.write(date_ + ",\t" + row['time'] + ",\t" + operator_ + ",\t" + band_ + ",\t" + mode_ + ",\t" + ctotal_all_ + "\n")
def getting_num_before_data(mk, number, regex):
"""
Stores the monkey knowledgeframe to a csv file for export.
Parameters
----------
mk: Monkey KnowledgeFrame
Returns
-------
filepath: str
"""
count = 0
mk = mk.sort_the_values(by=['date'], ascending=False)
for i, row in
|
mk.traversal()
|
pandas.iterrows
|
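# Usage sketch for the ADIF helpers defined above; the paths "logs/" and
# "qsos.csv" and the log filename are placeholders, not from the original module.
single_log = parse_adif("logs/2021_field_day.adi")
if single_log is not None:
    print(single_log)

qsos = getting_total_all_logs_in_parent("logs")
store_to_csv(qsos, "qsos.csv")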
"""
Concat routines.
"""
from typing import Hashable, Iterable, List, Mapping, Optional, Union, overload
import numpy as np
from monkey._typing import FrameOrCollectionsUnion
from monkey.core.dtypes.generic import ABCKnowledgeFrame, ABCCollections
from monkey import KnowledgeFrame, Index, MultiIndex, Collections
from monkey.core.arrays.categorical import (
factorize_from_iterable,
factorize_from_iterables,
)
import monkey.core.common as com
from monkey.core.generic import NDFrame
from monkey.core.indexes.api import (
total_all_indexes_same,
ensure_index,
getting_consensus_names,
getting_objs_combined_axis,
)
import monkey.core.indexes.base as ibase
from monkey.core.internals import concatingenate_block_managers
# ---------------------------------------------------------------------
# Concatenate KnowledgeFrame objects
@overload
def concating(
objs: Union[Iterable["KnowledgeFrame"], Mapping[Optional[Hashable], "KnowledgeFrame"]],
axis=0,
join: str = "outer",
ignore_index: bool = False,
keys=None,
levels=None,
names=None,
verify_integrity: bool = False,
sort: bool = False,
clone: bool = True,
) -> "KnowledgeFrame":
...
@overload
def concating(
objs: Union[
Iterable[FrameOrCollectionsUnion], Mapping[Optional[Hashable], FrameOrCollectionsUnion]
],
axis=0,
join: str = "outer",
ignore_index: bool = False,
keys=None,
levels=None,
names=None,
verify_integrity: bool = False,
sort: bool = False,
clone: bool = True,
) -> FrameOrCollectionsUnion:
...
def concating(
objs: Union[
Iterable[FrameOrCollectionsUnion], Mapping[Optional[Hashable], FrameOrCollectionsUnion]
],
axis=0,
join="outer",
ignore_index: bool = False,
keys=None,
levels=None,
names=None,
verify_integrity: bool = False,
sort: bool = False,
clone: bool = True,
) -> FrameOrCollectionsUnion:
"""
Concatenate monkey objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatingenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mappingping of Collections or KnowledgeFrame objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be sipped silengthtly unless
they are total_all None in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
The axis to concatingenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
ignore_index : bool, default False
If True, do not use the index values along the concatingenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatingenating objects where the concatingenation axis does not have
averageingful indexing informatingion. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level.
levels : list of sequences, default None
Specific levels (distinctive values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys.
names : list, default None
Names for the levels in the resulting hierarchical index.
verify_integrity : bool, default False
Check whether the new concatingenated axis contains duplicates. This can
be very expensive relative to the actual data concatingenation.
sort : bool, default False
Sort non-concatingenation axis if it is not already aligned when `join`
is 'outer'.
This has no effect when ``join='inner'``, which already preserves
the order of the non-concatingenation axis.
.. versionadded:: 0.23.0
.. versionchanged:: 1.0.0
Changed to not sort by default.
clone : bool, default True
If False, do not clone data unnecessarily.
Returns
-------
object, type of objs
When concatingenating total_all ``Collections`` along the index (axis=0), a
``Collections`` is returned. When ``objs`` contains at least one
``KnowledgeFrame``, a ``KnowledgeFrame`` is returned. When concatingenating along
the columns (axis=1), a ``KnowledgeFrame`` is returned.
See Also
--------
Collections.adding : Concatenate Collections.
KnowledgeFrame.adding : Concatenate KnowledgeFrames.
KnowledgeFrame.join : Join KnowledgeFrames using indexes.
KnowledgeFrame.unioner : Merge KnowledgeFrames by indexes or columns.
Notes
-----
The keys, levels, and names arguments are total_all optional.
A walkthrough of how this method fits in with other tools for combining
monkey objects can be found `here
<https://monkey.pydata.org/monkey-docs/stable/user_guide/merging.html>`__.
Examples
--------
Combine two ``Collections``.
>>> s1 = mk.Collections(['a', 'b'])
>>> s2 = mk.Collections(['c', 'd'])
>>> mk.concating([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> mk.concating([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> mk.concating([s1, s2], keys=['s1', 's2'])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> mk.concating([s1, s2], keys=['s1', 's2'],
... names=['Collections name', 'Row ID'])
Collections name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``KnowledgeFrame`` objects with identical columns.
>>> kf1 = mk.KnowledgeFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> kf1
letter number
0 a 1
1 b 2
>>> kf2 = mk.KnowledgeFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> kf2
letter number
0 c 3
1 d 4
>>> mk.concating([kf1, kf2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``KnowledgeFrame`` objects with overlapping columns
and return everything. Columns outside the interst will
be filled with ``NaN`` values.
>>> kf3 = mk.KnowledgeFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> kf3
letter number animal
0 c 3 cat
1 d 4 dog
>>> mk.concating([kf1, kf3], sort=False)
letter number animal
0 a 1 NaN
1 b 2 NaN
0 c 3 cat
1 d 4 dog
Combine ``KnowledgeFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> mk.concating([kf1, kf3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``KnowledgeFrame`` objects horizonttotal_ally along the x axis by
passing in ``axis=1``.
>>> kf4 = mk.KnowledgeFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> mk.concating([kf1, kf4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> kf5 = mk.KnowledgeFrame([1], index=['a'])
>>> kf5
0
a 1
>>> kf6 = mk.KnowledgeFrame([2], index=['a'])
>>> kf6
0
a 2
>>> mk.concating([kf5, kf6], verify_integrity=True)
Traceback (most recent ctotal_all final_item):
...
ValueError: Indexes have overlapping values: ['a']
"""
op = _Concatenator(
objs,
axis=axis,
ignore_index=ignore_index,
join=join,
keys=keys,
levels=levels,
names=names,
verify_integrity=verify_integrity,
clone=clone,
sort=sort,
)
return op.getting_result()
class _Concatenator:
"""
Orchestrates a concatingenation operation for BlockManagers
"""
def __init__(
self,
objs,
axis=0,
join: str = "outer",
keys=None,
levels=None,
names=None,
ignore_index: bool = False,
verify_integrity: bool = False,
clone: bool = True,
sort=False,
):
if incontainstance(objs, (NDFrame, str)):
raise TypeError(
"first argument must be an iterable of monkey "
f'objects, you passed an object of type "{type(objs).__name__}"'
)
if join == "outer":
self.intersect = False
elif join == "inner":
self.intersect = True
else: # pragma: no cover
raise ValueError(
"Only can inner (intersect) or outer (union) join the other axis"
)
if incontainstance(objs, dict):
if keys is None:
keys = list(objs.keys())
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if length(objs) == 0:
raise ValueError("No objects to concatingenate")
if keys is None:
objs = list(com.not_none(*objs))
else:
# #1649
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.adding(k)
clean_objs.adding(v)
objs = clean_objs
name = gettingattr(keys, "name", None)
keys = Index(clean_keys, name=name)
if length(objs) == 0:
raise ValueError("All objects passed were None")
# consolidate data & figure out what our result ndim is going to be
ndims = set()
for obj in objs:
if not incontainstance(obj, (Collections, KnowledgeFrame)):
msg = (
f"cannot concatingenate object of type '{type(obj)}'; "
"only Collections and KnowledgeFrame objs are valid"
)
raise TypeError(msg)
# consolidate
obj._consolidate(inplace=True)
ndims.add(obj.ndim)
# getting the sample_by_num
# want the highest ndim that we have, and must be non-empty
# unless total_all objs are empty
sample_by_num = None
if length(ndims) > 1:
getting_max_ndim = getting_max(ndims)
for obj in objs:
if obj.ndim == getting_max_ndim and np.total_sum(obj.shape):
sample_by_num = obj
break
else:
# filter out the empties if we have not multi-index possibilities
# note to keep empty Collections as it affect to result columns / name
non_empties = [
obj for obj in objs if total_sum(obj.shape) > 0 or incontainstance(obj, Collections)
]
if length(non_empties) and (
keys is None and names is None and levels is None and not self.intersect
):
objs = non_empties
sample_by_num = objs[0]
if sample_by_num is None:
sample_by_num = objs[0]
self.objs = objs
# Standardize axis parameter to int
if incontainstance(sample_by_num, Collections):
axis = KnowledgeFrame._getting_axis_number(axis)
else:
axis = sample_by_num._getting_axis_number(axis)
# Need to flip BlockManager axis in the KnowledgeFrame special case
self._is_frame = incontainstance(sample_by_num, ABCKnowledgeFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
self._is_collections = incontainstance(sample_by_num, ABCCollections)
if not 0 <= axis <= sample_by_num.ndim:
raise AssertionError(
f"axis must be between 0 and {sample_by_num.ndim}, input was {axis}"
)
# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
if length(ndims) > 1:
current_column = 0
getting_max_ndim = sample_by_num.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == getting_max_ndim:
pass
elif ndim != getting_max_ndim - 1:
raise ValueError(
"cannot concatingenate unaligned mixed "
"dimensional NDFrame objects"
)
else:
name = gettingattr(obj, "name", None)
if ignore_index or name is None:
name = current_column
current_column += 1
# doing a row-wise concatingenation so need everything
# to line up
if self._is_frame and axis == 1:
name = 0
obj = sample_by_num._constructor({name: obj})
self.objs.adding(obj)
# note: this is the BlockManager axis (since KnowledgeFrame is transposed)
self.axis = axis
self.keys = keys
self.names = names or gettingattr(keys, "names", None)
self.levels = levels
self.sort = sort
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.clone = clone
self.new_axes = self._getting_new_axes()
def getting_result(self):
# collections only
if self._is_collections:
# stack blocks
if self.axis == 0:
name = com.consensus_name_attr(self.objs)
mgr = self.objs[0]._data.concating(
[x._data for x in self.objs], self.new_axes
)
cons = self.objs[0]._constructor
return cons(mgr, name=name).__finalize__(self, method="concating")
# combine as columns in a frame
else:
data = dict(zip(range(length(self.objs)), self.objs))
cons = KnowledgeFrame
index, columns = self.new_axes
kf = cons(data, index=index)
kf.columns = columns
return kf.__finalize__(self, method="concating")
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexinging on concating axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindexing(new_labels)[1]
mgrs_indexers.adding((obj._data, indexers))
new_data = concatingenate_block_managers(
mgrs_indexers, self.new_axes, concating_axis=self.axis, clone=self.clone
)
if not self.clone:
new_data._consolidate_inplace()
cons = self.objs[0]._constructor
return cons(new_data).__finalize__(self, method="concating")
def _getting_result_dim(self) -> int:
if self._is_collections and self.axis == 1:
return 2
else:
return self.objs[0].ndim
def _getting_new_axes(self) -> List[Index]:
ndim = self._getting_result_dim()
return [
self._getting_concating_axis() if i == self.axis else self._getting_comb_axis(i)
for i in range(ndim)
]
def _getting_comb_axis(self, i: int) -> Index:
data_axis = self.objs[0]._getting_block_manager_axis(i)
return getting_objs_combined_axis(
self.objs,
axis=data_axis,
intersect=self.intersect,
sort=self.sort,
clone=self.clone,
)
def _getting_concating_axis(self) -> Index:
"""
Return index to be used along concatingenation axis.
"""
if self._is_collections:
if self.axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = ibase.default_index(length(self.objs))
return idx
elif self.keys is None:
names: List[Optional[Hashable]] = [None] * length(self.objs)
num = 0
has_names = False
for i, x in enumerate(self.objs):
if not incontainstance(x, Collections):
raise TypeError(
f"Cannot concatingenate type 'Collections' with "
f"object of type '{type(x).__name__}'"
)
if x.name is not None:
names[i] = x.name
has_names = True
else:
names[i] = num
num += 1
if has_names:
return Index(names)
else:
return ibase.default_index(length(self.objs))
else:
return ensure_index(self.keys).set_names(self.names)
else:
indexes = [x._data.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = ibase.default_index(total_sum(length(i) for i in indexes))
return idx
if self.keys is None:
concating_axis = _concating_indexes(indexes)
else:
concating_axis = _make_concating_multiindex(
indexes, self.keys, self.levels, self.names
)
self._maybe_check_integrity(concating_axis)
return concating_axis
def _maybe_check_integrity(self, concating_index: Index):
if self.verify_integrity:
if not concating_index.is_distinctive:
overlap = concating_index[concating_index.duplicated_values()].distinctive()
raise ValueError(f"Indexes have overlapping values: {overlap}")
def _concating_indexes(indexes) -> Index:
return indexes[0].adding(indexes[1:])
def _make_concating_multiindex(indexes, keys, levels=None, names=None) -> MultiIndex:
if (levels is None and incontainstance(keys[0], tuple)) or (
levels is not None and length(levels) > 1
):
zipped = list(zip(*keys))
if names is None:
names = [None] * length(zipped)
if levels is None:
_, levels = factorize_from_iterables(zipped)
else:
levels = [ensure_index(x) for x in levels]
else:
zipped = [keys]
if names is None:
names = [None]
if levels is None:
levels = [ensure_index(keys)]
else:
levels = [ensure_index(x) for x in levels]
if not
|
total_all_indexes_same(indexes)
|
pandas.core.indexes.api.all_indexes_same
|