complexity int64 1 56 | n_identifiers int64 1 114 | code stringlengths 19 12.7k | path stringlengths 8 134 | n_ast_nodes int64 12 2.35k | ast_errors stringlengths 0 4.01k | repo stringlengths 3 28 | documentation dict | n_words int64 2 866 | language stringclasses 1
value | vocab_size int64 2 323 | commit_id stringlengths 40 40 | file_name stringlengths 5 79 | id int64 243 338k | nloc int64 1 228 | token_counts int64 5 1.4k | fun_name stringlengths 1 77 | url stringlengths 31 60 | commit_message stringlengths 3 15.3k | n_whitespaces int64 1 3.23k | n_ast_errors int64 0 20 | d_id int64 74 121k | ast_levels int64 4 29 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1 | 2 | def minzoom(self):
return self["minzoom"]
| packages/python/plotly/plotly/graph_objs/layout/mapbox/_layer.py | 22 | plotly.py | {
"docstring": "\n Sets the minimum zoom level (mapbox.layer.minzoom). At zoom\n levels less than the minzoom, the layer will be hidden.\n\n The 'minzoom' property is a number and may be specified as:\n - An int or float in the interval [0, 24]\n\n Returns\n -------\n ... | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _layer.py | 232,037 | 2 | 11 | minzoom | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 63,481 | 7 | |
16 | 27 | def telescopic(L, R, limits):
(i, a, b) = limits
if L.is_Add or R.is_Add:
return None
# We want to solve(L.subs(i, i + m) + R, m)
# First we try a simple match since this does things that
# solve doesn't do, e.g. solve(f(k+m)-f(k), m) fails
k = Wild("k")
sol = (-R).match(L.sub... | sympy/concrete/summations.py | 374 | sympy | {
"docstring": "\n Tries to perform the summation using the telescopic property.\n\n Return None if not possible.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 14,
"vocab_size": 13
} | 189 | Python | 104 | f757f3daae6e11ea0cfb7dadc133274d8d74315f | summations.py | 196,771 | 27 | 242 | telescopic | https://github.com/sympy/sympy.git | Reordered imports 2 | 391 | 0 | 48,161 | 19 | |
1 | 2 | def startarrowsize(self):
return self["startarrowsize"]
| packages/python/plotly/plotly/graph_objs/layout/_annotation.py | 22 | plotly.py | {
"docstring": "\n Sets the size of the start annotation arrow head, relative to\n `arrowwidth`. A value of 1 (default) gives a head about 3x as\n wide as the line.\n\n The 'startarrowsize' property is a number and may be specified as:\n - An int or float in the interval [0.3, inf... | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _annotation.py | 230,902 | 2 | 11 | startarrowsize | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 62,575 | 7 | |
4 | 12 | def get_dependencies(self, candidate):
# type: (Candidate) -> list[Candidate]
r
# FIXME: If there's several galaxy servers set, there may be a
# FIXME: situation when the metadata of the same collection
# FIXME: differs. So how do we resolve this case? Priority?
# FIXME: ... | lib/ansible/galaxy/dependency_resolution/providers.py | 115 | ansible | {
"docstring": "Get direct dependencies of a candidate.\n\n :returns: A collection of requirements that `candidate` \\\n specifies as its dependencies.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 18,
"vocab_size": 17
} | 178 | Python | 125 | 8b2e6285650ec42ec4a19075a8567047e8304ba2 | providers.py | 266,879 | 13 | 60 | get_dependencies | https://github.com/ansible/ansible.git | galaxy - Clean up type hints and imports. | 364 | 0 | 78,638 | 11 | |
3 | 7 | def active_loop_name(self) -> Optional[Text]:
if not self.active_loop or self.active_loop.name == SHOULD_NOT_BE_SET:
return None
return self.active_loop.name
| rasa/shared/core/trackers.py | 54 | rasa | {
"docstring": "Get the name of the currently active loop.\n\n Returns: `None` if no active loop or the name of the currently active loop.\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 22,
"vocab_size": 13
} | 15 | Python | 13 | e798bf049f036a5865c14d4343ed8a833864aabe | trackers.py | 159,564 | 8 | 33 | active_loop_name | https://github.com/RasaHQ/rasa.git | convert TrackerActiveLoop to a dataclass | 47 | 0 | 38,336 | 9 | |
1 | 6 | def get_lr(self) -> List:
return [self.config.lr_disc, self.config.lr_gen]
| TTS/tts/models/vits.py | 36 | TTS | {
"docstring": "Set the initial learning rates for each optimizer.\n\n Returns:\n List: learning rates for each optimizer.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 15,
"vocab_size": 10
} | 7 | Python | 7 | 00c7600103ee34ac50506af88f1b34b713f849e7 | vits.py | 262,246 | 7 | 22 | get_lr | https://github.com/coqui-ai/TTS.git | Update Vits model API | 21 | 0 | 77,157 | 8 | |
6 | 9 | def _convert_args_to_cli(vargs):
args = ['cleanup']
for option in ('exclude_strings', 'remove_images'):
if vargs.get(option):
args.append('--{}={}'.format(option.replace('_', '-'), ' '.join(vargs.get(option))))
for option in ('file_pattern', 'image_prune', 'process_isolation_executa... | awx/main/tasks/receptor.py | 251 | awx | {
"docstring": "\n For the ansible-runner worker cleanup command\n converts the dictionary (parsed argparse variables) used for python interface\n into a string of CLI options, which has to be used on execution nodes.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 30,
"vocab_size": 28
} | 40 | Python | 31 | a4a3ba65d736045733cb49430d7076b73aec23bb | receptor.py | 80,333 | 11 | 141 | _convert_args_to_cli | https://github.com/ansible/awx.git | Refactored tasks.py to a package
--- Added 3 new sub-package : awx.main.tasks.system , awx.main.tasks.jobs , awx.main.tasks.receptor
--- Modified the functional tests and unit tests accordingly | 109 | 0 | 17,051 | 17 | |
1 | 11 | def get_normal_vector(self) -> np.ndarray:
p0, p1, p2 = self.tip.get_start_anchors()[:3]
return normalize(np.cross(p2 - p1, p1 - p0))
| manim/mobject/geometry/line.py | 69 | manim | {
"docstring": "Returns the normal of a vector.\n\n Examples\n --------\n ::\n\n >>> np.round(Arrow().get_normal_vector()) + 0. # add 0. to avoid negative 0 in output\n array([ 0., 0., -1.])\n ",
"language": "en",
"n_whitespaces": 77,
"n_words": 26,
"vocab_si... | 16 | Python | 14 | e040bcacd38378386749db18aeba575b93f4ebca | line.py | 189,683 | 12 | 43 | get_normal_vector | https://github.com/ManimCommunity/manim.git | Improved structure of the :mod:`.mobject` module (#2476)
* group graphing and update its references
* group text and update its references
* group opengl and update its references
* group three_d and update its references
* group geometry and update (most) references
* move some chaning.py + updater fil... | 37 | 0 | 46,164 | 10 | |
4 | 19 | def get_loan_wise_pledges(filters):
loan_wise_unpledges = {}
current_pledges = {}
conditions = ""
if filters.get("company"):
conditions = "AND company = %(company)s"
unpledges = frappe.db.sql(
.format(
conditions=conditions
),
filters,
as_dict=1,
)
for unpledge in unpledges:
loan_wise_unpledge... | erpnext/loan_management/report/loan_interest_report/loan_interest_report.py | 236 | erpnext | {
"docstring": "\n\t\tSELECT up.loan, u.loan_security, sum(u.qty) as qty\n\t\tFROM `tabLoan Security Unpledge` up, `tabUnpledge` u\n\t\tWHERE u.parent = up.name\n\t\tAND up.status = 'Approved'\n\t\t{conditions}\n\t\tGROUP BY up.loan, u.loan_security\n\t\n\t\tSELECT lp.loan, p.loan_security, sum(p.qty) as qty\n\t\tFRO... | 61 | Python | 41 | 494bd9ef78313436f0424b918f200dab8fc7c20b | loan_interest_report.py | 66,352 | 42 | 154 | get_loan_wise_pledges | https://github.com/frappe/erpnext.git | style: format code with black | 33 | 0 | 14,173 | 12 | |
6 | 5 | def _check_multi_class(multi_class, solver, n_classes):
if multi_class == "auto":
if solver in ("liblinear", "newton-cholesky"):
multi_class = "ovr"
elif n_classes > 2:
multi_class = "multinomial"
else:
multi_class = "ovr"
if multi_class == "multi... | sklearn/linear_model/_logistic.py | 118 | scikit-learn | {
"docstring": "Computes the multi class type, either \"multinomial\" or \"ovr\".\n\n For `n_classes` > 2 and a solver that supports it, returns \"multinomial\".\n For all other cases, in particular binary classification, return \"ovr\".\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 31,
"voc... | 49 | Python | 33 | bb080aa690364d84d11232c73dc8db2f0dde3578 | _logistic.py | 261,494 | 11 | 62 | _check_multi_class | https://github.com/scikit-learn/scikit-learn.git | ENH add newton-cholesky solver to LogisticRegression (#24767) | 122 | 0 | 76,838 | 12 | |
5 | 31 | def get_data(filters, mode_of_payments):
data = []
conditions = get_conditions(filters)
entry = frappe.db.sql(
% (conditions),
as_dict=1,
)
branch_wise_entries, gross_pay = prepare_data(entry)
branches = frappe.db.sql_list(
% (conditions)
)
total_row = {"total": 0, "branch": "Total"}
for bran... | erpnext/payroll/report/salary_payments_based_on_payment_mode/salary_payments_based_on_payment_mode.py | 448 | erpnext | {
"docstring": "\n\t\tselect branch, mode_of_payment, sum(net_pay) as net_pay, sum(gross_pay) as gross_pay\n\t\tfrom `tabSalary Slip` sal\n\t\twhere docstatus = 1 %s\n\t\tgroup by branch, mode_of_payment\n\t\t\n\t\tselect distinct branch from `tabSalary Slip` sal\n\t\twhere docstatus = 1 %s\n\t",
"language": "en",
... | 107 | Python | 71 | 494bd9ef78313436f0424b918f200dab8fc7c20b | salary_payments_based_on_payment_mode.py | 66,953 | 45 | 270 | get_data | https://github.com/frappe/erpnext.git | style: format code with black | 72 | 0 | 14,387 | 16 | |
1 | 2 | def attribute_rule(allowed_attrs):
| wagtail/core/whitelist.py | 13 | wagtail | {
"docstring": "\n Generator for functions that can be used as entries in Whitelister.element_rules.\n These functions accept a tag, and modify its attributes by looking each attribute\n up in the 'allowed_attrs' dict defined here:\n * if the lookup fails, drop the attribute\n * if the lookup returns a... | 2 | Python | 2 | d10f15e55806c6944827d801cd9c2d53f5da4186 | whitelist.py | 74,706 | 3 | 10 | attribute_rule | https://github.com/wagtail/wagtail.git | Reformat with black | 5 | 0 | 16,302 | 6 | |
2 | 11 | def get_region_to_control_producer(self) -> KafkaProducer:
if self._publisher is None:
config = settings.KAFKA_TOPICS.get(settings.KAFKA_REGION_TO_CONTROL)
self._publisher = KafkaProducer(
kafka_config.get_kafka_producer_cluster_options(config["cluster"])
... | src/sentry/region_to_control/producer.py | 73 | sentry | {
"docstring": "\n Creates, if necessary, an arroyo.KafkaProducer client configured for region to control communication and returns\n it, caching it for future calls. Installs an exit handler to close the worker thread processes.\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 30,
... | 16 | Python | 14 | fe07466a1449a5ae60526528ce7bf9399b59b47d | producer.py | 87,145 | 13 | 53 | get_region_to_control_producer | https://github.com/getsentry/sentry.git | chore(hybrid-cloud): Extract region to control silo into service abstraction (#40353)
1. Use the `silo_mode_delegator` to make the silo conditional sensitive
logic of region to control processing like other services that need to
be conditional based on deployment.
2. Leverage the lifecycle management offered by the... | 78 | 0 | 18,234 | 14 | |
2 | 6 | def dict_from_cookiejar(cj):
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
| .venv/lib/python3.8/site-packages/pip/_vendor/requests/utils.py | 45 | transferlearning | {
"docstring": "Returns a key/value dictionary from a CookieJar.\n\n :param cj: CookieJar object to extract cookies from.\n :rtype: dict\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 17,
"vocab_size": 16
} | 14 | Python | 12 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | utils.py | 63,636 | 5 | 27 | dict_from_cookiejar | https://github.com/jindongwang/transferlearning.git | upd; format | 33 | 0 | 13,432 | 10 | |
4 | 18 | def galois_group(T, max_tries=30, randomize=False):
r
from sympy.combinatorics.named_groups import CyclicGroup
gg = {
3: _galois_group_degree_3,
4: _galois_group_degree_4,
5: _galois_group_degree_5,
}
max_supported = max(gg.keys())
n = T.degree()
if n > max_supported:... | sympy/polys/numberfields/galoisgroups.py | 171 | sympy | {
"docstring": "\n Compute the Galois group for polynomials *T* up to degree 5.\n\n Parameters\n ==========\n\n T : Poly\n Irreducible, monic polynomial over :ref:`ZZ`, whose Galois group\n is to be determined.\n max_tries : int, default 30\n Make at most this many attempts in thos... | 62 | Python | 50 | d3c0fc825c4a80904a1fb9a2092137c3d9e0c3fe | galoisgroups.py | 195,681 | 52 | 109 | galois_group | https://github.com/sympy/sympy.git | Add a `galois_group()` function | 133 | 0 | 47,364 | 11 | |
2 | 12 | def seek(self, pos, whence=SEEK_SET):
if isinstance(pos, float):
raise TypeError('an integer is required')
self._checkClosed()
return os.lseek(self._fd, pos, whence)
| python3.10.4/Lib/_pyio.py | 69 | XX-Net | {
"docstring": "Move to new file position.\n\n Argument offset is a byte count. Optional argument whence defaults to\n SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values\n are SEEK_CUR or 1 (move relative to current position, positive or negative),\n and SEEK_E... | 17 | Python | 16 | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | _pyio.py | 219,892 | 5 | 43 | seek | https://github.com/XX-net/XX-Net.git | add python 3.10.4 for windows | 56 | 0 | 55,884 | 10 | |
8 | 19 | def deploy_dask_func(deployer, axis, f_to_deploy, f_args, f_kwargs, *args, **kwargs):
result = deployer(axis, f_to_deploy, f_args, f_kwargs, *args, **kwargs)
ip = get_ip()
if isinstance(result, pandas.DataFrame):
return result, len(result), len(result.columns), ip
elif all(isinstance(r, pan... | modin/core/execution/dask/implementations/pandas_on_dask/partitioning/virtual_partition.py | 192 | modin | {
"docstring": "\n Execute a function on an axis partition in a worker process.\n\n This is ALWAYS called on either ``PandasDataframeAxisPartition.deploy_axis_func``\n or ``PandasDataframeAxisPartition.deploy_func_between_two_axis_partitions``, which both\n serve to deploy another dataframe function on a ... | 61 | Python | 36 | d6d503ac7c3028d871c34d9e99e925ddb0746df6 | virtual_partition.py | 154,492 | 9 | 136 | deploy_dask_func | https://github.com/modin-project/modin.git | FIX-#4597: Refactor Partition handling of func, args, kwargs (#4715)
Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: Jonathan Shi <jhshi@ponder.io> | 100 | 0 | 36,015 | 14 | |
3 | 11 | def get_used_airflow_sources() -> Path:
current_sources = search_upwards_for_airflow_sources_root(Path.cwd())
if current_sources is None:
current_sources = get_installation_airflow_sources()
if current_sources is None:
warn_non_editable()
sys.exit(1)
return curre... | dev/breeze/src/airflow_breeze/utils/path_utils.py | 88 | @lru_cache(maxsize=None) | airflow | {
"docstring": "\n Retrieves the Root of used Airflow Sources which we operate on. Those are either Airflow sources found\n upwards in directory tree or sources where Breeze was installed from.\n :return: the Path for Airflow sources we use.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 36,... | 23 | Python | 15 | bca849b4586c7446438f959b62903da4b997b9ea | path_utils.py | 46,862 | 13 | 43 | get_used_airflow_sources | https://github.com/apache/airflow.git | Switch to `pipx` as the only installation Breeze2 method (#22740)
Switching Breeze2 to only use `pipx` for installation of Breeze2
due to problems it might cause for autocompletion if entrypoint
is not avaiable on PATH. | 70 | 1 | 9,023 | 11 |
1 | 2 | def require_spacy_model(model):
| tests/utils.py | 13 | datasets | {
"docstring": "\n Decorator marking a test that requires a spacy model.\n\n These tests are skipped when they aren't installed.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 17,
"vocab_size": 16
} | 2 | Python | 2 | 0d9c12ad5155c6d505e70813a07c0aecd7120405 | utils.py | 105,894 | 3 | 10 | require_spacy_model | https://github.com/huggingface/datasets.git | Make torch.Tensor and spacy models cacheable (#5191)
* Make torch.Tensor and spacy models cacheable
* Use newest models
* Address comments
* Small optim | 5 | 0 | 22,215 | 6 | |
6 | 28 | def tutte_polynomial(G):
r
import sympy
x = sympy.Symbol("x")
y = sympy.Symbol("y")
stack = deque()
stack.append(nx.MultiGraph(G))
polynomial = 0
while stack:
G = stack.pop()
bridges = set(nx.bridges(G))
e = None
for i in G.edges:
if (i[0], ... | networkx/algorithms/polynomials.py | 314 | networkx | {
"docstring": "Returns the Tutte polynomial of `G`\n \n This function computes the Tutte polynomial via an iterative version of\n the deletion-contraction algorithm.\n\n The Tutte polynomial `T_G(x, y)` is a fundamental graph polynomial invariant in\n two variables. It encodes a wide array of informat... | 78 | Python | 59 | f11068c0115ede0c7b631f771c10be7efd0b950b | polynomials.py | 176,426 | 142 | 195 | tutte_polynomial | https://github.com/networkx/networkx.git | Add Tutte polynomial (#5265)
Add a new polynomial module to algorithms for characteristic polynomials.
Adds the Tutte polynomial, which is computed and ultimate represented as a
sympy expression.
Co-authored-by: Dan Schult <dschult@colgate.edu>
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> | 275 | 0 | 41,889 | 15 | |
1 | 7 | def idxmax(self, **kwargs): # noqa: PR02
return DataFrameDefault.register(pandas.DataFrame.idxmax)(self, **kwargs)
| modin/core/storage_formats/base/query_compiler.py | 44 | modin | {
"docstring": "\n Get position of the first occurrence of the maximum for each row or column.\n\n Parameters\n ----------\n axis : {0, 1}\n skipna : bool\n **kwargs : dict\n Serves the compatibility purpose. Does not affect the result.\n\n Returns\n ... | 9 | Python | 9 | 57e29bc5d82348006c5170ef9ac0a9eedcd9acf9 | query_compiler.py | 153,822 | 2 | 26 | idxmax | https://github.com/modin-project/modin.git | REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514)
Co-authored-by: Rehan Sohail Durrani <rdurrani@berkeley.edu>
Signed-off-by: jeffreykennethli <jkli@ponder.io> | 24 | 0 | 35,637 | 10 | |
1 | 5 | def get_mapped_pr_records():
return frappe._dict(
frappe.db.sql(
)
)
| erpnext/buying/report/procurement_tracker/procurement_tracker.py | 32 | erpnext | {
"docstring": "\n\t\tSELECT\n\t\t\tpr_item.purchase_order_item,\n\t\t\tpr.posting_date\n\t\tFROM `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item\n\t\tWHERE\n\t\t\tpr.docstatus=1\n\t\t\tAND pr.name=pr_item.parent\n\t\t\tAND pr_item.purchase_order_item IS NOT NULL\n\t\t\tAND pr.status not in (\"Closed\",... | 7 | Python | 6 | 494bd9ef78313436f0424b918f200dab8fc7c20b | procurement_tracker.py | 65,569 | 16 | 18 | get_mapped_pr_records | https://github.com/frappe/erpnext.git | style: format code with black | 2 | 0 | 13,945 | 10 | |
1 | 15 | def calc_mean_std(feat, eps=1e-5):
size = feat.size()
assert len(size) == 4, 'The input feature should be 4D tensor.'
b, c = size[:2]
feat_var = feat.view(b, c, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(b, c, 1, 1)
feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1)
... | modules/codeformer/codeformer_arch.py | 168 | stable-diffusion-webui | {
"docstring": "Calculate mean and std for adaptive_instance_normalization.\n\n Args:\n feat (Tensor): 4D tensor.\n eps (float): A small value added to the variance to avoid\n divide-by-zero. Default: 1e-5.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 25,
"vocab_size":... | 45 | Python | 34 | 6a9b33c848281cb02f38764e4f91ef767f5e3edd | codeformer_arch.py | 152,171 | 8 | 112 | calc_mean_std | https://github.com/AUTOMATIC1111/stable-diffusion-webui.git | codeformer support | 69 | 0 | 35,175 | 13 | |
4 | 17 | def _lg_directed(G, create_using=None):
L = nx.empty_graph(0, create_using, default=G.__class__)
# Create a graph specific edge function.
get_edges = partial(G.edges, keys=True) if G.is_multigraph() else G.edges
for from_node in get_edges():
# from_node is: (u,v) or (u,v,key)
L.ad... | networkx/generators/line.py | 128 | networkx | {
"docstring": "Returns the line graph L of the (multi)digraph G.\n\n Edges in G appear as nodes in L, represented as tuples of the form (u,v)\n or (u,v,key) if G is a multidigraph. A node in L corresponding to the edge\n (u,v) is connected to every node corresponding to an edge (v,w).\n\n Parameters\n ... | 42 | Python | 36 | e308b80f17264b89acf8defe185c71c6656d5105 | line.py | 176,348 | 8 | 82 | _lg_directed | https://github.com/networkx/networkx.git | MAINT: Remove unnecessary helper functions, use inbuilt methods for line graph generator (#5327)
* MAINT: Remove unnecessary helper functions, use inbuilt methods
* Use multigraph key to create node, add tests for multi(di)graphs | 92 | 0 | 41,851 | 11 | |
3 | 5 | def id_for_label(self, id_, index="0"):
if id_ and self.add_id_index:
id_ = "%s_%s" % (id_, index)
return id_
| django/forms/widgets.py | 51 | django | {
"docstring": "\n Use an incremented id for each option where the main widget\n references the zero index.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 14
} | 16 | Python | 14 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | widgets.py | 206,039 | 4 | 30 | id_for_label | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 48 | 0 | 51,334 | 10 | |
14 | 23 | def validate_snuba() -> None:
if not settings.DEBUG:
return
has_all_snuba_required_backends = (
settings.SENTRY_SEARCH
in (
"sentry.search.snuba.EventsDatasetSnubaSearchBackend",
"sentry.utils.services.ServiceDelegator",
)
and settings.SENTRY... | src/sentry/runner/initializer.py | 333 | sentry | {
"docstring": "\n Make sure everything related to Snuba is in sync.\n\n This covers a few cases:\n\n * When you have features related to Snuba, you must also\n have Snuba fully configured correctly to continue.\n * If you have Snuba specific search/tagstore/tsdb backends,\n you must also have a... | 133 | Python | 77 | 2f6716c264bbd916c2773edb8b75cf2e9b26c51b | initializer.py | 85,629 | 98 | 194 | validate_snuba | https://github.com/getsentry/sentry.git | ref: type devserver startup (#38598)
I noticed `sentry devserver 127.0.0.1` produced this error and decided
to prevent it using typing:
```console
$ sentry devserver 127.0.0.1
INFO:The Sentry runner will report development issues to Sentry.io. Use SENTRY_DEVENV_NO_REPORT to avoid reporting issues.
16:33:40 [WAR... | 580 | 0 | 18,018 | 13 | |
2 | 8 | def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
| src/datasets/features/image.py | 86 | datasets | {
"docstring": "If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary.",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 13
} | 23 | Python | 23 | 3804442bb7cfcb9d52044d92688115cfdc69c2da | image.py | 104,578 | 11 | 48 | flatten | https://github.com/huggingface/datasets.git | Fix flatten of complex feature types (#3723)
* Flatten Translation and TranslationVariableLanguages
* Add tests
* Style
* Flatten for decodable features
* Fix flatten for non-dict types
* Add test
* Descriptive message in flatten for Audio feature
* Small refactor
* Add flatten to features
*... | 125 | 0 | 21,903 | 12 | |
1 | 9 | def dot(self, other):
from dask.array.routines import tensordot
return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))
| dask/array/core.py | 66 | dask | {
"docstring": "Dot product of self and other.\n\n Refer to :func:`dask.array.tensordot` for full documentation.\n\n See Also\n --------\n dask.array.dot : equivalent function\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 19,
"vocab_size": 19
} | 16 | Python | 15 | 2820bae493a49cb1d0a6e376985c5473b8f04fa8 | core.py | 156,733 | 3 | 45 | dot | https://github.com/dask/dask.git | Don't include docs in ``Array`` methods, just refer to module docs (#9244)
Co-authored-by: James Bourbeau <jrbourbeau@users.noreply.github.com> | 37 | 0 | 36,743 | 12 | |
1 | 3 | def sort(self) -> None:
raise NotImplementedError()
| tools/sort/sort_methods.py | 23 | faceswap | {
"docstring": " Override for method specific logic for sorting the loaded statistics\n\n The scored list :attr:`_result` should be sorted in place\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 19,
"vocab_size": 18
} | 6 | Python | 6 | 98d01760e469fd2108eed8d0b0a1ba6297c3177c | sort_methods.py | 101,615 | 6 | 12 | sort | https://github.com/deepfakes/faceswap.git | Overhaul sort:
- Standardize image data reading and writing
- Optimize loading (just one pass required)
- Make all sort groups binnable (to greater or lesser results)
- Add sort by pitch
- Deprecate multiple options
- linting, docs + locales | 20 | 0 | 21,023 | 7 | |
6 | 14 | def losses(self):
collected_losses = []
for layer in self._flatten_layers():
# If any eager losses are present, we assume the model to be part of
# an eager training loop (either a custom one or the one used when
# `run_eagerly=True`) and so we always return ... | keras/engine/base_layer.py | 140 | keras | {
"docstring": "List of losses added using the `add_loss()` API.\n\n Variable regularization tensors are created when this property is\n accessed, so it is eager safe: accessing `losses` under a\n `tf.GradientTape` will propagate gradients back to the corresponding\n variables.\n\n ... | 93 | Python | 71 | fa6d9107a498f7c2403ff28c7b389a1a0c5cc083 | base_layer.py | 277,252 | 16 | 83 | losses | https://github.com/keras-team/keras.git | reduct too long lines | 369 | 0 | 81,916 | 14 | |
2 | 11 | def store_rendered_templates(store, signal, sender, template, context, **kwargs):
store.setdefault("templates", []).append(template)
if "context" not in store:
store["context"] = ContextList()
store["context"].append(copy(context))
| django/test/client.py | 96 | django | {
"docstring": "\n Store templates and contexts that are rendered.\n\n The context is copied so that it is an accurate representation at the time\n of rendering.\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 23,
"vocab_size": 21
} | 18 | Python | 18 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | client.py | 206,346 | 5 | 57 | store_rendered_templates | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 37 | 0 | 51,498 | 10 | |
2 | 7 | def test_unicode_idval(self) -> None:
values = [
("", r""),
("ascii", r"ascii"),
("ação", r"a\xe7\xe3o"),
("josé@blah.com", r"jos\xe9@blah.com"),
(
r"δοκ.ιμή@παράδειγμα.δοκιμή",
r"\u03b4\u03bf\u03ba.\u03b9\u03bc... | testing/python/metafunc.py | 135 | pytest | {
"docstring": "Test that Unicode strings outside the ASCII character set get\n escaped, using byte escapes if they're in that range or unicode\n escapes if they're not.\n\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 25,
"vocab_size": 21
} | 39 | Python | 35 | b21b008118fc8cf65b4bcd9b059f1cd704e05c68 | metafunc.py | 190,666 | 21 | 88 | test_unicode_idval | https://github.com/pytest-dev/pytest.git | Refactor idmaker functions into class IdMaker
This commit only refactors, it does not change or add functionality yet. Public
API is retained. Reason or refactoring:
User provided parameter IDs (e.g. Metafunc.parametrize(ids=...)) had so far
only been used to calculate a unique test ID for each test invocation. That
... | 215 | 0 | 46,373 | 14 | |
2 | 7 | def _has_webengine(self) -> bool:
try:
import qutebrowser.qt.webenginewidgets # pylint: disable=unused-import
except ImportError:
return False
return True
| qutebrowser/config/configfiles.py | 40 | qutebrowser | {
"docstring": "Check if QtWebEngine is available.\n\n Note that it's too early to use objects.backend here...\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 14,
"vocab_size": 14
} | 16 | Python | 15 | 218f490484066660dd4e899da600b252f7edd468 | configfiles.py | 321,750 | 10 | 23 | _has_webengine | https://github.com/qutebrowser/qutebrowser.git | Warn on QtWebEngine downgrade and Qt 5 -> 6 upgrade | 67 | 0 | 117,884 | 8 | |
2 | 15 | def tag_resource(self, resource_ids, tags, resource_type="instance"):
request = TagResourcesRequest()
request.set_Tags(tags)
request.set_ResourceType(resource_type)
request.set_ResourceIds(resource_ids)
response = self._send_request(request)
if response is not No... | python/ray/autoscaler/_private/aliyun/utils.py | 117 | ray | {
"docstring": "Create and bind tags to specified ECS resources.\n\n :param resource_ids: The IDs of N resources.\n :param tags: The tags of the resource.\n :param resource_type: The type of the resource.\n ",
"language": "en",
"n_whitespaces": 57,
"n_words": 29,
"vocab_size": 19
} | 32 | Python | 26 | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | utils.py | 130,357 | 10 | 69 | tag_resource | https://github.com/ray-project/ray.git | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | 110 | 0 | 29,243 | 11 | |
3 | 22 | async def async_close_cover(self, **kwargs):
await mqtt.async_publish(
self.hass,
self._config.get(CONF_COMMAND_TOPIC),
self._config[CONF_PAYLOAD_CLOSE],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING... | homeassistant/components/mqtt/cover.py | 150 | core | {
"docstring": "Move the cover down.\n\n This method is a coroutine.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 9,
"vocab_size": 9
} | 35 | Python | 32 | d0c4f0fec4216e4193da716001b5e13e1e3f2106 | cover.py | 308,401 | 16 | 98 | async_close_cover | https://github.com/home-assistant/core.git | Add mqtt encoding support for publishing (#62739)
* encoding support for mqtt publishing - todo tests
* signature allows None values for qos and retain
* common test for mqtt publishing encoding
* better test with command templates
* more tests
* fix tests alarm control panel+tests light basic
* te... | 222 | 0 | 107,158 | 14 | |
4 | 9 | def links(self):
header = self.headers.get("link")
resolved_links = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get("rel") or link.get("url")
resolved_links[key] = link
return resolve... | pipenv/patched/pip/_vendor/requests/models.py | 100 | pipenv | {
"docstring": "Returns the parsed header links of the response, if any.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | 27 | Python | 21 | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | models.py | 22,098 | 9 | 57 | links | https://github.com/pypa/pipenv.git | Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | 114 | 0 | 4,177 | 14 | |
1 | 8 | def text(self, body):
text_proto = TextProto()
text_proto.body = clean_text(body)
return self.dg._enqueue("text", text_proto)
| lib/streamlit/elements/text.py | 55 | streamlit | {
"docstring": "Write fixed-width and preformatted text.\n\n Parameters\n ----------\n body : str\n The string to display.\n\n Example\n -------\n >>> st.text('This is some text.')\n\n ",
"language": "en",
"n_whitespaces": 81,
"n_words": 21,
"vocab_s... | 12 | Python | 11 | 72703b38029f9358a0ec7ca5ed875a6b438ece19 | text.py | 118,743 | 4 | 32 | text | https://github.com/streamlit/streamlit.git | Replace static apps with live Cloud apps (#4317)
Co-authored-by: kajarenc <kajarenc@gmail.com> | 40 | 0 | 26,400 | 8 | |
1 | 2 | def baseratio(self):
return self["baseratio"]
| packages/python/plotly/plotly/graph_objs/_funnelarea.py | 22 | plotly.py | {
"docstring": "\n Sets the ratio between bottom length and maximum top length.\n\n The 'baseratio' property is a number and may be specified as:\n - An int or float in the interval [0, 1]\n\n Returns\n -------\n int|float\n ",
"language": "en",
"n_whitespaces": ... | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _funnelarea.py | 226,825 | 2 | 11 | baseratio | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 58,498 | 7 | |
1 | 16 | def test_sum_distinct_aggregate(self):
authors = Author.objects.filter(book__in=[self.b5, self.b6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age... | tests/aggregation/tests.py | 132 | django | {
"docstring": "\n Sum on a distinct() QuerySet should aggregate only the distinct items.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 11
} | 26 | Python | 24 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | tests.py | 200,894 | 7 | 79 | test_sum_distinct_aggregate | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 82 | 0 | 49,822 | 11 | |
2 | 3 | def test_episodes_unit(self):
self.batch_id = 0
| rllib/utils/replay_buffers/tests/test_reservoir_buffer.py | 21 | ray | {
"docstring": "Tests adding, sampling, get-/set state, and eviction with\n experiences stored by timesteps.",
"language": "en",
"n_whitespaces": 18,
"n_words": 12,
"vocab_size": 12
} | 5 | Python | 5 | acf2bf9b2fa9f6cac8c599ec1eea6a9d5249905f | test_reservoir_buffer.py | 126,148 | 14 | 104 | test_episodes_unit | https://github.com/ray-project/ray.git | [RLlib] Get rid of all these deprecation warnings. (#27085) | 19 | 0 | 28,072 | 7 | |
1 | 2 | def test_presubmit_shortcircuit(ray_start_1_cpu):
| python/ray/util/dask/tests/test_dask_callback.py | 13 | ray | {
"docstring": "\n Test that presubmit return short-circuits task submission, and that task's\n result is set to the presubmit return value.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 18,
"vocab_size": 15
} | 2 | Python | 2 | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | test_dask_callback.py | 133,142 | 8 | 43 | test_presubmit_shortcircuit | https://github.com/ray-project/ray.git | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | 5 | 0 | 29,941 | 6 | |
3 | 17 | def _check_prepopulated_fields_value(self, obj, val, label):
if not isinstance(val, (list, tuple)):
return must_be("a list or tuple", option=label, obj=obj, id="admin.E029")
else:
return list(
chain.from_iterable(
self._check_prepopul... | django/contrib/admin/checks.py | 120 | django | {
"docstring": "Check a value of `prepopulated_fields` dictionary, i.e. it's an\n iterable of existing fields.",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 12
} | 37 | Python | 33 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | checks.py | 203,345 | 12 | 78 | _check_prepopulated_fields_value | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 201 | 0 | 50,319 | 16 | |
5 | 31 | def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r
requires_backends(cls, "pyctcdecode")
from pyctcdecode import BeamSearchDecoderCTC
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
tokenizer = Wav2Vec2CTCTo... | src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py | 321 | transformers | {
"docstring": "\n Instantiate a [`Wav2Vec2ProcessorWithLM`] from a pretrained Wav2Vec2 processor.\n\n <Tip>\n\n This class method is simply calling Wav2Vec2FeatureExtractor's\n [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`], Wav2Vec2CTCTokenizer's\n [`~tokeniz... | 137 | Python | 100 | efb35a4107478f7d2ebcf56572c0967e68536e15 | processing_wav2vec2_with_lm.py | 33,998 | 56 | 194 | from_pretrained | https://github.com/huggingface/transformers.git | [Wav2Vec2ProcessorWithLM] improve decoder downlaod (#15040) | 445 | 0 | 6,183 | 12 | |
async def follower_loop(self):
    """
    Main follower coroutine.

    This starts all of the leader connection coros and keeps the follower
    alive; any exception raised while connecting is logged rather than
    propagated.
    """
    try:
        await self._connect_to_leaders()
    except Exception:
        # logger.exception records the message AND the traceback in one
        # call, replacing the old error() + exception(e) pair.
        logger.exception("Exception occurred in follower loop: ")
| freqtrade/rpc/replicate/__init__.py | 60 | freqtrade | {
"docstring": "\n Main follower coroutine\n\n This starts all of the leader connection coros\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 11,
"vocab_size": 11
} | 17 | Python | 17 | 9f6bba40af1a407f190a89f5c0c8b4e3f528ba46 | __init__.py | 150,412 | 6 | 31 | follower_loop | https://github.com/freqtrade/freqtrade.git | initial concept for replicate, basic leader and follower logic | 71 | 0 | 34,736 | 11 | |
def coverage_ratio(self) -> float:
    """float: The coverage ratio that the model was trained at."""
    return self._coverage_ratio
| scripts/convert.py | 22 | faceswap | {
"docstring": " float: The coverage ratio that the model was trained at. ",
"language": "en",
"n_whitespaces": 11,
"n_words": 10,
"vocab_size": 10
} | 6 | Python | 6 | 1022651eb8a7741014f5d2ec7cbfe882120dfa5f | convert.py | 101,373 | 3 | 12 | coverage_ratio | https://github.com/deepfakes/faceswap.git | Bugfix: convert - Gif Writer
- Fix non-launch error on Gif Writer
- convert plugins - linting
- convert/fs_media/preview/queue_manager - typing
- Change convert items from dict to Dataclass | 20 | 0 | 20,788 | 6 | |
def test_standard_get_document_model_string(self):
    """Test get_document_model_string with no WAGTAILDOCS_DOCUMENT_MODEL."""
    # Remove the override so the helper falls back to the default model path.
    del settings.WAGTAILDOCS_DOCUMENT_MODEL
    self.assertEqual(get_document_model_string(), "wagtaildocs.Document")
| wagtail/documents/tests/test_models.py | 37 | wagtail | {
"docstring": "Test get_document_model_string with no WAGTAILDOCS_DOCUMENT_MODEL",
"language": "en",
"n_whitespaces": 4,
"n_words": 5,
"vocab_size": 5
} | 6 | Python | 6 | d10f15e55806c6944827d801cd9c2d53f5da4186 | test_models.py | 74,850 | 3 | 20 | test_standard_get_document_model_string | https://github.com/wagtail/wagtail.git | Reformat with black | 27 | 0 | 16,328 | 9 | |
def get_denominations() -> Dict[DENOMINATION, float]:
    """Gets all supported denominations and their lower bound value.

    Returns:
        Dict[DENOMINATION, float]: All supported denominations mapped to
        their lower bound value.
    """
    denominations = {
        "Trillions": 10**12,
        "Billions": 10**9,
        "Millions": 10**6,
        "Thousands": 10**3,
        "Units": 1,
    }
    return denominations
| openbb_terminal/helpers_denomination.py | 61 | OpenBBTerminal | {
"docstring": "Gets all supported denominations and their lower bound value\n\n Returns:\n Dict[DENOMINATION, int]: All supported denominations and their lower bound value\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 20,
"vocab_size": 13
} | 18 | Python | 18 | 07c08df84e2af99be4ee32ab276128cafb9e7986 | helpers_denomination.py | 285,833 | 13 | 35 | get_denominations | https://github.com/OpenBB-finance/OpenBBTerminal.git | Bug/2583 (#2671)
* #2583 [CT] Add and use denomination helper
* #2583 [CT] Fix Yahoo Finance denomination
* #2583 [CT] Fix typings for dict
* #2583 [CT] Add YF model get financials tests
* #2583 [CT] Fix stubbed currency
* #2583 [CT] Add test coverage for denomination helpers
* #2583 [CT] Fix YF view... | 62 | 0 | 85,447 | 8 | |
5 | 10 | def update_parent_account_names(accounts):
name_to_account_map = {}
for d in accounts:
if d.account_number:
account_name = d.account_number + " - " + d.account_name
else:
account_name = d.account_name
name_to_account_map[d.name] = account_name
for account in accounts:
if account.parent_account:
... | erpnext/accounts/report/consolidated_financial_statement/consolidated_financial_statement.py | 118 | erpnext | {
"docstring": "Update parent_account_name in accounts list.\n\n\tparent_name is `name` of parent account which could have other prefix\n\tof account_number and suffix of company abbr. This function adds key called\n\t`parent_account_name` which does not have such prefix/suffix.\n\t",
"language": "en",
"n_whitesp... | 38 | Python | 25 | 494bd9ef78313436f0424b918f200dab8fc7c20b | consolidated_financial_statement.py | 65,199 | 12 | 71 | update_parent_account_names | https://github.com/frappe/erpnext.git | style: format code with black | 26 | 0 | 13,822 | 13 | |
12 | 15 | def test_cluster_interrupt_searcher(start_connected_cluster, tmpdir, searcher):
cluster = start_connected_cluster
dirpath = str(tmpdir)
local_checkpoint_dir = os.path.join(dirpath, "experiment")
from ray.tune import register_trainable
register_trainable("trainable", MyTrainableClass)
| python/ray/tune/tests/test_cluster_searcher.py | 72 | ray | {
"docstring": "Tests restoration of HyperOptSearch experiment on cluster shutdown\n with actual interrupt.\n\n Restoration should restore both state of trials\n and previous search algorithm (HyperOptSearch) state.\n This is an end-to-end test.\n ",
"language": "en",
"n_whitespaces": 44,
"n_word... | 20 | Python | 18 | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | test_cluster_searcher.py | 132,452 | 60 | 313 | test_cluster_interrupt_searcher | https://github.com/ray-project/ray.git | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | 38 | 0 | 29,762 | 9 | |
def __invert__(self):
    """
    Implementation of ~ operator - returns :class:`NotAny`.
    """
    return NotAny(self)
| .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | 21 | transferlearning | {
"docstring": "\n Implementation of ~ operator - returns :class:`NotAny`\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | 4 | Python | 4 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | pyparsing.py | 63,387 | 2 | 11 | __invert__ | https://github.com/jindongwang/transferlearning.git | upd; format | 18 | 0 | 13,282 | 7 | |
def solidity(self):
    """
    Sets the solidity of the pattern fill. Solidity is roughly the
    fraction of the area filled by the pattern; 0 shows only the
    background color and 1 shows only the foreground color.

    Returns
    -------
    The value stored under the "solidity" key.
    """
    return self["solidity"]
| packages/python/plotly/plotly/graph_objs/bar/marker/_pattern.py | 22 | plotly.py | {
"docstring": "\n Sets the solidity of the pattern fill. Solidity is roughly the\n fraction of the area filled by the pattern. Solidity of 0 shows\n only the background color without pattern and solidty of 1\n shows only the foreground color without pattern.\n\n The 'solidity' prop... | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _pattern.py | 228,784 | 2 | 11 | solidity | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 60,457 | 7 | |
5 | 13 | def extract_data(self, response):
try:
data = response.json()
except ValueError as e: # If there was no json to parse
data = {}
if response.text or response.status_code not in (200, 202, 204):
text = response.text
if len(text)... | awxkit/awxkit/api/pages/page.py | 137 | awx | {
"docstring": "Takes a `requests.Response` and returns a data dict.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 7
} | 60 | Python | 50 | 68a44529b6b77d2d43d7099b654560bfd8bbf518 | page.py | 81,964 | 11 | 83 | extract_data | https://github.com/ansible/awx.git | Register pages for the Instance peers and install bundle endpoints
This includes exposing a new interface for Page objects, Page.bytes,
to return the full bytestring contents of the response. | 186 | 0 | 17,284 | 16 | |
def add_to_apply_calls(self, func, *args, **kwargs):
    """
    Add a function to the call queue.

    Parameters
    ----------
    func : callable
        Function to be added to the call queue.
    *args : iterable
        Additional positional arguments to be passed in `func`.
    **kwargs : dict
        Additional keyword arguments to be passed in `func`.

    Returns
    -------
    PandasOnPythonDataframePartition
        A new partition holding a copy of the data and the extended queue.
    """
    extended_queue = self.call_queue + [(func, args, kwargs)]
    return PandasOnPythonDataframePartition(
        self._data.copy(),
        call_queue=extended_queue,
    )
| modin/core/execution/python/implementations/pandas_on_python/partitioning/partition.py | 63 | modin | {
"docstring": "\n Add a function to the call queue.\n\n Parameters\n ----------\n func : callable\n Function to be added to the call queue.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n **kwargs : dict\n Addit... | 14 | Python | 14 | 4ec7f6347903f9133c65ebc5b6e0e15553b98577 | partition.py | 153,874 | 5 | 42 | add_to_apply_calls | https://github.com/modin-project/modin.git | REFACTOR-#4530: Standardize access to physical data in partitions (#4563)
Signed-off-by: Alexey Prutskov <lehaprutskov@gmail.com> | 57 | 0 | 35,677 | 11 | |
def callback_data(self) -> JSONData:
    """
    We store certain data in ``callback_id`` as JSON. It's a bit hacky,
    but it's the simplest way to store state without saving it on the
    Sentry side.
    """
    return json.loads(self.data["callback_id"])
| src/sentry/integrations/slack/requests/action.py | 36 | sentry | {
"docstring": "\n We store certain data in ``callback_id`` as JSON. It's a bit hacky, but\n it's the simplest way to store state without saving it on the Sentry\n side.\n\n Data included in this field:\n - issue: the ID of the corresponding Issue\n - orig_response_ur... | 6 | Python | 6 | 10fbaf4b856f85879611d50b714fa47eb4a358c3 | action.py | 88,279 | 12 | 20 | callback_data | https://github.com/getsentry/sentry.git | ref: add src/sentry/utils/json.py to mypy.ini (#41133)
first commit I sorted some of the mypy files (separated out to make the
diff of the second commit easier to follow) | 20 | 0 | 18,370 | 9 | |
8 | 42 | def test_overlap_first(business_client, setup_before_upload, show_overlap_first):
c = business_client
config = dict(
title='test_overlap_first',
is_published=True,
maximum_annotations=1,
show_overlap_first=show_overlap_first,
sampling="Uniform sampling",
label_con... | label_studio/tests/test_next_task.py | 474 | label-studio | {
"docstring": "\n <View>\n <Text name=\"text\" value=\"$text\"></Text>\n <Choices name=\"text_class\" choice=\"single\">\n <Choice value=\"class_A\"></Choice>\n <Choice value=\"class_B\"></Choice>\n </Choices>\n </View>",
"l... | 122 | Python | 84 | 35125cca12ba1e8703c4284894e4e2db44ce7009 | test_next_task.py | 177,582 | 63 | 396 | test_overlap_first | https://github.com/heartexlabs/label-studio.git | fix: DEV-1348: Fix _rearrange_overlap_cohort filter condition for overlap bulk update with concurrent import (#1844)
* [fix] Rearrange overlap depending in annotations count
* Fix next task test for not random overlap assignment
* Delete unused method
* Rename rearrange method to have back compatibility
* ... | 377 | 0 | 42,449 | 17 | |
5 | 18 | def get_variant(template, args=None, variant=None, manufacturer=None, manufacturer_part_no=None):
item_template = frappe.get_doc("Item", template)
if item_template.variant_based_on == "Manufacturer" and manufacturer:
return make_variant_based_on_manufacturer(item_template, manufacturer, manufacturer_part_no)
el... | erpnext/controllers/item_variant.py | 143 | erpnext | {
"docstring": "Validates Attributes and their Values, then looks for an exactly\n\tmatching Item Variant\n\n\t:param item: Template Item\n\t:param args: A dictionary with \"Attribute\" as key and \"Attribute Value\" as value\n\t",
"language": "en",
"n_whitespaces": 26,
"n_words": 30,
"vocab_size": 26
} | 44 | Python | 40 | 494bd9ef78313436f0424b918f200dab8fc7c20b | item_variant.py | 65,637 | 10 | 90 | get_variant | https://github.com/frappe/erpnext.git | style: format code with black | 34 | 0 | 13,965 | 15 | |
1 | 30 | def replaceHTMLEntity(t):
return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form ``/* ... */``"
htmlComment =... | .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | 347 | transferlearning | {
"docstring": "Helper parser action to replace common HTML entities with their special characters(Deprecated) Predefined expression of 1 or more printable words or\nquoted strings, separated by commas.\n\nThis expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.\n",
"language": "en"... | 141 | Python | 91 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | pyparsing.py | 63,296 | 2 | 15 | replaceHTMLEntity | https://github.com/jindongwang/transferlearning.git | upd; format | 207 | 0 | 13,236 | 21 | |
1 | 7 | def test_reading_jsonl_dataset_should_be_successful(tasks_base_path):
dataset = JsonlDataset(tasks_base_path / "jsonl/train.jsonl")
assert len(dataset.sentences) == 5
assert dataset.sentences[0].to_tagged_string() == "This is New <B-LOC> Berlin <I-LOC>"
assert dataset.sentences[1].to_tagged_string... | tests/test_datasets.py | 133 | flair | {
"docstring": "\n Tests reading a JsonlDataset containing multiple tagged entries\n ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 8
} | 59 | Python | 37 | a3120b5179f51308d4c0c1f4865873debb566bbd | test_datasets.py | 214,487 | 10 | 77 | test_reading_jsonl_dataset_should_be_successful | https://github.com/flairNLP/flair.git | refactor: :recycle: make label_type configurable for Jsonl corpora | 97 | 0 | 53,743 | 11 | |
def rands(nchars) -> str:
    """
    Generate one random byte string.

    See `rands_array` if you want to create an array of random strings.
    """
    # Draw `nchars` characters from the shared alphabet and glue them together.
    sampled = np.random.choice(RANDS_CHARS, nchars)
    return "".join(sampled)
| pandas/_testing/_random.py | 42 | pandas | {
"docstring": "\n Generate one random byte string.\n\n See `rands_array` if you want to create an array of random strings.\n\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 17,
"vocab_size": 16
} | 7 | Python | 7 | f538568afc2c76c2d738d32e3544cf9fe6742960 | _random.py | 167,582 | 8 | 24 | rands | https://github.com/pandas-dev/pandas.git | TYP: misc return type annotations (#47558) | 13 | 0 | 40,041 | 10 | |
def test_get_action(self):
    """AdminSite.get_action() returns an action even if it's disabled."""
    action_name = "delete_selected"
    self.assertEqual(self.site.get_action(action_name), delete_selected)
    self.site.disable_action(action_name)
    # get_action() still resolves the action after it has been disabled.
    self.assertEqual(self.site.get_action(action_name), delete_selected)
| tests/admin_views/test_adminsite.py | 79 | django | {
"docstring": "AdminSite.get_action() returns an action even if it's disabled.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | 10 | Python | 8 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | test_adminsite.py | 207,506 | 5 | 47 | test_get_action | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 45 | 0 | 51,992 | 10 | |
def arrowsize(self):
    """
    Sets the size of the end annotation arrow head, relative to
    `arrowwidth`. A value of 1 (default) gives a head about 3x as
    wide as the line.
    """
    return self["arrowsize"]
| packages/python/plotly/plotly/graph_objs/layout/_annotation.py | 22 | plotly.py | {
"docstring": "\n Sets the size of the end annotation arrow head, relative to\n `arrowwidth`. A value of 1 (default) gives a head about 3x as\n wide as the line.\n\n The 'arrowsize' property is a number and may be specified as:\n - An int or float in the interval [0.3, inf]\n\n ... | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _annotation.py | 230,881 | 2 | 11 | arrowsize | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 62,554 | 7 | |
39 | 80 | def get_basic_details(args, item, overwrite_warehouse=True):
if not item:
item = frappe.get_doc("Item", args.get("item_code"))
if item.variant_of:
item.update_template_tables()
item_defaults = get_item_defaults(item.name, args.company)
item_group_defaults = get_item_group_defaults(item.name, args.company)
... | erpnext/stock/get_item_details.py | 1,809 | erpnext | {
"docstring": "\n\t:param args: {\n\t \"item_code\": \"\",\n\t \"warehouse\": None,\n\t \"customer\": \"\",\n\t \"conversion_rate\": 1.0,\n\t \"selling_price_list\": None,\n\t \"price_list_currency\": None,\n\t \... | 468 | Python | 274 | 494bd9ef78313436f0424b918f200dab8fc7c20b | get_item_details.py | 67,797 | 142 | 1,097 | get_basic_details | https://github.com/frappe/erpnext.git | style: format code with black | 322 | 0 | 14,620 | 15 | |
def outer_size(self) -> Size:
    """The size of the widget (including padding and border)."""
    return self._size
| src/textual/widget.py | 22 | textual | {
"docstring": "The size of the widget (including padding and border).",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | 6 | Python | 6 | 0ba3ffb1718bdd01a5136fd1bc30e8ed58e6a47c | widget.py | 184,066 | 3 | 12 | outer_size | https://github.com/Textualize/textual.git | size properties | 20 | 0 | 44,455 | 6 | |
2 | 54 | def test_retina_sepbn_head_loss(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
... | tests/test_models/test_dense_heads/test_retina_sepBN_head.py | 592 | mmdetection | {
"docstring": "Tests RetinaSepBN head loss when truth is empty and non-empty.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | 216 | Python | 136 | 665b55f6768dd0c2c32f8e73cd3069eddc1677b0 | test_retina_sepBN_head.py | 245,233 | 51 | 364 | test_retina_sepbn_head_loss | https://github.com/open-mmlab/mmdetection.git | [Refactor] Refactor NAS-FPN and anchor-free | 929 | 0 | 70,717 | 16 | |
2 | 10 | def transpose(self) -> Tuple[int, int]:
if self.transpose_method is not None:
# Safety: `transpose` takes an int rather than e.g. an IntEnum.
# self.transpose_method is set above to be a value in
# EXIF_TRANSPOSE_MAPPINGS, and that only contains correct values.
... | synapse/rest/media/v1/thumbnailer.py | 125 | synapse | {
"docstring": "Transpose the image using its EXIF Orientation tag\n\n Returns:\n A tuple containing the new image size in pixels as (width, height).\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 21,
"vocab_size": 19
} | 66 | Python | 53 | 5949ab86f8db0ef3dac2063e42210030f17786fb | thumbnailer.py | 248,471 | 13 | 74 | transpose | https://github.com/matrix-org/synapse.git | Fix potential thumbnail memory leaks. (#12932) | 191 | 0 | 72,299 | 13 | |
1 | 12 | def test_get_command_line(self):
mock_context = MagicMock()
mock_context.parent.command_path = "streamlit"
with patch("click.get_current_context", return_value=mock_context):
with patch("click.get_os_args", return_value=["os_arg1", "os_arg2"]):
result = cli._... | lib/tests/streamlit/cli_test.py | 108 | streamlit | {
"docstring": "Test that _get_command_line_as_string correctly concatenates values\n from click.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 8,
"vocab_size": 8
} | 22 | Python | 19 | 5f39da13c0c551533a6d313dd0e2f6f9f0f9a5ac | cli_test.py | 118,707 | 7 | 57 | test_get_command_line | https://github.com/streamlit/streamlit.git | Get rid of preheated script runs (#4259)
* Get rid of preheated script runs
When a streamlit server is first started, we currently trigger a run of the
script defining an app and save the resulting deltas so that the very first
page load of an app can be more or less instantaneous.
This optimization is current... | 91 | 0 | 26,370 | 14 | |
18 | 42 | def get_positions_from_labels(self, row_loc, col_loc):
from modin.pandas.indexing import (
is_boolean_array,
is_list_like,
is_range_like,
boolean_mask_to_numeric,
)
lookups = []
for axis, axis_loc in enumerate((row_loc, col_loc)):... | modin/core/storage_formats/base/query_compiler.py | 557 | modin | {
"docstring": "\n Compute index and column positions from their respective locators.\n\n Inputs to this method are arguments the the pandas user could pass to loc.\n This function will compute the corresponding index and column positions\n that the user could equivalently pass to iloc.\n\... | 274 | Python | 160 | dc7abf04518230d102bb5272c5ebf9fe20092338 | query_compiler.py | 155,388 | 58 | 353 | get_positions_from_labels | https://github.com/modin-project/modin.git | REFACTOR-#5202: Pass loc arguments to query compiler. (#5305)
Some Modin implementations may prefer to take rows and columns by label rather than by position.
Signed-off-by: mvashishtha <mahesh@ponder.io> | 1,449 | 0 | 36,372 | 21 | |
def test_get_stored_cert_serials(certutil, populate_store):
    """
    Test get_stored_cert_serials with a certificate we put in.
    """
    serials = certutil.get_stored_cert_serials("TrustedPublisher")
    # The serial of the certificate installed by the populate_store fixture.
    assert "5be1cc5d51b78dbd49a0b7c00d44806d" in serials
| tests/pytests/functional/modules/test_win_certutil.py | 38 | salt | {
"docstring": "\n Test get_stored_cert_serials with a certificate we put in\n ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 8
} | 10 | Python | 9 | a8d2d1e1397cdc79b2c5f1ad7f6e3b729dcf8857 | test_win_certutil.py | 215,907 | 3 | 20 | test_get_stored_cert_serials | https://github.com/saltstack/salt.git | Add tests, fix state module | 19 | 0 | 54,240 | 9 | |
15 | 13 | def _generate_sparse6_bytes(G, nodes, header):
n = len(G)
if n >= 2**36:
raise ValueError(
"sparse6 is only defined if number of nodes is less " "than 2 ** 36"
)
if header:
yield b">>sparse6<<"
yield b":"
for d in n_to_data(n):
yield str.encode(chr(d ... | networkx/readwrite/sparse6.py | 122 | networkx | {
"docstring": "Yield bytes in the sparse6 encoding of a graph.\n\n `G` is an undirected simple graph. `nodes` is the list of nodes for\n which the node-induced subgraph will be encoded; if `nodes` is the\n list of all nodes in the graph, the entire graph will be\n encoded. `header` is a Boolean that spec... | 55 | Python | 44 | f6755ffa00211b523c6c0bec5398bc6c3c43c8b1 | sparse6.py | 176,499 | 49 | 393 | _generate_sparse6_bytes | https://github.com/networkx/networkx.git | Update black (#5438)
* CI: sync up black dev requirements version with precommit
* Run black
Co-authored-by: Jarrod Millman <jarrod.millman@gmail.com> | 125 | 0 | 41,938 | 13 | |
3 | 29 | def load_linnerud(*, return_X_y=False, as_frame=False):
data_filename = "linnerud_exercise.csv"
target_filename = "linnerud_physiological.csv"
# Read header and data
with _open_text(DATA_MODULE, data_filename) as f:
header_exercise = f.readline().split()
f.seek(0) # reset file obj... | sklearn/datasets/_base.py | 284 | scikit-learn | {
"docstring": "Load and return the physical exercise Linnerud dataset.\n\n This dataset is suitable for multi-output regression tasks.\n\n ============== ============================\n Samples total 20\n Dimensionality 3 (for both data and target)\n Features integer\n Targets ... | 85 | Python | 58 | f2c78fe8c5cf2576f8351238c55dace23fb1d691 | _base.py | 261,741 | 34 | 178 | load_linnerud | https://github.com/scikit-learn/scikit-learn.git | MAINT handle deprecations from `importlib.resources` (#25157)
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com> | 304 | 0 | 76,971 | 12 | |
19 | 61 | def train_epoch(self, iterator, info=None, num_steps=None, epoch_idx=0):
| python/ray/util/sgd/torch/training_operator.py | 177 | """Runs one standard training pass over the training dataloader.
Bythis method will iterate over the givencall ``self.train_batch`` over each batch. Ifscheduler_step_freqis set, this default method will also step the scheduler accordingly.
You do not need to call ``train_batch`` in this method if you ... | ray | {
"docstring": "Runs one standard training pass over the training dataloader.\n\n By default, this method will iterate over the given iterator and\n call ``self.train_batch`` over each batch. If ``scheduler_step_freq``\n is set, this default method will also step the scheduler accordingly.\n\n ... | 6 | Python | 6 | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | training_operator.py | 133,361 | 46 | 318 | train_epoch | https://github.com/ray-project/ray.git | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | 13 | 11 | 29,990 | 12 |
3 | 13 | def solve_linear_system_LU(matrix, syms):
if matrix.rows != matrix.cols - 1:
raise ValueError("Rows should be equal to columns - 1")
A = matrix[:matrix.rows, :matrix.rows]
b = matrix[:, matrix.cols - 1:]
soln = A.LUsolve(b)
solutions = {}
for i in range(soln.rows):
solutions... | sympy/solvers/solvers.py | 140 | sympy | {
"docstring": "\n Solves the augmented matrix system using ``LUsolve`` and returns a\n dictionary in which solutions are keyed to the symbols of *syms* as ordered.\n\n Explanation\n ===========\n\n The matrix must be invertible.\n\n Examples\n ========\n\n >>> from sympy import Matrix, solve_... | 44 | Python | 36 | 59d22b6bb7287613d598611027f640d068ca5748 | solvers.py | 196,428 | 10 | 89 | solve_linear_system_LU | https://github.com/sympy/sympy.git | Moved imports to higher level | 82 | 0 | 47,928 | 10 | |
def args2(self):
    """
    Sets a 2nd set of `args`, these arguments values are passed to
    the Plotly method set in `method` when clicking this button
    while in the active state. Use this to create toggle buttons.
    """
    return self["args2"]
| packages/python/plotly/plotly/graph_objs/layout/updatemenu/_button.py | 22 | plotly.py | {
"docstring": "\n Sets a 2nd set of `args`, these arguments values are passed to\n the Plotly method set in `method` when clicking this button\n while in the active state. Use this to create toggle buttons.\n\n The 'args2' property is an info array that may be specified as... | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _button.py | 232,763 | 2 | 11 | args2 | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 64,207 | 7 | |
1 | 16 | def test_suppresses_second_cancellation(self):
deferred: "Deferred[str]" = Deferred()
wrapper_deferred = delay_cancellation(deferred)
# Cancel the new `Deferred`, twice.
wrapper_deferred.cancel()
wrapper_deferred.cancel()
self.assertNoResult(wrapper_deferred)
... | tests/util/test_async_helpers.py | 133 | synapse | {
"docstring": "Test that a second cancellation is suppressed.\n\n Identical to `test_cancellation` except the new `Deferred` is cancelled twice.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 17,
"vocab_size": 16
} | 69 | Python | 55 | 90b2327066d2343faa86c464a182b6f3c4422ecd | test_async_helpers.py | 247,579 | 12 | 72 | test_suppresses_second_cancellation | https://github.com/matrix-org/synapse.git | Add `delay_cancellation` utility function (#12180)
`delay_cancellation` behaves like `stop_cancellation`, except it
delays `CancelledError`s until the original `Deferred` resolves.
This is handy for unifying cleanup paths and ensuring that uncancelled
coroutines don't use finished logcontexts.
Signed-off-by: Sea... | 192 | 0 | 71,755 | 10 | |
3 | 18 | def do_extends(parser, token):
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
bits[1] = construct_relative_path(parser.origin.template_name, bits[1])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
... | django/template/loader_tags.py | 166 | @register.tag("include") | django | {
"docstring": "\n Signal that this template extends a parent template.\n\n This tag may be used in two ways: ``{% extends \"base\" %}`` (with quotes)\n uses the literal value \"base\" as the name of the parent template to extend,\n or ``{% extends variable %}`` uses the value of ``variable`` as either th... | 48 | Python | 42 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | loader_tags.py | 206,284 | 12 | 94 | do_extends | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 103 | 1 | 51,466 | 11 |
2 | 20 | def get_temp_export_dir(timestamped_export_dir):
(dirname, basename) = os.path.split(timestamped_export_dir)
if isinstance(basename, bytes):
str_name = basename.decode("utf-8")
else:
str_name = str(basename)
temp_export_dir = tf.io.gfile.join(
tf.compat.as_bytes(dirname),
... | keras/saving/utils_v1/export_utils.py | 132 | keras | {
"docstring": "Builds a directory name based on the argument but starting with 'temp-'.\n\n This relies on the fact that TensorFlow Serving ignores subdirectories of\n the base directory that can't be parsed as integers.\n\n Args:\n timestamped_export_dir: the name of the eventual export directory, e.g... | 24 | Python | 19 | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | export_utils.py | 276,298 | 11 | 80 | get_temp_export_dir | https://github.com/keras-team/keras.git | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 73 | 0 | 81,620 | 13 | |
1 | 20 | def test_callbacks(self) -> None:
cache: DeferredCache[str, int] = DeferredCache("test")
callbacks = set()
# start with an entry, with a callback
cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill"))
# now replace that entry with a pending result
o... | tests/util/caches/test_deferred_cache.py | 300 | synapse | {
"docstring": "Invalidation callbacks are called at the right time",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | 140 | Python | 100 | 4ae967cf6308e80b03da749f0cbaed36988e235e | test_deferred_cache.py | 249,857 | 16 | 171 | test_callbacks | https://github.com/matrix-org/synapse.git | Add missing type hints to test.util.caches (#14529) | 315 | 0 | 73,173 | 13 | |
def components(self) -> Dict[str, BaseComponent]:
    """
    Returns all components used by this pipeline.
    Note that this also includes such components that are being utilized
    by other components only and are not being used as a pipeline node
    directly.
    """
    named_components = {}
    for component in self._find_all_components():
        # Unnamed components are skipped, matching the original filter.
        if component.name is not None:
            named_components[component.name] = component
    return named_components
| haystack/pipelines/base.py | 61 | haystack | {
"docstring": "\n Returns all components used by this pipeline.\n Note that this also includes such components that are being utilized by other components only and are not being used as a pipeline node directly.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 32,
"vocab_size": 24
... | 20 | Python | 18 | f6e3a639063887f9f5b27f574a04c7fe602b3185 | base.py | 257,346 | 7 | 39 | components | https://github.com/deepset-ai/haystack.git | Prevent losing names of utilized components when loaded from config (#2525)
* Prevent losing names of utilized components when loaded from config
* Update Documentation & Code Style
* update test
* fix failing tests
* Update Documentation & Code Style
* fix even more tests
* Update Documentation & Co... | 41 | 0 | 75,070 | 9 | |
def set_xcomargs_dependencies(self) -> None:
    """
    Resolves upstream dependencies of a task. In this way passing an
    ``XComArg`` as value for a template field will result in creating an
    upstream relation between two tasks.
    """
    from airflow.models.xcom_arg import XComArg

    for field_name in self.template_fields:
        # Guard clause: skip template fields that were never set.
        if not hasattr(self, field_name):
            continue
        XComArg.apply_upstream_relationship(self, getattr(self, field_name))
| airflow/models/baseoperator.py | 73 | airflow | {
"docstring": "\n Resolves upstream dependencies of a task. In this way passing an ``XComArg``\n as value for a template field will result in creating upstream relation between\n two tasks.\n\n **Example**: ::\n\n with DAG(...):\n generate_content = GenerateConte... | 21 | Python | 21 | 10f5db863e387c0fd7369cf521d624b6df77a65d | baseoperator.py | 44,076 | 26 | 47 | set_xcomargs_dependencies | https://github.com/apache/airflow.git | Set dependencies in MappedOperator via XComArgs (#20931)
Co-authored-by: Kaxil Naik <kaxilnaik@gmail.com>
Co-authored-by: Ephraim Anierobi <splendidzigy24@gmail.com> | 83 | 0 | 8,139 | 12 | |
1 | 13 | def _hyab(self, y_true, y_pred):
delta = y_true - y_pred
root = K.sqrt(K.clip(K.pow(delta[..., 0:1], 2), self._epsilon, None))
delta_norm = frobenius_norm(delta[..., 1:3])
return root + delta_norm
| lib/model/loss/perceptual_loss_plaid.py | 97 | faceswap | {
"docstring": " Compute the HyAB distance between true and predicted images.\n\n Parameters\n ----------\n y_true: :class:`plaidml.tile.Value`\n The ground truth batch of images in standard or Hunt-adjusted L*A*B* color space\n y_pred: :class:`plaidml.tile.Value`\n T... | 24 | Python | 20 | 582c2ce40c11ef235dd3f9100f70e1e2832f8dd3 | perceptual_loss_plaid.py | 101,059 | 5 | 65 | _hyab | https://github.com/deepfakes/faceswap.git | Add Flip Loss Function
- Add Flip for AMD and TF
- Split Perceptual Loss functions to own modules
- Fix allowed input shape for models
- Allow GUI tooltip to display at higher width | 59 | 0 | 20,496 | 14 | |
1 | 25 | def test_valid_full_refresh_read_no_slices(mocker):
stream_output = [{"k1": "v1"}, {"k2": "v2"}]
s1 = MockStream([({"sync_mode": SyncMode.full_refresh}, stream_output)], name="s1")
s2 = MockStream([({"sync_mode": SyncMode.full_refresh}, stream_output)], name="s2")
mocker.patch.object(MockStream, "... | airbyte-cdk/python/unit_tests/sources/test_abstract_source.py | 256 | airbyte | {
"docstring": "Tests that running a full refresh sync on streams which don't specify slices produces the expected AirbyteMessages",
"language": "en",
"n_whitespaces": 16,
"n_words": 17,
"vocab_size": 17
} | 51 | Python | 39 | f83eca58eaf2129d21b5796a301732ab22675130 | test_abstract_source.py | 3,357 | 12 | 156 | test_valid_full_refresh_read_no_slices | https://github.com/airbytehq/airbyte.git | CDK: Fix typing errors (#9037)
* fix typing, drop AirbyteLogger
* format
* bump the version
* use logger instead of fixture logger
Co-authored-by: Eugene Kulak <kulak.eugene@gmail.com>
Co-authored-by: auganbay <auganenu@gmail.com> | 91 | 0 | 459 | 13 | |
6 | 18 | def depth_first_search(self):
if self.isSolvable() == False:
return (None, None)
closed = list()
q = list()
q.append(Node(state=self.state, depth=0))
while q:
node = q.pop()
if node.isGoalState():
return (node.moves, le... | Eight_Puzzle_Solver/eight_puzzle.py | 190 | Python | {
"docstring": "\n Parameters: State\n Returns: List of Moves to solve the state, otherwise None if unsolvable\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 14,
"vocab_size": 14
} | 41 | Python | 31 | f0af0c43340763724f139fa68aa1e5a9ffe458b4 | eight_puzzle.py | 22,419 | 15 | 118 | depth_first_search | https://github.com/geekcomputers/Python.git | refactor: clean code
Signed-off-by: slowy07 <slowy.arfy@gmail.com> | 198 | 0 | 4,325 | 15 | |
3 | 20 | def update(self) -> bool:
try:
# Add or remove DeploymentReplica instances in self._replicas.
# This should be the only place we adjust total number of replicas
# we manage.
running_replicas_changed = self._scale_deployment_replicas()
# Chec... | python/ray/serve/_private/deployment_state.py | 138 | ray | {
"docstring": "Attempts to reconcile this deployment to match its goal state.\n\n This is an asynchronous call; it's expected to be called repeatedly.\n\n Also updates the internal DeploymentStatusInfo based on the current\n state of the system.\n\n Returns true if this deployment was suc... | 70 | Python | 56 | 65d0c0aa48be8f9f7faae857d3ab71444997755a | deployment_state.py | 128,240 | 24 | 72 | update | https://github.com/ray-project/ray.git | [Serve] add alpha gRPC support (#28175) | 279 | 0 | 28,641 | 17 | |
7 | 50 | def _finished_processing(self) -> None:
assert self.logcontext is not None
assert self.finish_time is not None
usage = self.logcontext.get_resource_usage()
if self._processing_finished_time is None:
# we completed the request without anything calling processing()
... | synapse/http/site.py | 432 | synapse | {
"docstring": "Log the completion of this request and update the metrics",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | 250 | Python | 168 | d8df8e6c1432d25ea1c0310a5f2dc48d1688345f | site.py | 246,117 | 46 | 262 | _finished_processing | https://github.com/matrix-org/synapse.git | Don't print HTTPStatus.* in "Processed..." logs (#11827)
* Don't print HTTPStatus.* in "Processed..." logs
Fixes #11812. See also #7118 and
https://github.com/matrix-org/synapse/pull/7188#r401719326 in
particular.
Co-authored-by: Brendan Abolivier <babolivier@matrix.org> | 769 | 0 | 71,021 | 11 | |
1 | 7 | def test_submit_with_logs_instant_job(self, ray_start_stop):
cmd = "echo hello"
stdout, _ = _run_cmd(f"ray job submit -- bash -c '{cmd}'")
assert "hello" in stdout
| dashboard/modules/job/tests/test_cli_integration.py | 49 | ray | {
"docstring": "Should exit immediately and print logs even if job returns instantly.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | 21 | Python | 20 | 813e1a857d5dfc060b3b6cb846157fdca425e6b0 | test_cli_integration.py | 134,271 | 4 | 24 | test_submit_with_logs_instant_job | https://github.com/ray-project/ray.git | Revert "Revert "[Job Submission][refactor 5/N] Remove the head node dependency on the `Raylet` process"" (#29008)
Reverts #28931 and fixes the tests that were made flaky by that PR.
Fix address="auto" in cpp job test (fixed by @Catch-Bull )
Fix len_new_owner_port flakiness in test_sdk(fixed by @Catch-Bull )
Fi... | 49 | 0 | 30,235 | 10 | |
3 | 21 | def serialize_model_as_bytecode(model):
# Note: we don't use a RAM path for this because zipfile cannot write
# to such paths.
temp_dir = tempfile.mkdtemp()
try:
filepath = os.path.join(temp_dir, "model.keras")
saving_lib.save_model(model, filepath)
with open(filepath, "rb")... | keras/saving/pickle_utils.py | 134 | keras | {
"docstring": "Convert a Keras Model into a bytecode representation for pickling.\n\n Args:\n model: Keras Model instance.\n\n Returns:\n Tuple that can be read by `deserialize_from_bytecode`.\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 23,
"vocab_size": 20
} | 49 | Python | 44 | 2ed044d06d0ae552477672aa8b778f8edafb52f1 | pickle_utils.py | 279,795 | 13 | 75 | serialize_model_as_bytecode | https://github.com/keras-team/keras.git | Use new saving logic for pickling. This is somewhat cleaner since it restores the exact same model (no usage of traces). It may however be less convenient since it requires get_config() to be implemented and the use of a custom_object_scope.
PiperOrigin-RevId: 474146108 | 126 | 0 | 83,134 | 13 | |
1 | 6 | def get_value_data_from_instance(self, instance):
return {
"id": instance.pk,
"edit_url": AdminURLFinder().get_edit_url(instance),
}
| wagtail/admin/widgets/chooser.py | 49 | wagtail | {
"docstring": "\n Given a model instance, return a value that we can pass to both the server-side template\n and the client-side rendering code (via telepath) that contains all the information needed\n for display. Typically this is a dict of id, title etc; it must be JSON-serialisable.\n ... | 10 | Python | 10 | 39f7886a6f8ee98db7e73ce33d94c06139f35bd8 | chooser.py | 77,547 | 5 | 28 | get_value_data_from_instance | https://github.com/wagtail/wagtail.git | Split out common logic from get_value_data | 53 | 0 | 16,673 | 11 | |
2 | 17 | def copy_files(from_dir, to_dir):
if from_dir.exists():
shutil.copytree(from_dir, to_dir, dirs_exist_ok=True)
dirs_list = [
SETTINGS_DIRECTORY,
USER_DATA_DIRECTORY,
USER_DATA_DIRECTORY / "styles",
CUSTOM_IMPORTS_DIRECTORY,
CUSTOM_IMPORTS_DIRECTORY / "econometrics",
]
dirs_files =... | openbb_terminal/core/config/paths_helper.py | 109 | OpenBBTerminal | {
"docstring": "\n Copy default/example files from the repo\n to the user data folder",
"language": "en",
"n_whitespaces": 17,
"n_words": 11,
"vocab_size": 10
} | 31 | Python | 28 | c4658b63a936ad219625d30dcbd12a1aa798af09 | paths_helper.py | 285,729 | 3 | 27 | copy_files | https://github.com/OpenBB-finance/OpenBBTerminal.git | Add path for custom_imports outside the terminal (#2567)
* add log path
* add test to check if log file is in correct dir
* env path
* black
* mypy fix
* add styles folder and styles from repo
* add timezone as env variable
* fix changes with main
* fix test
* flake8
* fix linting
* fi... | 53 | 0 | 85,399 | 10 | |
4 | 27 | def build_query_compiler(cls, path, columns, index_columns, **kwargs):
col_partitions, column_widths = cls.build_columns(columns)
partition_ids = cls.call_deploy(path, col_partitions, **kwargs)
index, sync_index = cls.build_index(path, partition_ids, index_columns)
remote_parts ... | modin/core/io/column_stores/parquet_dispatcher.py | 204 | modin | {
"docstring": "\n Build query compiler from deployed tasks outputs.\n\n Parameters\n ----------\n path : str, path object or file-like object\n Path to the file to read.\n columns : list\n List of columns that should be read from file.\n index_columns :... | 55 | Python | 44 | 8864bc197974da6d8cda2de2f35ca31d561be1cc | parquet_dispatcher.py | 154,122 | 20 | 136 | build_query_compiler | https://github.com/modin-project/modin.git | PERF-#4305: Parallelize `read_parquet` over row groups (#4700)
Co-authored-by: mvashishtha <mahesh@ponder.io> | 231 | 0 | 35,795 | 12 | |
1 | 2 | def packing(self):
return self["packing"]
| packages/python/plotly/plotly/graph_objs/treemap/_tiling.py | 22 | plotly.py | {
"docstring": "\n Determines d3 treemap solver. For more info please refer to\n https://github.com/d3/d3-hierarchy#treemap-tiling\n\n The 'packing' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['squarify', 'binary', '... | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _tiling.py | 235,599 | 2 | 11 | packing | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 67,043 | 7 | |
1 | 5 | def kg_to_pounds(n):
return float(n) * 2.204623
@register.filter("startswith") | netbox/utilities/templatetags/helpers.py | 38 | @register.filter("startswith") | netbox | {
"docstring": "\n Convert a weight from kilograms to pounds.\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | 7 | Python | 7 | 87fd09ca8b5a0d3ec692e241351e1bbc4ac298a7 | helpers.py | 266,144 | 2 | 15 | kg_to_pounds | https://github.com/netbox-community/netbox.git | Cleanup for #9654 | 12 | 1 | 78,308 | 8 |
4 | 13 | def set_weights(self, weights):
if not getattr(self, "_built", False):
raise ValueError(
"You are calling `set_weights()` on an optimizer that has not "
"yet been built. Please call "
"`optimizer.build(trainable_variables)` to create the "
... | keras/optimizers/optimizer_experimental/optimizer.py | 150 | keras | {
"docstring": "Set the weights of the optimizer.\n\n Args:\n weights: a list of `tf.Variable`s or numpy arrays, the target values\n of optimizer variables. It should have the same order as\n `self._variables`.\n ",
"language": "en",
"n_whitespaces": 84,
"n... | 67 | Python | 53 | 571d8786df580d6daa5c57c77b5b15a125631c8f | optimizer.py | 279,802 | 16 | 66 | set_weights | https://github.com/keras-team/keras.git | Add method `set_weights` for optimizer backward compatibility.
Remove @doc_controls.do_not_generate_docs for `variables()` method because optimizer is no longer a `tf.Module`.
PiperOrigin-RevId: 474149115 | 279 | 0 | 83,138 | 17 | |
1 | 25 | def test_subdag_pools(self):
dag = DAG('parent', default_args=default_args)
subdag = DAG('parent.child', default_args=default_args)
session = airflow.settings.Session()
pool_1 = airflow.models.Pool(pool='test_pool_1', slots=1)
pool_10 = airflow.models.Pool(pool='test_po... | tests/operators/test_subdag_operator.py | 287 | airflow | {
"docstring": "\n Subdags and subdag tasks can't both have a pool with 1 slot\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 12
} | 53 | Python | 38 | 49e336ae0302b386a2f47269a6d13988382d975f | test_subdag_operator.py | 47,650 | 17 | 169 | test_subdag_pools | https://github.com/apache/airflow.git | Replace usage of `DummyOperator` with `EmptyOperator` (#22974)
* Replace usage of `DummyOperator` with `EmptyOperator` | 183 | 0 | 9,191 | 11 | |
2 | 9 | def __call__(self, results):
assert 'mix_results' in results
num_images = len(results['mix_results'])
assert num_images == 1, \
f'CopyPaste only supports processing 2 images, got {num_images}'
if self.selected:
selected_results = self._select_object(resu... | mmdet/datasets/pipelines/transforms.py | 116 | mmdetection | {
"docstring": "Call function to make a copy-paste of image.\n\n Args:\n results (dict): Result dict.\n Returns:\n dict: Result dict with copy-paste transformed.\n ",
"language": "en",
"n_whitespaces": 63,
"n_words": 20,
"vocab_size": 18
} | 35 | Python | 30 | 9a166a380229d2aaf5986fa1ff303a941865961a | transforms.py | 244,183 | 10 | 68 | __call__ | https://github.com/open-mmlab/mmdetection.git | [Feature] Support simple copy paste with some configs. (#7501)
* Testing pre-commit hooks
* Added base code in transforms
* Added Simple Copy Paste working version
* Added checks to simple copy paste
* refactor simplecopypaste and provide some configs
* remove lvis-api in .gitignore
* refactor simple... | 117 | 0 | 70,272 | 13 | |
3 | 7 | def safe_quote_currency(self) -> str:
try:
return self.stake_currency or self.pair.split('/')[1].split(':')[0]
except IndexError:
return ''
| freqtrade/persistence/models.py | 70 | freqtrade | {
"docstring": "\n Compatibility layer for asset - which can be empty for old trades.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 11
} | 13 | Python | 12 | 8e98a2ff9f4fabf81bf5a4f4e1f772f5c4a091ec | models.py | 149,525 | 8 | 39 | safe_quote_currency | https://github.com/freqtrade/freqtrade.git | api - provide assset_currency via API | 56 | 0 | 34,441 | 15 | |
6 | 18 | def _make_twin_axes(self, *args, **kwargs):
if 'sharex' in kwargs and 'sharey' in kwargs:
# The following line is added in v2.2 to avoid breaking Seaborn,
# which currently uses this internal API.
if kwargs["sharex"] is not self and kwargs["sharey"] is not self:
... | lib/matplotlib/axes/_base.py | 222 | matplotlib | {
"docstring": "Make a twinx Axes of self. This is used for twinx and twiny.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 12
} | 78 | Python | 63 | c73f4c455514cf5422d27bf38c93250de8316b21 | _base.py | 109,447 | 16 | 135 | _make_twin_axes | https://github.com/matplotlib/matplotlib.git | Merge SubplotBase into AxesBase. | 260 | 0 | 23,592 | 15 | |
2 | 10 | def setup_awaitable_errors() -> Callable[[], None]:
warnings.simplefilter("error", RuntimeWarning)
# unraisablehook was added in Python 3.8.
if not hasattr(sys, "unraisablehook"):
return lambda: None
# State shared between unraisablehook and check_for_unraisable_exceptions.
unraisable... | tests/test_utils/__init__.py | 76 | synapse | {
"docstring": "\n Convert warnings from a non-awaited coroutines into errors.\n ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 8
} | 34 | Python | 31 | 646324437543c096e737777c81b4fe4b45c3e1a7 | __init__.py | 248,078 | 13 | 54 | setup_awaitable_errors | https://github.com/matrix-org/synapse.git | Remove unused `# type: ignore`s (#12531)
Over time we've begun to use newer versions of mypy, typeshed, stub
packages---and of course we've improved our own annotations. This makes
some type ignore comments no longer necessary. I have removed them.
There was one exception: a module that imports `select.epoll`. Th... | 62 | 0 | 72,089 | 9 | |
1 | 4 | def required_columns(self) -> List[str]:
return []
| ludwig/data/split.py | 25 | ludwig | {
"docstring": "Returns the list of columns that are required for splitting.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | 6 | Python | 6 | d85269cd60734790a65c11673bfdd98516b62b6c | split.py | 8,629 | 3 | 14 | required_columns | https://github.com/ludwig-ai/ludwig.git | Use clearer error messages in ludwig serving, and enable serving to work with configs that have stratified splitting on target columns. (#2740)
* Use clearer serving error messages, and enable serving to work with configs that have stratified splitting on target columns.
* Adjust warning message | 20 | 0 | 1,468 | 6 | |
2 | 11 | def get_local_ip_address() -> str:
try:
ip_address = requests.get(
"https://checkip.amazonaws.com/", timeout=3
).text.strip()
except (requests.ConnectionError, requests.exceptions.ReadTimeout):
ip_address = "No internet connection"
return ip_address
| gradio/utils.py | 78 | gradio | {
"docstring": "Gets the public IP address or returns the string \"No internet connection\" if unable to obtain it.",
"language": "en",
"n_whitespaces": 16,
"n_words": 17,
"vocab_size": 16
} | 21 | Python | 18 | 51824608865b66ab04b018f55055124edbe603f3 | utils.py | 181,347 | 9 | 45 | get_local_ip_address | https://github.com/gradio-app/gradio.git | Patching `test_get_ip` attempt 2 (#2810)
* ip-patch-2
* formatting
* patch 2 | 65 | 0 | 43,310 | 14 | |
1 | 4 | def path(self):
self._deprecate("path")
return self._path
| pandas/io/excel/_base.py | 31 | pandas | {
"docstring": "\n Path to Excel file.\n\n .. deprecated:: 1.5.0\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 7,
"vocab_size": 7
} | 5 | Python | 5 | 047137ce2619cfe2027e3999dfb92eb614d9a485 | _base.py | 164,688 | 3 | 16 | path | https://github.com/pandas-dev/pandas.git | DEP: Protect some ExcelWriter attributes (#45795)
* DEP: Deprecate ExcelWriter attributes
* DEP: Deprecate ExcelWriter attributes
* Fixup for test
* Move tests and restore check_extension
y
* Deprecate xlwt fm_date and fm_datetime; doc improvements | 26 | 0 | 39,592 | 8 |
End of preview. Expand in Data Studio
No dataset card yet
- Downloads last month
- 23