message: string, lengths 13–484
diff: string, lengths 38–4.63k
Lint fixes Remove extra lines
@@ -406,5 +406,4 @@ def event_return(events):
if ret and "saltutil.find_job" not in ret['fun'] or "salt/auth" not in ret['tag']:
results = returner(ret, event_rtn=True)
- return results
tools: add MultiLogIterator example to readme * update LogReader doc use MultiLogIterator to read the entire route, get timestamps and deal with exceptions * bring back the old example * clean f-strings * simplify
@@ -31,3 +31,21 @@ for msg in lr:
  if msg.which() == "carState":
    print(msg.carState.steeringAngleDeg)
```
+
+### MultiLogIterator
+
+`MultiLogIterator` is similar to `LogReader`, but reads multiple logs.
+
+```python
+from tools.lib.route import Route
+from tools.lib.logreader import MultiLogIterator
+
+# setup a MultiLogIterator to read all the logs in the route
+r = Route("4cf7a6ad03080c90|2021-09-29--13-46-36")
+lr = MultiLogIterator(r.log_paths())
+
+# print all the steering angles values from all the logs in the route
+for msg in lr:
+  if msg.which() == "carState":
+    print(msg.carState.steeringAngleDeg)
+```
GraphBookmarksUI : Change shortcut from "Ctrl+B" to "B" Folks prefer single-key hotkeys for frequently accessed things. I've deliberately been lax in not checking that `event.modifiers == None`, to allow for a smooth transition for folks already used to `Ctrl+B`.
@@ -146,7 +146,7 @@ def appendNodeSetMenuDefinitions( editor, menuDefinition ) : { "command" : functools.partial( __findBookmark, editor, bookmarks ), "active" : len( bookmarks ), - "shortCut" : "Ctrl+B", + "shortCut" : "B", } ) @@ -303,7 +303,7 @@ def __findNumericBookmark( editor, numericBookmark ) : def __editorKeyPress( editor, event ) : - if event.key == "B" and event.modifiers == event.modifiers.Control : + if event.key == "B" : __findBookmark( editor ) return True
settings: Show enable_spectator_access option if server-setting is enabled. We show the "Allow creating web-public streams" setting in UI only if settings.WEB_PUBLIC_STREAMS_ENABLED is true on the server.
setting_name="realm_enable_spectator_access" prefix="id_" is_checked=realm_enable_spectator_access - render_only=page_params.development_environment + render_only=page_params.server_web_public_streams_enabled label=admin_settings_label.realm_enable_spectator_access}} <div class="input-group"> <label for="realm_create_private_stream_policy" class="dropdown-title">{{t "Who can create private streams" }}</label>
Update ua.txt ```It used a hard-coded user agent string in order to contact Gdrive. Mozilla / 5.0 (compatible; MSIE 7.0; Windows NT 5.1; InfoPath.1)```
@@ -1314,6 +1314,10 @@ msie 44 WebMonitor Client +# Reference: https://blog.prevailion.com/2020/03/the-curious-case-of-criminal-curriculum.html + +Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.1; InfoPath.1) + # Misc information_schema
cephadm-adopt: use ceph_osd_flag module There's no reason not to use the ceph_osd_flag module to set/unset osd flags. Also, if there are no OSD nodes in the inventory, then we don't need to execute the set/unset play.
when: not containerized_deployment | bool - name: set osd flags - hosts: "{{ mon_group_name|default('mons') }}[0]" + hosts: "{{ osd_group_name|default('osds') }}" become: true gather_facts: false tasks: name: ceph-defaults - name: set osd flags - command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} osd set {{ item }}" - changed_when: false + ceph_osd_flag: + cluster: "{{ cluster }}" + name: "{{ item }}" + state: present with_items: - noout - nodeep-scrub + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true environment: - CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" - name: adopt ceph osd daemons hosts: "{{ osd_group_name|default('osd') }}" CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - name: unset osd flags - hosts: "{{ mon_group_name|default('mons') }}[0]" + hosts: "{{ osd_group_name|default('osds') }}" become: true gather_facts: false tasks: name: ceph-defaults - name: unset osd flags - command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} osd unset {{ item }}" - changed_when: false + ceph_osd_flag: + cluster: "{{ cluster }}" + name: "{{ item }}" + state: absent with_items: - noout - nodeep-scrub + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: true environment: - CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' + CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" + CEPH_CONTAINER_BINARY: "{{ container_binary }}" - name: redeploy mds daemons hosts: "{{ mds_group_name|default('mdss') }}"
Fix incorrect attributes in isherm_csr Some attributes were still trying to access the data as if it were scipy matrices. This occurred as part of a slightly failed merge up of master in dev.major, and was not caught at the time as the tests were not merged up.
@@ -51,13 +51,13 @@ cdef bint _isherm_csr_full(CSR matrix, double tol) except 2: cdef idxint row, ptr_a, ptr_b, col_a, col_b for row in range(matrix.shape[0]): ptr_a, ptr_a_end = matrix.row_index[row], matrix.row_index[row + 1] - ptr_b, ptr_b_end = transpose.indptr[row], transpose.indptr[row + 1] + ptr_b, ptr_b_end = transpose.row_index[row], transpose.row_index[row + 1] while ptr_a < ptr_a_end and ptr_b < ptr_b_end: # Doing this on every loop actually involves a few more # de-references than are strictly necessary, but just # simplifies the logic checking for the end of the row. col_a = matrix.col_index[ptr_a] - col_b = transpose.indices[ptr_b] + col_b = transpose.col_index[ptr_b] if col_a == col_b: if not _conj_feq(matrix.data[ptr_a], transpose.data[ptr_b], tol): return False
[client] make dir after clobbering cache directory This is for
@@ -835,6 +835,7 @@ class NamedCache(Cache): logging.exception( 'NamedCache: failed to load named cache state file; obliterating') file_path.rmtree(self.cache_dir) + fs.makedirs(self.cache_dir) with self._lock: self._try_upgrade() if time_fn:
Update README.md add Youtube link
## What's New +* *Aug 2021:* We now have a tutorial that introduces our toolkit, you can **[watch it on Youtube](https://youtu.be/PkMFnS6cjAc)**! * *July 2021:* We are now working on packaging s3prl and reorganizing the file structure in **v0.3**. Please consider using the stable **v0.2.0** for now. We will test and release **v0.3** before August. * *June 2021:* Support [**SUPERB:** **S**peech processing **U**niversal **PER**formance **B**enchmark](https://arxiv.org/abs/2105.01051), submitted to Interspeech 2021. Use the tag **superb-interspeech2021** or **v0.2.0**. * *June 2021:* Support extracting multiple hidden states from the SSL pretrained models
Remove pool_destroy and fs_destroy from StratisCli class They are now really unnecessary since the list methods already filter out all non-test devices and the check for the devlinks has gone away.
@@ -55,16 +55,6 @@ class StratisCli: if fields[0].startswith(TEST_PREF) ) - @staticmethod - def pool_destroy(name): - """ - Destroy a pool - :param name: Name of pool to destroy - :return: None - """ - if name.startswith(TEST_PREF): - exec_command([STRATIS_CLI, "pool", "destroy", name]) - @staticmethod def destroy_all(): """ @@ -74,23 +64,12 @@ class StratisCli: umount_mdv() # Remove FS - for name, pool_name in StratisCli.fs_list().items(): - StratisCli.fs_destroy(pool_name, name) + for fs_name, pool_name in StratisCli.fs_list().items(): + exec_command([STRATIS_CLI, "fs", "destroy", pool_name, fs_name]) # Remove Pools for name in StratisCli.pool_list(): - StratisCli.pool_destroy(name) - - @staticmethod - def fs_destroy(pool_name, fs_name): - """ - Destroy a FS - :param pool_name: Pool which contains the FS - :param fs_name: Name of FS to destroy - :return: None - """ - if pool_name.startswith(TEST_PREF): - exec_command([STRATIS_CLI, "fs", "destroy", pool_name, fs_name]) + exec_command([STRATIS_CLI, "pool", "destroy", name]) def clean_up():
Fix TensorProtosDBInput AttributeError Summary: Pull Request resolved:
@@ -5,7 +5,8 @@ from __future__ import division from __future__ import print_function from __future__ import unicode_literals -from caffe2.python import core, scope, workspace, helpers +from caffe2.python import core, scope, workspace +from caffe2.python.helpers.db_input import db_input from caffe2.python.modeling import parameter_info from caffe2.python.modeling.parameter_sharing import ( parameter_sharing_context, @@ -414,7 +415,7 @@ class ModelHelper(object): """You cannot pass reader to model_helper.TensorProtosDBInput. Use model.net.TensorProtosDBInput instead to create the op.""" - return helpers.db_input.db_input( + return db_input( self, blob_out, batch_size, db, db_type, **kwargs) def GetDevices(self):
Deleting Trailing whitespace When we define several networks (ice1-2, ice1-3, ...), the dhcpd.subnet.conf.j2 template generates a lot of trailing whitespace in the DHCP configuration files. This is not a problem for running the DHCP service, but the generated configuration files are too big and not very clean.
+#jinja2: lstrip_blocks: "True" #### Blue Banquise file #### ## {{ansible_managed}} {% endfor %} {% else %} {% set range = groups['all'] %} -{% endif %} +{% endif -%} -{% for host in range %} +{%- for host in range %} {% if hostvars[host]['network_interfaces'] is defined %} {% for nic, nic_args in hostvars[host]['network_interfaces'].items() %} {% if (nic_args.network is defined and not none) and (nic_args.network == item) and (nic_args.ip4 is defined and not none) and (nic_args.mac is defined and not none) %} @@ -23,8 +24,9 @@ host {{host}}-{{item}} { fixed-address {{nic_args.ip4}}; } {% endif %} -{% endfor %} -{% if hostvars[host]['bmc'] is defined %} + {% endfor -%} + + {%- if hostvars[host]['bmc'] is defined %} {% set bmc_args = hostvars[host]['bmc'] %} {% if (bmc_args.network is defined and not none) and (bmc_args.network == item) and (bmc_args.name is defined and not none) and (bmc_args.mac is defined and not none) and (bmc_args.ip4 is defined and not none) %} host {{bmc_args.name}} { @@ -36,4 +38,3 @@ host {{host}}-{{item}} { {% endif %} {% endif %} {% endfor %} -
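For context, a minimal Jinja2 sketch of the whitespace controls this template now relies on; the `Environment` flags below stand in for the `#jinja2: lstrip_blocks` header and the `{%- ... -%}` markers used in the actual template:

```python
from jinja2 import Environment

# lstrip_blocks strips leading whitespace before {% ... %} tags, and
# trim_blocks drops the newline right after a block tag; a "-" inside a
# tag ({%- ... -%}) trims the surrounding whitespace/newlines by hand.
env = Environment(lstrip_blocks=True, trim_blocks=True)
template = env.from_string(
    "{% for host in hosts %}\n"
    "host {{ host }};\n"
    "{% endfor %}\n"
)
print(template.render(hosts=["ice1-2", "ice1-3"]))
```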
name cleanup Cleaning up bad strings in naming when coming in from manual identification. Also tried to set up logging, but found that it was logging the web-server output to the job's individual file, not to the webserver log file that I wanted it to use.
@@ -3,6 +3,7 @@ from time import strftime, localtime import urllib import json import re +#import logging # import omdb from arm.config.config import cfg @@ -26,6 +27,8 @@ def clean_for_filename(string): string = re.sub('\s+', ' ', string) string = string.replace(' : ', ' - ') string = string.replace(':', '-') + string = string.replace('&', 'and') + string = string.replace("\\", " - ") string = string.strip() #return re.sub('[^\w\-_\.\(\) ]', '', string) return string @@ -67,10 +70,12 @@ def call_omdb_api(title=None, year=None, imdbID=None, plot="short"): return(None) # strurl = urllib.parse.quote(strurl) + #logging.info("OMDB string query"+str(strurl)) print(strurl) title_info_json = urllib.request.urlopen(strurl).read() title_info = json.loads(title_info_json.decode()) print(title_info) + #logging.info("Response from Title Info command"+str(title_info)) # d = {'year': '1977'} # dvd_info = omdb.get(title=title, year=year) print("call was successful")
Update odp-noaa-nesdis-ncei-csb.yaml Updates for new locations in Big Data Program.
Name: Crowdsourced Bathymetry Description: Community provided bathymetry data collected in collaboration with the International Hydrographic Organization. -Documentation: https://odp-noaa-nesdis-ncei-csb-docs.s3-us-west-2.amazonaws.com/readme.htm +Documentation: https://noaa-bathymetry-pds.s3.amazonaws.com/readme.html Contact: [email protected] ManagedBy: "[NOAA](http://www.noaa.gov/)" UpdateFrequency: New data is added once a week. @@ -16,10 +16,10 @@ Tags: License: There are no restrictions on the use of this data. Resources: - Description: Crowdsourced bathymetry data - ARN: arn:aws:s3:::odp-noaa-nesdis-ncei-csb - Region: us-west-2 + ARN: arn:aws:s3:::noaa-bathymetry-pds + Region: us-east-1 Type: S3 Bucket - Description: Notifications for CSB data - ARN: arn:aws:sns:us-west-2:541768555562:odp-noaa-nesdis-ncei-csb - Region: us-west-2 + ARN: arn:aws:sns:us-east-1:123901341784:NewBathymetryObject + Region: us-east-1 Type: SNS Topic
Remove duplicate section (also removed fix that was merged into master)
# Studio Changelog -## Unreleased -#### Changes - -#### Issues Resolved - - ## Upcoming release #### Changes -* [[@jayoshih](https://github.com/jayoshih)] Don't allow users to set prerequisites on topics +* #### Issues Resolved -* [#1254](https://github.com/learningequality/studio/issues/1254) +* ## 2019-02-11 Release #### Changes
Fix RefreshTokenGrant modifiers The RefreshTokenGrant modifiers now take the same arguments as the AuthorizationCodeGrant modifiers
@@ -63,7 +63,7 @@ class RefreshTokenGrant(GrantTypeBase): refresh_token=self.issue_new_refresh_tokens) for modifier in self._token_modifiers: - token = modifier(token) + token = modifier(token, token_handler, request) self.request_validator.save_token(token, request)
Fix InlineQuery.event.geo returning None Closes
@@ -130,7 +130,7 @@ class InlineQuery(EventBuilder): and the user's device is able to send it, this will return the :tl:`GeoPoint` with the position of the user. """ - return + return self.query.geo @property def builder(self):
daemon: Recover `sys.stdout.close()` call Fixing the following output on macOS when starting/stopping the daemon on CLI: ``` Exception ignored in: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'> BrokenPipeError: [Errno 32] Broken pipe ```
@@ -1363,7 +1363,7 @@ async def async_run_daemon(root_path: Path, wait_for_unlock: bool = False) -> in await ws_server.start() await shutdown_event.wait() log.info("Daemon WebSocketServer closed") - # sys.stdout.close() + sys.stdout.close() return 0 except LockfileError: print("daemon: already launching")
State correct environment variable in README.md `_OAUTH_AUTHORIZE_URL` is the internal variable holding the content of the environment variable `OAUTH2_AUTHORIZE_URL` (see `generic.GenericEnvMixin`). Therefore `OAUTH2_AUTHORIZE_URL` must be stated here instead of `_OAUTH_AUTHORIZE_URL`.
@@ -372,6 +372,6 @@ c.GenericOAuthenticator.extra_params = { 'client_secret': 'MOODLE-CLIENT-SECRET-KEY'} ``` -And set your environmental variable `_OAUTH_AUTHORIZE_URL` to: +And set your environmental variable `OAUTH2_AUTHORIZE_URL` to: `http://YOUR-MOODLE-DOMAIN.com/local/oauth/login.php?client_id=MOODLE-CLIENT-ID&response_type=code`
Label-based lookups in [add/remove]_data_from_viewer * delays loading data from a label that is also a path, now only used as a last resort * changes `data_path` arg name to `data_label` * removes now unnecessary extension kwargs * checked that no notebooks need updates
@@ -1004,8 +1004,8 @@ class Application(VuetifyTemplate, HubListener): return data_label - def add_data_to_viewer(self, viewer_reference, data_path, - clear_other_data=False, ext=None): + def add_data_to_viewer(self, viewer_reference, data_label, + clear_other_data=False): """ Plots a data set from the data collection in the specific viewer. @@ -1014,21 +1014,18 @@ class Application(VuetifyTemplate, HubListener): viewer_reference : str The reference to the viewer defined with the ``reference`` key in the yaml configuration file. - data_path : str - Either the data filename or the Glue data label found in the ``DataCollection``. + data_label : str + The Glue data label found in the ``DataCollection``. clear_other_data : bool Removes all other currently plotted data and only shows the newly defined data set. - ext: str - The data extension to access from a file. If data_path is a filename, ext - is required. """ viewer_item = self._viewer_item_by_reference(viewer_reference) if viewer_item is None: # Maybe they mean the ID viewer_item = self._viewer_item_by_id(viewer_reference) if viewer_item is None: raise ValueError(f"Could not identify viewer with reference {viewer_reference}") - data_label = self.return_data_label(data_path, ext=ext, check_unique=False) + data_id = self._data_id_from_label(data_label) if clear_other_data: @@ -1040,6 +1037,11 @@ class Application(VuetifyTemplate, HubListener): selected_data_items[data_id] = 'visible' self._update_selected_data_items(viewer_item.get('id'), selected_data_items) else: + # This block provides backward compatibility for version<=3.0, where the second arg + # in `add_to_viewer` was `data_path` instead of `data_label`. When `data_label` is + # a file path and that file exists, load its data + if os.path.exists(data_label): + self.load_data(data_label) raise ValueError( f"No data item found with label '{data_label}'. Label must be one " "of:\n\t" + "\n\t".join([ @@ -1058,7 +1060,7 @@ class Application(VuetifyTemplate, HubListener): viewer.set_plot_axes() - def remove_data_from_viewer(self, viewer_reference, data_path, ext=None): + def remove_data_from_viewer(self, viewer_reference, data_label): """ Removes a data set from the specified viewer. @@ -1067,14 +1069,10 @@ class Application(VuetifyTemplate, HubListener): viewer_reference : str The reference to the viewer defined with the ``reference`` key in the yaml configuration file. - data_path : str - Either the data filename or the Glue data label found in the ``DataCollection``. - ext: str - The data extension to access from a file. If data_path is a filename, ext - is required. + data_label : str + The Glue data label found in the ``DataCollection``. """ viewer_item = self._viewer_item_by_reference(viewer_reference) - data_label = self.return_data_label(data_path, ext=ext, check_unique=False) data_id = self._data_id_from_label(data_label) selected_items = viewer_item['selected_data_items']
addresses: Add IPAddr.get_network() This gets the network portion of an IPAddr.
@@ -349,6 +349,17 @@ class IPAddr (object): return (self.toUnsigned() & ~((1 << (32-b))-1)) == n.toUnsigned() + def get_network (self, netmask_or_bits): + """ + Gets just the network part by applying a mask or prefix length + + Returns (IPAddr,preifx_bits) + """ + prefix = parse_cidr("255.255.255.255/" + str(netmask_or_bits), + allow_host=True)[1] + netmask = cidr_to_netmask(prefix).unsigned_h + return (IPAddr(self.unsigned_h & netmask, networkOrder=False),prefix) + @property def is_multicast (self): return ((self.toSigned(networkOrder = False) >> 24) & 0xe0) == 0xe0
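A quick illustration of what the new helper computes, using the standard-library `ipaddress` module rather than POX's own `IPAddr` class (the POX method itself returns an `(IPAddr, prefix_bits)` tuple, as the diff shows):

```python
import ipaddress

# Keep only the network portion of an address for a given prefix length,
# which is the same masking the new get_network() helper performs.
iface = ipaddress.ip_interface("192.168.1.55/24")
print(iface.network.network_address)  # 192.168.1.0
print(iface.network.prefixlen)        # 24
```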
change behavior of 'name' field in buildtest cdash It will now show the name with its identifier. This was an issue because a single test can be run multiple times. Previously, cdash was uploading only the first entry of the test, whereas it needed to read all test records.
@@ -161,12 +161,13 @@ def upload_test_cdash(build_name, configuration, site=None, report_file=None): with open(abspath_report_file) as json_file: buildtest_data = json.load(json_file) - for file_name in buildtest_data.keys(): - for test_name, tests_data in buildtest_data[file_name].items(): - test_data = tests_data[0] + for buildspec in buildtest_data.keys(): + for test_name in buildtest_data[buildspec].keys(): + for test_data in buildtest_data[buildspec][test_name]: + # test_data = tests_data[0] test = {} - test["name"] = test_name + test["name"] = test_name + "/" + test_data["id"] state = test_data["state"] if state == "PASS":
Overload GetSymbol.__repr__ TN:
@@ -1472,6 +1472,9 @@ class GetSymbol(AbstractExpression): return CallExpr('Sym', 'Get_Symbol', Symbol, [token_expr], abstract_expr=abstract_expr) + def __repr__(self): + return '<GetSymbol>' + class SymbolLiteral(AbstractExpression): """
stream settings: Use full space in subscribers tab for listing them. We increase the height of the widget used for listing subscribers in stream settings to 100% so that it occupies the remaining space in the tab.
.subscriber_list_container { position: relative; - max-height: 300px; + max-height: 100%; overflow: auto; text-align: left; -webkit-overflow-scrolling: touch;
Fix incorrect wording in Message.edit docstring 'role' -> 'message'
@@ -1220,7 +1220,7 @@ class Message(Hashable): The ``suppress`` keyword-only parameter was added. .. versionchanged:: 2.0 - Edits are no longer in-place, the newly edited role is returned instead. + Edits are no longer in-place, the newly edited message is returned instead. .. versionchanged:: 2.0 This function no-longer raises ``InvalidArgument`` instead raising
Update maltrail-sensor.service Adding Wiki-link to Documentation section
[Unit] Description=Maltrail IDS/IPS. Sensor of malicious traffic detection system Documentation=https://github.com/stamparm/maltrail#readme +Documentation=https://github.com/stamparm/maltrail/wiki Requires=network.target Wants=maltrail-server.service After=network-online.target maltrail-server.service
mgr: fix a typo This task isn't using the right container_exec_cmd; that's delegating to the wrong node. Let's use the right fact to fix this command.
when: dashboard_enabled | bool - name: wait for all mgr to be up - shell: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json | python -c 'import sys, json; print(json.load(sys.stdin)[\"available\"])'" + shell: "{{ container_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr dump -f json | python -c 'import sys, json; print(json.load(sys.stdin)[\"available\"])'" register: mgr_dump retries: 30 delay: 5
Adds a try/catch block to PlotManager queue processing. This prevents an error in the callback function (occasional in plotly) from halting queue processing, which previously happened because the error blocked the reset of busy=false.
@@ -378,10 +378,16 @@ PlotManager.prototype.run = function(){ var callback = pm.queue.shift(); //pop(); $("#status").text(label + " (" + pm.queue.length + " remaining)"); console.log("PLOTMANAGER: " + label + " (" + pm.queue.length + " remaining)"); + try { callback(); + } finally { + pm.busy = false; // in case an error occurs, don't block queue } + } + else { pm.busy = false; } + } if (pm.queue.length <= 0) { console.log("PLOTMANAGER: queue empty!"); clearInterval(pm.processor);
deactivate pytest-sugar as dependency for release git dependencies are not allowed on PyPI and pytest-sugar >0.9.5 is still not released
@@ -33,7 +33,7 @@ pytest = { version = ">=6", optional = true } pytest-xdist = { version = ">=2.5", extras = ["psutil"], optional = true } # TODO:#i# wait for new release # pytest-sugar = { version = ">=0.9.5", optional = true } -pytest-sugar = { git = "https://github.com/Teemu/pytest-sugar.git", rev = "ee02ada200026f4bfabf2252c6c59b08f5564255", optional = true } +# pytest-sugar = { git = "https://github.com/Teemu/pytest-sugar.git", rev = "ee02ada200026f4bfabf2252c6c59b08f5564255", optional = true } pytest-randomly = { version = ">=3", optional = true } [tool.poetry.dev-dependencies]
Update grammar.md Removing double 'and'.
@@ -13,7 +13,7 @@ in the following form: ; This rule is called `Hello`. After the rule name, there is a colon. The body of the -rule is given as a textX expression, starting at the colon and and ending with a +rule is given as a textX expression, starting at the colon and ending with a semicolon. This rule tells us that the pattern of `Hello` objects in input strings consists of the string literal `hello`, followed by the ID rule (here ID is a reference to a built-in rule, more about this in a moment).
Update python dependencies We depend on 'cryptography', not 'pycryptodome'
@@ -98,7 +98,7 @@ Create a Python virtual environment (virtualenv) and activate it python3 -mvenv venv source venv/bin/activate pip install 'pip>=19.1.1' wheel -pip install PyYaml ansible netaddr pyOpenSSL pycryptodome +pip install PyYaml ansible netaddr pyOpenSSL cryptography>=3.0 ``` To create a virtualbox build (the default):
TST: added test for Sequence to_html [ADDED] to_html test for sequence in test_sequence.py
@@ -949,6 +949,34 @@ class SequenceTests(TestCase): with self.assertRaises(AttributeError): s.is_annotated() + def test_to_html(self): + """produce correct html formatted text""" + seq = DnaSequence("ACGGTGGGGGGGGG") + got = seq.to_html() + # ensure balanced tags are in the txt + for tag in ["<style>", "</style>", "<div", "</div>", "<table>", "</table>"]: + self.assertTrue(tag in got) + + seq_row = ( + '<tr><td class="label">None</td>' + '<td><span class="A_dna">A</span>' + '<span class="C_dna">C</span>' + '<span class="G_dna">G</span>' + '<span class="G_dna">G</span>' + '<span class="T_dna">T</span>' + '<span class="G_dna">G</span>' + '<span class="G_dna">G</span>' + '<span class="G_dna">G</span>' + '<span class="G_dna">G</span>' + '<span class="G_dna">G</span>' + '<span class="G_dna">G</span>' + '<span class="G_dna">G</span>' + '<span class="G_dna">G</span>' + '<span class="G_dna">G</span></td></tr>' + ) + + self.assertTrue(seq_row in got) + class SequenceSubclassTests(TestCase): """Only one general set of tests, since the subclasses are very thin."""
Fix tracking URL name It was causing people who hadn't typed it in to see our special code :(
@@ -135,6 +135,6 @@ urlpatterns = [ # redirect post March 2018 url(r'^(?P<ccg_code>[A-Za-z\d]{3})/$', frontend_views.measures_for_one_ccg, - name='measures_for_one_ccg'), + name='measures_for_one_ccg_tracking'), ]
Fix functional test for creating subnet Subnet create failed due to a bad random subnet range, so retry it with a new random range when the test fails.
@@ -31,18 +31,22 @@ class FloatingIpTests(base.TestCase): cls.re_description = re.compile("description\s+\|\s+([^|]+?)\s+\|") cls.re_network_id = re.compile("floating_network_id\s+\|\s+(\S+)") - # Make a random subnet - cls.subnet = ".".join(map( - str, - (random.randint(0, 223) for _ in range(3)) - )) + ".0/26" - # Create a network for the floating ip raw_output = cls.openstack( 'network create --external ' + cls.NETWORK_NAME ) cls.network_id = re.search(cls.re_id, raw_output).group(1) + # Try random subnet range for subnet creating + # Because we can not determine ahead of time what subnets are already + # in use, possibly by another test running in parallel, try 4 times + for i in range(4): + # Make a random subnet + cls.subnet = ".".join(map( + str, + (random.randint(0, 223) for _ in range(3)) + )) + ".0/26" + try: # Create a subnet for the network raw_output = cls.openstack( 'subnet create ' + @@ -50,6 +54,15 @@ class FloatingIpTests(base.TestCase): '--subnet-range ' + cls.subnet + ' ' + cls.SUBNET_NAME ) + except Exception: + if (i == 3): + # raise the exception at the last time + raise + pass + else: + # break and no longer retry if create sucessfully + break + cls.subnet_id = re.search(cls.re_id, raw_output).group(1) @classmethod
[Doc] [Jobs] Add `ray dashboard` docs to jobs doc To use Jobs on a remote cluster, you need to set up port forwarding. When using the cluster launcher, the `ray dashboard` command provides this automatically. This PR adds a how-to to the docs for this feature.
@@ -177,7 +177,9 @@ Monitoring cluster status (``ray dashboard/status``) The Ray also comes with an online dashboard. The dashboard is accessible via HTTP on the head node (by default it listens on ``localhost:8265``). You can -also use the built-in ``ray dashboard`` to do this automatically. +also use the built-in ``ray dashboard`` to set up port forwarding +automatically, making the remote dashboard viewable in your local browser at +``localhost:8265``. .. code-block:: shell
Update android_cerberus.txt Not a ```Cerberus``` one, moving to neutral ```Bankbot``` trail:
@@ -6422,13 +6422,6 @@ ultimatemoon.top freecclleaner.com -# Reference: https://twitter.com/AgidCert/status/1353763168909225987 -# Reference: https://cert-agid.gov.it/news/individuato-sito-che-veicola-in-italia-un-apk-malevolo/ -# Reference: https://www.virustotal.com/gui/file/9ae593c5611fa04fc0b7cf85f356b0ac92dcbe51fc5f481425ec7d6743368447/detection - -montanatony.xyz -supportoapp.com - # Reference: https://twitter.com/Vlad86317048/status/1353938656977805313 # Reference: https://www.virustotal.com/gui/ip-address/35.236.33.93/relations # Reference: https://www.virustotal.com/gui/ip-address/47.91.92.27/relations @@ -6622,7 +6615,6 @@ descargar2021flplayer.site /ANZ_security.apk /ASISTAN.apk /asistangoogle.apk -/Assistenzaclienti.apk /atesolcer.apk /app-release-100_obf.apk /BancaOnline.apk
Make tox.ini tox 4.0.0 compatible * removed skipsdist=True to make sure cloudkitty is available in the virtual env * added find to allowed external commands in tox * replaced full path to find for readability
[tox] minversion = 3.18.0 -skipsdist = True envlist = py3,pep8 ignore_basepython_conflict = True [testenv] basepython = python3 -allowlist_externals = rm +allowlist_externals = + find + rm setenv = VIRTUAL_ENV={envdir} PYTHONWARNINGS=default::DeprecationWarning usedevelop = True @@ -16,7 +17,7 @@ deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/up -r{toxinidir}/test-requirements.txt commands = - /usr/bin/find . -type f -name "*.py[co]" -delete + find . -type f -name "*.py[co]" -delete rm -f .testrepository/times.dbm stestr run {posargs}
remote: optimize UrlInfo.isin() This will use cached `_path`, which will use cached `_cparts`. Saves some time skipping repetitive parsing.
@@ -172,7 +172,7 @@ class URLInfo(object): @cached_property def _path(self): - return pathlib.PurePosixPath(self.parsed.path) + return PosixPathInfo(self.parsed.path) @property def name(self): @@ -210,7 +210,7 @@ class URLInfo(object): return ( self.scheme == other.scheme and self.netloc == other.netloc - and PathInfo(self.path).isin(PathInfo(other.path)) + and self._path.isin(other._path) )
clear old values when setting a new objective The new parameter 'clear' can be set to 'false' to keep old values. Fixes
@@ -441,18 +441,28 @@ cdef class Model: """ PY_SCIP_CALL(SCIPsetObjlimit(self._scip, objlimit)) - def setObjective(self, coeffs, sense = 'minimize'): - """Establish the objective function, either as a variable dictionary or as a linear expression. + def setObjective(self, coeffs, sense = 'minimize', clear = 'true'): + """Establish the objective function as a linear expression. Keyword arguments: coeffs -- the coefficients sense -- the objective sense (default 'minimize') + clear -- set all other variables objective coefficient to zero (default 'true') """ assert isinstance(coeffs, Expr) if coeffs.degree() > 1: raise ValueError("Nonlinear objective functions are not supported!") if coeffs[CONST] != 0.0: raise ValueError("Constant offsets in objective are not supported!") + + # clear existing objective function + cdef SCIP_VAR** _vars + cdef int _nvars + _vars = SCIPgetOrigVars(self._scip) + _nvars = SCIPgetNOrigVars(self._scip) + for i in range(_nvars): + PY_SCIP_CALL(SCIPchgVarObj(self._scip, _vars[i], 0.0)) + for term, coef in coeffs.terms.items(): # avoid CONST term of Expr if term != CONST:
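A hedged usage sketch of the new parameter, assuming the patched PySCIPOpt build; note that, as in the diff, `clear` is passed as the string `'false'` rather than a boolean, and the keep-old-values behaviour is taken from the commit message:

```python
from pyscipopt import Model

m = Model()
x = m.addVar("x")
y = m.addVar("y")

# Default: other variables' objective coefficients are reset to zero
# before the new expression is applied.
m.setObjective(x + 2 * y, "minimize")

# Per the commit message, clear='false' keeps the previously set
# coefficients and only applies the new terms.
m.setObjective(3 * y, "minimize", clear='false')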
Update FindARM.cmake Fix typos
@@ -68,9 +68,9 @@ if(NOT NEON_FOUND) MESSAGE(STATUS "Could not find hardware support for NEON on this machine.") endif(NOT NEON_FOUND) if(NOT CORTEXA8_FOUND) - MESSAGE(STATUS "No OMAP3 processor on this on this machine.") + MESSAGE(STATUS "No OMAP3 processor on this machine.") endif(NOT CORTEXA8_FOUND) if(NOT CORTEXA9_FOUND) - MESSAGE(STATUS "No OMAP4 processor on this on this machine.") + MESSAGE(STATUS "No OMAP4 processor on this machine.") endif(NOT CORTEXA9_FOUND) mark_as_advanced(NEON_FOUND)
Deep copy work block in miner.py Issue
@@ -280,7 +280,7 @@ class Miner: if header_hash not in self.work_map: return False # this copy is necessary since there might be multiple submissions concurrently - block = copy.copy(self.work_map[header_hash]) + block = copy.deepcopy(self.work_map[header_hash]) header = block.header header.nonce, header.mixhash = nonce, mixhash
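A small stand-alone sketch of why the deep copy matters here, using hypothetical `Block`/`Header` stand-ins rather than the real miner classes:

```python
import copy

# Hypothetical stand-ins for the work block/header relationship in miner.py.
class Header:
    def __init__(self):
        self.nonce = 0

class Block:
    def __init__(self):
        self.header = Header()

work = Block()

shallow = copy.copy(work)
shallow.header.nonce = 42      # shared Header: the original work block is mutated too
assert work.header.nonce == 42

work.header.nonce = 0
deep = copy.deepcopy(work)
deep.header.nonce = 7          # independent Header: the original stays untouched
assert work.header.nonce == 0
```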
command: refactor Move the message to parent class. Fixes
@@ -7,6 +7,10 @@ from dvc.command.base import CmdBase class CmdDataBase(CmdBase): + def __init__(self, args): + self.UP_TO_DATE_MSG = "Everything is up-to-date." + super().__init__(args) + def do_run(self, target): pass @@ -22,8 +26,6 @@ class CmdDataBase(CmdBase): class CmdDataPull(CmdDataBase): - UP_TO_DATE_MSG = "Everything is up-to-date." - def do_run(self, target=None): try: processed_files_count = self.repo.pull( @@ -46,8 +48,6 @@ class CmdDataPull(CmdDataBase): class CmdDataPush(CmdDataBase): - UP_TO_DATE_MSG = "Everything is up-to-date." - def do_run(self, target=None): try: processed_files_count = self.repo.push( @@ -69,8 +69,6 @@ class CmdDataPush(CmdDataBase): class CmdDataFetch(CmdDataBase): - UP_TO_DATE_MSG = "Everything is up-to-date." - def do_run(self, target=None): try: processed_files_count = self.repo.fetch(
Updated requirements [formerly e50ed4c38ed428133b09eb8a9c900e8c48885614] [formerly 6338ab71dc30febcedac0b7faa9fc2fa63942806] [formerly 70cfdcd1a79b31dd255868b40ec1dbc2802f7ab0]
@@ -7,3 +7,55 @@ scipy~=1.4.1 setuptools~=46.1.3 cipheycore~=0.1.4 cipheydists~=0.1.2 +absl-py==0.9.0 +astunparse==1.6.3 +attrs==19.3.0 +cachetools==4.1.0 +certifi==2020.4.5.1 +chardet==3.0.4 +cipheycore==0.1.1 +cipheydists==0.0.2 +colorama==0.4.3 +commonmark==0.9.1 +coverage==5.1 +gast==0.3.3 +google-auth==1.16.0 +google-auth-oauthlib==0.4.1 +google-pasta==0.2.0 +grpcio==1.29.0 +h5py==2.10.0 +idna==2.9 +Keras-Preprocessing==1.1.2 +loguru==0.5.0 +Markdown==3.2.2 +more-itertools==8.3.0 +numpy==1.18.4 +oauthlib==3.1.0 +opt-einsum==3.2.1 +packaging==20.4 +pluggy==0.13.1 +pprintpp==0.4.0 +protobuf==3.12.2 +py==1.8.1 +pyasn1==0.4.8 +pyasn1-modules==0.2.8 +Pygments==2.6.1 +pyparsing==2.4.7 +pytest==5.4.2 +pytest-cov==2.9.0 +requests==2.23.0 +requests-oauthlib==1.3.0 +rich==1.3.0 +rsa==4.0 +scipy==1.4.1 +six==1.15.0 +tensorboard==2.2.2 +tensorboard-plugin-wit==1.6.0.post3 +tensorflow==2.2.0 +tensorflow-estimator==2.2.0 +termcolor==1.1.0 +typing-extensions==3.7.4.2 +urllib3==1.25.9 +wcwidth==0.1.9 +Werkzeug==1.0.1 +wrapt==1.12.1
pin sqlalchemy below 2.0.0 new sqlalchemy dropped, our bk is broken
@@ -89,7 +89,7 @@ def get_version() -> str: "tomli", "tqdm", "typing_extensions>=4.0.1", - "sqlalchemy>=1.0", + "sqlalchemy>=1.0,<2.0.0", "toposort>=1.0", "watchdog>=0.8.3", 'psutil >= 1.0; platform_system=="Windows"',
[fix] Don't double log messages when sending a command to a running daemon. [fix] Prevent errors when rotating log file. fix
@@ -146,8 +146,6 @@ class Manager: except: flexget.log.start(level=self.options.loglevel, to_file=False) raise - else: - self._init_logging() manager = self @@ -182,7 +180,7 @@ class Manager: sys.exit(1) return options - def _init_logging(self): + def _init_logging(self, to_file=True): """ Initialize logging facilities """ @@ -191,7 +189,9 @@ class Manager: if not os.path.isabs(log_file): log_file = os.path.join(self.config_base, log_file) self.log_filename = log_file - flexget.log.start(log_file, self.options.loglevel, to_console=not self.options.cron) + flexget.log.start( + log_file, self.options.loglevel, to_file=to_file, to_console=not self.options.cron + ) def initialize(self): """ @@ -319,16 +319,14 @@ class Manager: and results will be streamed back. If not, this will attempt to obtain a lock, initialize the manager, and run the command here. """ - if sys.version_info <= (2, 7): - console('-' * 79) - console('Python 2.7 will not be maintained past 2020 !') - console('Consider upgrading to 3.6 or newer at your earliest convenience.') - console('-' * 79) # When we are in test mode, we use a different lock file and db if self.options.test: self.lockfile = os.path.join(self.config_base, '.test-%s-lock' % self.config_name) # If another process is started, send the execution to the running process ipc_info = self.check_ipc_info() + # If we are connecting to a running daemon, we don't want to log to the log file, + # the daemon is already handling that. + self._init_logging(to_file=not ipc_info) if ipc_info: console( 'There is a FlexGet process already running for this config, sending execution there.'
Bugfix Add the templates_auto_reload API This is present in Flask (since version 1) and missing in Quart.
@@ -343,6 +343,19 @@ class Quart(PackageStatic): """Return if the app has received a request.""" return self._got_first_request + @property + def templates_auto_reload(self) -> bool: + """Returns True if templates should auto reload.""" + result = self.config["TEMPLATES_AUTO_RELOAD"] + if result is None: + return self.debug + else: + return result + + @templates_auto_reload.setter + def templates_auto_reload(self, value: Optional[bool]) -> None: + self.config["TEMPLATES_AUTO_RELOAD"] = value + def auto_find_instance_path(self) -> Path: """Locates the instance_path if it was not provided""" prefix, package_path = find_package(self.import_name) @@ -400,7 +413,7 @@ class Quart(PackageStatic): if "autoescape" not in options: options["autoescape"] = self.select_jinja_autoescape if "auto_reload" not in options: - options["auto_reload"] = self.config["TEMPLATES_AUTO_RELOAD"] or self.debug + options["auto_reload"] = self.templates_auto_reload jinja_env = self.jinja_environment(self, **options) jinja_env.globals.update( {
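A short usage sketch of the new property, assuming a Quart build that includes this patch:

```python
from quart import Quart

app = Quart(__name__)

# With TEMPLATES_AUTO_RELOAD unset (None), the property falls back to app.debug.
app.config["TEMPLATES_AUTO_RELOAD"] = None
assert app.templates_auto_reload == app.debug

# Assigning the property writes straight back to the config key.
app.templates_auto_reload = True
assert app.config["TEMPLATES_AUTO_RELOAD"] is True
```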
Added Confirmation Dialog to Clear Pipeline Due to how the Clear Pipeline functionality is accomplished, it would be difficult to impossible to make it "undo-able", so instead I've added a confirmation dialog before clearing.
@@ -492,10 +492,18 @@ class Pipeline extends React.Component<Pipeline.Props, Pipeline.State> { } handleClear() { + return showDialog({ + title: 'Clear Pipeline?', + body: 'Are you sure you want to clear? You can not undo this.', + buttons: [Dialog.cancelButton(), Dialog.okButton({ label: 'Clear' })] + }).then( result => { + if (result.button.accept) { this.canvasController.clearPipelineFlow(); this.widgetContext.model.fromJSON(this.canvasController.getPipelineFlow()); this.position = 10; } + }); + } /** * Handles submitting pipeline runs
Add min(debug_level, 2) So that 3 v's e.g. `-vvv` would not cause a KeyError, and default to logging.DEBUG
@@ -43,7 +43,9 @@ def _set_debug_level(self, debug_level): 2: logging.DEBUG, } - self.setLevel(mapping[debug_level]) + self.setLevel( + mapping[min(debug_level, 2)], + ) log = get_logger()
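A tiny sketch of the clamping behaviour; only the `2: logging.DEBUG` entry appears in the diff, the other mapping entries are assumed for illustration:

```python
import logging

mapping = {
    0: logging.WARNING,  # assumed for illustration
    1: logging.INFO,     # assumed for illustration
    2: logging.DEBUG,    # as in the diff
}

debug_level = 3                        # e.g. from "-vvv"
level = mapping[min(debug_level, 2)]   # clamps to logging.DEBUG, no KeyError
print(logging.getLevelName(level))     # DEBUG
```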
settings: Add perfectScrollbar to uploads table. This adds the perfectScrollbar to the uploads table so that it will function properly in the settings container since the parent node has a perfectScrollbar.
@@ -47,9 +47,13 @@ exports.set_up_attachments = function () { callback: function (item, value) { return item.name.toLocaleLowerCase().indexOf(value) >= 0; }, + onupdate: function () { + ui.update_scrollbar(uploaded_files_table.closest(".progressive-table-wrapper")); + }, }, }).init(); + ui.set_up_scrollbar(uploaded_files_table.closest(".progressive-table-wrapper")); uploaded_files_table.empty(); _.each(attachments, function (attachment) {
Eagerly upload to github release assets. Don't do final release steps for prereleases.
@@ -67,11 +67,32 @@ jobs: GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} TESTPYPI_API_TOKEN: ${{ secrets.TESTPYPI_API_TOKEN }} PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }} + github_upload: + name: Upload to Github release + runs-on: ubuntu-latest + needs: [whl, pex, dmg, deb, exe, zip] + steps: + - uses: actions/github-script@v6 + with: + script: | + const utils = require('./.github/githubUtils.js') + const filesToUpload = [ + '${{ needs.whl.outputs.whl-file-name }}', + '${{ needs.pex.outputs.pex-file-name }}', + '${{ needs.dmg.outputs.dmg-file-name }}', + '${{ needs.deb.outputs.deb-file-name }}', + '${{ needs.exe.outputs.exe-file-name }}', + '${{ needs.zip.outputs.zip-file-name }}', + ] + for (let filename of filesToUpload) { + await utils.uploadReleaseAsset(github, context, filename, '${{ github.event.release.release_id }}') + } block_release_step: # This step ties to the release environment which requires manual approval # before it can execute. Once manual approval has been granted, the release is # unblocked and all the subsequent steps in this workflow will happen. name: Job to block publish of a release until it has been manually approved + if: ${{ !github.event.release.prerelease }} needs: [whl, pex, dmg, deb, exe, test_pypi_upload] runs-on: ubuntu-latest environment: release @@ -79,6 +100,7 @@ jobs: - run: echo "Release now publishing!" pypi_upload: name: Upload to PyPi + if: ${{ !github.event.release.prerelease }} needs: [whl, block_release_step] uses: ./.github/workflows/pypi_upload.yml with: @@ -90,28 +112,9 @@ jobs: GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} TESTPYPI_API_TOKEN: ${{ secrets.TESTPYPI_API_TOKEN }} PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }} - github_upload: - name: Upload to Github release - runs-on: ubuntu-latest - needs: [block_release_step, whl, pex, dmg, deb, exe, zip] - steps: - - uses: actions/github-script@v6 - with: - script: | - const utils = require('./.github/githubUtils.js') - const filesToUpload = [ - '${{ needs.whl.outputs.whl-file-name }}', - '${{ needs.pex.outputs.pex-file-name }}', - '${{ needs.dmg.outputs.dmg-file-name }}', - '${{ needs.deb.outputs.deb-file-name }}', - '${{ needs.exe.outputs.exe-file-name }}', - '${{ needs.zip.outputs.zip-file-name }}', - ] - for (let filename of filesToUpload) { - await utils.uploadReleaseAsset(github, context, filename, '${{ github.event.release.release_id }}') - } gcs_upload: name: Upload to Google Cloud Storage + if: ${{ !github.event.release.prerelease }} runs-on: ubuntu-latest needs: [block_release_step, whl, pex, dmg, deb, exe, zip] steps:
BUG: Suspenders now call .resume() upon resuming The suspenders were calling obj.pause() upon being triggered, but were not calling obj.resume() when they resumed.
@@ -280,6 +280,7 @@ class RunEngine: 'clear_checkpoint': self._clear_checkpoint, 'rewindable': self._rewindable, 'pause': self._pause, + 'resume': self._resume, 'collect': self._collect, 'kickoff': self._kickoff, 'complete': self._complete, @@ -892,6 +893,7 @@ class RunEngine: self._plan_stack.append(ensure_generator(post_plan)) self._response_stack.append(None) # add the wait on the future to the stack + self._plan_stack.append(single_gen(Msg('resume', None, ))) self._plan_stack.append(single_gen(Msg('wait_for', None, [fut, ]))) self._response_stack.append(None) # if there is a pre plan add on top of the wait @@ -1991,6 +1993,25 @@ class RunEngine: """ self.request_pause(*msg.args, **msg.kwargs) + @asyncio.coroutine + def _resume(self, msg): + """Request the run engine to resume + + Expected message object is: + + Msg('resume', defer=False, name=None, callback=None) + + See RunEngine.resume() docstring for explanation of the three + keyword arguments in the `Msg` signature + """ + # Re-instate monitoring callbacks. + for obj, (cb, kwargs) in self._monitor_params.items(): + obj.subscribe(cb, **kwargs) + # Notify Devices of the resume in case they want to clean up. + for obj in self._objs_seen: + if hasattr(obj, 'resume'): + obj.resume() + @asyncio.coroutine def _checkpoint(self, msg): """Instruct the RunEngine to create a checkpoint so that we can rewind
Pass Generator not Sequence for GraphSAGE in hateful-twitter demo The GraphSAGE model was changed to take a generator rather than a sequence (created with `generator.flow`) in but the `demos/use-cases/hateful-twitter.ipynb` notebook was mistakenly not updated. See:
"source": [ "if model_type == \"graphsage\":\n", " base_model = GraphSAGE(\n", - " layer_sizes=[32, 32], generator=train_gen, bias=True, dropout=0.5,\n", + " layer_sizes=[32, 32], generator=generator, bias=True, dropout=0.5,\n", " )\n", " x_inp, x_out = base_model.default_model(flatten_output=True)\n", " prediction = layers.Dense(units=1, activation=\"sigmoid\")(x_out)\n",
Added additional default plans to the migration that creates them. Since this modifies an existing migration and the logic doesn't run if any plans already exist, this shouldn't have any impact on existing sites.
@@ -36,7 +36,7 @@ def create_default_plans(apps, schema_editor): org='packaging', context='Upload Beta', ) - # Upload Beta + # Beta Test Plan.objects.create( name='Beta Test', description=( @@ -49,6 +49,43 @@ def create_default_plans(apps, schema_editor): org='beta', context='Beta Test', ) + # Upload Release + Plan.objects.create( + name='Upload Release', + description=( + 'Deploy the metadata to the packaging org, upload a production ' + 'version, and create a GitHub release with release notes' + ), + type='manual', + flows='ci_master,release_production', + org='packaging', + context='Upload Release', + ) + # Release Test + Plan.objects.create( + name='Release Test', + description=( + 'Installs the latest release in the standard release test org and ' + 'runs apex tests' + ), + type='tag', + regex='rel/.*', + flows='ci_release', + org='release', + context='Release Test', + ) + # Dev Org + Plan.objects.create( + name='Dev Org', + description=( + 'Runs the dev_org flow against the dev scratch org config. This ' + 'plan is used by the New Org Please feature' + ), + type='org', + flows='dev_org', + org='dev', + public=False, + ) class Migration(migrations.Migration):
Enable RPM debug packages Now that the pulp infrastructure can handle the separate debug packages, enable RPM debuginfo extraction. This should reduce the install size substantially.
@@ -18,8 +18,9 @@ config_opts[f'{config_opts.package_manager}_builddep_opts'] = config_opts.get(f' config_opts['environment']['@env_key'] = '@env_val' @[end for] @[end if]@ -# Disable debug packages until infrastructure can handle it -config_opts['macros']['%debug_package'] = '%{nil}' +# Make debuginfo/debugsource packages best-effort +config_opts['macros']['%_empty_manifest_terminate_build'] = '%{nil}' +config_opts['macros']['%_missing_build_ids_terminate_build'] = '%{nil}' # Hack the %{dist} macro to allow release suffixing config_opts['macros']['%dist'] = '.' + config_opts['dist'] + '%{?dist_suffix}'
Replace metrics unwraps with expects Using expect gives the line number where the error occured and more contextual information about what the error was for debugging.
@@ -20,10 +20,11 @@ use cpython::{NoArgs, ObjectProtocol, PyDict, PyModule, PyObject, Python, ToPyOb pub fn get_collector<S: AsRef<str>>(name: S) -> MetricsCollectorHandle { let gil = Python::acquire_gil(); let py = gil.python(); - let py_metrics = py.import("sawtooth_validator.metrics").unwrap(); + let py_metrics = py.import("sawtooth_validator.metrics") + .expect("Failed to import sawtooth_validator.metrics module"); let py_collector = py_metrics .call(py, "get_collector", (name.as_ref(),), None) - .unwrap(); + .expect("Failed to call metrics.get_collector()"); MetricsCollectorHandle { py_collector, py_metrics, @@ -102,7 +103,7 @@ impl MetricsCollectorHandle { let py_level = self.py_metrics .get(py, into_level_str(level.unwrap_or(Default::default()))) - .unwrap(); + .expect("Failed to get metric level"); let py_tags: PyDict = tags.unwrap_or_else(|| HashMap::new()).into_py_object(py); let kwargs = PyDict::new(py); kwargs.set_item(py, "level", py_level).unwrap(); @@ -110,7 +111,7 @@ impl MetricsCollectorHandle { self.py_collector .call_method(py, metric_type, (metric_name.as_ref(),), Some(&kwargs)) - .unwrap() + .expect("Failed to create new metric") } } @@ -124,7 +125,7 @@ impl Gauge { let py = gil.python(); self.py_gauge .call_method(py, "set_value", (value,), None) - .unwrap(); + .expect("Failed to call Gauge.set_value()"); } } @@ -138,7 +139,7 @@ impl Counter { let py = gil.python(); self.py_counter .call_method(py, "inc", NoArgs, None) - .unwrap(); + .expect("Failed to call Counter.inc()"); } pub fn dec(&mut self) { @@ -146,7 +147,7 @@ impl Counter { let py = gil.python(); self.py_counter .call_method(py, "dec", NoArgs, None) - .unwrap(); + .expect("Failed to call Counter.dec()"); } } @@ -159,7 +160,9 @@ impl Timer { let gil = Python::acquire_gil(); let py = gil.python(); TimerHandle { - py_timer_ctx: self.py_timer.call_method(py, "time", NoArgs, None).unwrap(), + py_timer_ctx: self.py_timer + .call_method(py, "time", NoArgs, None) + .expect("Failed to call Timer.time()"), } } } @@ -174,6 +177,6 @@ impl Drop for TimerHandle { let py = gil.python(); self.py_timer_ctx .call_method(py, "stop", NoArgs, None) - .unwrap(); + .expect("Failed to call TimerContext.stop()"); } }
Fix next_level issue Check if level is None before comparing
@@ -176,7 +176,7 @@ class FlagSubmissionHandler(BaseHandler): # Unlock next level if based on Game Progress next_level = GameLevel.by_id(level.next_level_id) - if next_level._type == "progress" and level_progress * 100 >= next_level.buyout and next_level not in user.team.game_levels: + if next_level and next_level._type == "progress" and level_progress * 100 >= next_level.buyout and next_level not in user.team.game_levels: logging.info("%s (%s) unlocked %s" % ( user.handle, user.team.name, next_level.name ))
Replace some <i> tags with <em>. Required by sonarcloud for code changes.
<form> <div class="field is-horizontal"> <div class="field-label is-small"> - <label class="label"><span class="icon is-small"><i class="fas fa-search"></i></span></label> + <label class="label"><span class="icon is-small"><em class="fas fa-search"></em></span></label> </div> <div class="field-body"> <div class="field"> </divp> <div class="level-item" x-show="selectedAbility"> <button class="button is-primary is-small" @click="saveAbility(true)" x-bind:disabled="!selectedAbilityId"> - <span class="icon"><i class="fas fa-plus"></i></span> + <span class="icon"><em class="fas fa-plus"></em></span> <span>Save & Add</span> </button> </div>
[BUG] Fix `write_ndarray_to_tsfile` for `classLabel = False` Fixes `IOError` when reading a tsfile written with `write_ndarray_to_tsfile`. The issue in `load_from_tsfile_to_dataframe` happened because the attributes for "class label" were inconsistent. These are changed consistently to `@classlabel`.
@@ -1573,9 +1573,9 @@ def write_ndarray_to_tsfile( # write class label line if class_label is not None: space_separated_class_label = " ".join(str(label) for label in class_label) - file.write(f"@classLabel true {space_separated_class_label}\n") + file.write(f"@classlabel true {space_separated_class_label}\n") else: - file.write("@class_label false\n") + file.write("@classlabel false\n") # begin writing the core data for each case # which are the series and the class value list if there is any file.write("@data\n")
Remove --no-deps from mxnet installation. The latest version does not downgrade numpy anymore:
@@ -89,8 +89,7 @@ RUN pip uninstall -y tensorflow && \ pip install /tmp/tensorflow_gpu/tensorflow*.whl && \ rm -rf /tmp/tensorflow_gpu && \ pip uninstall -y mxnet && \ - # b/126259508 --no-deps prevents numpy from being downgraded. - pip install --no-deps mxnet-cu$CUDA_MAJOR_VERSION$CUDA_MINOR_VERSION && \ + pip install mxnet-cu$CUDA_MAJOR_VERSION$CUDA_MINOR_VERSION && \ /tmp/clean-layer.sh # Reinstall TensorFlow addons (TFA) with GPU support.
Non-INTERFACE AT_LINK_STYLE is dead code Summary: Pull Request resolved:
@@ -305,39 +305,7 @@ if(USE_CUDA OR USE_ROCM) add_library(ATen_cuda INTERFACE) list(APPEND ATen_CUDA_DEPENDENCY_LIBS ATEN_CUDA_FILES_GEN_LIB) else() - # A hack to deal with cuda library dependencies and modern CMake: the - # CUDA_ADD_LIBRARY includes a target_link_libraries, and as a result, - # one cannot use PUBLIC/PRIVATE/INTERFACE for the target anymore. This - # hack adds the PRIVATE keywords to CUDA_LIBRARIES so we can deal with - # it. We will then manually add the cudart library as interface libs. - set(__tmp ${CUDA_LIBRARIES}) - set(CUDA_LIBRARIES PRIVATE ${CUDA_LIBRARIES}) - torch_cuda_based_add_library(ATen_cuda ${AT_LINK_STYLE} ${ATen_CUDA_SRCS}) - set(CUDA_LIBRARIES ${__tmp}) - target_link_libraries(ATen_cuda INTERFACE caffe2::cudart) - - target_include_directories( - ATen_cuda INTERFACE $<INSTALL_INTERFACE:include>) - target_include_directories( - ATen_cuda PRIVATE ${ATen_THIRD_PARTY_INCLUDE}) - target_include_directories( - ATen_cuda PRIVATE ${ATen_CUDA_INCLUDE}) - target_link_libraries( - ATen_cuda PRIVATE ${ATen_CUDA_DEPENDENCY_LIBS} ATEN_CUDA_FILES_GEN_LIB) - - # These public dependencies must go after the previous dependencies, as the - # order of the libraries in the linker call matters here when statically - # linking; libculibos and cublas must be last. - target_link_libraries( - ATen_cuda PUBLIC ATen_cpu ${ATen_PUBLIC_CUDA_DEPENDENCY_LIBS}) - - # Set standard properties on the target - torch_set_target_props(ATen_cuda) - - caffe2_interface_library(ATen_cuda ATen_cuda_library) - - # Make sure these don't get built by parent - set(ATen_CUDA_SRCS) + message(FATAL_ERROR "Non-INTERFACE AT_LINK_STYLE no longer supported") endif() endif()
Added batch processing to onnx Fixed a bug introduced 2021/03/27 where a for loop was directed at a dictionary. Fixed duplicate prediction calls.
@@ -1018,30 +1018,39 @@ class NERModel: ] if self.args.onnx: + + # Encode model_inputs = self.tokenizer.batch_encode_plus( to_predict, return_tensors="pt", padding=True, truncation=True ) - for inputs in tqdm(model_inputs): + # Change shape for batching + encoded_model_inputs = [] + if self.args.model_type in ["bert", "xlnet", "albert", "layoutlm"]: + for (input_ids, attention_mask, token_type_ids) in tqdm( + zip(model_inputs["input_ids"], model_inputs["attention_mask"], + model_inputs["token_type_ids"])): + encoded_model_inputs.append( (input_ids, attention_mask, token_type_ids) ) + else: + for (input_ids, attention_mask) in tqdm( + zip(model_inputs["input_ids"], model_inputs["attention_mask"])): + encoded_model_inputs.append((input_ids, attention_mask)) + + # Setup batches + eval_sampler = SequentialSampler(encoded_model_inputs) + eval_dataloader = DataLoader(encoded_model_inputs, sampler=eval_sampler, batch_size=args.eval_batch_size) + for batch in tqdm(eval_dataloader, disable=args.silent, desc="Running Prediction"): if self.args.model_type in ["bert", "xlnet", "albert", "layoutlm"]: - input_ids, attention_mask, token_type_ids = ( - model_inputs["input_ids"], - model_inputs["attention_mask"], - model_inputs["token_type_ids"], - ) - input_ids = input_ids.detach().cpu().numpy() - attention_mask = attention_mask.detach().cpu().numpy() - token_type_ids = token_type_ids.detach().cpu().numpy() inputs_onnx = { - "input_ids": input_ids, - "attention_mask": attention_mask, - "token_type_ids": token_type_ids, + "input_ids": batch[0].detach().cpu().numpy(), + "attention_mask": batch[1].detach().cpu().numpy(), + "token_type_ids": batch[2].detach().cpu().numpy(), } else: - input_ids, attention_mask = (model_inputs["input_ids"], model_inputs["attention_mask"]) - input_ids = input_ids.detach().cpu().numpy() - attention_mask = attention_mask.detach().cpu().numpy() - inputs_onnx = {"input_ids": input_ids, "attention_mask": attention_mask} + inputs_onnx = { + "input_ids": batch[0].detach().cpu().numpy(), + "attention_mask": batch[1].detach().cpu().numpy(), + } # Run the model (None = get all the outputs) output = self.model.run(None, inputs_onnx)
Don't use `patcher.start()`, especially if `patcher.stop()` is not called, as it breaks unrelated tests. Refs
@@ -11,9 +11,8 @@ from tests.support.mock import MagicMock, patch @pytest.fixture def configure_loader_modules(): - patcher = patch("salt.utils.path.which", lambda exe: exe) - patcher.start() - return {djangomod: {}} + with patch("salt.utils.path.which", lambda exe: exe): + yield {djangomod: {}} def test_command():
Move call to QAct out from _ApplyActivationFunction / _ApplyProjectionKernel. It was only enabled at a single call site, so call it there instead.
@@ -1176,8 +1176,8 @@ class ProjectionLayer(quant_utils.QuantizableLayer): if self._is_bn_folded or not p.batch_norm: # Everything folded together. This is the only variant that supports # quantization. - out = self._ApplyProjectionKernel( - w, b, inputs, quant=True, **proj_kwargs) + out = self._ApplyProjectionKernel(w, b, inputs, **proj_kwargs) + out = self.QAct(self._output_qact_name, out) else: # Projection kernel(no activation fn) -> BN -> Activation fn. out = self._ApplyProjectionKernel( @@ -1277,7 +1277,6 @@ class ProjectionLayer(quant_utils.QuantizableLayer): b, inputs, with_activation=True, - quant=False, mix_kernel=None): """Applies matmul/bias/activation in one step. @@ -1291,7 +1290,6 @@ class ProjectionLayer(quant_utils.QuantizableLayer): b: Bias vector (or None). inputs: FProp inputs. with_activation: Whether to also compute the activation function. - quant: Whether to apply quantization. mix_kernel: (optional) mix_kernel for block diagonal matmul. Returns: @@ -1343,18 +1341,14 @@ class ProjectionLayer(quant_utils.QuantizableLayer): out += b # NOTE: Bias on matmul is never quantized. out = gshard_utils.MeshSplit(out, p.device_mesh, p.activation_split_dims_mapping) - return self._ApplyActivationFunction(out, with_activation, quant) + return self._ApplyActivationFunction(out, with_activation) - def _ApplyActivationFunction(self, - out, - with_activation=True, - quant=False): + def _ApplyActivationFunction(self, out, with_activation=True): """Applies the activation function in one step. Args: out: The result of applying the weight matrix (and bias) to the inputs. with_activation: Whether to also compute the activation function. - quant: Whether to apply quantization. Returns: Output tensor reshaped. @@ -1367,8 +1361,6 @@ class ProjectionLayer(quant_utils.QuantizableLayer): if not p.is_inference: out = py_utils.CheckNumerics(out) out = activations.GetFn(p.activation)(out) - if quant: - out = self.QAct(self._output_qact_name, out) return out @classmethod
Fix mongoengine.errors.InvalidQueryError: Cannot resolve field "id" by querying on `pk` instead of `id`.
@@ -42,10 +42,10 @@ class QueryAjaxModelLoader(AjaxModelLoader): if not model: return None - return (as_unicode(model.id), as_unicode(model)) + return (as_unicode(model.pk), as_unicode(model)) def get_one(self, pk): - return self.model.objects.filter(id=pk).first() + return self.model.objects.filter(pk=pk).first() def get_list(self, term, offset=0, limit=DEFAULT_PAGE_SIZE): query = self.model.objects
Silence tests: fix unawaited coro warnings. Because the Scheduler is mocked, it doesn't actually do anything with the coroutines passed to the schedule() functions, hence the warnings.
@@ -68,7 +68,9 @@ class SilenceNotifierTests(unittest.IsolatedAsyncioTestCase): with self.subTest(current_loop=current_loop): with mock.patch.object(self.notifier, "_current_loop", new=current_loop): await self.notifier._notifier() - self.alert_channel.send.assert_called_once_with(f"<@&{Roles.moderators}> currently silenced channels: ") + self.alert_channel.send.assert_called_once_with( + f"<@&{Roles.moderators}> currently silenced channels: " + ) self.alert_channel.send.reset_mock() async def test_notifier_skips_alert(self): @@ -158,7 +160,7 @@ class RescheduleTests(unittest.IsolatedAsyncioTestCase): async def test_skipped_missing_channel(self): """Did nothing because the channel couldn't be retrieved.""" - self.cog.unsilence_timestamps.items.return_value = [(123, -1), (123, 1), (123, 100000000000)] + self.cog.unsilence_timestamps.items.return_value = [(123, -1), (123, 1), (123, 10000000000)] self.bot.get_channel.return_value = None await self.cog._reschedule() @@ -230,6 +232,9 @@ class SilenceTests(unittest.IsolatedAsyncioTestCase): self.cog._init_task = asyncio.Future() self.cog._init_task.set_result(None) + # Avoid unawaited coroutine warnings. + self.cog.scheduler.schedule_later.side_effect = lambda delay, task_id, coro: coro.close() + asyncio.run(self.cog._async_init()) # Populate instance attributes. self.channel = MockTextChannel()
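A minimal illustration of the `coro.close()` trick used above. The `unsilence` coroutine and the `schedule_later` signature are hypothetical stand-ins; the point is that a MagicMock swallows the coroutine object without awaiting it, and closing it in the side_effect suppresses the "coroutine was never awaited" RuntimeWarning.

```python
import asyncio
from unittest import mock


async def unsilence(channel_id: int) -> None:  # hypothetical coroutine
    await asyncio.sleep(0)


scheduler = mock.MagicMock()
# Without this, the coroutine object is garbage-collected unawaited and Python
# emits "RuntimeWarning: coroutine 'unsilence' was never awaited".
scheduler.schedule_later.side_effect = lambda delay, task_id, coro: coro.close()

scheduler.schedule_later(10, 123, unsilence(123))
```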
Update SECURITY.md. Update supported releases.
@@ -10,9 +10,10 @@ Along those lines, OWASP CRS team may not issue security notifications for unsup | Version | Supported | | --------- | ------------------ | -| 3.3.x-dev | :white_check_mark: | +| 3.4.x-dev | :white_check_mark: | +| 3.3.x | :white_check_mark: | | 3.2.x | :white_check_mark: | -| 3.1.x | :white_check_mark: | +| 3.1.x | :x: | | 3.0.x | :x: | ## Reporting a Vulnerability
Add missing private field to NamedTuple. It is very useful for doing introspection.
@@ -535,6 +535,7 @@ def cast(tp: Type[_T], obj: Any) -> _T: ... # NamedTuple is special-cased in the type checker class NamedTuple(tuple): + _field_types = ... # type: collections.OrderedDict[str, Type[Any]] _fields = ... # type: Tuple[str, ...] _source = ... # type: str
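A quick look at the introspection this stub enables. Note that `_field_types` was deprecated in Python 3.8 and removed in 3.9, where `__annotations__` carries the same information; the fallback below keeps the snippet runnable on any version.

```python
from typing import NamedTuple


class Point(NamedTuple):
    x: int
    y: float


print(Point._fields)  # ('x', 'y')
# _field_types was removed in Python 3.9; fall back to __annotations__ there.
print(getattr(Point, "_field_types", Point.__annotations__))
```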
Average losses before logging is called. Fixes by averaging losses before loggers are called. It's guarded with `n_gpu > 1` to avoid synchronizing processes in DDP runs.
@@ -386,6 +386,8 @@ class RecipeManagerTrainerInterface: teacher_inputs = None loss = student_outputs["loss"] + if self.args.n_gpu > 1: # DataParallel + loss = loss.mean() loss = self.manager.loss_update( loss, model,
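Why the `.mean()` matters: `nn.DataParallel` gathers one scalar loss per GPU replica, so `loss` arrives as a vector. A self-contained sketch follows; the tensor stands in for the gathered losses and `n_gpu` is a stand-in for `self.args.n_gpu`.

```python
import torch

n_gpu = 2  # pretend the model is wrapped in nn.DataParallel across two GPUs
loss = torch.tensor([0.8, 0.6])  # one scalar loss per replica

if n_gpu > 1:
    loss = loss.mean()  # reduce to a single scalar before loggers (and backward) see it

print(loss.item())  # ~0.7
```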
id_parser: refactor _valid_url The nested if is ugly
@@ -6,8 +6,9 @@ class IDParser: pass def _valid_url(self, input): - if input is not None: - if (input[:7] == "http://") or (input[:8] == "https://"): + if input is None: + return False + if input[:7] == "http://" or input[:8] == "https://": return True return False
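For what it's worth, the guard clause can be collapsed even further, since `str.startswith` accepts a tuple of prefixes. This is just an alternative sketch, not what the commit does:

```python
def _valid_url(url):
    # One boolean expression instead of a guard clause plus nested if.
    return url is not None and url.startswith(("http://", "https://"))


assert _valid_url("https://example.com")
assert not _valid_url(None)
assert not _valid_url("ftp://example.com")
```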
Test QR and Cholesky. These are the same as the ones recently merged into symengine.
from symengine import symbols
 from symengine.lib.symengine_wrapper import (DenseMatrix, Symbol, Integer,
-    function_symbol, I, NonSquareMatrixError, ShapeError, zeros, ones, eye,
-    ImmutableMatrix)
+    Rational, function_symbol, I, NonSquareMatrixError, ShapeError, zeros,
+    ones, eye, ImmutableMatrix)
 from symengine.utilities import raises
 
 
@@ -353,6 +353,25 @@ def test_FFLDU():
     assert U == DenseMatrix(3, 3, [1, 2, 3, 0, -13, -13, 0, 0, 91])
 
 
+def test_QR():
+    A = DenseMatrix(3, 3, [12, -51, 4, 6, 167, -68, -4, 24, -41])
+    Q, R = A.QR()
+
+    assert Q == DenseMatrix(3, 3, [Rational(6, 7), Rational(-69, 175),
+                                   Rational(-58, 175), Rational(3, 7),
+                                   Rational(158, 175), Rational(6, 175),
+                                   Rational(-2, 7), Rational(6, 35),
+                                   Rational(-33, 35)])
+    assert R == DenseMatrix(3, 3, [14, 21, -14, 0, 175, -70, 0, 0, 35])
+
+
+def test_cholesky():
+    A = DenseMatrix(3, 3, [4, 12, -16, 12, 37, -43, -16, -43, 98])
+    L = A.cholesky()
+
+    assert L == DenseMatrix(3, 3, [2, 0, 0, 6, 1, 0, -8, 5, 3])
+
+
 def test_str_repr():
     d = DenseMatrix(3, 2, [1, 2, 3, 4, 5, 6])
     assert str(d) == '[1, 2]\n[3, 4]\n[5, 6]\n'
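The expected factors can be cross-checked with numpy (a sanity check only, not part of the symengine test suite; numpy's QR may differ from symengine's by sign conventions, so only the reconstruction is asserted):

```python
import numpy as np

A = np.array([[12., -51., 4.], [6., 167., -68.], [-4., 24., -41.]])
Q, R = np.linalg.qr(A)
assert np.allclose(Q @ R, A)  # Q and R may differ from symengine's only by signs

B = np.array([[4., 12., -16.], [12., 37., -43.], [-16., -43., 98.]])
L = np.linalg.cholesky(B)
assert np.allclose(L @ L.T, B)
print(L)  # [[ 2.  0.  0.]  [ 6.  1.  0.]  [-8.  5.  3.]]
```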
inte-tests: correct path to migrations mount After this path has changed
@@ -93,7 +93,7 @@ sources = [ # directory directly. sources_static = [ ( - 'cloudify-manager/resources/rest-service/cloudify/migrations', + 'cloudify-manager/rest-service/migrations', ['/opt/manager/resources/cloudify/migrations'] ), (
Added alias for tags get traceback - !exception. I find myself writing !exception by mistake too often and just figured I'd see if others would want this alias in.
@@ -139,6 +139,14 @@ class Alias: await self.invoke(ctx, "defcon disable") + @command(name="exception", hidden=True) + async def tags_get_traceback_alias(self, ctx): + """ + Alias for invoking <prefix>tags get traceback. + """ + + await self.invoke(ctx, "tags get traceback") + @group(name="get", aliases=("show", "g"), hidden=True,
docstring fix * docstring fix ModelCheckpoint: added missing argument * docstring fix simplified
@@ -42,6 +42,8 @@ class ModelCheckpoint(object): in the directory 'dirname' create_dir (bool, optional): If True, will create directory 'dirname' if it doesnt exist. + save_as_state_dict (bool, optional): + If True, will save only the `state_dict` of the objects specified, otherwise the whole object will be saved. Notes: This handler expects two arguments: an `Engine` object and a `dict`
Updated README.md [ci skip] Tiny docs-only PR to do some tests with Github permissions.
@@ -75,4 +75,4 @@ This is a set of template applications that exist in a real project space and ar These applications are visible via the `app_exchange` view. -To add a new app, add a new `ExchangeApplication` model via django admin. You must supply a domain and an app id. Use the "canonical" app id used in app manager URLs, not a specific build id. You may also provide a help link and/or a link to a version history. All released versions of the app will be available via the app library, with the versions labeled by date of release, *not* by version number. +To add a new app, add a new `ExchangeApplication` model via django admin. You must supply a domain and an app id. Use the "canonical" app id used in app manager URLs, not a specific build id. You may also provide a help link and/or a link to a version history. All released versions of the app will be available via the app library, with the versions labeled by date of release, *not* by version number. The application title displayed in the library will be from the latest version of the app.
Load permissions from the file during migrations. When migrating the db on a machine that already has the auth.conf file, load permissions from that file and insert them. * add label
@@ -7,8 +7,10 @@ Revises: 387fcd049efb Create Date: 2020-11-09 15:12:12.055532 """ +import yaml from alembic import op import sqlalchemy as sa +from sqlalchemy.sql import table, column, select from manager_rest.storage.models_base import UTCDateTime @@ -27,7 +29,8 @@ def upgrade(): nullable=False, server_default="0")) create_deployments_labels_table() - _create_permissions_table() + permissions_table = _create_permissions_table() + _load_permissions(permissions_table) _create_maintenance_mode_table() op.add_column( 'roles', @@ -114,7 +117,7 @@ def _create_labels_table(table_name, fk_column, fk_refcolumn, fk_index): def _create_permissions_table(): - op.create_table( + return op.create_table( 'permissions', sa.Column('id', sa.Integer(), nullable=False, autoincrement=True), sa.Column('role_id', sa.Integer(), nullable=False), @@ -148,3 +151,29 @@ def _create_maintenance_mode_table(): 'maintenance_mode', ['_requested_by'], unique=False) + + +def _load_permissions(permissions_table): + """Load permissions from the conf file, if it exists.""" + try: + with open('/opt/manager/authorization.conf') as f: + data = yaml.safe_load(f) + permissions = data['permissions'] + except (IOError, KeyError): + return + roles_table = table('roles', column('id'), column('name')) + + for permission, roles in permissions.items(): + for role in roles: + op.execute( + permissions_table.insert() + .from_select( + ['name', 'role_id'], + select([ + op.inline_literal(permission).label('name'), + roles_table.c.id + ]) + .where(roles_table.c.name == op.inline_literal(role)) + .limit(op.inline_literal(1)) + ) + )
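The interesting part of the migration is the INSERT ... FROM SELECT, which resolves each role name to its id inside a single statement instead of querying first. Below is a standalone sketch of the same `from_select()` pattern against SQLite, written in SQLAlchemy 1.4+ style; the table shapes and the permission/role names are illustrative, not Cloudify's actual schema.

```python
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
meta = sa.MetaData()
roles = sa.Table(
    "roles", meta,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("name", sa.String),
)
permissions = sa.Table(
    "permissions", meta,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("role_id", sa.Integer),
    sa.Column("name", sa.String),
)
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(roles.insert(), [{"name": "sys_admin"}, {"name": "default"}])
    # Look up the role id by name inside the INSERT itself.
    sel = (
        sa.select(sa.literal("manage_others"), roles.c.id)
        .where(roles.c.name == "sys_admin")
        .limit(1)
    )
    conn.execute(permissions.insert().from_select(["name", "role_id"], sel))
    print(conn.execute(sa.select(permissions.c.name, permissions.c.role_id)).fetchall())
    # [('manage_others', 1)]
```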
ceph-common: remove copr and sepia repositories All EL8 dependencies are now present on EPEL 8 so we don't need the additional repositories that were only a temporary solution.
--- -- name: specific el 8 dependencies - when: ansible_distribution_major_version | int == 8 - block: - - name: install dnf-plugins-core - package: - name: dnf-plugins-core - register: result - until: result is succeeded - tags: with_pkg - - - name: enable ceph-el8 copr - command: dnf copr enable -y ktdreyer/ceph-el8 - args: - creates: /etc/yum.repos.d/_copr:copr.fedorainfracloud.org:ktdreyer:ceph-el8.repo - warn: false - register: result - until: result is succeeded - - - name: enable ceph lab extras repository - yum_repository: - name: lab-extras - baseurl: http://apt-mirror.front.sepia.ceph.com/lab-extras/8/ - description: Sepia Lab Extras repository - enabled: true - gpgcheck: false - - name: fetch ceph red hat development repository uri: # Use the centos repo since we don't currently have a dedicated red hat repo
Update README.md. Update slack channel.
@@ -74,7 +74,7 @@ make test ## For Contributors -If you are interested in contributing to Syft, first check out our [Contributor Quickstart Guide](https://github.com/OpenMined/Docs/blob/master/contributing/quickstart.md) and then sign into our [Slack Team](https://openmined.slack.com/) channel #syft to let us know which projects sound interesting to you! (or propose your own!). +If you are interested in contributing to Syft, first check out our [Contributor Quickstart Guide](https://github.com/OpenMined/Docs/blob/master/contributing/quickstart.md) and then sign into our [Slack Team](https://openmined.slack.com/) channel #team_pysyft to let us know which projects sound interesting to you! (or propose your own!). ## Relevant Literature
Update docs to recommend using MSVC on Windows See I don't think MinGW has worked since around Python 3.4 (but I'm not completely confident in that) while I know that MSVC does work. Therefore we should recommend that.
@@ -22,13 +22,16 @@ according to the system used: XCode, which can be retrieved from the Mac OS X's install DVDs or from https://developer.apple.com/. - - **Windows** A popular option is to use the open source MinGW (a + - **Windows** The CPython project recommends building extension modules + (including Cython modules) with the same compiler that Python was + built with. This is usually a specific version of Microsoft Visual + C/C++ (MSVC) - see https://wiki.python.org/moin/WindowsCompilers. + MSVC is the only compiler that Cython is currently tested with on + Windows. A possible alternative is the open source MinGW (a Windows distribution of gcc). See the appendix for instructions for setting up MinGW manually. Enthought Canopy and Python(x,y) bundle MinGW, but some of the configuration steps in the appendix might - still be necessary. Another option is to use Microsoft Visual C/C++ - (MSVC). One must then use the same version which the installed Python was - compiled with. + still be necessary. .. dagss tried other forms of ReST lists and they didn't look nice .. with rst2latex.
Updates text of Approximate Algorithm [ci-skip]
@@ -173,7 +173,7 @@
 Approximate algorithm
 ---------------------
 
-In 1999 Barvinok :cite:`barvinok1999polynomial` provided a surprisingly simple algorithm to approximate the hafnian of a symmetric matrix with positive entries. Let the matrix have entries :math:`A_{i,j}` and define the antisymmetric stochastic matrix with entries that distribute according to :math:`W_{i,j} = -W_{i,j} \sim \mathcal{N}(0,A_{i,j})`, where :math:`\mathcal{N}(\mu,\sigma^2)` is the normal distribution with mean :math:`\mu` and variance :math:`\sigma^2`. The following now holds:
+In 1999 Barvinok :cite:`barvinok1999polynomial` provided a surprisingly simple algorithm to approximate the hafnian of a symmetric matrix with non-negative entries. Let the matrix have entries :math:`A_{i,j}` and define the antisymmetric stochastic matrix with entries that distribute according to :math:`W_{i,j} = -W_{j,i} \sim \mathcal{N}(0,A_{i,j})`, where :math:`\mathcal{N}(\mu,\sigma^2)` is the normal distribution with mean :math:`\mu` and variance :math:`\sigma^2`. The following now holds:
 
 .. math:: \text{haf}(\mathbf{A}) = \mathbb{E} \left( \text{det}(\mathbf{W}) \right)
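Barvinok's estimator is easy to try out numerically. The sketch below is an illustrative Monte-Carlo implementation, not The Walrus's own approximation code: it samples antisymmetric matrices with W_{i,j} = -W_{j,i} ~ N(0, A_{i,j}) and averages the determinants. For the 4x4 all-ones matrix the hafnian counts the perfect matchings of K_4, so the estimate should hover around 3.

```python
import numpy as np


def barvinok_hafnian_estimate(A, samples=20000, seed=0):
    """Monte-Carlo estimate of haf(A) as E[det(W)] (Barvinok, 1999)."""
    rng = np.random.default_rng(seed)
    std = np.sqrt(np.triu(A, 1))  # per-entry standard deviations, strict upper triangle
    dets = np.empty(samples)
    for k in range(samples):
        G = rng.normal(0.0, std)
        W = np.triu(G, 1) - np.triu(G, 1).T  # antisymmetric: W_ji = -W_ij
        dets[k] = np.linalg.det(W)
    return dets.mean()


print(barvinok_hafnian_estimate(np.ones((4, 4))))  # ~3, up to sampling noise
```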
Use with context manager and a few PEP8 changes. First, the file is opened using the with context manager. Broke two long strings into multiple lines. Added a space between arguments in a method call.
@@ -2,13 +2,19 @@ import pypandoc import os output = pypandoc.convert('README.md', 'rst') -f = open('README.txt','w+') +with open('README.txt' 'w+') as f: f.write(str(output.encode('utf-8'))) -f.close() readme_rst = open('./README.txt').read() -replace = '.. figure:: https://uiux.s3.amazonaws.com/2016-logos/email-logo%402x.png\n :alt: SendGrid Logo\n\n SendGrid Logo\n' -replacement = '|SendGrid Logo|\n\n.. |SendGrid Logo| image:: https://uiux.s3.amazonaws.com/2016-logos/email-logo%402x.png\n :target: https://www.sendgrid.com' +replace = ''' + .. figure:: https://uiux.s3.amazonaws.com/2016-logos/email-logo + %402x.png\n :alt: SendGrid Logo\n\n SendGrid Logo\n + ''' +replacement = ''' + |SendGrid Logo|\n\n.. |SendGrid Logo| image:: + https://uiux.s3.amazonaws.com/2016-logos/email-logo%402x.png + \n :target: https://www.sendgrid.com + ''' final_text = readme_rst.replace(replace, replacement) with open('./README.txt', 'w') as f: f.write(final_text)
Adjusted to pass the updated requirements. Removed six and Py2 support. Small changes to pass new lint tests.
-# -*- coding: utf-8 -*- - # Import Python Libs -from __future__ import absolute_import, print_function, unicode_literals import os @@ -11,7 +8,6 @@ import salt.modules.yumpkg as yumpkg # Import Salt libs from salt.exceptions import CommandExecutionError -from salt.ext import six # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin @@ -332,10 +328,7 @@ class YumTestCase(TestCase, LoaderModuleMockMixin): ], } for pkgname, pkginfo in pkgs.items(): - if six.PY3: self.assertCountEqual(pkginfo, expected_pkg_list[pkgname]) - else: - self.assertItemsEqual(pkginfo, expected_pkg_list[pkgname]) def test_list_patches(self): """ @@ -632,7 +625,7 @@ class YumTestCase(TestCase, LoaderModuleMockMixin): except AssertionError: continue else: - self.fail("repo '{0}' not checked".format(repo)) + self.fail("repo '{}' not checked".format(repo)) def test_list_upgrades_dnf(self): """
[woff2] Fix seeds Use seeds from OSS-Fuzz instead of getting them manually (and incorrectly). Fixes
@@ -22,10 +22,13 @@ apt-get update && \ autoconf \ libtool +# Get seeds. +get_git_revision https://github.com/google/oss-fuzz.git e8ffee4077b59e35824a2e97aa214ee95d39ed13 oss-fuzz +mkdir -p $OUT/seeds +cp oss-fuzz/projects/woff2/corpus/* $OUT/seeds + get_git_revision https://github.com/google/woff2.git 9476664fd6931ea6ec532c94b816d8fbbe3aed90 SRC get_git_revision https://github.com/google/brotli.git 3a9032ba8733532a6cd6727970bade7f7c0e2f52 BROTLI -get_git_revision https://github.com/FontFaceKit/roboto.git 0e41bf923e2599d651084eece345701e55a8bfde $OUT/seeds -rm -rf $OUT/seeds/.git # Remove unneeded .git folder. rm -f *.o for f in font.cc normalize.cc transform.cc woff2_common.cc woff2_dec.cc woff2_enc.cc glyph.cc table_tags.cc variable_length.cc woff2_out.cc; do
Refactor: Use `id` instead of `pk` as key. Use `id` instead of `pk` as key to get RealmFilter object in `do_remove_linkifier` function in `actions.py`.
@@ -6643,7 +6643,7 @@ def do_remove_linkifier( if pattern is not None: RealmFilter.objects.get(realm=realm, pattern=pattern).delete() else: - RealmFilter.objects.get(realm=realm, pk=id).delete() + RealmFilter.objects.get(realm=realm, id=id).delete() notify_linkifiers(realm)
Add support for localised item types / fields. Only en-US is currently supported, but it will eventually just work.
@@ -797,10 +797,10 @@ Pyzotero allows you to retrieve, delete, or modify saved searches: Item Methods ================= - .. py:method:: Zotero.item_types() + .. py:method:: Zotero.item_types([locale]) Returns a dict containing all available item types - + :param string locale: Clients can optionally request names in other languages by passing a locale parameter (e.g., "fr-FR"), however, only "en-US" is currently supported. :rtype: dict .. py:method:: Zotero.item_fields()
updated new meaning
"a person who employs or superintends workers; manager.", "a politician who controls the party organization, as in a particular district.", "a person who makes decisions, exercises authority, dominates, etc." + "a person who gives only commands,not lead them" ], "parts-of-speech": "Noun" }
Fix code generation for hashing of analysis units TN:
@@ -1038,7 +1038,7 @@ package body ${ada_lib_name}.Analysis.Implementation is % if T.AnalysisUnitType.requires_hash_function: function Hash (Unit : Analysis_Unit) return Hash_Type is - (Ada.Strings.Unbounded.Hash (Unit.File_Name)); + (GNATCOLL.VFS.Full_Name_Hash (Unit.File_Name)); % endif -------------
Updating the glanceclient reference doc. Added the missing commands from glanceclient, updated the OSC equivalent and removed the deprecated commands.
+cache-clear,,"Clear all images from cache, queue or both." +cache-delete,,Delete image from cache/caching queue. +cache-list,,Get cache state. +cache-queue,,Queue image(s) for caching. explain,WONTFIX,Describe a specific model. image-create,image create,Create a new image. -image-create-via-import,,EXPERIMENTAL: Create a new image via image import. +image-create-via-import, image create --import,"EXPERIMENTAL: Create a new image via image import using glance-direct import method. Missing support for web-download, copy-image and glance-download import methods. The OSC command is also missing support for importing image to specified store as well as all stores (--store, --stores, --all-stores) and skip or stop processing if import fails to one of the store (--allow-failure)" image-deactivate,image set --deactivate,Deactivate specified image. image-delete,image delete,Delete specified image. image-download,image save,Download a specific image. @@ -11,6 +15,7 @@ image-show,image show,Describe a specific image. image-stage,,Upload data for a specific image to staging. image-tag-delete,image unset --tag <tag>,Delete the tag associated with the given image. image-tag-update,image set --tag <tag>,Update an image with the given tag. +image-tasks,,Get tasks associated with image. image-update,image set,Update an existing image. image-upload,,Upload data for a specific image. import-info,,Print import methods available from Glance. @@ -49,6 +54,7 @@ md-tag-show,,Describe a specific metadata definitions tag inside a namespace. md-tag-update,,Rename a metadata definitions tag inside a namespace. member-create,image add project,Create member for a given image. member-delete,image remove project,Delete image member. +member-get,,Show details of an image member member-list,image member list,Describe sharing permissions by image. member-update,image set --accept --reject --status,Update the status of a member for a given image. stores-delete,,Delete image from specific store. @@ -56,5 +62,6 @@ stores-info,,Print available backends from Glance. task-create,WONTFIX,Create a new task. task-list,image task list,List tasks you can access. task-show,image task show,Describe a specific task. +usage,,Get quota usage information. bash-completion,complete,Prints arguments for bash_completion. help,help,Display help about this program or one of its subcommands.
ENH: add `arm64` support with native `cmake`; no native `cmake` is available for `ppc64le`.
-# This Dockerfile supports amd64,ppc64le +# This Dockerfile supports amd64,arm64,ppc64le # Note: QEMU emulated ppc64le build might take ~6 hours # Use conda to resolve dependencies cross-platform FROM continuumio/miniconda3:4.11.0 as builder +ARG TARGETPLATFORM # install libpng to system for cross-architecture support # https://github.com/ANTsX/ANTs/issues/1069#issuecomment-681131938 @@ -22,16 +23,18 @@ RUN apt-get update && \ # apt install fails because libssl1.0.0 is not available for newer Debian # Download verification stuff from https://cmake.org/install/ ARG CMAKE_VERSION=3.23.1 -RUN curl -OL https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-SHA-256.txt && \ - curl -OL https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.sh && \ +RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then ARCHITECTURE=x86_64; elif [ "$TARGETPLATFORM" = "linux/arm/v7" ]; then ARCHITECTURE=arm; elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then ARCHITECTURE=aarch64; elif [ "$TARGETPLATFORM" = "linux/ppc64le" ]; then ARCHITECTURE=x86_64; else ARCHITECTURE=x86_64; fi && \ + curl -OL https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-SHA-256.txt && \ + curl -OL https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-${ARCHITECTURE}.sh && \ sha256sum -c --ignore-missing cmake-${CMAKE_VERSION}-SHA-256.txt && \ curl -OL https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-SHA-256.txt.asc && \ gpg --keyserver hkps://keyserver.ubuntu.com --recv-keys C6C265324BBEBDC350B513D02D2CEF1034921684 && \ gpg --verify cmake-${CMAKE_VERSION}-SHA-256.txt.asc cmake-${CMAKE_VERSION}-SHA-256.txt -RUN mkdir /opt/cmake && \ - chmod +x cmake-${CMAKE_VERSION}-linux-x86_64.sh && \ - ./cmake-${CMAKE_VERSION}-linux-x86_64.sh --skip-license --prefix=/opt/cmake +RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then ARCHITECTURE=x86_64; elif [ "$TARGETPLATFORM" = "linux/arm/v7" ]; then ARCHITECTURE=arm; elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then ARCHITECTURE=aarch64; elif [ "$TARGETPLATFORM" = "linux/ppc64le" ]; then ARCHITECTURE=x86_64; else ARCHITECTURE=x86_64; fi && \ + mkdir /opt/cmake && \ + chmod +x cmake-${CMAKE_VERSION}-linux-${ARCHITECTURE}.sh && \ + ./cmake-${CMAKE_VERSION}-linux-${ARCHITECTURE}.sh --skip-license --prefix=/opt/cmake ENV PATH=/opt/cmake/bin:${PATH}
Update test case to pass interface parameter to pod_factory. Pod name will have 'cephfs' or 'rbd' based on the interface. This update does not bring any change in the functionality.
@@ -323,10 +323,14 @@ class TestDeleteResourceDuringPodPvcDeletion(DisruptionBase): for pvc_obj in pvc_objs: if pvc_obj.access_mode == constants.ACCESS_MODE_RWX: pod_obj = pod_factory( - pvc=pvc_obj, status=constants.STATUS_RUNNING + interface=interface, pvc=pvc_obj, + status=constants.STATUS_RUNNING ) pod_objs.append(pod_obj) - pod_obj = pod_factory(pvc=pvc_obj, status=constants.STATUS_RUNNING) + pod_obj = pod_factory( + interface=interface, pvc=pvc_obj, + status=constants.STATUS_RUNNING + ) pod_objs.append(pod_obj) log.info(f"Created {len(pod_objs)} pods.")
init: ensure a newline is present before appending certs Make sure the script still works when mounting certs as a read-only fs.
@@ -9,16 +9,8 @@ PYTHONUSERBASE_SITE_PACKAGE=${PYTHONUSERBASE_SITE_PACKAGE:-"$(python -m site --u cd ${QUAYDIR:-"/quay-registry"} -function ensure_newline() { - lastline=$(tail -c 1 $1) - if [ "$lastline" != "" ]; then - echo >> "$1" - fi -} - # Add the custom LDAP certificate -if [ -e $QUAYCONFIG/ldap.crt ] -then +if [ -e $QUAYCONFIG/ldap.crt ]; then cp $QUAYCONFIG/ldap.crt ${SYSTEM_CERTDIR}/ldap.crt fi @@ -29,12 +21,17 @@ if [ -d $CERTDIR ]; then cp $CERTDIR/* ${SYSTEM_CERTDIR} CERT_FILES="$CERTDIR/*" + for f in $CERT_FILES do - ensure_newline "$f" - done + lastline=$(tail -c 1 $PYTHONUSERBASE_SITE_PACKAGE/certifi/cacert.pem) - cat $CERTDIR/* >> $PYTHONUSERBASE_SITE_PACKAGE/certifi/cacert.pem + if [ "$lastline" != "" ]; then + echo >> $PYTHONUSERBASE_SITE_PACKAGE/certifi/cacert.pem + fi + + cat $CERTDIR/$f >> $PYTHONUSERBASE_SITE_PACKAGE/certifi/cacert.pem + done fi fi @@ -42,7 +39,13 @@ fi if [ -f $CERTDIR ]; then echo "Installing extra certificates found in $CERTDIR file" csplit -z -f ${SYSTEM_CERTDIR}/extra-ca- $CERTDIR '/-----BEGIN CERTIFICATE-----/' '{*}' - ensure_newline "$CERTDIR" + + lastline=$(tail -c 1 $PYTHONUSERBASE_SITE_PACKAGE/certifi/cacert.pem) + + if [ "$lastline" != "" ]; then + echo >> $PYTHONUSERBASE_SITE_PACKAGE/certifi/cacert.pem + fi + cat $CERTDIR >> $PYTHONUSERBASE_SITE_PACKAGE/certifi/cacert.pem fi @@ -51,7 +54,12 @@ for f in $(find -L $QUAYCONFIG/ -maxdepth 1 -type f -name "extra_ca*") do echo "Installing extra cert $f" cp "$f" ${SYSTEM_CERTDIR} - ensure_newline "$f" + + lastline=$(tail -c 1 $PYTHONUSERBASE_SITE_PACKAGE/certifi/cacert.pem) + if [ "$lastline" != "" ]; then + echo >> $PYTHONUSERBASE_SITE_PACKAGE/certifi/cacert.pem + fi + cat "$f" >> $PYTHONUSERBASE_SITE_PACKAGE/certifi/cacert.pem done
Rearranges Dockerfile. Does timezone setup at the beginning. Separates python installation and requirements installation.
@@ -4,6 +4,11 @@ ENV DEBIAN_FRONTEND noninteractive RUN apt-get update -qqy RUN apt-get install -qqy --no-install-recommends apt-utils +# Setup timezone. +ENV TZ=America/Los_Angeles +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +# Install Python 3.7. WORKDIR /root RUN apt-get install -qqy build-essential libsqlite3-dev sqlite3 bzip2 \ libbz2-dev zlib1g-dev libssl-dev openssl libgdbm-dev \ @@ -15,16 +20,15 @@ RUN tar -xf Python-3.7.0.tgz WORKDIR Python-3.7.0 RUN ./configure > /dev/null && make -s && make -s install RUN python3 -m pip install --upgrade pip +WORKDIR /root +RUN rm -rf Python-3.7.0* + +# Install requirements. RUN apt-get install -qqy libcairo2-dev libjpeg-dev libgif-dev COPY requirements.txt requirements.txt RUN python3 -m pip install -r requirements.txt -WORKDIR /root -RUN rm -rf Python-3.7.0* RUN apt-get install -qqy ffmpeg - -ENV TZ=America/Los_Angeles -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone RUN apt-get install -qqy apt-transport-https RUN apt-get install -qqy texlive-full RUN apt-get install -qqy sox
fix: use '>' instead of 'gt' in j2 templates Fixes:
@@ -61,7 +61,7 @@ allow {{ networks[nic.network]['subnet'] }}/{{ networks[nic.network]['prefix'] } # Serve time even if not synchronized to a time source. # Fairly unreliable time source {% if iceberg_level is defined and iceberg_level is not none %} - {% if (iceberg_level|int) is gt 3 %} + {% if (iceberg_level|int) > 3 %} local stratum 15 {% else %} local stratum {{ 12+(iceberg_level|int) }}
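The rendered behaviour is easy to verify outside Ansible with plain Jinja2 (assuming the `jinja2` package is available; the stratum values are the ones from the chrony template above):

```python
from jinja2 import Template

tmpl = Template(
    "{% if (iceberg_level | int) > 3 %}"
    "local stratum 15"
    "{% else %}"
    "local stratum {{ 12 + (iceberg_level | int) }}"
    "{% endif %}"
)
print(tmpl.render(iceberg_level="2"))  # local stratum 14
print(tmpl.render(iceberg_level="5"))  # local stratum 15
```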
TST: stats: Fix the expected r-value of a linregress test. The test TestRegression.test_nist_norris() in stats/test_stats.py uses the "Norris" data set from The certified r-squared value is 0.999993745883712. `stats.linregress` returns the r-value (not squared), so we must square it before comparing it to the certified value.
@@ -1163,13 +1163,13 @@ class TestRegression(object): # Expected values exp_slope = 1.00211681802045 exp_intercept = -0.262323073774029 - exp_rvalue = 0.999993745883712 + exp_rsquared = 0.999993745883712 actual = stats.linregress(x, y) assert_almost_equal(actual.slope, exp_slope) assert_almost_equal(actual.intercept, exp_intercept) - assert_almost_equal(actual.rvalue, exp_rvalue, decimal=5) + assert_almost_equal(actual.rvalue**2, exp_rsquared) def test_empty_input(self): assert_raises(ValueError, stats.linregress, [], [])
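The distinction is easy to trip over: `linregress` reports the correlation coefficient r, so it has to be squared before comparing with a certified R^2 such as NIST's 0.999993745883712. A small sketch with synthetic data (not the Norris set):

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = np.arange(20.0)
y = 1.002 * x - 0.26 + rng.normal(0.0, 0.05, size=x.size)

res = stats.linregress(x, y)
print(res.rvalue)       # correlation coefficient r
print(res.rvalue ** 2)  # coefficient of determination R^2 -- what NIST certifies
```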
Prepare 2.5.2rc3. [ci skip-rust] [ci skip-build-wheels]
See https://www.pantsbuild.org/v2.5/docs/release-notes-2-5 for an overview of the changes in this release series. +## 2.5.2rc3 (Aug 16, 2021) + +### Bug fixes + +* Fix shlexing of passthrough args. (cherrypick of #12547) ([#12550](https://github.com/pantsbuild/pants/pull/12550)) + ## 2.5.2rc2 (Aug 06, 2021) ### Bug fixes
Update generic.txt Moving + Dedup of ```smokealoader```
@@ -8739,18 +8739,6 @@ microsoft-hohm.space quickmaildrive.com -# Reference: https://twitter.com/malwrhunterteam/status/1247931172811874305 -# Reference: https://app.any.run/tasks/15f42296-0d96-4536-a255-04105ec7339d/ -# Reference: https://www.virustotal.com/gui/file/d3c075c5c6d9c6e8fcfda4a408c5bd8f5fc4c6ff6acf339293c50f72f89f585f/detection - -scproducts7.ru -informatioshopname.ru -yamaha.ug -crocopexpire.ug -opetileon.ru -siciliyaopartion.ru -amfibiyapolyakova.com - # Reference: https://twitter.com/JayTHL/status/1247971248291880962 medicacademic.com/aza/
Fixed: Under Linux with TK the column number in the editor does not change during editing. Now only the row and character number are displayed.
@@ -345,8 +345,9 @@ if TOOLKIT in (GTK, GTKSOURCEVIEW): else: col += 1 start.forward_char() - pos_label.set_text('char: %d, line: %d, column: %d' % (nchars, row, col + 1)) + else: + pos_label.set_text('char: %d, line: %d' % (nchars, row)) @staticmethod def load_file(text_buffer, path):
test(TestLoginLogout): mark as a user test. Also remove the initial login status check. RequireUserMixin will log in to the site automatically.
@@ -3660,17 +3660,13 @@ class TestLoginLogout(DefaultSiteTestCase): """Test for login and logout methods.""" - @unittest.skipIf(os.environ.get('APPVEYOR', 'false') in ('true', 'True'), - 'No user defined for APPVEYOR tests') + user = True + def test_login_logout(self): """Validate login and logout methods by toggling the state.""" site = self.get_site() loginstatus = pywikibot.site.LoginStatus - self.assertFalse(site.logged_in()) - self.assertEqual(site._loginstatus, loginstatus.NOT_ATTEMPTED) - - site.login() self.assertTrue(site.logged_in()) self.assertIn(site._loginstatus, (loginstatus.IN_PROGRESS, loginstatus.AS_USER))
MNT: rename self._task -> self._task_fut. run_coroutine_threadsafe returns a concurrent.futures.Future, not the task itself. Suspect this is because the tasks are not thread-safe, so this is protecting us from an obvious foot-cannon.
@@ -323,6 +323,7 @@ class RunEngine: self._exit_status = 'success' # optimistic default self._reason = '' # reason for abort self._task = None # asyncio.Task associated with call to self._run + self._task_fut = None # asyncio.Task associated with call to self._run self._status_tasks = deque() # from self._status_object_completed self._pardon_failures = None # will hold an asyncio.Event self._plan = None # the plan instance from __call__ @@ -541,6 +542,7 @@ class RunEngine: self._exit_status = 'success' self._reason = '' self._task = None + self._task_fut = None self._status_tasks.clear() self._pardon_failures = asyncio.Event(loop=self.loop) self._plan = None @@ -767,13 +769,13 @@ class RunEngine: self._blocking_event.clear() self.log.info("Executing plan %r", self._plan) - self._task = asyncio.run_coroutine_threadsafe(self._run(), + self._task_fut = asyncio.run_coroutine_threadsafe(self._run(), loop=self.loop) def set_blocking_event(future): self._blocking_event.set() - self._task.add_done_callback(set_blocking_event) + self._task_fut.add_done_callback(set_blocking_event) try: # Block until plan is complete or exception is raised. @@ -786,14 +788,14 @@ class RunEngine: self._interrupted = True self._blocking_event.wait(1) except Exception as raised_er: - self._task.cancel() + self._task_fut.cancel() self._interrupted = True raise raised_er finally: - if self._task.done(): + if self._task_fut.done(): # get exceptions from the main task try: - exc = self._task.exception() + exc = self._task_fut.exception() except (asyncio.CancelledError, concurrent.futures.CancelledError): exc = None @@ -869,7 +871,7 @@ class RunEngine: with ExitStack() as stack: for mgr in self.context_managers: stack.enter_context(mgr(self)) - if self._task.done(): + if self._task_fut.done(): return # Clear the blocking Event so that we can wait on it below. @@ -882,10 +884,10 @@ class RunEngine: # Block until plan is complete or exception is raised. self._during_task(self._blocking_event) finally: - if self._task.done(): + if self._task_fut.done(): # get exceptions from the main task try: - exc = self._task.exception() + exc = self._task_fut.exception() except asyncio.CancelledError: exc = None # if the main task exception is not None, re-raise
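The rename reflects what `run_coroutine_threadsafe` actually hands back. A standalone demonstration, independent of bluesky's RunEngine:

```python
import asyncio
import concurrent.futures
import threading


async def work():
    await asyncio.sleep(0.01)
    return "done"


loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

fut = asyncio.run_coroutine_threadsafe(work(), loop)
print(isinstance(fut, concurrent.futures.Future))  # True
print(isinstance(fut, asyncio.Task))               # False -- not the task itself
print(fut.result(timeout=1))                       # done
loop.call_soon_threadsafe(loop.stop)
```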
Report if there are any errors in local_settings. This will issue a `UserWarning` in case importing local_settings.py raises an ImportError.
@@ -149,5 +149,9 @@ INBOUND_EMAIL_VALIDATION_KEY = 'totally-unsecure-validation-string' # If you have settings you want to overload, put them in a local_settings.py. try: from local_settings import * # noqa -except ImportError: - pass +except ImportError as exc: + import warnings + import traceback + + warnings.warn('Could not import local_settings module. {}'.format( + traceback.format_exc()))
Guard against 'aws._profile_env_var' getting overwritten by controller tests CR:
@@ -84,6 +84,7 @@ class TestProfileSelection(unittest.TestCase): def run(self, result=None): aws._flush() + aws._profile_env_var = 'AWS_EB_PROFILE' aws._region_name = 'us-west-2' self.root_dir = os.getcwd() if os.path.exists('testDir'):