message | diff
---|---
Adds date parsing to `get_companies()`
Necessary for irregular companies classifier | @@ -47,4 +47,6 @@ class Dataset:
dtype={'cnpj': np.str},
low_memory=False)
dataset['cnpj'] = dataset['cnpj'].str.replace(r'\D', '')
+ dataset['situation_date'] = pd.to_datetime(dataset['situation_date'],
+ errors='coerce')
return dataset
|
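A minimal pandas sketch of the `errors='coerce'` behaviour the diff above relies on; the frame and its values are invented, and only the two transformations mirror the diff.

```python
import pandas as pd

# Stand-in for the companies dataset; the values are invented.
dataset = pd.DataFrame({
    "cnpj": ["12.345.678/0001-95", "98.765.432/0001-10"],
    "situation_date": ["2016-01-31", "not-a-date"],
})

# Strip non-digits from cnpj, as in the diff above.
dataset["cnpj"] = dataset["cnpj"].str.replace(r"\D", "", regex=True)

# errors='coerce' turns unparseable values into NaT instead of raising.
dataset["situation_date"] = pd.to_datetime(dataset["situation_date"],
                                           errors="coerce")

print(dataset["situation_date"].dtype)         # datetime64[ns]
print(dataset["situation_date"].isna().sum())  # 1, the malformed row
```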
satellite: update tests to v2.0.0
Fixes only, as renames were done in a different commit. | @@ -3,7 +3,7 @@ import unittest
from satellite import tree_from_traversals
-# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0
+# Tests adapted from `problem-specifications//canonical-data.json` @ v2.0.0
class SatelliteTest(unittest.TestCase):
def test_empty_tree(self):
|
allergies: Update test cases
Updates the tests according to the canonical test data
and stores the test data version. | @@ -2,8 +2,14 @@ import unittest
from allergies import Allergies
+# Python 2/3 compatibility
+if not hasattr(unittest.TestCase, 'assertCountEqual'):
+ unittest.TestCase.assertCountEqual = unittest.TestCase.assertItemsEqual
-class AllergiesTests(unittest.TestCase):
+
+# test cases adapted from `x-common//canonical-data.json` @ version: 1.0.0
+
+class AllergiesTests():
def test_no_allergies_means_not_allergic(self):
allergies = Allergies(0)
self.assertFalse(allergies.is_allergic_to('peanuts'))
@@ -13,7 +19,7 @@ class AllergiesTests(unittest.TestCase):
def test_is_allergic_to_eggs(self):
self.assertTrue(Allergies(1).is_allergic_to('eggs'))
- def test_has_the_right_allergies(self):
+ def test_allergic_to_eggs_in_addition_to_other_stuff(self):
allergies = Allergies(5)
self.assertTrue(allergies.is_allergic_to('eggs'))
self.assertTrue(allergies.is_allergic_to('shellfish'))
@@ -22,19 +28,43 @@ class AllergiesTests(unittest.TestCase):
def test_no_allergies_at_all(self):
self.assertEqual(Allergies(0).lst, [])
+ def test_allergic_to_just_eggs(self):
+ self.assertEqual(Allergies(1).lst, ['eggs'])
+
def test_allergic_to_just_peanuts(self):
self.assertEqual(Allergies(2).lst, ['peanuts'])
+ def test_allergic_to_just_strawberries(self):
+ self.assertEqual(Allergies(8).lst, ['strawberries'])
+
+ def test_allergic_to_eggs_and_peanuts(self):
+ self.assertCountEqual(Allergies(3).lst, ['eggs', 'peanuts'])
+
+ def test_allergic_to_more_than_eggs_but_not_peanuts(self):
+ self.assertCountEqual(Allergies(5).lst, ['eggs', 'shellfish'])
+
+ def test_allergic_to_lots_of_stuff(self):
+ self.assertCountEqual(
+ Allergies(248).lst,
+ ['strawberries', 'tomatoes', 'chocolate', 'pollen', 'cats'])
+
def test_allergic_to_everything(self):
- self.assertEqual(
- sorted(Allergies(255).lst),
- sorted(('eggs peanuts shellfish strawberries tomatoes '
- 'chocolate pollen cats').split()))
+ self.assertCountEqual(
+ Allergies(255).lst, [
+ 'eggs', 'peanuts', 'shellfish', 'strawberries', 'tomatoes',
+ 'chocolate', 'pollen', 'cats'
+ ])
- @unittest.skip('Extra Credit: Passes with a specific type of solution')
- def test_ignore_non_allergen_score_parts(self):
+ def test_ignore_non_allergen_score_parts_only_eggs(self):
self.assertEqual(Allergies(257).lst, ['eggs'])
+ def test_ignore_non_allergen_score_parts(self):
+ self.assertCountEqual(
+ Allergies(509).lst, [
+ 'eggs', 'shellfish', 'strawberries', 'tomatoes', 'chocolate',
+ 'pollen', 'cats'
+ ])
+
if __name__ == '__main__':
unittest.main()
|
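The shim above aliases Python 3's `assertCountEqual` onto Python 2's `assertItemsEqual`. A small stand-alone sketch of what that assertion checks, namely element counts independent of order; the toy values are invented.

```python
import unittest

# On Python 2, TestCase only has assertItemsEqual; alias it as in the diff.
if not hasattr(unittest.TestCase, "assertCountEqual"):
    unittest.TestCase.assertCountEqual = unittest.TestCase.assertItemsEqual


class OrderInsensitiveExample(unittest.TestCase):
    def test_same_elements_any_order(self):
        # Passes: same elements and counts, different order.
        self.assertCountEqual(["eggs", "peanuts"], ["peanuts", "eggs"])

    def test_counts_matter(self):
        # Would fail if uncommented: counts differ even though the sets match.
        # self.assertCountEqual(["eggs", "eggs"], ["eggs"])
        pass


if __name__ == "__main__":
    unittest.main()
```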
[Triggers] Fix running openstack actions via triggers
Closes-Bug: | @@ -64,10 +64,18 @@ def create_context(trust_id, project_id):
if CONF.pecan.auth_enable:
client = keystone.client_for_trusts(trust_id)
+ if client.session:
+ # Method get_token is deprecated, using get_auth_headers.
+ token = client.session.get_auth_headers().get('X-Auth-Token')
+ user_id = client.session.get_user_id()
+ else:
+ token = client.auth_token
+ user_id = client.user_id
+
return auth_ctx.MistralContext(
- user=client.user_id,
+ user=user_id,
tenant=project_id,
- auth_token=client.auth_token,
+ auth_token=token,
is_trust_scoped=True,
trust_id=trust_id,
)
|
Use extras when running the test server
Make sure that the `travis_moto_server.sh` script
actually installs `all` and `server` extras. | #!/usr/bin/env bash
set -e
-pip install flask
# TravisCI on bionic dist uses old version of Docker Engine
# which is incompatibile with newer docker-py
# See https://github.com/docker/docker-py/issues/2639
pip install "docker>=2.5.1,<=4.2.2"
-pip install /moto/dist/moto*.gz
+pip install $(ls /moto/dist/moto*.gz)[server,all]
moto_server -H 0.0.0.0 -p 5000
|
Fix range description in `suggest_float` docstring
Also fixes the same piece of docstring in the deprecated `suggest` APIs, as
those now point to `suggest_float` | @@ -127,12 +127,7 @@ class Trial(BaseTrial):
low:
Lower endpoint of the range of suggested values. ``low`` is included in the range.
high:
- Upper endpoint of the range of suggested values. ``high`` is excluded from the
- range.
-
- .. note::
- If ``step`` is specified, ``high`` is included as well as ``low``.
-
+ Upper endpoint of the range of suggested values. ``high`` is included in the range.
step:
A step of discretization.
@@ -191,8 +186,7 @@ class Trial(BaseTrial):
low:
Lower endpoint of the range of suggested values. ``low`` is included in the range.
high:
- Upper endpoint of the range of suggested values. ``high`` is excluded from the
- range.
+ Upper endpoint of the range of suggested values. ``high`` is included in the range.
Returns:
A suggested float value.
@@ -214,8 +208,7 @@ class Trial(BaseTrial):
low:
Lower endpoint of the range of suggested values. ``low`` is included in the range.
high:
- Upper endpoint of the range of suggested values. ``high`` is excluded from the
- range.
+ Upper endpoint of the range of suggested values. ``high`` is included in the range.
Returns:
A suggested float value.
|
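A minimal sketch of the behaviour the corrected docstring describes, assuming a recent Optuna where `suggest_float` accepts `step`; the bounds and objective are made up.

```python
import optuna


def objective(trial):
    # Both endpoints are documented as included in the suggested range.
    x = trial.suggest_float("x", 0.0, 1.0)
    # With a step, values come from the grid {0.0, 0.25, 0.5, 0.75, 1.0},
    # so low and high are both reachable.
    y = trial.suggest_float("y", 0.0, 1.0, step=0.25)
    return (x - 0.5) ** 2 + (y - 0.5) ** 2


study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=20)
print(study.best_params)
```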
Tests: Output command that failed during coverage taking
* This makes it unnecessary to attempt to reconstruct what happened
from flags given. | @@ -641,6 +641,8 @@ Taking coverage of '{filename}' using '{python}' with flags {args} ...""".format
nuitka_cmd1
)
+ python_path_used = os.environ["PYTHONPATH"]
+
if exit_nuitka1 != 0:
if (
not expect_failure
@@ -649,12 +651,12 @@ Taking coverage of '{filename}' using '{python}' with flags {args} ...""".format
):
sys.exit(
"""\
-Error, failed to take coverage with '%s'.
+Error, failed to take coverage with '%s' (PYTHONPATH was '%s').
Stderr was:
%s
"""
- % (os.environ["PYTHON"], stderr_nuitka1)
+ % (nuitka_cmd1, python_path_used, stderr_nuitka1)
)
exit_nuitka = exit_nuitka1
|
Update README.md
Clearer explanation for running clusters on testnet/private net | @@ -58,6 +58,7 @@ To activate the virtual environment
```bash
source ~/virtualenv/qc/bin/activate
+# the rest of the tutorial assumes virtual environment
```
Install rocksdb which is required by the `python-rocksdb` module in the next step
@@ -78,7 +79,7 @@ pip install -e .
Once all the modules are installed, try running all the unit tests under `pyquarkchain`
```
-python -m pytest quarkchain
+python -m pytest
```
## Development Flow
@@ -92,30 +93,35 @@ pre-commit install
[black](https://github.com/ambv/black) is used to format modified python code, which will be automatically triggered on new commit after running the above commands. Refer to [STYLE](https://github.com/QuarkChain/pyquarkchain/blob/master/STYLE) for coding style suggestions.
-## Cluster Launch
+## Joining Testnet
-### Run a private cluster on the QuarkChain testnet 2.0
-If you are on a private network (e.g. running QuarkChain clusters from a laptop which connects to a router), you need to first setup [port forwarding](https://github.com/QuarkChain/pyquarkchain/wiki/Private-Network-Setting%2C-Port-Forwarding) for UDP/TCP 38291.
+Please check [Testnet2-Schedule](https://github.com/QuarkChain/pyquarkchain/wiki/Testnet2-Schedule) for updates and schedule.
+
+### Running a cluster to join QuarkChain testnet 2.0
+If you are on a private network (e.g. running from a laptop which connects to the Internet through a router), you need to first setup [port forwarding](https://github.com/QuarkChain/pyquarkchain/wiki/Private-Network-Setting%2C-Port-Forwarding) for UDP/TCP 38291.
Then fill in your own coinbase address and [bootstrap a cluster](https://github.com/QuarkChain/pyquarkchain/wiki/Run-a-Private-Cluster-on-the-QuarkChain-Testnet-2.0) on QuarkChain Testnet 2.0.
We provide the [demo implementation of CPU mining software](https://github.com/QuarkChain/pyquarkchain/wiki/Demo-Implementation-of-CPU-Mining). Please refer to [QuarkChain mining](https://github.com/QuarkChain/pyquarkchain/wiki/Introduction-of-Mining-Algorithms) for more details.
-### Run a single cluster
-Start running a cluster. The default cluster has 8 shards and 4 slaves.
+### Running a single cluster for local testing
+Start running a local cluster which does not connect to anyone else. The default cluster has 8 shards and 4 slaves.
```bash
cd quarkchain/cluster
pypy3 cluster.py
+# add --start_simulated_mining to mine blocks with simulated mining (does not run any hash algorithms)
```
-### Run multiple clusters
+### Running multiple clusters for local testing
Run multiple clusters with P2P network on a single machine with *simulated* mininig:
```bash
pypy3 multi_cluster.py --num_clusters=3 --p2p --start_simulated_mining
```
-### Run multiple clusters with P2P network on different machines.
+### Running multiple clusters with P2P network on different machines
+NOTE this is effectively a private network. If you would like to join our testnet or mainnet, look back a few sections for instructions.
+
Just follow the same command to run single cluster and provide `--bootnodes` flag to discover and connect to other clusters. Make sure ports are open and accessible from outside world: this means if you are running on AWS, open the ports (default both UDP and TCP 38291) in security group; if you are running from a LAN (connecting to the internet through a router), you need to setup port forwarding for UDP/TCP 38291. We have a convenience UPNP module as well, but you will need to check if it has successfully set port forwarding.
(Optional) Not needed if you are joining a testnet or mainnet. If you are starting your own network, first start the bootstrap cluster:
|
Make "practice" the default org type for location endpoint
This was the behaviour under the old API and we are still getting
requests which expect this. | @@ -11,7 +11,9 @@ import api.view_utils as utils
@api_view(['GET'])
def org_location(request, format=None):
- org_type = request.GET.get('org_type', '')
+ # We make practice the default org type for compatibility with the previous
+ # API
+ org_type = request.GET.get('org_type', 'practice')
centroids = request.GET.get('centroids', '')
org_codes = utils.param_to_list(request.GET.get('q', ''))
if org_type == 'practice':
|
children_crossing: process /watch?v=xxx&list=xxx as playlists
Resolves | @@ -1370,8 +1370,13 @@ class MusicBot(discord.Client):
linksRegex = '((http(s)*:[/][/]|www.)([a-z]|[A-Z]|[0-9]|[/.]|[~])*)'
pattern = re.compile(linksRegex)
matchUrl = pattern.match(song_url)
- if matchUrl is None:
- song_url = song_url.replace('/', '%2F')
+ song_url = song_url.replace('/', '%2F') if matchUrl is None else song_url
+
+ # Rewrite YouTube playlist URLs if the wrong URL type is given
+ playlistRegex = r'watch\?v=.+&(list=[^&]+)'
+ matches = re.search(playlistRegex, song_url)
+ groups = matches.groups() if matches is not None else []
+ song_url = "https://www.youtube.com/playlist?" + groups[0] if len(groups) > 0 else song_url
if song_url.startswith('spotify:'): # treat it as probably a spotify URI
if self.config._spotify:
|
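A standalone sketch of the playlist rewrite added above, running the same regex on a made-up URL; the variable names follow the diff but the URL is illustrative only.

```python
import re

song_url = "https://www.youtube.com/watch?v=dQw4w9WgXcQ&list=PLxyzExample"

# Same pattern as in the diff: capture the list=... parameter from a watch URL.
playlist_regex = r'watch\?v=.+&(list=[^&]+)'
match = re.search(playlist_regex, song_url)
groups = match.groups() if match is not None else []

if groups:
    song_url = "https://www.youtube.com/playlist?" + groups[0]

print(song_url)  # https://www.youtube.com/playlist?list=PLxyzExample
```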
Added mention of collections keywords to list of API keywords
I'm not sure if I should include 'Generate Test Data', since it doesn't
directly call the Salesforce API. However, its main use is to create
data to be passed to the collection keywords. | @@ -286,6 +286,10 @@ API Keywords
In addition to browser interactions, the Salesforce Library also provides the following keywords for interacting with the Salesforce REST API:
+* **Salesforce Collection Insert**: used for bulk creation of objects
+ based on a template
+* **Salesforce Collection Update**: used for the bulk updating of
+ objects
* **Salesforce Delete**: Deletes a record using its type and ID
* **Salesforce Get**: Gets a dictionary of a record from its ID
* **Salesforce Insert**: Inserts a record using its type and field values. Returns the ID.
|
update conditions to run disconnected_buildings_heating_main
only run it when space heating is present in a district | @@ -29,13 +29,13 @@ def disconnected_building_main(locator, total_demand, config, prices, lca):
"""
# local variables
- #TODO: This will do it in Singapore too, so watch-out...
buildings_name_with_heating = get_building_names_with_load(total_demand, load_name='QH_sys_MWhyr')
+ buildings_name_with_space_heating = get_building_names_with_load(total_demand, load_name='Qhs_sys_MWhyr')
buildings_name_with_cooling = get_building_names_with_load(total_demand, load_name='QC_sys_MWhyr')
# calculate substations
- if (buildings_name_with_heating != [] and config.data_helper.region != 'SG'): #FIXME: temporal fix to avoid heating calculation in SG
+ if (buildings_name_with_heating != [] and buildings_name_with_space_heating != []):
decentralized_buildings_heating.disconnected_buildings_heating_main(locator, total_demand,
buildings_name_with_heating,
config, prices, lca)
|
virt.init: move enable_qcow to disks parameter
enable_qcow is rather badly named since it doesn't tell the user what
that actually does. Thanks to the new disks parameter, this option can
now be set on a per-disk basis in the disks structure using a new
overlay_image property.
enable_qcow is now marked as deprecated | @@ -743,7 +743,7 @@ def _qemu_image_create(vm_name,
disk_image=None,
disk_size=None,
disk_type='qcow2',
- enable_qcow=False,
+ create_overlay=False,
saltenv='base'):
'''
Create the image file using specified disk_size or/and disk_image
@@ -782,7 +782,7 @@ def _qemu_image_create(vm_name,
imageinfo = salt.utils.yaml.safe_load(res)
qcow2 = imageinfo['file format'] == 'qcow2'
try:
- if enable_qcow and qcow2:
+ if create_overlay and qcow2:
log.info('Cloning qcow2 image %s using copy on write', sfn)
__salt__['cmd.run'](
'qemu-img create -f qcow2 -o backing_file={0} {1}'
@@ -1128,6 +1128,19 @@ def init(name,
:param enable_qcow:
``True`` to create a QCOW2 overlay image, rather than copying the image
(Default: ``False``).
+
+ Deprecated in favor of ``disks`` parameter. Add the following to the disks
+ definitions to create an overlay image of a template disk image with an
+ image set:
+
+ .. code-block:: python
+
+ {
+ 'name': 'name_of_disk_to_change',
+ 'overlay_image': True
+ }
+
+ .. deprecated:: Fluorine
:param pool:
Path of the folder where the image files are located for vmware/esx hypervisors.
@@ -1224,6 +1237,10 @@ def init(name,
Path to the image to use for the disk. If no image is provided, an empty disk will be created
(Default: ``None``)
+ overlay_image
+ ``True`` to create a QCOW2 disk image with ``image`` as backing file. If ``False``
+ the file pointed to by the ``image`` property will simply be copied. (Default: ``False``)
+
.. _init-graphics-def:
.. rubric:: Graphics Definition
@@ -1377,6 +1394,16 @@ def init(name,
disk_image = args.get('image', None)
disk_size = args.get('size', None)
disk_file_name = '{0}.{1}'.format(disk_name, disk_type)
+ create_overlay = enable_qcow
+ if create_overlay:
+ salt.utils.versions.warn_until(
+ 'Sodium',
+ '\'enable_qcow\' parameter has been deprecated. Rather use the \'disks\' '
+ 'parameter to override or define the image. \'enable_qcow\' will be removed '
+ 'in {version}.'
+ )
+ else:
+ create_overlay = args.get('overlay_image', False)
img_dest = _qemu_image_create(
vm_name=name,
@@ -1384,7 +1411,7 @@ def init(name,
disk_image=disk_image,
disk_size=disk_size,
disk_type=disk_type,
- enable_qcow=enable_qcow,
+ create_overlay=create_overlay,
saltenv=saltenv,
)
|
Mark complex cycle grpc server watch tests as skipped
Summary: Title
Test Plan: none
Reviewers: prha | import time
+import pytest
from dagster.grpc.client import DagsterGrpcClient
from dagster.grpc.server import open_server_process
from dagster.grpc.server_watcher import create_grpc_watch_thread
@@ -154,6 +155,7 @@ def should_not_be_called(*args, **kwargs):
assert called["on_error"]
[email protected]
def test_grpc_watch_thread_server_complex_cycle():
# Server goes down, comes back up as the same server three times, then goes away and comes
# back as a new server
@@ -216,6 +218,7 @@ def on_error():
assert events[-1] == "on_updated"
[email protected]
def test_grpc_watch_thread_server_complex_cycle_2():
# Server goes down, comes back up as the same server three times, then goes away and comes
# back as a new server
|
Remove line that calls get_tags() method
The tags have now been shifted from the database to being static files and hence the get_tags()
method has undergone changes. It now doesn't fetch from the database but looks at the local files
and we need not call it more than once. | @@ -97,8 +97,6 @@ class Tags(Cog):
`predicate` will be the built-in any, all, or a custom callable. Must return a bool.
"""
- await self._get_tags()
-
keywords_processed: List[str] = []
for keyword in keywords.split(','):
keyword_sanitized = keyword.strip().casefold()
|
Make log about duplicate queue management thread an error
If this case is firing, then something is wrong. | @@ -499,7 +499,7 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):
logger.debug("Started queue management thread")
else:
- logger.debug("Management thread already exists, returning")
+ logger.error("Management thread already exists, returning")
def hold_worker(self, worker_id):
"""Puts a worker on hold, preventing scheduling of additional tasks to it.
|
Adjust video frame control flow to not require `with gil`.
This breaks subinterpreters. See | @@ -83,6 +83,9 @@ cdef class VideoFrame(Frame):
self._init(c_format, width, height)
cdef _init(self, lib.AVPixelFormat format, unsigned int width, unsigned int height):
+
+ cdef int res = 0
+
with nogil:
self.ptr.width = width
self.ptr.height = height
@@ -93,17 +96,18 @@ cdef class VideoFrame(Frame):
# We enforce aligned buffers, otherwise `sws_scale` can perform
# poorly or even cause out-of-bounds reads and writes.
if width and height:
- ret = lib.av_image_alloc(
+ res = lib.av_image_alloc(
self.ptr.data,
self.ptr.linesize,
width,
height,
format,
16)
- with gil:
- err_check(ret)
self._buffer = self.ptr.data[0]
+ if res:
+ err_check(res)
+
self._init_user_attributes()
cdef _init_user_attributes(self):
|
Update import path for get_package_repo_data.
refactored
this function out of common but this script didn't get the updated
import path. | @@ -27,10 +27,10 @@ from ros_buildfarm.argument import add_argument_os_code_name
from ros_buildfarm.argument import add_argument_os_name
from ros_buildfarm.argument import add_argument_rosdistro_name
from ros_buildfarm.common import get_os_package_name
-from ros_buildfarm.common import get_package_repo_data
from ros_buildfarm.common import Target
from ros_buildfarm.config import get_index as get_config_index
from ros_buildfarm.config import get_release_build_files
+from ros_buildfarm.package_repo import get_package_repo_data
from rosdistro import get_distribution_file
from rosdistro import get_index
|
BoolWidget : Fix unwanted horizontal expansion
This could cause fixed-width value widgets to be right-aligned in NameValuePlugValueWidget. This was made apparent by which made the OpenGLAttributes `maxTextureResolution` widget fixed-width.
Fixes
BoolWidget : Fixed unwanted horizontal expansion. | @@ -148,6 +148,10 @@ class _CheckBox( QtWidgets.QCheckBox ) :
self.__hitMode = self.HitMode.CheckBox
+ self.setSizePolicy( QtWidgets.QSizePolicy(
+ QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed
+ ) )
+
def setHitMode( self, hitMode ) :
self.__hitMode = hitMode
|
Fixes testing 11.5.4 in daemon log settings
Issues:
Fixes
Problem:
Daemon log settings were incorrectly testing 11.5.4
Analysis:
This adds a skipif
Tests:
functional | # limitations under the License.
#
+import pytest
+from distutils.version import LooseVersion
+
def setup_daemon_log_settings_clusterd_test(request, mgmt_root):
def teardown():
@@ -103,6 +106,10 @@ class TestDaemon_Log_Settings(object):
daemon2.refresh()
assert daemon1.logLevel == daemon2.logLevel
+ @pytest.mark.skipif(
+ LooseVersion(pytest.config.getoption('--release')) < LooseVersion('11.6.0'),
+ reason='Needs v11.6.0 TMOS or greater to pass.'
+ )
def test_icrd_RUL(self, request, mgmt_root):
# Load
daemon1 = setup_daemon_log_settings_icrd_test(request, mgmt_root)
|
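A small sketch of the version comparison behind the new `skipif` marker; the version strings are examples only, and `distutils.version.LooseVersion` is used because that is what the test imports (it is deprecated on newer Pythons).

```python
from distutils.version import LooseVersion

print(LooseVersion("11.5.4") < LooseVersion("11.6.0"))   # True  -> test is skipped
print(LooseVersion("12.1.0") < LooseVersion("11.6.0"))   # False -> test runs
# Plain string comparison would get multi-digit components wrong:
print("11.10.0" < "11.6.0")                              # True (lexicographic)
print(LooseVersion("11.10.0") < LooseVersion("11.6.0"))  # False (numeric)
```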
click signal did not work, replace it with mousePressEvent
Probably a problem with the context menu blocking the signal | @@ -338,9 +338,8 @@ class CopySingleCellAction(qt.QAction):
"""QAction to copy text from a single cell in a modified
:class:`QTableWidget`.
- This action relies on the fact that the row and column coordinates
- of the last click are stored in :attr:`_last_cell_clicked` of the
- modified widget.
+ This action relies on the fact that the text in the last clicked cell
+ are stored in :attr:`_last_cell_clicked` of the modified widget.
In most cases, :class:`CopySelectedCellsAction` handles single cells,
but if the selection mode of the widget has been set to NoSelection
@@ -361,15 +360,10 @@ class CopySingleCellAction(qt.QAction):
def copyCellToClipboard(self):
"""
"""
- selected_idx = self.table._last_cell_clicked
- if selected_idx is None or len(selected_idx) != 2:
+ cell_text = self.table._text_last_cell_clicked
+ if cell_text is None:
return
- row, col = selected_idx
-
- qindex = self.table.model().index(row, col)
- cell_text = self.table.model().data(qindex)
-
# put this text into clipboard
qapp = qt.QApplication.instance()
qapp.clipboard().setText(cell_text)
@@ -397,8 +391,7 @@ class TableWidget(qt.QTableWidget):
"""
def __init__(self, parent=None, cut=False, paste=False):
super(TableWidget, self).__init__(parent)
- self._last_cell_clicked = None
- self.cellClicked.connect(self._remember_row_column)
+ self._text_last_cell_clicked = None
self.copySelectedCellsAction = CopySelectedCellsAction(self)
self.copyAllCellsAction = CopyAllCellsAction(self)
@@ -416,8 +409,10 @@ class TableWidget(qt.QTableWidget):
self.setContextMenuPolicy(qt.Qt.ActionsContextMenu)
- def _remember_row_column(self, row, column):
- self._last_cell_clicked = (row, column)
+ def mousePressEvent(self, event):
+ item = self.itemAt(event.pos())
+ self._text_last_cell_clicked = item.text()
+ super(TableWidget, self).mousePressEvent(event)
def enablePaste(self):
"""Enable paste action, to paste data from the clipboard into the
@@ -495,8 +490,7 @@ class TableView(qt.QTableView):
"""
def __init__(self, parent=None, cut=False, paste=False):
super(TableView, self).__init__(parent)
- self._last_cell_clicked = None
- self.cellClicked.connect(self._remember_row_column)
+ self._text_last_cell_clicked = None
self.cut = cut
self.paste = paste
@@ -508,8 +502,11 @@ class TableView(qt.QTableView):
self.cutSelectedCellsAction = None
self.cutAllCellsAction = None
- def _remember_row_column(self, row, column):
- self._last_cell_clicked = (row, column)
+ def mousePressEvent(self, event):
+ qindex = self.indexAt(event.pos())
+ if self.copyAllCellsAction is not None: # model was set
+ self._text_last_cell_clicked = self.model().data(qindex)
+ super(TableView, self).mousePressEvent(event)
def setModel(self, model):
"""Set the data model for the table view, activate the actions
|
Release: Added classifiers to setup.
* Seems that "landscape.io" might use these to derive the supported
Python versions, so let's have that.
* Also might give Nuitka a better description on PyPI. | @@ -206,6 +206,53 @@ setup(
name = project_name,
license = "Apache License, Version 2.0",
version = version,
+ classifiers = [
+ # Nuitka is mature even
+ "5 - Production/Stable",
+
+ # Indicate who Nuitka is for
+ "Intended Audience :: Developers",
+ "Intended Audience :: Science/Research",
+
+ # Nuitka is a compiler and a build tool as such.
+ "Topic :: Software Development :: Compilers",
+ "Topic :: Software Development :: Build Tools",
+
+ # Is has a weak subset of PyLint, but aims for more long term
+ "Topic :: Software Development :: Quality Assurance",
+
+ # Nuitka standalone mode aims at distribution
+ "Topic :: System :: Software Distribution",
+
+ # Python2 supported versions.
+ "Programming Language :: Python :: 2.6",
+ "Programming Language :: Python :: 2.7",
+
+ # Python3 supported versions.
+ "Programming Language :: Python :: 3.2",
+ "Programming Language :: Python :: 3.3",
+ "Programming Language :: Python :: 3.4",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+
+ # We depend on CPython.
+ "Programming Language :: Python :: Implementation :: CPython",
+
+ # We generate C intermediate code and implement part of the
+ # run time environment in C. Actually C11.
+ "Programming Language :: C",
+
+ # Supported OSes are many
+ "Operating System :: POSIX :: Linux",
+ "Operating System :: POSIX :: BSD :: FreeBSD",
+ "Operating System :: POSIX :: BSD :: NetBSD",
+ "Operating System :: POSIX :: BSD :: OpenBSD",
+ "Operating System :: Microsoft :: Windows",
+
+ # License
+ "License :: OSI Approved :: Apache Software License",
+
+ ],
packages = findNuitkaPackages(),
scripts = scripts,
cmdclass = cmdclass,
|
Permission to read new CRDs, Hosts and LogService
Added the newly added CRDs to the CRD RBAC | @@ -29,6 +29,8 @@ rules:
- filters.getambassador.io
- filterpolicies.getambassador.io
- ratelimits.getambassador.io
+ - hosts.getambassador.io
+ - logservices.getambassador.io
verbs: ["get", "list", "watch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
|
fix iteration over column dict in table
Currently, iterating over a Row in a Table with columns that aren't labeled '0', '1', '2', etc., will fail, making it appear as if every Row is empty. This is fixed here, so that the actual OrderedDict of column names will be iterated over. | @@ -61,10 +61,8 @@ class Table(awkward.array.base.AwkwardArray):
def __iter__(self, checkiter=True):
if checkiter:
self._table._checkiter()
- i = 0
- while str(i) in self._table._contents:
- yield self._table._contents[str(i)][self._index]
- i += 1
+ for i in self._table._contents:
+ yield self._table._contents[i][self._index]
def __getitem__(self, where):
if isinstance(where, awkward.util.string):
|
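A plain-dict sketch of the bug being fixed, using stand-in data rather than awkward-array itself: the old loop only walked keys named '0', '1', ..., so named columns were silently skipped, while iterating the dict visits every column.

```python
from collections import OrderedDict

contents = OrderedDict([("energy", [1.1, 2.2]), ("charge", [0, 1])])
index = 0

# Old approach: walks '0', '1', ... and stops immediately for named columns.
old_result = []
i = 0
while str(i) in contents:
    old_result.append(contents[str(i)][index])
    i += 1
print(old_result)  # [] -- the row looks empty

# Fixed approach: iterate the actual column names.
new_result = [contents[name][index] for name in contents]
print(new_result)  # [1.1, 0]
```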
ENH: added `data_dir` attribute
Added a data directory attribute that makes it possible to specify an absolute path for files not included in the local pysat data directories. | @@ -390,7 +390,13 @@ class Instrument(object):
# Assign an absolute path for files that may not be part of the
# standard pysat directory structure
- self.data_dir = data_dir if os.path.isdir(data_dir) else None
+ if os.path.isdir(data_dir):
+ self.data_dir = data_dir
+ else:
+ if len(data_dir) > 0:
+ logger.warning("data directory doesn't exist: {:}".format(
+ data_dir))
+ self.data_dir = None
# Check to make sure value is reasonable
if self.file_format is not None:
|
Minor style correction
no-tn-check | @@ -28,7 +28,9 @@ package body Langkit_Support.Adalog.Abstract_Relation is
Put_Line ("Press enter to continue ..");
declare
Dummy : String := Ada.Text_IO.Get_Line;
- begin null; end;
+ begin
+ null;
+ end;
end if;
end Wait;
|
Remove date handling function
We never call the bubble endpoint without supplying a date so there's no
need to support this. | -import datetime
-
from django.db import connection
from django.shortcuts import get_object_or_404
@@ -8,7 +6,6 @@ from rest_framework.response import Response
from rest_framework.exceptions import APIException
from common.utils import nhs_titlecase
-from frontend.models import ImportLog
from frontend.models import Practice, PCT, STP, RegionalTeam, PCN
from frontend.price_per_unit.prescribing_breakdown import (
get_prescribing,
@@ -32,17 +29,6 @@ class NotValid(APIException):
default_detail = "The code you provided is not valid"
-def _valid_or_latest_date(date):
- if date:
- try:
- date = datetime.datetime.strptime(date, "%Y-%m-%d").date()
- except ValueError:
- raise NotValid("%s is not a valid date" % date)
- else:
- date = ImportLog.objects.latest_in_category("prescribing").current_at
- return date
-
-
def _get_org_or_404(org_code, org_type=None):
if not org_type and org_code:
org_type = "ccg" if len(org_code) == 3 else "practice"
@@ -62,9 +48,11 @@ def bubble(request, format=None):
use in Highcharts bubble chart.
"""
code = request.query_params.get("bnf_code", "")
- date = _valid_or_latest_date(request.query_params.get("date", None))
+ date = request.query_params.get("date")
highlight = request.query_params.get("highlight", None)
focus = request.query_params.get("focus", None) and highlight
+ if not date:
+ raise NotValid("You must supply a date")
if highlight:
highlight_org_id = highlight
@@ -82,7 +70,7 @@ def bubble(request, format=None):
org_type = "all_standard_practices"
org_id = None
- prescribing = get_prescribing(code, str(date))
+ prescribing = get_prescribing(code, date)
ppu_breakdown = get_ppu_breakdown(prescribing, org_type, org_id)
mean_ppu = get_mean_ppu(prescribing, highlight_org_type, highlight_org_id)
@@ -177,9 +165,9 @@ def price_per_unit(request, format=None):
entity_codes = [entity_code]
if bnf_code:
- results = get_savings_for_orgs(bnf_code, str(date), entity_type, entity_codes)
+ results = get_savings_for_orgs(bnf_code, date, entity_type, entity_codes)
else:
- results = get_all_savings_for_orgs(str(date), entity_type, entity_codes)
+ results = get_all_savings_for_orgs(date, entity_type, entity_codes)
# Fetch the names of all the orgs involved and prepare to reformat the
# response to match the old API
|
[MetaSchedule] Fix comparison between GlobalVar and str
Compare the function's `name_hint` rather than the GlobalVar itself | @@ -53,7 +53,7 @@ def mod(mod: Union[PrimFunc, IRModule]) -> IRModule: # pylint: disable=redefine
raise TypeError(f"Expected `mod` to be PrimFunc or IRModule, but gets: {mod}")
func_names = mod.get_global_vars()
(func_name,) = func_names
- if len(func_names) == 1 and func_name != "main":
+ if len(func_names) == 1 and func_name.name_hint != "main":
mod = IRModule({"main": mod[func_name]})
return mod
|
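The underlying pitfall is comparing a name-wrapping object against a plain string. The stand-in class below is invented for illustration and does not require TVM; in TVM the attribute to compare is `name_hint`, as the diff shows.

```python
class GlobalVarLike:
    """Stand-in for an object that wraps a name but is not a str."""
    def __init__(self, name_hint):
        self.name_hint = name_hint


func_name = GlobalVarLike("main")

print(func_name != "main")            # True  -- the object never equals the str
print(func_name.name_hint != "main")  # False -- comparing the name works
```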
Actually address a comment made in
I made a small change addressing a nit in the PR mentioned in the commit
title but forgot to actually push the commit before clicking merge,
whoops. | @@ -193,9 +193,8 @@ class FileUploaderMixin:
)
ctx = get_report_ctx()
- if ctx is not None and widget_value:
serialized = serialize_file_uploader(widget_value)
-
+ if ctx is not None and len(serialized) != 0:
# The first number in the serialized widget_value list is the id
# of the most recently uploaded file.
newest_file_id = serialized[0]
|
URL conversion
Test case addition for converting garbled URLs into actual ones | @@ -468,6 +468,8 @@ def test_slack_message_sanitization():
target_message_1 = "You can sit here if you want"
target_message_2 = "Hey, you can sit here if you want !"
target_message_3 = "Hey, you can sit here if you want!"
+ target_message_4 = "convert garbled url to vicdb-f.net"
+ target_message_5 = "convert multiple garbled url to vicdb-f.net. Also eemdb-p.net"
uid_token = f"<@{test_uid}>"
raw_messages = [
@@ -483,6 +485,9 @@ def test_slack_message_sanitization():
"You can sit here{uid}if you want",
"Hey {uid}, you can sit here if you want{uid}!",
"Hey{uid} , you can sit here if you want {uid}!",
+ "convert garbled url to <http://vicdb-f.net|vicdb-f.net>",
+ "convert multiple garbled url to <http://vicdb-f.net|vicdb-f.net>. Also <http://eemdb-p.net|eemdb-p.net>",
+
]
]
@@ -493,6 +498,8 @@ def test_slack_message_sanitization():
target_message_1,
target_message_2,
target_message_3,
+ target_message_4,
+ target_message_5,
]
sanitized_messages = [
@@ -512,7 +519,6 @@ def test_slack_message_sanitization():
== 0
)
-
def test_slack_init_one_parameter():
from rasa.core.channels.slack import SlackInput
|
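The commit above only adds test cases; the sanitization itself lives elsewhere in the Slack channel code. The regex below is a hypothetical helper that would satisfy the new expectations, not Rasa's actual implementation.

```python
import re


def strip_slack_links(text):
    # Replace Slack-style "<http://host|label>" tokens with just the label.
    return re.sub(r"<http[^|>]+\|([^>]+)>", r"\1", text)


raw = ("convert multiple garbled url to <http://vicdb-f.net|vicdb-f.net>. "
       "Also <http://eemdb-p.net|eemdb-p.net>")
print(strip_slack_links(raw))
# convert multiple garbled url to vicdb-f.net. Also eemdb-p.net
```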
Remove the local file in `test_download_dataset` before download
The locally created file in `test_download_dataset`, which gets uploaded, is not
removed before the download. This results in a failing test, since it is cached
and thus does not need to be downloaded. | @@ -1039,6 +1039,9 @@ class TestBinRucio:
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
+
+ os.remove(tmp_file1)
+
# download dataset
cmd = 'rucio -v download --dir /tmp {0}'.format(tmp_dataset) # triming '/tmp/' from filename
print(self.marker + cmd)
|
Fix responsive issue with stream filter search.
This fixes the search input collapsing to a second line in between
1024px and 1033px width views. | @@ -955,13 +955,11 @@ form#add_new_subscription {
}
}
-@media (max-width: 1024px) {
+@media (max-width: 1033px) {
#search_stream_name {
display: none;
}
-}
-@media (max-width: 1000px) {
.search-container {
text-align: center;
}
|
remove legacy APIs from dagster_shell_tests
### Summary & Motivation
### How I Tested These Changes | from contextlib import contextmanager
import psutil
-from dagster import repository
+from dagster import job, op, repository
from dagster._core.storage.pipeline_run import DagsterRunStatus
from dagster._core.test_utils import instance_for_test, poll_for_finished_run, poll_for_step_start
from dagster._core.workspace.context import WorkspaceProcessContext
from dagster._core.workspace.load_target import PythonFileTarget
-from dagster._legacy import pipeline, solid
from dagster._utils import file_relative_path
from dagster_shell.utils import execute
-@solid
-def sleepy_solid(context):
+@op
+def sleepy_op(context):
# execute a sleep in the background
execute("sleep 60", "NONE", context.log)
-@pipeline
-def sleepy_pipeline():
- sleepy_solid()
+@job
+def sleepy_job():
+ sleepy_op()
@repository
def sleepy_repo():
- return [sleepy_pipeline]
+ return [sleepy_job]
@contextmanager
@@ -67,10 +66,10 @@ def test_terminate_kills_subproc():
external_pipeline = (
workspace.get_repository_location("test")
.get_repository("sleepy_repo")
- .get_full_external_job("sleepy_pipeline")
+ .get_full_external_job("sleepy_job")
)
pipeline_run = instance.create_run_for_pipeline(
- pipeline_def=sleepy_pipeline,
+ pipeline_def=sleepy_job,
external_pipeline_origin=external_pipeline.get_external_origin(),
pipeline_code_origin=external_pipeline.get_python_origin(),
)
@@ -87,7 +86,7 @@ def test_terminate_kills_subproc():
subproc_pid = poll_for_pid(instance, run_id)
assert psutil.pid_exists(subproc_pid)
- # simulate waiting a bit to terminate the pipeline
+ # simulate waiting a bit to terminate the job
time.sleep(0.5)
launcher = instance.run_launcher
|
Update advanced.rst
fix typo | @@ -179,7 +179,7 @@ Pipenv allows you to open any Python module that is installed (including ones in
$ pipenv open background
Opening '/Users/kennethreitz/.local/share/virtualenvs/hmm-mGOawwm_/src/background/background.py' in your EDITOR.
-This allows you to easily read the code your consuming, instead of looking it up on GitHub.
+This allows you to easily read the code you're consuming, instead of looking it up on GitHub.
.. note:: The ``EDITOR`` environment variable is used for this. If you're using Sublime Text, for example, you'll want to ``export EDITOR=subl`` (once you've installed the command-line utility).
|
Change default value for Activity.meta so it's easier to manipulate it in the decider
With this, we can have meta parameters assigned like that directly:
    def foo():
        pass

    class MyWorkflow(Workflow):
        def run(self):
            foo.meta["a_key"] = "a_value"
            self.submit(foo) | @@ -102,7 +102,7 @@ class Activity(object):
self.task_schedule_to_close_timeout = schedule_to_close_timeout
self.task_schedule_to_start_timeout = schedule_to_start_timeout
self.task_heartbeat_timeout = heartbeat_timeout
- self.meta = meta
+ self.meta = meta if meta is not None else {}
self.register()
|
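A trimmed-down sketch of why the `meta if meta is not None else {}` pattern matters (the class here is a stand-in, not simpleflow's real Activity): with `self.meta = None`, the decider-style assignment from the commit message raises `TypeError`, and a shared `meta={}` default would be mutated across instances.

```python
class Activity:
    def __init__(self, meta=None):
        # A fresh dict per instance; a shared `meta={}` default argument
        # would be mutated by every Activity that touches it.
        self.meta = meta if meta is not None else {}


a = Activity()
a.meta["a_key"] = "a_value"  # works; with self.meta = None this raises TypeError
b = Activity()
print(b.meta)                # {} -- not polluted by a's assignment
```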
Harmony to Deadline - fix for non-alpha last character on write node
Numeric or non-alpha (._) last characters in the write node create problems.
Remove them, as this value is internal and not used in the final
publish either way | @@ -77,11 +77,11 @@ class CollectFarmRender(pype.lib.abstract_collect_render.
# is sequence start node on write node offsetting whole sequence?
expected_files = []
- # add '.' if last character of file prefix is a number
+ # remove last char if last character of file prefix is a number
file_prefix = info[0]
- last_char = file_prefix[-1]
- if str.isdigit(last_char):
- file_prefix += '.'
+ while not str.isalpha(file_prefix[-1]):
+ file_prefix = file_prefix[:-1]
+
for frame in range(start, end):
expected_files.append(
path / "{}{}.{}".format(
|
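A quick sketch of the trailing-character stripping introduced above; the prefixes are invented, and the loop assumes the prefix contains at least one alphabetic character.

```python
def strip_trailing_non_alpha(file_prefix):
    # Drop trailing digits, dots and underscores until a letter remains.
    # Assumes at least one alphabetic character is present.
    while not str.isalpha(file_prefix[-1]):
        file_prefix = file_prefix[:-1]
    return file_prefix


print(strip_trailing_non_alpha("comp_01."))   # comp
print(strip_trailing_non_alpha("render_v2"))  # render_v
```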
update resilience_stats/views.py
Top-level functions in views are meant to be endpoints | @@ -49,19 +49,6 @@ class ScenarioErrored(Exception):
pass
-def parse_system_sizes(site):
- size_dict = dict()
- if "Generator" in site:
- size_dict["Generator"] = site["Generator"]["size_kw"]
- if "Storage" in site:
- size_dict["Storage_kw"] = site["Storage"]["size_kw"]
- size_dict["Storage_kwh"] = site["Storage"]["size_kwh"]
- if "Wind" in site:
- size_dict["Wind"] = site["Wind"]["size_kw"]
- if "PV" in site:
- size_dict["PV"] = site["PV"]["size_kw"]
- return size_dict
-
def resilience_stats(request, run_uuid=None, financial_check=None):
"""
Run outage simulator for given run_uuid
@@ -75,6 +62,19 @@ def resilience_stats(request, run_uuid=None, financial_check=None):
"probs_of_surviving",
}
"""
+ def parse_system_sizes(site):
+ size_dict = dict()
+ if "Generator" in site:
+ size_dict["Generator"] = site["Generator"]["size_kw"]
+ if "Storage" in site:
+ size_dict["Storage_kw"] = site["Storage"]["size_kw"]
+ size_dict["Storage_kwh"] = site["Storage"]["size_kwh"]
+ if "Wind" in site:
+ size_dict["Wind"] = site["Wind"]["size_kw"]
+ if "PV" in site:
+ size_dict["PV"] = site["PV"]["size_kw"]
+ return size_dict
+
try:
uuid.UUID(run_uuid) # raises ValueError if not valid uuid
except ValueError as e:
|
Eliminate self.tasks[id] from app done callback
see | @@ -384,7 +384,7 @@ class DataFlowKernel(object):
self._send_task_log_info(task_record)
- def handle_app_update(self, task_id, future):
+ def handle_app_update(self, task_record, future):
"""This function is called as a callback when an AppFuture
is in its final state.
@@ -397,12 +397,14 @@ class DataFlowKernel(object):
"""
- if not self.tasks[task_id]['app_fu'].done():
+ task_id = task_record['id']
+
+ if not task_record['app_fu'].done():
logger.error("Internal consistency error: app_fu is not done for task {}".format(task_id))
- if not self.tasks[task_id]['app_fu'] == future:
+ if not task_record['app_fu'] == future:
logger.error("Internal consistency error: callback future is not the app_fu in task structure, for task {}".format(task_id))
- self.memoizer.update_memo(task_id, self.tasks[task_id], future)
+ self.memoizer.update_memo(task_id, task_record, future)
if self.checkpoint_mode == 'task_exit':
self.checkpoint(tasks=[task_id])
@@ -870,7 +872,7 @@ class DataFlowKernel(object):
task_def['task_launch_lock'] = threading.Lock()
- app_fu.add_done_callback(partial(self.handle_app_update, task_id))
+ app_fu.add_done_callback(partial(self.handle_app_update, task_def))
task_def['status'] = States.pending
logger.debug("Task {} set to pending state with AppFuture: {}".format(task_id, task_def['app_fu']))
|
SDK - Compiler - Add optional Argo validation
The argo CLI tool must be on the PATH for this feature to work | @@ -898,12 +898,6 @@ class Compiler(object):
yaml.Dumper.ignore_aliases = lambda *args : True
yaml_text = yaml.dump(workflow, default_flow_style=False, default_style='|')
- if '{{pipelineparam' in yaml_text:
- raise RuntimeError(
- 'Internal compiler error: Found unresolved PipelineParam. '
- 'Please create a new issue at https://github.com/kubeflow/pipelines/issues '
- 'attaching the pipeline code and the pipeline package.' )
-
if package_path is None:
return yaml_text
@@ -946,4 +940,32 @@ class Compiler(object):
params_list,
pipeline_conf)
self._write_workflow(workflow, package_path)
+ _validate_workflow(workflow)
+
+def _validate_workflow(workflow: dict):
+ workflow = workflow.copy()
+ # Working around Argo lint issue
+ for argument in workflow['spec'].get('arguments', {}).get('parameters', []):
+ if 'value' not in argument:
+ argument['value'] = ''
+
+ yaml_text = yaml.dump(workflow)
+ if '{{pipelineparam' in yaml_text:
+ raise RuntimeError(
+ '''Internal compiler error: Found unresolved PipelineParam.
+Please create a new issue at https://github.com/kubeflow/pipelines/issues attaching the pipeline code and the pipeline package.'''
+ )
+
+ # Running Argo lint if available
+ import shutil
+ import subprocess
+ argo_path = shutil.which('argo')
+ if argo_path:
+ result = subprocess.run([argo_path, 'lint', '/dev/stdin'], input=yaml_text, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ if result.returncode:
+ raise RuntimeError(
+ '''Internal compiler error: Compiler has produced Argo-incompatible workflow.
+Please create a new issue at https://github.com/kubeflow/pipelines/issues attaching the pipeline code and the pipeline package.
+Error: {}'''.format(result.stderr)
+ )
|
Removes superfluous packages.
The apt package texlive-full already includes texlive-latex-base and
texlive-fonts-extra, so they don't have to be installed separately. | @@ -27,9 +27,7 @@ RUN apt-get install -qqy ffmpeg
ENV TZ=America/Los_Angeles
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN apt-get install -qqy apt-transport-https
-RUN apt-get install -qqy texlive-latex-base
RUN apt-get install -qqy texlive-full
-RUN apt-get install -qqy texlive-fonts-extra
RUN apt-get install -qqy sox
RUN apt-get install -qqy git
|
tests/scheduler_conditions: Sanitize
Disable 'PTXExec' variant; it hits the same problem as 'LLVMExec'.
Drop leftover debug print.
Use more targeted result shape workaround.
Codestyle. | @@ -4079,14 +4079,15 @@ class TestSystemComposition:
class TestSchedulerConditions:
@pytest.mark.composition
@pytest.mark.parametrize("mode", ['Python',
- # pytest.param('LLVM', marks=pytest.mark.llvm), #FIXME: Fails for `LLVM` and `LLVMExec` modes?
+ #FIXME: "Exec" versions see different shape of previous_value parameter ([0] vs. [[0]])
+ #pytest.param('LLVM', marks=pytest.mark.llvm),
#pytest.param('LLVMExec', marks=pytest.mark.llvm),
pytest.param('LLVMRun', marks=pytest.mark.llvm),
- pytest.param('PTXExec', marks=[pytest.mark.llvm, pytest.mark.cuda]),
- pytest.param('PTXRun', marks=[pytest.mark.llvm, pytest.mark.cuda])
+ #pytest.param('PTXExec', marks=[pytest.mark.llvm, pytest.mark.cuda]),
+ pytest.param('PTXRun', marks=[pytest.mark.llvm, pytest.mark.cuda]),
])
- @pytest.mark.parametrize(["condition", "expected_result"],[
- (pnl.EveryNCalls, [[.25, .25]]),
+ @pytest.mark.parametrize(["condition", "expected_result"],
+ [(pnl.EveryNCalls, [[.25, .25]]),
(pnl.BeforeNCalls, [[.05, .05]]),
(pnl.AtNCalls, [[.25, .25]]),
(pnl.AfterNCalls, [[.25, .25]]),
@@ -4102,7 +4103,6 @@ class TestSchedulerConditions:
#(pnl.Never), #TODO: Find a good test case for this!
])
def test_scheduler_conditions(self, mode, condition, expected_result):
- print(mode, condition)
decisionMaker = pnl.DDM(
function=pnl.DriftDiffusionIntegrator(starting_point=0,
threshold=1,
@@ -4146,8 +4146,10 @@ class TestSchedulerConditions:
comp.scheduler.add_condition(response, condition(0))
result = comp.run([0.05], bin_execute=mode)
- result = [x for x in np.array(result).flatten()] #HACK: The result is an object dtype in Python mode for some reason?
- assert np.allclose(result, np.array(expected_result).flatten())
+ #HACK: The result is an object dtype in Python mode for some reason?
+ if mode == "Python":
+ result = np.asfarray(result[0])
+ assert np.allclose(result, expected_result)
class TestNestedCompositions:
|
Add Cast Op
Summary: Pull Request resolved: | @@ -203,32 +203,20 @@ NetDef TvmTransformer::applyTvmTransform(
const std::unordered_set<int>& blacklisted_ops,
const ShapeInfoMap& shape_hints) {
auto profiling_based_jit = opts_.profiling_based_jit;
- auto tvm_supports = [&blacklisted_ops,
- &shape_hints,
- &profiling_based_jit](
+ auto tvm_supports = [&blacklisted_ops, &shape_hints, &profiling_based_jit](
const caffe2::OperatorDef& op) {
const static std::unordered_set<std::string> supported_ops{
- "Add",
- "Sum",
- "FC",
- "FCTransposed",
- "Flatten",
- "Relu",
- "Sigmoid",
- "Softmax",
- "Split",
- "EnsureCPUOutput",
- "Reshape",
- "ExpandDims",
- "Concat",
- "BatchMatMul",
- "MatMul",
- "BatchGather",
- "DotProduct",
- "Transpose",
- "Mul",
- "Tanh",
- "Logit"};
+ "Add", "Sum",
+ "FC", "FCTransposed",
+ "Flatten", "Relu",
+ "Sigmoid", "Softmax",
+ "Split", "EnsureCPUOutput",
+ "Reshape", "ExpandDims",
+ "Concat", "BatchMatMul",
+ "MatMul", "BatchGather",
+ "DotProduct", "Transpose",
+ "Mul", "Tanh",
+ "Logit", "Cast"};
try {
// If the op position is black listed, return false
|
Enable torch_speed_benchmark to accept different memory formats.
Summary: Pull Request resolved:
Test Plan: Imported from OSS | @@ -37,6 +37,10 @@ C10_DEFINE_string(
"semicolon to separate the dimension of different "
"tensors.");
C10_DEFINE_string(input_type, "", "Input type (uint8_t/float)");
+C10_DEFINE_string(
+ input_memory_format,
+ "contiguous_format",
+ "Input memory format (contiguous_format/channels_last)");
C10_DEFINE_bool(
no_inputs,
false,
@@ -87,10 +91,17 @@ std::vector<c10::IValue> create_inputs() {
std::vector<std::string> input_dims_list = split(';', FLAGS_input_dims);
std::vector<std::string> input_type_list = split(';', FLAGS_input_type);
+ std::vector<std::string> input_memory_format_list =
+ split(';', FLAGS_input_memory_format);
+
CAFFE_ENFORCE_EQ(
input_dims_list.size(),
input_type_list.size(),
"Input dims and type should have the same number of items.");
+ CAFFE_ENFORCE_EQ(
+ input_dims_list.size(),
+ input_memory_format_list.size(),
+ "Input dims and format should have the same number of items.");
std::vector<c10::IValue> inputs;
for (size_t i = 0; i < input_dims_list.size(); ++i) {
@@ -99,15 +110,35 @@ std::vector<c10::IValue> create_inputs() {
for (const auto& s : input_dims_str) {
input_dims.push_back(c10::stoi(s));
}
+
+ at::ScalarType input_type;
if (input_type_list[i] == "float") {
- inputs.push_back(torch::ones(input_dims, at::ScalarType::Float));
+ input_type = at::ScalarType::Float;
} else if (input_type_list[i] == "uint8_t") {
- inputs.push_back(torch::ones(input_dims, at::ScalarType::Byte));
+ input_type = at::ScalarType::Byte;
} else if (input_type_list[i] == "int64") {
- inputs.push_back(torch::ones(input_dims, torch::kI64));
+ input_type = at::ScalarType::Long;
} else {
CAFFE_THROW("Unsupported input type: ", input_type_list[i]);
}
+
+ at::MemoryFormat input_memory_format;
+ if (input_memory_format_list[i] == "channels_last") {
+ if (input_dims.size() != 4u) {
+ CAFFE_THROW(
+ "channels_last memory format only available on 4D tensors!");
+ }
+ input_memory_format = at::MemoryFormat::ChannelsLast;
+ } else if (input_memory_format_list[i] == "contiguous_format") {
+ input_memory_format = at::MemoryFormat::Contiguous;
+ } else {
+ CAFFE_THROW(
+ "Unsupported input memory format: ", input_memory_format_list[i]);
+ }
+
+ inputs.push_back(torch::ones(
+ input_dims,
+ at::TensorOptions(input_type).memory_format(input_memory_format)));
}
if (FLAGS_pytext_len > 0) {
|
node.status for vm_workload_consolidation
The primary usage of "node.state" is wrong; it should be 'node.status'.
So correct it and refactor the 'get_state_str' method. | @@ -169,20 +169,36 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
choices=["ceilometer", "gnocchi"])
]
- def get_state_str(self, state):
- """Get resource state in string format.
+ def get_instance_state_str(self, instance):
+ """Get instance state in string format.
- :param state: resource state of unknown type
+ :param instance:
"""
- if isinstance(state, six.string_types):
- return state
- elif isinstance(state, (element.InstanceState, element.ServiceState)):
- return state.value
+ if isinstance(instance.state, six.string_types):
+ return instance.state
+ elif isinstance(instance.state, element.InstanceState):
+ return instance.state.value
else:
- LOG.error('Unexpected resource state type, '
+ LOG.error('Unexpected instance state type, '
'state=%(state)s, state_type=%(st)s.' %
- dict(state=state,
- st=type(state)))
+ dict(state=instance.state,
+ st=type(instance.state)))
+ raise exception.WatcherException
+
+ def get_node_status_str(self, node):
+ """Get node status in string format.
+
+ :param node:
+ """
+ if isinstance(node.status, six.string_types):
+ return node.status
+ elif isinstance(node.status, element.ServiceState):
+ return node.status.value
+ else:
+ LOG.error('Unexpected node status type, '
+ 'status=%(status)s, status_type=%(st)s.' %
+ dict(status=node.status,
+ st=type(node.status)))
raise exception.WatcherException
def add_action_enable_compute_node(self, node):
@@ -219,7 +235,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
:param destination_node: node object
:return: None
"""
- instance_state_str = self.get_state_str(instance.state)
+ instance_state_str = self.get_instance_state_str(instance)
if instance_state_str != element.InstanceState.ACTIVE.value:
# Watcher currently only supports live VM migration and block live
# VM migration which both requires migrated VM to be active.
@@ -234,8 +250,12 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
migration_type = 'live'
- destination_node_state_str = self.get_state_str(destination_node.state)
- if destination_node_state_str == element.ServiceState.DISABLED.value:
+ # Here will makes repeated actions to enable the same compute node,
+ # when migrating VMs to the destination node which is disabled.
+ # Whether should we remove the same actions in the solution???
+ destination_node_status_str = self.get_node_status_str(
+ destination_node)
+ if destination_node_status_str == element.ServiceState.DISABLED.value:
self.add_action_enable_compute_node(destination_node)
if self.compute_model.migrate_instance(
@@ -406,8 +426,8 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
rcu = {}
counters = {}
for node in nodes:
- node_state_str = self.get_state_str(node.state)
- if node_state_str == element.ServiceState.ENABLED.value:
+ node_status_str = self.get_node_status_str(node)
+ if node_status_str == element.ServiceState.ENABLED.value:
rhu = self.get_relative_node_utilization(node)
for k in rhu.keys():
if k not in rcu:
|
improvements to ccl transmon
add measure_flipping(), todo: call correct analysis
small typos corrected | @@ -695,7 +695,7 @@ class CCLight_Transmon(Qubit):
self.ro_pulse_down_phi1())
ro_lm.acquisition_delay(self.ro_acq_delay())
- ro_lm.load_DIO_triggered_sequence_onto_UHFQC(hardcode_cases=[])
+ ro_lm.load_DIO_triggered_sequence_onto_UHFQC()
UHFQC.sigouts_0_offset(self.ro_pulse_mixer_offs_I())
UHFQC.sigouts_1_offset(self.ro_pulse_mixer_offs_Q())
@@ -751,6 +751,7 @@ class CCLight_Transmon(Qubit):
self.prepare_readout()
self._prep_td_sources()
self._prep_mw_pulses()
+ self._prep_td_configure_VSM()
def _prep_td_sources(self):
self.instr_spec_source.get_instr().off()
@@ -787,7 +788,6 @@ class CCLight_Transmon(Qubit):
if self.cfg_prepare_mw_awg():
MW_LutMan.load_waveforms_onto_AWG_lookuptable()
- self._prep_td_configure_VSM()
# N.B. This part is AWG8 specific
AWG = MW_LutMan.AWG.get_instr()
@@ -1308,7 +1308,7 @@ class CCLight_Transmon(Qubit):
nested_MC.run(name='gate_tuneup_allxy', mode='adaptive')
ma.OptimizationAnalysis(label='gate_tuneup_allxy')
- def calibrate_deletion_pulse_transients(
+ def calibrate_depletion_pulse_transients(
self, nested_MC=None, amp0=None,
amp1=None, phi0=180, phi1=0, initial_steps=None, two_par=True,
depletion_optimization_window=None, depletion_analysis_plot=False):
@@ -1598,6 +1598,36 @@ class CCLight_Transmon(Qubit):
self.T2_echo(a.fit_res.params['tau'].value)
return a
+ def measure_flipping(self, number_of_flips=np.arange(20), equator=False,
+ MC=None, analyze=True, close_fig=True, update=True):
+
+ if MC is None:
+ MC = self.instr_MC.get_instr()
+
+ # append the calibration points, times are for location in plot
+
+ nf = np.array(number_of_flips)
+ dn = nf[1] - nf[0]
+ nf = np.concatenate([nf,
+ (nf[-1]+1*dn,
+ nf[-1]+2*dn,
+ nf[-1]+3*dn,
+ nf[-1]+4*dn)])
+
+ self.prepare_for_timedomain()
+ p = sqo.flipping(number_of_flips=nf, equator=equator,
+ qubit_idx=self.cfg_qubit_nr(),
+ platf_cfg=self.cfg_openql_platform_fn())
+ s = swf.OpenQL_Sweep(openql_program=p,
+ CCL=self.instr_CC.get_instr())
+ d = self.int_avg_det
+ MC.set_sweep_function(s)
+ MC.set_sweep_points(nf)
+ MC.set_detector_function(d)
+ MC.run('flipping'+self.msmt_suffix)
+ a = ma.MeasurementAnalysis(auto=True, close_fig=True)
+ return a
+
def measure_randomized_benchmarking(self, nr_cliffords=2**np.arange(12),
nr_seeds=100,
double_curves=False,
|
Make DFK shutdown wait for app futures, not exec futures
This was my original intended behaviour of
A task does not always have an exec_fu, and in some cases in
testing, the final wait was failing rather than waiting,
because it could not find an exec_fu to wait for. | @@ -724,7 +724,7 @@ class DataFlowKernel(object):
for task_id in self.tasks:
# .exception() is a less exception throwing way of
# waiting for completion than .result()
- fut = self.tasks[task_id]['exec_fu']
+ fut = self.tasks[task_id]['app_fu']
if not fut.done():
logger.debug("Waiting for task {} to complete".format(task_id))
fut.exception()
|
Changed ice ring masking width to 1/d**2 to avoid having massive mask areas at
high resolution. | .type = space_group
.help = "The space group used to generate d_spacings for powder rings."
.expert_level = 1
- width = 0.06
+ width = 0.002
.type = float(value_min=0.0)
- .help = "The width of an ice ring (in d-spacing)."
+ .help = "The width of an ice ring (in 1/d^2)."
.expert_level = 1
d_min = None
.type = float(value_min=0.0)
@@ -93,6 +93,7 @@ def generate_ice_ring_resolution_ranges(beam, panel, params):
'''
from cctbx import crystal
+ from math import sqrt
if params.filter is True:
@@ -118,8 +119,11 @@ def generate_ice_ring_resolution_ranges(beam, panel, params):
# Yield all the d ranges
for j, d in enumerate(ms.d_spacings().data()):
- d_min = d - half_width
- d_max = d + half_width
+ d_sq_inv = 1.0 / (d**2)
+ d_sq_inv_min = d_sq_inv - half_width
+ d_sq_inv_max = d_sq_inv + half_width
+ d_min = sqrt(1.0 / d_sq_inv_min)
+ d_max = sqrt(1.0 / d_sq_inv_max)
yield (d_min, d_max)
|
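A worked example of the new 1/d^2 width (the numbers are illustrative, and `half_width` is assumed to be half of the configured width, as in the original code): a constant window in 1/d^2 translates to a d-spacing window that shrinks at high resolution, which is why the old fixed 0.06 window produced huge masks there.

```python
from math import sqrt


def ice_ring_d_range(d, width=0.002):
    """d-spacing limits for a ring at d, with the width defined in 1/d^2."""
    half_width = width / 2.0
    d_sq_inv = 1.0 / d ** 2
    return (sqrt(1.0 / (d_sq_inv + half_width)),
            sqrt(1.0 / (d_sq_inv - half_width)))


for d in (3.90, 1.50):
    d_min, d_max = ice_ring_d_range(d)
    print("d = %.2f A -> window %.4f A wide" % (d, d_max - d_min))
# Low-resolution ring: ~0.06 A wide; high-resolution ring: ~0.003 A wide.
```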
gdb_helpers: always expect [New Thread ...] messages
It seems that all control-flow commands may print this message before
returning on Windows, though exactly when this occurs is random. Extend
expected patterns to always accept it.
TN: | @@ -38,6 +38,13 @@ dsl_break_map: Dict[str, int] = {}
Mapping from breakpoint labels in "test.py" to the corresponding line numbers.
"""
+thread_notif_pattern = r"@/(\[New Thread .*\])?/ @/(Thread \d+ hit )?/"
+"""
+"quotemeta" pattern for thread-related messages from GDB after a control-flow
+command has returned.
+"""
+
+
# Fill ``dsl_break_map``
with open("test.py") as f:
for i, line in enumerate(f, 1):
@@ -82,7 +89,7 @@ def run_foonext(next_descr: Optional[str]) -> None:
"""
if next_descr is None:
next_descr = "@..."
- gdb.test("foonext", f"@/(Thread \\d+ hit )?/Breakpoint @...\n{next_descr}")
+ gdb.test("foonext", f"{thread_notif_pattern}Breakpoint @...\n{next_descr}")
def run_fooout(next_descr: str) -> None:
@@ -92,7 +99,7 @@ def run_fooout(next_descr: str) -> None:
"""
gdb.test(
"fooout",
- r"@/(\[New Thread .*\])?/ "
+ f"{thread_notif_pattern} "
f"libfoolang.implementation.@...\n{next_descr}",
)
@@ -102,4 +109,4 @@ def run_foosi(next_descr: str) -> None:
Run the "foosi" command, checking that the message describing the
transition matches ``next_descr``.
"""
- gdb.test("foosi", f"@/(Thread \\d+ hit )?/Breakpoint @...\n{next_descr}")
+ gdb.test("foosi", f"{thread_notif_pattern}Breakpoint @...\n{next_descr}")
|
(hello) make more mac friendly
Originally-Committed-To:
Originally-Committed-As: | @@ -43,17 +43,28 @@ func main() {
}
var profile = flag.String("profile", "dev", "profile")
- var output = flag.String("output", "/dev/stdout", "output file")
- var input = flag.String("input", "/dev/stdin", "input file")
+ var output = flag.String("output", "", "output file")
+ var input = flag.String("input", "", "input file")
var newline = flag.String("newline", "\n", "string to use for newline")
flag.Parse()
- in, err := os.Open(*input)
+ var err error
+ var in *os.File
+ if *input != "" {
+ in, err = os.Open(*input)
defer in.Close()
+ } else {
+ in = os.Stdin
+ }
+ var out *os.File
+ if *output != "" {
out, err := os.Create(*output)
die(err)
defer out.Close()
+ } else {
+ out = os.Stdout
+ }
bytes, err := ioutil.ReadAll(in)
die(err)
|
Fix dictionary changed size during iteration error with Python 3
In Python 2, dict.keys() would return a copy of the keys as a
list which could be modified. In Python 3 it is returned as an
iterator which can't be modified, so simply cast it as a list. | @@ -212,7 +212,7 @@ def clear_forms_data(func):
LOG.debug('Clearing forms data for application {0}.'.format(fqn))
services.get_apps_data(request)[app_id] = {}
LOG.debug('Clearing any leftover wizard step data.')
- for key in request.session.keys():
+ for key in list(request.session.keys()):
# TODO(tsufiev): unhardcode the prefix for wizard step data
if key.startswith('wizard_wizard'):
request.session.pop(key)
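For context, a minimal sketch of the Python 2/3 difference the message describes; the dictionary here is a stand-in for the Django session object in the patch.

```python
d = {"wizard_wizard_step0": 1, "csrf_token": 2}

# Python 3: d.keys() is a live view, so removing entries while iterating over it
# raises "RuntimeError: dictionary changed size during iteration".
# Materialising the keys first, as the patch does, avoids that:
for key in list(d.keys()):
    if key.startswith("wizard_wizard"):
        d.pop(key)

print(d)  # {'csrf_token': 2}
```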
|
Use psutil to check Windows Service status
Closes | @@ -4,13 +4,16 @@ import os
import platform
import re
import subprocess
-import sys
import time
from typing import Any, List, Tuple, cast
-from ..util import MonitorConfigurationError
from .monitor import Monitor, register
+try:
+ import psutil
+except ImportError:
+ psutil = None
+
try:
import pydbus
except ImportError:
@@ -59,46 +62,54 @@ class MonitorService(Monitor):
want_state = "RUNNING"
host = "."
_type = "service"
+ type = "service"
def __init__(self, name: str, config_options: dict) -> None:
super().__init__(name, config_options)
- self.service_name = self.get_config_option("service", required=True)
- self.want_state = self.get_config_option("state", default="RUNNING")
- self.host = self.get_config_option("host", default=".")
-
- if self.want_state not in ["RUNNING", "STOPPED"]:
- raise MonitorConfigurationError(
- "invalid state {0} for MonitorService".format(self.want_state)
+ if psutil is None:
+ self.monitor_logger.critical("psutil is not installed.")
+ self.monitor_logger.critical("Try: pip install -r requirements.txt")
+ self.service_name = cast(str, self.get_config_option("service", required=True))
+ self.want_state = cast(
+ str,
+ self.get_config_option(
+ "state",
+ default="RUNNING",
+ allowed_values=[
+ "RUNNING",
+ "STOPPED",
+ "PAUSED",
+ "START_PENDING",
+ "PAUSE_PENDING",
+ "CONTINUE_PENDING",
+ "STOP_PENDING",
+ ],
+ ),
)
+ self.host = cast(str, self.get_config_option("host", default="."))
def run_test(self) -> bool:
"""Check the service is in the desired state"""
- SVC_STATUS_LINE = 2
+ if psutil is None:
+ return self.record_fail("psutil is not installed")
try:
- if platform.system() == "CYGWIN_NT-6.0":
- host = "\\\\\\\\" + self.host
- elif platform.system() in ["Microsoft", "Windows"]:
- host = "\\\\" + self.host
- else:
- # we need windows for sc
+ service = psutil.windows_service_get(self.service_name)
+ except psutil.NoSuchProcess:
return self.record_fail(
- "Cannot check for Windows services while running on a non-Windows platform."
+ "service {} does not exist".format(self.service_name)
)
-
- output = str(
- subprocess.check_output(
- ["sc", host, "query", self.service_name, "state=all"]
- ),
- "utf-8",
+ except AttributeError:
+ return self.record_fail("not supported on this platform")
+ except Exception:
+ self.monitor_logger.exception("Failed to get service")
+ return self.record_fail("Unable to get service")
+
+ state = service.status().upper()
+ if state != self.want_state:
+ return self.record_fail(
+ "Service state is {} (wanted {})".format(state, self.want_state)
)
- lines = output.split(os.linesep)
- lines = [x for x in lines if len(x) > 0]
- if self.want_state in lines[SVC_STATUS_LINE]:
return self.record_success()
- except Exception as e:
- sys.stderr.write("%s\n" % e)
- pass
- return self.record_fail()
def describe(self) -> str:
"""Explains what this instance is checking"""
|
Update apt_pegasus.txt
Added addresses from [0] minus duplications.
Deleted ```track-your-fedex-package.org``` from ```proofpoint``` since it is a dup.
Deleted ```adjust-local-settings.com``` from ```proofpoint``` since it is a dup.
aalaan.tv
accounts.mx
-adjust-local-settings.com
alawaeltech.com
alljazeera.co
asrararabiya.co
@@ -60,7 +59,6 @@ smser.net
sms.webadv.co
topcontactco.com
tpcontact.co.uk
-track-your-fedex-package.org
turkeynewsupdates.com
turkishairines.info
uaenews.online
@@ -87,3 +85,18 @@ social-life.info
un0noticias.com
un0noticias.net
universopolitico.net
+
+# Reference: https://citizenlab.ca/2018/07/nso-spyware-targeting-amnesty-international/
+
+adjust-local-settings.co
+afternicweb.net
+arabnews365.com
+banca-movil.com
+ecommerce-ads.org
+kingdom-deals.com
+nsogroup.com
+nsoqa.com
+pine-sales.com
+qaintqa.com
+remove-subscription.co
+track-your-fedex-package.online
|
testsuite/python_support/utils.py: fix a minor coding style issue
TN: | @@ -7,6 +7,7 @@ from langkit.compiled_types import StructMetaclass, T
from langkit.diagnostics import DiagnosticError
from langkit.expressions import Self
from langkit.libmanage import ManageScript
+from langkit.utils import reset_memoized
def prepare_context(grammar,
@@ -140,5 +141,4 @@ def reset_langkit():
T._type_dict = {}
- from langkit.utils import reset_memoized
reset_memoized()
|
test(test_access_denied_notexist_username): update the error message
Apparently the error message has changed slightly in MW 1.34.0-wmf.13;
however, I was not able to locate the change that caused this.
self.assertRaises(pywikibot.NoUsername, req.submit)
# FIXME: T100965
self.assertRaises(api.APIError, req.submit)
+ try:
+ error.assert_called_with('Login failed (readapidenied).')
+ except AssertionError: # MW version is older than 1.34.0-wmf.13
error.assert_called_with('Login failed (Failed).')
warning.assert_called_with(
'API error readapidenied: '
|
Update pyTorchDockerImageTag
Corresponds to | @@ -122,7 +122,7 @@ pytorch_tutorial_build_defaults: &pytorch_tutorial_build_defaults
command: |
set -e
- export pyTorchDockerImageTag=9de29bef4a5dc0dd1dd19428d83e5a66a44a1ed2
+ export pyTorchDockerImageTag=27360e99acec34d1c78f70ba15ac2c28ed96c182
echo "PyTorchDockerImageTag: "${pyTorchDockerImageTag}
cat >/home/circleci/project/ci_build_script.sh \<<EOL
|
Fixing upload_binary_htmls again
Summary: Pull Request resolved: | @@ -4,28 +4,29 @@ set -eux -o pipefail
# This step runs on multiple executors with different envfile locations
if [[ "$(uname)" == Darwin ]]; then
- source "/Users/distiller/project/env"
+ envfile="/Users/distiller/project/env"
elif [[ -d "/home/circleci/project" ]]; then
# machine executor (binary tests)
- source "/home/circleci/project/env"
+ envfile="/home/circleci/project/env"
else
# docker executor (binary builds)
- source "/env"
+ envfile="/env"
fi
-# MINICONDA_ROOT is populated in binary_populate_env.sh , but update_htmls does
-# not source that script since it does not have a BUILD_ENVIRONMENT. It could
-# make a fake BUILD_ENVIRONMENT and call that script anyway, but that seems
-# more hacky than this
-if [[ -z "${MINICONDA_ROOT:-}" ]]; then
- # TODO get rid of this. Might need to separate binary_populate_env into two
- # steps, one for every job and one for build jobs
+# TODO this is super hacky and ugly. Basically, the binary_update_html job does
+# not have an env file, since it does not call binary_populate_env.sh, since it
+# does not have a BUILD_ENVIRONMENT. So for this one case, which we detect by a
+# lack of an env file, we manually export the environment variables that we
+# need to install miniconda
+if [[ ! -f "$envfile" ]]; then
MINICONDA_ROOT="/home/circleci/project/miniconda"
workdir="/home/circleci/project"
retry () {
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}
export -f retry
+else
+ source "$envfile"
fi
conda_sh="$workdir/install_miniconda.sh"
|
remove duplicate-ish
free space is already handled, in a more robust way, by the bbox method that stretches them by 20%
requiredSpacing = boxA.x1 - boxB.x0
else:
requiredSpacing = boxA.x0 - boxB.x1
- requiredSpacing *= 1.2
self.translateLabel(labels[a], boxA, dx=-requiredSpacing/2)
self.translateLabel(labels[b], boxB, dx=requiredSpacing/2)
|
Extract out dumping params into a function. Then it can be used from pdb
interactively and in other places if need be. | @@ -137,6 +137,21 @@ def optimizer_fun(net_params, step_size=1e-3):
return opt_state, opt_update
+def log_params(params, name="params"):
+ """Dumps the params with `logging.error`."""
+ for i, param in enumerate(params):
+ if not param:
+ # Empty tuple.
+ continue
+ if not isinstance(param, tuple):
+ logging.error(
+ "%s[%d] : (%s) = [%s]", name, i, param.shape, onp.array(param))
+ else:
+ for j, p in enumerate(param):
+ logging.error(
+ "\t%s[%d, %d] : (%s) = [%s]", name, i, j, p.shape, onp.array(p))
+
+
# Should this be collect 'n' trajectories, or
# Run the env for 'n' steps and take completed trajectories, or
# Any other option?
@@ -202,17 +217,7 @@ def collect_trajectories(env,
logging.error("predictions: [%s]", predictions)
logging.error("observation_history: [%s]", observation_history)
logging.error("policy_net_params: [%s]", policy_net_params)
- for i, param in enumerate(policy_net_params):
- if not param:
- # Empty tuple.
- continue
- if not isinstance(param, tuple):
- logging.error(
- "Param[%d] : (%s) = [%s]", i, param.shape, onp.array(param))
- else:
- for j, p in enumerate(param):
- logging.error(
- "\tParam[%d, %d] : (%s) = [%s]", i, j, p.shape, onp.array(p))
+ log_params(policy_net_params, "policy_net_params")
raise err
observation, reward, done, _ = env.step(action)
|
scripts: Fix pylint issue W1514 in scripts/convert_config.py
scripts/convert_config.py:123:9: W1514: Using open without explicitly specifying an encoding (unspecified-encoding)
scripts/convert_config.py:130:13: W1514: Using open without explicitly specifying an encoding (unspecified-encoding)
scripts/convert_config.py:174:9: W1514: Using open without explicitly specifying an encoding (unspecified-encoding) | @@ -120,14 +120,14 @@ def output_component(component, config, template, outfile):
print(f"Writing {component} configuration to {outfile}")
- with open(template, "r") as tf:
+ with open(template, "r", encoding="utf-8") as tf:
t = tf.read()
j2 = Template(t)
r = j2.render(config)
- with open(outfile, "w") as o:
+ with open(outfile, "w", encoding="utf-8") as o:
print(r, file=o)
@@ -171,7 +171,7 @@ def process_mapping(old_config, templates, mapping_file, debug=False):
configuration dictionary
"""
- with open(mapping_file, "r") as f:
+ with open(mapping_file, "r", encoding="utf-8") as f:
try:
mapping = json.loads(f.read())
except Exception as e:
|
re-order the __getattribute__ call
this call is expected to work in most cases, so it should be attempted
first, before looking in the list of alternateNames | @@ -167,13 +167,13 @@ class PropertySet(object):
def __getattribute__(self, name):
try:
+ return object.__getattribute__(self, name)
+ except AttributeError as exc:
alternateNames = object.__getattribute__(self, '_alternateNames')
if name in alternateNames:
return object.__getattribute__(self, 'getProperty')(alternateNames[name])
else:
- raise AttributeError()
- except AttributeError:
- return object.__getattribute__(self, name)
+ raise exc
class PropertyPanelHelper(object):
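For context, a standalone sketch of the lookup order the message describes: try the normal attribute lookup first, and only fall back to the alternate-name table on AttributeError. Class and attribute names here are hypothetical.

```python
class AliasedAttrs:
    _alternateNames = {"colour": "color"}

    def __init__(self):
        self.color = "red"

    def __getattribute__(self, name):
        try:
            # Expected to succeed in most cases, so attempt it first.
            return object.__getattribute__(self, name)
        except AttributeError as exc:
            aliases = object.__getattribute__(self, "_alternateNames")
            if name in aliases:
                return object.__getattribute__(self, aliases[name])
            raise exc

print(AliasedAttrs().colour)  # "red", resolved through the alias fallback
```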
|
fix: Ignore ImportError for Notification Settings
During migrate, Notification Settings are fetched which doesn't exist
yet. It is safe to ignore it because Notification Settings are not
created anyway for any user. | @@ -44,7 +44,7 @@ def get_subscribed_documents():
try:
doc = frappe.get_doc('Notification Settings', frappe.session.user)
subscribed_documents = [item.document for item in doc.subscribed_documents]
- except frappe.DoesNotExistError:
+ except (frappe.DoesNotExistError, ImportError):
subscribed_documents = []
return subscribed_documents
|
correct rounding errors in sequence length computation
these caused crashes with CUDA kernels
input = input.permute(perm)
o = input.reshape(input.shape[:dest] + (input.shape[dest] * input.shape[dest + 1],) + input.shape[dest + 2:])
if seq_len is not None:
- seq_len = (seq_len * float(initial_len)/o.shape[3]).int()
+ seq_len = (seq_len * (float(initial_len)/o.shape[3])).int()
return o, seq_len
def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
|
Improve AppendDims:
Use expand_dims when ndims == 1
Use tf.shape instead of GetShape (latter also calls tf.shape, plus a set of tf.slice and tf.pack). | @@ -6397,7 +6397,10 @@ def SequencePaddings(seqlen, maxlen=None):
def AppendDims(x, ndims):
- return tf.reshape(x, GetShape(x) + [1] * ndims)
+ if ndims == 1:
+ return tf.expand_dims(x, -1)
+ else:
+ return tf.reshape(x, tf.concat([tf.shape(x), [1] * ndims], axis=0))
def MaybeSoftCapLogits(x, cap=0.0):
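For context, a NumPy sketch of the shape manipulation; the real helper works on TensorFlow tensors with dynamic shapes, which is why the patch uses `tf.shape` and `tf.concat` in the general case.

```python
import numpy as np

def append_dims(x, ndims):
    if ndims == 1:
        return np.expand_dims(x, -1)           # cheap single trailing axis
    return x.reshape(x.shape + (1,) * ndims)   # general case

a = np.zeros((2, 3))
print(append_dims(a, 1).shape)  # (2, 3, 1)
print(append_dims(a, 2).shape)  # (2, 3, 1, 1)
```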
|
tests: reformat test_trim_eviction
This commit doesn't introduce any changes to the flow of the tests. | @@ -38,11 +38,11 @@ def test_trim_eviction(cache_mode, cache_line_size, filesystem, cleaning):
test_file_path = os.path.join(mount_point, "test_file")
with TestRun.step("Prepare devices."):
- cache_disk = TestRun.disks['cache']
+ cache_disk = TestRun.disks["cache"]
cache_disk.create_partitions([Size(1, Unit.GibiByte)])
cache_dev = cache_disk.partitions[0]
- core_disk = TestRun.disks['core']
+ core_disk = TestRun.disks["core"]
core_disk.create_partitions([Size(1, Unit.GibiByte)])
core_dev = core_disk.partitions[0]
@@ -76,7 +76,9 @@ def test_trim_eviction(cache_mode, cache_line_size, filesystem, cleaning):
with TestRun.step("Remove file and create a new one."):
cache_iostats_before = cache_dev.get_io_stats()
data_reads_before = cache.get_io_class_statistics(io_class_id=0).block_stats.cache.reads
- metadata_reads_before = cache.get_io_class_statistics(io_class_id=1).block_stats.cache.reads
+ metadata_reads_before = cache.get_io_class_statistics(
+ io_class_id=1
+ ).block_stats.cache.reads
test_file.remove()
os_utils.sync()
os_utils.drop_caches()
@@ -106,16 +108,19 @@ def test_trim_eviction(cache_mode, cache_line_size, filesystem, cleaning):
)
else:
TestRun.LOGGER.info(
- "Number of reads from cache before and after removing test file is the same.")
+ "Number of reads from cache before and after removing test file is the same."
+ )
def create_file_with_ddrescue(core_dev, test_file_path):
- dd = Dd() \
- .block_size(Size(1, Unit.MebiByte)) \
- .count(900) \
- .input("/dev/urandom") \
- .output(test_file_path) \
+ dd = (
+ Dd()
+ .block_size(Size(1, Unit.MebiByte))
+ .count(900)
+ .input("/dev/urandom")
+ .output(test_file_path)
.oflag("sync")
+ )
dd.run()
return File(test_file_path)
|
Fix center alignment of dagster logo in readme
Test Plan: docs only | -<span style="display: block; text-align: center"><img align="center" src="https://user-images.githubusercontent.com/609349/57987382-7e294500-7a35-11e9-9c6a-f73e0f1d3a1c.png" />
+<p align="center">
+<img src="https://user-images.githubusercontent.com/609349/57987382-7e294500-7a35-11e9-9c6a-f73e0f1d3a1c.png" />
<br /><br />
-<span style="text-align: center; display: inline-block">
-[<img src="https://badge.fury.io/py/dagster.svg">](https://badge.fury.io/py/dagster)
-[<img src="https://coveralls.io/repos/github/dagster-io/dagster/badge.svg?branch=master">](https://coveralls.io/github/dagster-io/dagster?branch=master)
-[<img src="https://badge.buildkite.com/888545beab829e41e5d7303db15525a2bc3b0f0e33a72759ac.svg?branch=master">](https://buildkite.com/dagster/dagster)
-[<img src="https://readthedocs.org/projects/dagster/badge/?version=master">](https://dagster.readthedocs.io/en/master/)
-</span>
-
-</span>
-
+<a href="https://badge.fury.io/py/dagster"><img src="https://badge.fury.io/py/dagster.svg"></>
+<a href="https://coveralls.io/github/dagster-io/dagster?branch=master"><img src="https://coveralls.io/repos/github/dagster-io/dagster/badge.svg?branch=master"></a>
+<a href="https://buildkite.com/dagster/dagster"><img src="https://badge.buildkite.com/888545beab829e41e5d7303db15525a2bc3b0f0e33a72759ac.svg?branch=master"></a>
+<a href="https://dagster.readthedocs.io/en/master/"><img src="https://readthedocs.org/projects/dagster/badge/?version=master"></a>
+</p>
# Introduction
@@ -19,12 +16,13 @@ Combining an elegant programming model and beautiful tools, Dagster allows infra
### Install
To get started:
<br />
-<span align="center" style="display: block; font-size:20px;">
-``pip install dagster dagit``
-</span>
+<p align="center">
+<code>pip install dagster dagit</code>
+</p>
<br />
This installs two modules:
<br />
+<br />
* **dagster** | The core programming model and abstraction stack; stateless, single-node,
single-process and multi-process execution engines; and a CLI tool for driving those engines.
|
[compiler] Stop calling `Thread.getStackTrace` in `cb.fatal`
Leave old code commented out for debugging | @@ -354,9 +354,10 @@ object Code {
}
private def getEmitLineNum: Int = {
- val st = Thread.currentThread().getStackTrace
- val i = st.indexWhere(ste => ste.getFileName == "Emit.scala")
- if (i == -1) 0 else st(i).getLineNumber
+// val st = Thread.currentThread().getStackTrace
+// val i = st.indexWhere(ste => ste.getFileName == "Emit.scala")
+// if (i == -1) 0 else st(i).getLineNumber
+ 0
}
def _throw[T <: java.lang.Throwable, U](cerr: Code[T])(implicit uti: TypeInfo[U]): Code[U] =
|
fix(reportview): use .pop instead of del
use .pop() instead of del to avoid KeyError | @@ -27,8 +27,8 @@ def get_form_params():
"""Stringify GET request parameters."""
data = frappe._dict(frappe.local.form_dict)
- del data["cmd"]
- del data["data"]
+ data.pop('cmd', None)
+ data.pop('data', None)
if "csrf_token" in data:
del data["csrf_token"]
|
Fix Report Discovery Problem profile_name
HG--
branch : feature/microservices | @@ -17,6 +17,8 @@ from noc.lib.nosql import get_db
from pymongo import ReadPreference
from noc.main.models.pool import Pool
from noc.sa.models.managedobject import ManagedObject
+from noc.sa.models.profile import Profile
+from noc.sa.models.profile import GENERIC_PROFILE
from noc.sa.models.managedobjectprofile import ManagedObjectProfile
from noc.sa.models.managedobjectselector import ManagedObjectSelector
from noc.sa.models.objectstatus import ObjectStatus
@@ -160,7 +162,7 @@ class ReportFilterApplication(SimpleReport):
mos = mos.filter(object_profile=obj_profile)
if filter_view_other:
mnp_in = list(ManagedObjectProfile.objects.filter(enable_ping=False))
- mos = mos.filter(profile_name="Generic.Host").exclude(object_profile__in=mnp_in)
+ mos = mos.filter(profile=Profile.objects.get(name=GENERIC_PROFILE)).exclude(object_profile__in=mnp_in)
discovery = "noc.services.discovery.jobs.box.job.BoxDiscoveryJob"
mos_id = list(mos.values_list("id", flat=True))
if avail_status:
@@ -203,6 +205,7 @@ class ReportFilterApplication(SimpleReport):
mo.name,
mo.address,
mo.profile.name,
+ mo.administrative_domain.name,
_("Yes") if mo.get_status() else _("No"),
discovery["st"].strftime("%d.%m.%Y %H:%M") if "st" in discovery else "",
method,
|
faq: add ERROR 1148 (42000)
* faq: add ERROR 1148 (42000)
Via:
* faq: fix a typo | @@ -825,3 +825,9 @@ update mysql.tidb set variable_value='30m' where variable_name='tikv_gc_life_tim
#### ERROR 1105 (HY000): other error: unknown error Wire Error(InvalidEnumValue(4004))
This error usually occurs when the version of TiDB does not match with the version of TiKV. To avoid version mismatch, upgrade all components when you upgrade the version.
+
+#### ERROR 1148 (42000): the used command is not allowed with this TiDB version
+
+When you execute the `LOAD DATA LOCAL` statement but the MySQL client does not allow executing this statement (the value of the `local_infile` option is 0), this error occurs.
+
+The solution is to use the `--local-infile=1` option when you start the MySQL client. For example, use command like `mysql --local-infile=1 -u root -h 127.0.0.1 -P 4000`. The default value of `local-infile` is different in different versions of MySQL client, therefore you need to configure it in some MySQL clients and do not need to configure it in some others.
\ No newline at end of file
|
Testing/Ethernet: add SCHED_FIFO API call
SCHED_FIFO is more desirable than SCHED_DEADLINE as it
has no timeout, and we can tune the priority as needed. | @@ -71,10 +71,11 @@ BUFFER_SIZE = 4096 # Size in bytes of buffer for PC to receive message
TCP_RECEIVE_BUFFER_SIZE = 16
# Scheduling parameters
-SCHEDDL_SETTING = ""
+SCHEDDL_SETTING = "fifo"
SCHEDDL_RUNTIME = 300000000000
SCHEDDL_DEADLINE = 600000000000
SCHEDDL_PERIOD = 600000000000
+SCHEDDL_PRIORITY = 1
ETH_ECHO_TEST["name"] = "eth_echo_test_{}_{}.json".format(PROTOCOL, DATE_TIME)
ETH_ECHO_TEST["config"]["message_sizes"] = str(MESSAGE_SIZES)
@@ -94,7 +95,10 @@ ETH_ECHO_TEST["config"]["scheduling_params"]["scheddl_period"] = SCHEDDL_PERIOD
if SCHEDDL_SETTING == "deadline":
scheddl.set_deadline(SCHEDDL_RUNTIME, SCHEDDL_DEADLINE, SCHEDDL_PERIOD, scheddl.RESET_ON_FORK)
- print("Running with SCHEDDL_SETTING \"deadline\"")
+ print("Running with SCHEDDL_SETTING {\"deadline\"}")
+elif SCHEDDL_SETTING == "fifo":
+ scheddl.set_fifo(SCHEDDL_PRIORITY, scheddl.RESET_ON_FORK)
+ print("Running with SCHEDDL_SETTING \"fifo\"")
i_trial = 0
for msg_size in MESSAGE_SIZES:
@@ -174,5 +178,3 @@ print("Collected {} results".format(len(ETH_ECHO_TEST["tests"])))
with open(ETH_ECHO_TEST["name"], "w") as test_results_json:
json.dump(ETH_ECHO_TEST, test_results_json)
-
-
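For context, the standard-library way to request SCHED_FIFO on Linux is sketched below; the test uses its own `scheddl` module instead, the priority value is illustrative, and the call needs CAP_SYS_NICE or root.

```python
import os

param = os.sched_param(1)                        # FIFO priority 1 (illustrative)
os.sched_setscheduler(0, os.SCHED_FIFO, param)   # pid 0 = calling process
print(os.sched_getscheduler(0) == os.SCHED_FIFO)
```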
|
Update setup_mac.md
Clarification on what hardware tensorflow-gpu operates on
* Tensorflow GPU
-Currently there is no gpu support for [tensorflow on mac](https://www.tensorflow.org/install#install-tensorflow).
+Currently there is no NVidia gpu support for [tensorflow on mac](https://www.tensorflow.org/install#install-tensorflow).
* Create your local working dir:
|
[modules/title] fixed runtime exception
From i3ipc, find_focused().name can return None instead of a string, which will cause a runtime exception
from bumblebee.output import scrollable
+no_title = "n/a"
+
class Module(bumblebee.engine.Module):
"""Window title module."""
@@ -36,7 +38,7 @@ class Module(bumblebee.engine.Module):
self._i3 = i3ipc.Connection()
self._full_title = self._i3.get_tree().find_focused().name
except Exception:
- self._full_title = "n/a"
+ self._full_title = no_title
def get_title(self, widget):
if bumblebee.util.asbool(self.parameter("scroll", False)):
@@ -62,6 +64,9 @@ class Module(bumblebee.engine.Module):
try:
self._full_title = self._i3.get_tree().find_focused().name
except Exception:
- self._full_title = "n/a"
+ self._full_title = no_title
+
+ if(self._full_title is None):
+ self._full_title = no_title
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
Replace FA pro issue icon with the regular icon
We stopped using FA Pro, as it was using an ex-admin's personal FA Pro subscription, which we didn't control.
</div>
</div>
<div class="card-footer">
- <a href="https://github.com/python-discord/sir-lancebot/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc" class="card-footer-item"><i class="far fa-exclamation-circle"></i> Issues</a>
+ <a href="https://github.com/python-discord/sir-lancebot/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc" class="card-footer-item"><i class="fas fa-exclamation-circle"></i> Issues</a>
<a href="https://github.com/python-discord/sir-lancebot/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc" class="card-footer-item"><i class="fas fa-code-merge"></i> PRs</a>
</div>
<div class="card-footer">
@@ -54,7 +54,7 @@ Our projects on Python Discord are open source and [available on Github](https:/
</div>
</div>
<div class="card-footer">
- <a href="https://github.com/python-discord/bot/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc" class="card-footer-item"><i class="far fa-exclamation-circle"></i> Issues</a>
+ <a href="https://github.com/python-discord/bot/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc" class="card-footer-item"><i class="fas fa-exclamation-circle"></i> Issues</a>
<a href="https://github.com/python-discord/bot/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc" class="card-footer-item"><i class="fas fa-code-merge"></i> PRs</a>
</div>
<div class="card-footer">
@@ -81,7 +81,7 @@ Our projects on Python Discord are open source and [available on Github](https:/
</div>
</div>
<div class="card-footer">
- <a href="https://github.com/python-discord/site/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc" class="card-footer-item"><i class="far fa-exclamation-circle"></i> Issues</a>
+ <a href="https://github.com/python-discord/site/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc" class="card-footer-item"><i class="fas fa-exclamation-circle"></i> Issues</a>
<a href="https://github.com/python-discord/site/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc" class="card-footer-item"><i class="fas fa-code-merge"></i> PRs</a>
</div>
<div class="card-footer">
|
Temporarily disable fluentd from scenario001-multinode-containers
Mixing containers and BM is currently not working. Once the master
promotion takes place we will have a fluentd container, can re-add
fluentd as a container, and the problem should not recur.
Related-Bug: | @@ -29,7 +29,9 @@ resource_registry:
# FIXME(mandre) fluentd container image missing from tripleomaster registry
# https://bugs.launchpad.net/tripleo/+bug/1721723
# OS::TripleO::Services::FluentdClient: ../../docker/services/fluentd-client.yaml
- OS::TripleO::Services::FluentdClient: ../../puppet/services/logging/fluentd-client.yaml
+ # FIXME(mandre/bandini) mixing BM fluentd and containers is problematic
+ # https://bugs.launchpad.net/tripleo/+bug/1726891
+ # OS::TripleO::Services::FluentdClient: ../../puppet/services/logging/fluentd-client.yaml
OS::TripleO::Services::SensuClient: ../../docker/services/sensu-client.yaml
# Some infra instances don't pass the ping test but are otherwise working.
# Since the OVB jobs also test this functionality we can shut it off here.
@@ -97,7 +99,9 @@ parameter_defaults:
- OS::TripleO::Services::Congress
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- - OS::TripleO::Services::FluentdClient
+ # FIXME(mandre/bandini) mixing BM fluentd and containers is problematic
+ # https://bugs.launchpad.net/tripleo/+bug/1726891
+ #- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::Iscsid
|
Update typo in instructions.md
Corrected "a adjective" to "an adjective" | @@ -77,7 +77,7 @@ Implement the `remove_suffix_ness(<word>)` function that takes in a word `str`,
## 4. Extract and transform a word
Suffixes are often used to change the part of speech a word has.
- A common practice in English is "verbing" or "verbifying" -- where a adjective _becomes_ a verb by adding an `en` suffix.
+ A common practice in English is "verbing" or "verbifying" -- where an adjective _becomes_ a verb by adding an `en` suffix.
In this task, your sister is going to practice "verbing" words by extracting an adjective from a sentence and turning it into a verb.
Fortunately, all the words that need to be transformed here are "regular" - they don't need spelling changes to add the suffix.
|
BUG: Fix in memory fx rate lookups in py3.
Convert from S3 -> U3 before doing lookup. | +"""Interface and definitions for foreign exchange rate readers.
"""
-"""
+import six
from interface import implements, Interface
@@ -47,6 +48,11 @@ class InMemoryFXRateReader(implements(FXRateReader)):
self._data = data
def get_rates(self, field, quote, bases, dates):
+ if six.PY3:
+ # DataFrames in self._data should contain str as column keys, which
+ # don't compare equal to bases in Python 3. Convert to unicode.
+ bases = bases.astype('U3')
+
return self._data[field][quote][bases].reindex(dates, method='ffill')
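For context, a small NumPy example of the bytes-vs-str mismatch the message refers to; the currency codes are illustrative.

```python
import numpy as np

bases = np.array([b"EUR", b"GBP"], dtype="S3")   # 'S3' holds bytes in Python 3
print(bases[0] == "EUR")                         # False: b"EUR" != "EUR"
print(bases.astype("U3")[0] == "EUR")            # True after converting to unicode
```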
|
Create faq.html
* Create faq.html
* Add route for FAQ page
* Update faq.html
Updated based on feedback & have made other fixes after looking at staging site
* Update faq.html
* Update faq.html
Changed page title | @@ -12,6 +12,8 @@ urlpatterns = [
url(r'^api/$', TemplateView.as_view(template_name='api.html'), name="api"),
url(r'^about/$', TemplateView.as_view(template_name='about.html'),
name="about"),
+ url(r'^faq/$', TemplateView.as_view(template_name='faq.html'),
+ name="faq"),
url(r'^caution/$', TemplateView.as_view(template_name='caution.html'),
name="caution"),
url(r'^how-to-use/$',
|
Added Arch Linux (ARM)
Adding Arch Linux ARM | @@ -91,6 +91,48 @@ Manual installation (On Raspbian Wheezy)
echo "export PYTHONPATH=$(pwd):\$PYTHONPATH" >> ~/.profile
source ~/.profile
+Manual installation (On Arch Linux ARM)
+------------------------------------------------
+
+#. Install the dependencies::
+
+ sudo pacman -Syu
+ sudo pacman -S sdl2 sdl2_gfx sdl2_image sdl2_net sdl2_ttf sdl2_mixer python-setuptools
+
+ Note: python-setuptools needs to be installed through pacman or it will result with conflicts!
+
+#. Install pip from source::
+
+ wget https://raw.github.com/pypa/pip/master/contrib/get-pip.py
+ sudo python get-pip.py
+
+#. Install a new enough version of Cython:
+
+ .. parsed-literal::
+
+ sudo pip install -U |cython_install|
+
+#. Install Kivy globally on your system::
+
+ sudo pip install git+https://github.com/kivy/kivy.git@master
+
+#. Or build and use kivy inplace (best for development)::
+
+ git clone https://github.com/kivy/kivy
+ cd kivy
+ makepkg -Asri
+
+Images to use::
+
+ http://raspex.exton.se/?p=859 (recommended)
+ https://archlinuxarm.org/
+
+.. note::
+
+ On versions of kivy prior to 1.10.1, Mesa library naming changes can result
+ in "Unable to find any valuable Window provider" errors. If you experience
+ this issue, please upgrade or consult `ticket #5360.
+ <https://github.com/kivy/kivy/issues/5360>`_
KivyPie distribution
--------------------
|
update exp. family doc
Summary:
Sphinx doesn't understand the hyphen: it does not merge the two halves together in HTML.
Pull Request resolved: | @@ -18,8 +18,8 @@ class ExponentialFamily(Distribution):
Note:
This class is an intermediary between the `Distribution` class and distributions which belong
to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL
- divergence methods. We use this class to compute the entropy and KL divergence using the AD frame-
- work and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and
+ divergence methods. We use this class to compute the entropy and KL divergence using the AD
+ framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and
Cross-entropies of Exponential Families).
"""
|
Update options.py
Remove Ifremer path to gdac ftp | @@ -20,7 +20,7 @@ USER_LEVEL = 'mode'
# Define the list of available options and default values:
OPTIONS = {DATA_SOURCE: 'erddap',
- LOCAL_FTP: '/home/ref-argo/gdac', # default Argo data set on Ifremer/Datarmor network
+ LOCAL_FTP: '.',
DATASET: 'phy',
DATA_CACHE: os.path.expanduser(os.path.sep.join(["~", ".cache", "argopy"])),
USER_LEVEL: 'standard'}
|
Fix for Python 3.6
Take asynccontextmanager from `prompt_toolkit.eventloop.async_context_manager`. | """
Implementation for async generators.
"""
-from contextlib import asynccontextmanager
from queue import Empty, Full, Queue
from threading import Event
from typing import (
@@ -14,6 +13,7 @@ from typing import (
Union,
)
+from .async_context_manager import asynccontextmanager
from .utils import get_event_loop, run_in_executor_with_context
__all__ = [
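For context, `contextlib.asynccontextmanager` only appeared in Python 3.7, which is why importing it breaks on 3.6. A generic compatibility pattern is sketched below; the patch instead imports prompt_toolkit's own helper unconditionally.

```python
try:
    from contextlib import asynccontextmanager          # Python 3.7+
except ImportError:                                      # Python 3.6 fallback
    from prompt_toolkit.eventloop.async_context_manager import asynccontextmanager
```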
|
Travis CI: use e3-testsuite's --failure-exit-code option
TN: | @@ -15,14 +15,12 @@ which gprbuild
gcc -v
gprbuild -v
-# Duplicate output of testsuite in file TESTSUITE_OUT.
+# Exit with an error if there is a test failure/error.
#
# TODO: adjust the Travis CI setup to provide a viable OCaml environment and
# enable the corresponding testcases.
./scripts/interactive_testsuite \
--no-auto-path \
--disable-ocaml \
- | tee TESTSUITE_OUT
+ --failure-exit-code=1
-# Exit with an error if there is a FAIL or ERROR line in TESTSUITE_OUT
-! grep "^INFO \+\(FAIL\|ERROR\) " TESTSUITE_OUT > /dev/null
|
Improve error message
One argument is not sufficient and three arguments would be
inconsistent, so the error message asks for exactly two arguments. | @@ -62,7 +62,7 @@ class Material:
given_args.append(arg)
if len(given_args) != 2:
raise ValueError(
- "At least 2 arguments from E, G_s" "and Poisson should be provided "
+ "Exactly 2 arguments from E, G_s and Poisson should be provided"
)
self.name = name
self.rho = rho
|
Removing Anaconda install suggestion
Due to various issues that seem to be limited to the Anaconda package manager, use pip (and virtual environments!) instead.
pip install pyspedas --upgrade
```
-### Anaconda
-
-```bash
-conda install -c spedas pyspedas
-```
-
-You can upgrade to the latest version using:
-
-```bash
-conda update -c spedas pyspedas
-```
-
## Usage
To get started, import pyspedas and pytplot:
|
Fixes bug in usage of custom LM opt for gauge optimization.
Because the minSol solution object returned by scipy is *not* the
same as what is returned by our custom LM routine! This commit
adds a little plumbing to call the custom LM optimizer correctly. | @@ -483,26 +483,29 @@ def gaugeopt_custom(gateset, objective_fn, gauge_group=None,
minSol = _opt.minimize(call_objective_fn, x0,
method=method, maxiter=maxiter, maxfev=maxfev, tol=tol,
callback = print_obj_func if bToStdout else None)
+ solnX = minSol.x
+ solnF = minSol.fun
elif algorithm == 'ls':
jacobian = calculate_ls_jacobian(gaugeGroupEl, gateset, call_objective_fn, itemWeights, checkJac)
#minSol = _opt.least_squares(call_objective_fn, x0, jac=jacobian,
# max_nfev=maxfev, ftol=tol)
- minSol,converged,msg = _opt.custom_leastsq(
+ solnX,converged,msg = _opt.custom_leastsq(
call_objective_fn, jacobian, x0, f_norm2_tol=tol,
jac_norm_tol=tol, rel_ftol=tol, rel_xtol=tol,
max_iter=maxiter, comm=None, #no MPI for gauge opt yet...
verbosity=printer.verbosity-2)
printer.log("Least squares message = %s" % msg,2)
assert(converged)
+ solnF = call_objective_fn(solnX) if returnAll else None
else:
raise ValueError('Unknown algorithm inside of gauge opt: {}'.format(algorithm))
- gaugeGroupEl.from_vector(minSol.x)
+ gaugeGroupEl.from_vector(solnX)
newGateset = gateset.copy()
newGateset.transform(gaugeGroupEl)
if returnAll:
- return minSol.fun, gaugeMat, newGateset
+ return solnF, gaugeMat, newGateset
else:
return newGateset
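For context, the mismatch the message points out: `scipy.optimize.minimize` returns an `OptimizeResult` with `.x` and `.fun`, whereas a routine that returns a plain tuple has to be unpacked and its objective re-evaluated by hand. The toy solver below is hypothetical.

```python
from scipy.optimize import minimize

objective = lambda x: (x[0] - 1.0) ** 2

res = minimize(objective, [0.0])
print(res.x, res.fun)                 # attributes on an OptimizeResult

def toy_custom_lm(f, x0):             # hypothetical tuple-returning solver
    return [1.0], True, "converged"

soln_x, converged, msg = toy_custom_lm(objective, [0.0])
soln_f = objective(soln_x)            # objective must be recomputed explicitly
```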
|
lint pyflakes: Pull out our error-suppression patterns as data.
This makes the list much cleaner to understand and edit. | @@ -7,6 +7,27 @@ from .printer import print_err, colors
from typing import Any, Dict, List
+suppress_patterns = [
+ (b'', b'imported but unused'),
+ (b'', b'redefinition of unused'),
+
+ # Our ipython startup pythonrc file intentionally imports *
+ (b"scripts/lib/pythonrc.py",
+ b" import *' used; unable to detect undefined names"),
+
+ # Special dev_settings.py import
+ (b'', b"from .prod_settings_template import *"),
+
+ (b"settings.py", b"settings import *' used; unable to detect undefined names"),
+ (b"settings.py", b"may be undefined, or defined from star imports"),
+]
+
+def suppress_line(line: str) -> bool:
+ for file_pattern, line_pattern in suppress_patterns:
+ if file_pattern in line and line_pattern in line:
+ return True
+ return False
+
def check_pyflakes(options, by_lang):
# type: (Any, Dict[str, List[str]]) -> bool
if len(by_lang['py']) == 0:
@@ -16,21 +37,9 @@ def check_pyflakes(options, by_lang):
pyflakes = subprocess.Popen(['pyflakes'] + by_lang['py'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
-
assert pyflakes.stdout is not None # Implied by use of subprocess.PIPE
for ln in pyflakes.stdout.readlines() + pyflakes.stderr.readlines():
- if options.full or not (
- b'imported but unused' in ln or
- b'redefinition of unused' in ln or
- # Our ipython startup pythonrc file intentionally imports *
- (b"scripts/lib/pythonrc.py" in ln and
- b" import *' used; unable to detect undefined names" in ln) or
- # Special dev_settings.py import
- b"from .prod_settings_template import *" in ln or
- (b"settings.py" in ln and
- (b"settings import *' used; unable to detect undefined names" in ln or
- b"may be undefined, or defined from star imports" in ln))):
-
+ if options.full or not suppress_line(ln):
print_err('pyflakes', color, ln)
failed = True
return failed
|
llvm: Remove incorrect comment
LLVM IR doesn't have void* type. | @@ -732,7 +732,6 @@ def _convert_llvm_ir_to_ctype(t:ir.Type):
elif type_t is ir.FloatType:
return ctypes.c_float
elif type_t is ir.PointerType:
- # FIXME: Can this handle void*? Do we care?
pointee = _convert_llvm_ir_to_ctype(t.pointee)
ret_t = ctypes.POINTER(pointee)
elif type_t is ir.ArrayType:
|
Update index.rst
Changed Elasticsearch typo '6.0' to 7.0. | @@ -30,7 +30,7 @@ Compatibility
The library is compatible with all Elasticsearch versions since ``2.x`` but you
**have to use a matching major version**:
-For **Elasticsearch 6.0** and later, use the major version 7 (``7.x.y``) of the
+For **Elasticsearch 7.0** and later, use the major version 7 (``7.x.y``) of the
library.
For **Elasticsearch 6.0** and later, use the major version 6 (``6.x.y``) of the
|
Add temp CoolingLoad test in test_job_endpoint
May be temporary just to debug CoolingLoad inputs and outputs | @@ -159,3 +159,35 @@ class TestJobEndpoint(ResourceTestCaseMixin, TestCase):
self.assertAlmostEqual(results["ElectricLoad"]["offgrid_load_met_fraction"], 0.99999, places=-2)
self.assertAlmostEqual(sum(results["ElectricLoad"]["offgrid_load_met_series_kw"]), 8760.0, places=-1)
self.assertAlmostEqual(results["Financial"]["lifecycle_offgrid_other_annual_costs_after_tax"], 0.0, places=-2)
+
+ def test_cooling_possible_sets_and_results(self):
+ """
+ Purpose of this test is to test the validity of Cooling Load possible_sets, in particular []/null and blend/hybrid
+ """
+ scenario = {
+ "Site": {"longitude": -118.1164613, "latitude": 34.5794343},
+ "ElectricTariff": {"urdb_label": "5ed6c1a15457a3367add15ae"},
+ "PV": {"max_kw": 0.0},
+ "ElectricStorage":{"max_kw": 0.0, "max_kwh": 0.0},
+ "ElectricLoad": {
+ "blended_doe_reference_names": ["Hospital", "LargeOffice"],
+ "blended_doe_reference_percents": [0.75, 0.25],
+ "annual_kwh": 8760.0
+ },
+ "CoolingLoad": {
+ "doe_reference_name": "Hospital",
+ "annual_tonhour": 5000.0
+ }
+ }
+
+ resp = self.api_client.post('/dev/job/', format='json', data=scenario)
+ self.assertHttpCreated(resp)
+ r = json.loads(resp.content)
+ run_uuid = r.get('run_uuid')
+
+ resp = self.api_client.get(f'/dev/job/{run_uuid}/results')
+ r = json.loads(resp.content)
+ inputs = r["inputs"]
+ results = r["outputs"]
+
+ json.dump(r, open("response.json", "w"))
\ No newline at end of file
|
Remove mention of --file-store from tracking docs
Fixing based on feedback from mlflow slack | @@ -358,11 +358,7 @@ backend as ``./path_to_store`` or ``file:/path_to_store`` and a *database-backed
`SQLAlchemy database URI <https://docs.sqlalchemy.org/en/latest/core/engines
.html#database-urls>`_. The database URI typically takes the format ``<dialect>+<driver>://<username>:<password>@<host>:<port>/<database>``.
MLflow supports the database dialects ``mysql``, ``mssql``, ``sqlite``, and ``postgresql``.
-Drivers are optional. If you do not specify a driver, SQLAlchemy uses a dialect's default driver.
-For backwards compatibility, ``--file-store`` is an alias for ``--backend-store-uri``.
-For example, ``--backend-store-uri sqlite:///mlflow.db`` would create a local SQLite database.
-
-For backwards compatibility, ``--file-store`` is an alias for ``--backend-store-uri``.
+Drivers are optional. If you do not specify a driver, SQLAlchemy uses a dialect's default driver. For example, ``--backend-store-uri sqlite:///mlflow.db`` would use a local SQLite database.
.. important::
|
add --to-mp4 argument to resize_videos.py
* Update resize_videos.py
To support resizeing .webm videos
* correct error
* change back, but add one line
* add --to-mp4 argument
* Add comment | @@ -17,6 +17,11 @@ def resize_videos(vid_item):
bool: Whether generate video cache successfully.
"""
full_path, vid_path = vid_item
+ # Change the output video extension to .mp4 if '--to-mp4' flag is set
+ if args.to_mp4:
+ vid_path = vid_path.split('.')
+ assert len(vid_path) == 2, f"Video path '{vid_path}' contain more than one dot"
+ vid_path = vid_path[0] + '.mp4'
out_full_path = osp.join(args.out_dir, vid_path)
dir_name = osp.dirname(vid_path)
out_dir = osp.join(args.out_dir, dir_name)
@@ -71,6 +76,10 @@ def parse_args():
default='mp4',
choices=['avi', 'mp4', 'webm', 'mkv'],
help='video file extensions')
+ parser.add_argument(
+ '--to-mp4',
+ action='store_true',
+ help='whether to output videos in mp4 format')
parser.add_argument(
'--scale',
type=int,
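For context, an alternative way to force the `.mp4` extension that tolerates extra dots in the path; the patch instead splits on `.` and asserts there is exactly one dot. The path below is illustrative.

```python
import os.path

vid_path = "class_01/video.webm"
vid_path = os.path.splitext(vid_path)[0] + ".mp4"
print(vid_path)  # class_01/video.mp4
```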
|
Code block: return code blocks with valid ticks but no lang
Such code blocks will be useful down the road for sending information
on including a language specifier if the content successfully parses
as valid Python. | @@ -227,26 +227,23 @@ class CodeBlockCog(Cog, name="Code Block"):
log.trace("The code consists only of expressions, not sending instructions")
@staticmethod
- def find_invalid_code_blocks(message: str) -> Sequence[CodeBlock]:
+ def find_code_blocks(message: str) -> Sequence[CodeBlock]:
"""
- Find and return all invalid Markdown code blocks in the `message`.
+ Find and return all Markdown code blocks in the `message`.
- An invalid code block is considered to be one which uses invalid back ticks.
-
- If the `message` contains at least one valid code block, return an empty sequence. This is
- based on the assumption that if the user managed to get one code block right, they already
- know how to fix the rest themselves.
+ If the `message` contains at least one code block with valid ticks and a specified language,
+ return an empty sequence. This is based on the assumption that if the user managed to get
+ one code block right, they already know how to fix the rest themselves.
"""
code_blocks = []
for _, tick, language, content in RE_CODE_BLOCK.finditer(message):
- if tick == BACKTICK:
+ language = language.strip()
+ if tick == BACKTICK and language:
return ()
else:
- code_block = CodeBlock(content, language.strip(), tick)
+ code_block = CodeBlock(content, language, tick)
code_blocks.append(code_block)
- return code_blocks
-
def fix_indentation(self, msg: str) -> str:
"""Attempts to fix badly indented code."""
def unindent(code: str, skip_spaces: int = 0) -> str:
|
Remove redundant comment
We no longer have a noop client | @@ -365,8 +365,6 @@ class Config(object):
AWS_REGION = 'eu-west-1'
- # CBC Proxy
- # if the access keys are empty then noop client is used
CBC_PROXY_AWS_ACCESS_KEY_ID = os.environ.get('CBC_PROXY_AWS_ACCESS_KEY_ID', '')
CBC_PROXY_AWS_SECRET_ACCESS_KEY = os.environ.get('CBC_PROXY_AWS_SECRET_ACCESS_KEY', '')
|
Update tests/test_server.py
fix test failure
assert old_fingerprint != response.json["fingerprint"]
-
+ import rasa.core.jobs
+ rasa.core.jobs.__scheduler = None
def test_load_model_invalid_request_body(rasa_app):
_, response = rasa_app.put("/model")
|
Updating regional account ids for af-south-1 and eu-south-1
Adding Debugger Repo Account IDs for `af-south-1` (Cape Town) and `eu-south-1` (Milan) | @@ -16,6 +16,7 @@ from sagemaker import image_uris
from tests.unit.sagemaker.image_uris import expected_uris, regions
ACCOUNTS = {
+ "af-south-1": "314341159256",
"ap-east-1": "199566480951",
"ap-northeast-1": "430734990657",
"ap-northeast-2": "578805364391",
@@ -27,6 +28,7 @@ ACCOUNTS = {
"cn-northwest-1": "658757709296",
"eu-central-1": "482524230118",
"eu-north-1": "314864569078",
+ "eu-south-1": "563282790590",
"eu-west-1": "929884845733",
"eu-west-2": "250201462417",
"eu-west-3": "447278800020",
|
[App Service] az staticwebapp hostname show: Fix dns-txt-token validation command to show command
az staticwebapp hostname get does not exist. Instead users should use hostname show | @@ -134,7 +134,7 @@ def set_staticsite_domain(cmd, name, hostname, resource_group_name=None, no_wait
name, hostname, domain_envelope)
if validation_method.lower() == "dns-txt-token":
- validation_cmd = ("az staticwebapp hostname get -n {} -g {} "
+ validation_cmd = ("az staticwebapp hostname show -n {} -g {} "
"--hostname {} --query \"validationToken\"".format(name,
resource_group_name,
hostname))
|
Update training.rst
Added Sales: General Questions channel to channel list | @@ -81,6 +81,7 @@ Whenever possible, we share key updates and have discussions in Mattermost. Some
- `Marketing Website <https://community.mattermost.com/private-core/channels/marketing-website-priv>`_ - Website bugs, release notes, and web discussions
- `Product Management <https://community.mattermost.com/core/channels/product-management>`_ - Discussion with and questions for Mattermost product managers
- `Roadmap <https://community.mattermost.com/private-core/channels/roadmap>`_ - Questions about and discussion of the product roadmap, or to view public roadmap in the header
+- `Sales: General Questions <https://community.mattermost.com/private-core/channels/sales-general-questions>`_ - Discussion of all types of sales inquiries/questions
- `Spec Reviews <https://community.mattermost.com/core/channels/spec-reviews>`_ - In-progress plans for new features
- `Sustained Engineering <https://community.mattermost.com/core/channels/sustained-engineering>`_ - Discussion with Mattermost's `Sustained Engineering Team (SET) <https://developers.mattermost.com/internal/sustained-engineering/>`_
- `UX Design <https://community.mattermost.com/core/channels/ux-design>`_ - Questions and discussion about product design
|
catch deprecated basis specification
Some devices would return a basis of the form 'SU2+CNOT'.
If there are no commas in the basis string it will be replaced
with u1,u2,u3,cx,id and a warning will be logged.
if not basis_gates:
if 'basis_gates' in backend_conf:
basis_gates = backend_conf['basis_gates']
+ elif len(basis_gates.split(',')) < 2:
+ # catches deprecated basis specification like 'SU2+CNOT'
+ logger.warn('encountered deprecated basis specification: '
+ '"{}" substituting u1,u2,u3,cx,id'.format(basis_gates))
+ basis_gates = 'u1,u2,u3,cx,id'
if not coupling_map:
coupling_map = backend_conf['coupling_map']
if not name_of_circuits:
|
Fix variable name for stackhpc.os-networks upper constraints
Upper constraints should be defined using
os_networks_upper_constraints_file rather than
os_openstacksdk_upper_constraints_file because of [1].
1.
TrivialFix | - role: stackhpc.os-networks
os_openstacksdk_install_epel: "{{ dnf_install_epel }}"
os_openstacksdk_state: latest
- os_openstacksdk_upper_constraints_file: "{{ pip_upper_constraints_file }}"
+ os_networks_upper_constraints_file: "{{ pip_upper_constraints_file }}"
os_networks_venv: "{{ venv }}"
os_networks_auth_type: "{{ openstack_auth_type }}"
os_networks_auth: "{{ openstack_auth }}"
|
Fix documentation docker Readme
Updated Docker Readme with info about `Dockerfile.Alpine` | This directory contains various Docker related utilities.
-* `Dockerfile.master` -- a Dockerfile to build neo-python's master branch
-* `Dockerfile.dev` -- a Dockerfile to build neo-python's development branch
+* `Dockerfile` -- a Dockerfile to build neo-python's (Ubuntu Linux distribution)
+* `Dockerfile.Alpine` -- a Dockerfile to build neo-python's (Alpine Linux distribution)
* `docker-compose-neoscan.yml` -- a Docker compose file to start a private network and a neoscan container
---
|