message (string, 13-484 chars) | diff (string, 38-4.63k chars)
---|---|
Update pythonpackage.yml
Still using the nosetests setup | @@ -21,6 +21,7 @@ jobs:
run: |
python -m pip install --upgrade pip
pip install Cython
+ pip install numpy
pip install -r requirements.txt
- name: Test with nose
run: |
|
OSLObjectUI : Remove plug deletion code
Instead we can hook into the metadata system provided by NodeUI. | @@ -214,6 +214,7 @@ Gaffer.Metadata.registerNode(
],
"primitiveVariables.*" : [
+ "deletable", True,
# Although the parameters plug is positioned
# as we want above, we must also register
# appropriate values for each individual parameter,
@@ -284,37 +285,3 @@ Gaffer.Metadata.registerNode(
}
)
-
-#########################################################################
-# primitiveVariable plug menu
-##########################################################################
-
-def __deletePlug( plug ) :
-
- with Gaffer.UndoScope( plug.ancestor( Gaffer.ScriptNode ) ) :
- plug.parent().removeChild( plug )
-
-def __plugPopupMenu( menuDefinition, plugValueWidget ) :
-
- plug = plugValueWidget.getPlug()
- if not isinstance( plug.node(), GafferOSL.OSLObject ):
- return
-
- relativeName = plug.relativeName( plug.node() ).split( "." )
- if relativeName[0] != "primitiveVariables" or len( relativeName ) < 2:
- return
-
- primVarPlug = plug.node()["primitiveVariables"][relativeName[1]]
-
- menuDefinition.append( "/DeleteDivider", { "divider" : True } )
- menuDefinition.append(
- "/Delete",
- {
- "command" : functools.partial( __deletePlug, primVarPlug ),
- "active" : not plugValueWidget.getReadOnly() and not Gaffer.MetadataAlgo.readOnly( primVarPlug ),
- }
- )
-
-GafferUI.PlugValueWidget.popupMenuSignal().connect( __plugPopupMenu, scoped = False )
-
-
|
Move metrics to CPU
Hopefully eliminates continuously growing memory usage during training | @@ -61,9 +61,10 @@ class KrakenTrainer(pl.Trainer):
def __init__(self,
enable_progress_bar: bool = True,
enable_summary: bool = True,
- min_epochs=5,
- max_epochs=100,
- pb_ignored_metrics=('loss', 'val_metric'),
+ min_epochs: int = 5,
+ max_epochs: int = 100,
+ pb_ignored_metrics: Sequence[str] = ('loss', 'val_metric'),
+ move_metrics_to_cpu: bool = True,
*args,
**kwargs):
kwargs['logger'] = False
@@ -72,6 +73,7 @@ class KrakenTrainer(pl.Trainer):
kwargs['min_epochs'] = min_epochs
kwargs['max_epochs'] = max_epochs
kwargs['callbacks'] = ([] if 'callbacks' not in kwargs else kwargs['callbacks'])
+ kwargs['move_metrics_to_cpu'] = move_metrics_to_cpu
if not isinstance(kwargs['callbacks'], list):
kwargs['callbacks'] = [kwargs['callbacks']]
|
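The flag the patch forwards is consumed by PyTorch Lightning itself. A minimal usage sketch, assuming a Lightning release that still accepts `move_metrics_to_cpu` as a `Trainer` argument:

```python
import pytorch_lightning as pl

# Assumption: a PyTorch Lightning release that still accepts the
# `move_metrics_to_cpu` Trainer argument.
trainer = pl.Trainer(
    min_epochs=5,
    max_epochs=100,
    logger=False,
    move_metrics_to_cpu=True,  # keep aggregated metrics on the CPU so GPU memory stops creeping up
)
```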
Support for both api_key_required AND authorizer on routes
Changed to support routes that need both an api_key and an authorizer. | @@ -98,7 +98,7 @@ class SwaggerGenerator(object):
# type: (Any, Dict[str, Any], RouteEntry) -> None
if view.authorizer is not None:
self._generate_security_from_auth_obj(api_config, view.authorizer)
- return
+ #do not return, need to handle both authorizer AND api_key security
for auth in security:
name = list(auth.keys())[0]
if name == 'api_key':
|
Improve visual settings of output panels
* Improve visual settings of output panels
* Add bullet
* fix navigation to result in Windows
* Add a single whitespace to diagnostics lines
ensure at least one space of indentation.
ensure result_line_regexp and syntax highlighting keep working beyond 99.999.999 lines | @@ -7,7 +7,7 @@ scope: output.lsp.diagnostics
variables:
start_of_diag_body: ^\s+(?=\d)
- filename_and_colon: ^(.*)(:)$
+ filename_and_colon: ^\s*(\S)\s+(.*)(:)$
contexts:
main:
@@ -18,8 +18,9 @@ contexts:
- match: '{{filename_and_colon}}'
captures:
0: meta.diagnostic.preamble.lsp
- 1: string.unquoted.lsp
- 2: punctuation.separator.lsp
+ 1: punctuation.section.diagnostics.preample.lsp
+ 2: string.unquoted.lsp
+ 3: punctuation.separator.lsp
diagnostic-body:
- match: '{{start_of_diag_body}}'
|
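The new `filename_and_colon` pattern expects a single bullet character before the file name. The pattern is simple enough to sanity-check with Python's `re`; the sample diagnostics line below is made up:

```python
import re

# Same pattern as in the .sublime-syntax variables block.
filename_and_colon = re.compile(r"^\s*(\S)\s+(.*)(:)$")

match = filename_and_colon.match("  - src/example.py:")
print(match.groups())  # ('-', 'src/example.py', ':') -> bullet, filename, colon
```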
Remove old numpy 1.16 work-arounds that hinder some masked transformations.
With this change, erfa.ufunc.s2p becomes much easier to handle for
masked arrays/quantities. | @@ -1562,11 +1562,8 @@ class UnitSphericalRepresentation(BaseRepresentation):
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
- # NUMPY_LT_1_16 cannot create a vector automatically
- p = u.Quantity(np.empty(self.shape + (3,)), u.dimensionless_unscaled,
- copy=False)
# erfa s2c: Convert [unit]spherical coordinates to Cartesian.
- p = erfa_ufunc.s2c(self.lon, self.lat, p)
+ p = erfa_ufunc.s2c(self.lon, self.lat)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
@@ -1890,10 +1887,8 @@ class SphericalRepresentation(BaseRepresentation):
else:
d = self.distance
- # NUMPY_LT_1_16 cannot create a vector automatically
- p = u.Quantity(np.empty(self.shape + (3,)), d.unit, copy=False)
# erfa s2p: Convert spherical polar coordinates to p-vector.
- p = erfa_ufunc.s2p(self.lon, self.lat, d, p)
+ p = erfa_ufunc.s2p(self.lon, self.lat, d)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
|
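With the NumPy 1.16 work-around gone, the ERFA ufuncs allocate their own output arrays. A small sketch, assuming the pyerfa package (whose ufuncs astropy imports as `erfa_ufunc`):

```python
import numpy as np
import erfa  # provided by the pyerfa package

lon = np.array([0.0, np.pi / 2])  # radians
lat = np.zeros(2)

p = erfa.s2c(lon, lat)  # output allocated by the ufunc, trailing axis of length 3
print(p.shape)          # (2, 3)
```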
ensure --cluster-path is empty when trying to deploy a new cluster
This is to guard against accidentally overwriting an existing cluster's
information if it could not be connected to for whatever reason. | @@ -12,7 +12,7 @@ from oc.openshift_ops import OCP
from ocs import constants, ocp, defaults
from ocs.exceptions import CommandFailed, CephHealthException
from ocs.utils import create_oc_resource, apply_oc_resource
-from utility import templating
+from utility import templating, system
from utility.aws import AWS
from utility.retry import retry
from utility.utils import destroy_cluster, run_cmd, get_openshift_installer, get_openshift_client, is_cluster_running
@@ -58,6 +58,10 @@ def cluster(request):
msg = "The given cluster can not be connected to: {}. ".format(cluster_path)
msg += "Provide a valid --cluster-path or use --deploy to deploy a new cluster"
pytest.fail(msg)
+ elif not system.is_path_empty(cluster_path) and deploy:
+ msg = "The given cluster path is not empty: {}. ".format(cluster_path)
+ msg += "Provide an empty --cluster-path and --deploy to deploy a new cluster"
+ pytest.fail(msg)
else:
log.info("A testing cluster will be deployed and cluster information stored at: %s", cluster_path)
|
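The new guard relies on a `system.is_path_empty` utility that is not shown in the diff. A hypothetical sketch of what such a helper might look like; the real implementation may differ:

```python
import os

def is_path_empty(path):
    """Return True if the directory at `path` is missing or has no entries (hypothetical helper)."""
    return not os.path.isdir(path) or not os.listdir(path)
```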
Remove link to outdated client docs in tutorial
Remove link to incomplete and severely outdated
client_setup_and_repository_example.md in client section of
TUTORIAL.md.
Instead we should link (or move the entire client tutorial part) to
tuf/client/README.md, which is more comprehensive and less outdated
than above document (see | @@ -672,10 +672,6 @@ Adding a verification key that has already been used. [repeated 32x]
## How to Perform an Update ##
-Documentation for setting up a TUF client and performing an update is
-available [here](../tuf/client_setup_and_repository_example.md). The documentation
-there is provided here for convenience.
-
The following [repository tool](../tuf/repository_tool.py) function creates a directory
structure that a client downloading new software using TUF (via
[tuf/client/updater.py](../tuf/client/updater.py)) expects. The `root.json` metadata file must exist, and
|
Fixed a small bug that could cause issues on a multi-user machine where
more than one user is creating and running test installers and another
wants to run a production installer. Only one user per machine can run
the production installer at any one time. | @@ -122,7 +122,7 @@ else
# which serve as documentation.
sed -i -e '/^#/!d' install.cfg
# Set the insecure registry configuration based on the installer hostname
- echo -e "${lBlue}Set up the inescure registry config for hostname ${lCyan}vinstall${uId}${NC}"
+ echo -e "${lBlue}Set up the inescure registry config for hostname ${lCyan}vinstall${NC}"
sed -i -e '/docker_push_registry/s/.*/docker_push_registry: "vinstall:5000"/' ansible/group_vars/all
echo '{' > ansible/roles/voltha/templates/daemon.json
echo '"insecure-registries" : ["vinstall:5000"]' >> ansible/roles/voltha/templates/daemon.json
@@ -139,12 +139,12 @@ while [ ! -z "$vStat" ];
do
echo "Waiting for $iVmName to shut down"
sleep 2
- vStat=`virsh list | grep $iVmName`
+ vStat=`virsh list | grep "$iVmName "`
ctr=`expr $ctr + 1`
if [ $ctr -eq $shutdownTimeout ]; then
echo -e "${red}Tired of waiting, forcing the VM off${NC}"
virsh destroy $iVmName
- vStat=`virsh list | grep $iVmName`
+ vStat=`virsh list | grep "$iVmName "`
fi
done
@@ -368,12 +368,12 @@ else
do
echo "Waiting for $iVmName to shut down"
sleep 2
- vStat=`virsh list | grep $iVmName`
+ vStat=`virsh list | grep "$iVmName "`
ctr=`expr $ctr + 1`
if [ $ctr -eq $shutdownTimeout ]; then
echo -e "${red}Tired of waiting, forcing the VM off${NC}"
virsh destroy $iVmName
- vStat=`virsh list | grep $iVmName`
+ vStat=`virsh list | grep "$iVmName "`
fi
done
# Copy the install bootstrap script to the installer directory
|
Fix lndmanage
fix bonus.lndmanage.sh | @@ -16,17 +16,22 @@ fi
# install
if [ "$1" = "1" ] || [ "$1" = "on" ]; then
+
+ if [ -d "/home/admin/lndmanage" ]; then
+ echo "LNDMANAGE already installed"
+ exit 1
+ fi
+
echo "*** INSTALL LNDMANAGE ***"
- mkdir lndmanage
- cd lndmanage
+ mkdir /home/admin/lndmanage
+ cd /home/admin/lndmanage
# activate virtual environment
- sudo apt install -y python3-venv
- python3 -m venv venv
- source venv/bin/activate
+ python -m venv venv
+ source /home/admin/lndmanage/venv/bin/activate
# get dependencies
sudo apt install -y python3-dev libatlas-base-dev
- pip3 install wheel
- pip3 install lndmanage==0.8.0.1
+ python -m pip install wheel
+ python -m pip install lndmanage==0.8.0.1
# setting value in raspi blitz config
sudo sed -i "s/^lndmanage=.*/lndmanage=on/g" /mnt/hdd/raspiblitz.conf
|
[MINOR] Fix typos in frame.py
Fixed typo in `frame.py` | @@ -1285,7 +1285,7 @@ class DataFrame(_Frame, Generic[T]):
... # 0,1,2,3
... # 1,4,5,6
- We can omit the the index by passing the keyword `index` and setting
+ We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
@@ -1801,7 +1801,7 @@ defaultdict(<class 'list'>, {'col..., 'col...})]
# `to_markdown` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
- "`to_markdown()` only supported in Kaoals with pandas >= 1.0.0"
+ "`to_markdown()` only supported in Koalas with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
@@ -2796,7 +2796,7 @@ defaultdict(<class 'list'>, {'col..., 'col...})]
is_return_series = getattr(return_sig, "__origin__", None) == ks.Series
if not is_return_dataframe and not is_return_series:
raise TypeError(
- "The given function should specify a frame or seires as its type "
+ "The given function should specify a frame or series as its type "
"hints; however, the return type was %s." % return_sig
)
if is_return_series:
|
workload/rt-app: Better JSON processing error reporting
The rt-app workload parses the rt-app JSON file in order to override
some options (specifically duration). Previously, if the JSON was
syntactically incorrect, an uninformative ValueError was raised. Now
we raise a ConfigError with appropriate message prompting the user to
fix the file. | @@ -21,7 +21,7 @@ from collections import OrderedDict
from subprocess import CalledProcessError
from wa import Workload, Parameter, Executable, File
-from wa.framework.exception import WorkloadError, ResourceError
+from wa.framework.exception import WorkloadError, ResourceError, ConfigError
from wa.utils.misc import check_output
from wa.utils.exec_control import once
@@ -231,7 +231,13 @@ class RtApp(Workload):
config_file = self._generate_workgen_config(user_config_file,
context.output_directory)
with open(config_file) as fh:
+ try:
config_data = json.load(fh, object_pairs_hook=OrderedDict)
+ except ValueError:
+ # We were not able to parse the JSON file. Raise an informative error.
+ msg = "Failed to parse {}. Please make sure it is valid JSON."
+ raise ConfigError(msg.format(user_config_file))
+
self._update_rt_app_config(config_data)
self.duration = config_data['global'].get('duration', 0)
self.task_count = len(config_data.get('tasks', []))
|
framework/instrumentation: handle non-job errors in ManagedCallback
If an error occurs in a ManagedCallback that is invoked outside of a
job, re-raise rather than attempting to update the status of the
non-existent job. | @@ -275,7 +275,10 @@ class ManagedCallback(object):
if isinstance(e, WorkloadError):
context.set_status('FAILED')
else:
+ if context.current_job:
context.set_status('PARTIAL')
+ else:
+ raise
# Need this to keep track of callbacks, because the dispatcher only keeps
|
doc/media: make module level match
ImageFile and SoundFile are located at media.ev3dev, not media. | :mod:`media <pybricks.media>` -- Sounds and Images
==================================================
-.. automodule:: pybricks.media
- :no-members:
+.. module:: pybricks.media
-You can use your own sound and image files by placing them in your project
-folder. You can also use any of the images and sounds built into ev3dev, or
-draw your own.
+This module describes media such as sound and images that you can use in your
+projects. Media are divided into submodules that indicate on which platform
+they are available.
-ev3dev
-------
+:mod:`media.ev3dev <pybricks.media.ev3dev>` -- Sounds and Images
+---------------------------------------------------------------------
-.. automodule:: pybricks.media.ev3dev
- :no-members:
+.. module:: pybricks.media.ev3dev
+
+EV3 MicroPython is built on top of ev3dev, which comes with a variety of image
+and sound files. You can access them using the classes below.
+
+You can also use your own sound and image files by placing them in your project
+folder.
Image Files
^^^^^^^^^^^
|
Fixing sudo command
Fixing small spelling mistakes | @@ -362,7 +362,7 @@ There are a few things that still don't work, and you can see what works and wha
1. Running raspbian lite(headless) or desktop (both 64bit) we should first start off with an update/upgrade.
```bash
- sudo apt update && sydo apt upgrade
+ sudo apt update && sudo apt upgrade
```
Once completed reboot and lets reopen.
|
Fix 2
Forgot to add ':' | @@ -1187,7 +1187,7 @@ class PokemonGoBot(object):
if response_dict:
self._player = response_dict['responses']['GET_PLAYER']['player_data']
- if 'warn' in response_dict['responses']['GET_PLAYER']
+ if 'warn' in response_dict['responses']['GET_PLAYER']:
warn = response_dict['responses']['GET_PLAYER']['warn']
player = self._player
else:
|
Allow nullable length unit in cable API
Cable models define it as None by default, but the API rejects a
request containing a null length_unit. Allows it in the API
serializer. | @@ -507,7 +507,7 @@ class CableSerializer(ValidatedModelSerializer):
termination_a = serializers.SerializerMethodField(read_only=True)
termination_b = serializers.SerializerMethodField(read_only=True)
status = ChoiceField(choices=CONNECTION_STATUS_CHOICES, required=False)
- length_unit = ChoiceField(choices=CABLE_LENGTH_UNIT_CHOICES, required=False)
+ length_unit = ChoiceField(choices=CABLE_LENGTH_UNIT_CHOICES, required=False, allow_null=True)
class Meta:
model = Cable
|
Remove scipy usage from laikad
Remove scipy | @@ -4,7 +4,7 @@ from typing import List
import numpy as np
from collections import defaultdict
-from scipy import linalg
+from numpy.linalg import linalg
from cereal import log, messaging
from laika import AstroDog
|
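Illustrative only: for dense routines of the kind assumed here, `numpy.linalg` provides drop-in equivalents of the removed `scipy.linalg` calls, which is what makes dropping the dependency straightforward:

```python
import numpy as np

A = np.array([[3.0, 1.0],
              [1.0, 2.0]])
b = np.array([9.0, 8.0])

x = np.linalg.solve(A, b)     # previously something like scipy.linalg.solve(A, b)
print(np.allclose(A @ x, b))  # True
```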
Remove comparison with default_in='body'
In general they differ at res[0]['schema']['required'] | @@ -110,7 +110,6 @@ class TestMarshmallowFieldToSwagger:
assert len(res[0]['schema']['required']) == 2
assert 'field1' in res[0]['schema']['required']
assert 'field2' in res[0]['schema']['required']
- assert res == swagger.fields2parameters(field_dict, default_in='body')
def test_fields2parameters_does_not_modify_metadata(self):
field_dict = {'field': fields.Str(location='querystring')}
|
Catch OSError besides IOError in _GetType() to handle symlink loop exception
fixes | @@ -2410,7 +2410,7 @@ class FakeFilesystem(object):
obj = self.ResolveObject(path, follow_symlinks)
if obj:
return stat.S_IFMT(obj.st_mode) == st_flag
- except IOError:
+ except (IOError, OSError):
return False
return False
|
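Context for the widened except clause: on Python 3, `IOError` is an alias of `OSError`, but on Python 2 the two were distinct classes, and a symlink loop surfaces as an `OSError` carrying `errno.ELOOP`. A small illustration; the path is made up:

```python
import errno
import os

try:
    os.stat("/nonexistent/or/looping/path")
except (IOError, OSError) as exc:
    # ELOOP for a symlink loop, ENOENT for a missing path
    print(exc.errno in (errno.ELOOP, errno.ENOENT))  # True
```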
refine benchmark log
test=develop | @@ -258,8 +258,8 @@ def main():
logs = train_stats.log()
if it % cfg.log_iter == 0 and (not FLAGS.dist or trainer_id == 0):
ips = float(cfg['TrainReader']['batch_size']) / time_cost
- strs = 'iter: {}, lr: {:.6f}, {}, batch_cost: {:.5f} s, eta: {}, ips: {:.5f} images/sec'.format(
- it, np.mean(outs[-1]), logs, time_cost, eta, ips)
+ strs = 'iter: {}, lr: {:.6f}, {}, eta: {}, batch_cost: {:.5f} sec, ips: {:.5f} images/sec'.format(
+ it, np.mean(outs[-1]), logs, eta, time_cost, ips)
logger.info(strs)
# NOTE : profiler tools, used for benchmark
|
Update emotet.txt
Tails are extremely varied and there are no signs that these IPs are legitimate. | @@ -2822,6 +2822,15 @@ http://91.242.136.103/fmrNxd4xgr7w
http://68.114.229.171/gK0HUPd
74.101.225.121:443/iMPCBDusm7qwkGo
+# Reference: https://www.virustotal.com/gui/ip-address/72.186.137.156/relations
+
+72.186.137.156:80
+
+# Reference: https://www.virustotal.com/gui/ip-address/66.7.242.50/relations
+
+66.7.242.50:80
+66.7.242.50:8080
+
# Generic trails
/ringin/
|
fix typo (Response -> Request)
check docs for more information | @@ -39,7 +39,7 @@ class ${ProjectName}SpiderMiddleware(object):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
- # Should return either None or an iterable of Response, dict
+ # Should return either None or an iterable of Request, dict
# or Item objects.
pass
|
UT for testing URLs for all the objects
Only exceptional URLs were being tested; other URLs were missing
test cases. If changes are introduced, these test cases make sure
that those changes follow the URL format expected by the
ODL. | @@ -17,6 +17,7 @@ from oslo_config import cfg
from neutron.tests import base
+from networking_odl.common import constants as odl_const
from networking_odl.common import utils
@@ -30,19 +31,31 @@ class TestUtils(base.DietTestCase):
def test_neutronify_empty(self):
self.assertEqual('', utils.neutronify(''))
- def test_make_url_object_in_resource_map(self):
- url_object = utils.make_url_object('policy')
- self.assertEqual('qos/policies', url_object)
+ @staticmethod
+ def _get_resources():
+ # TODO(rajivk): Load balancer resources are not specified because
+ # urls builder is registered explictly. Add load balancer resources
+ # here, once lbaas url creation is directed through this method
+ return {odl_const.ODL_SG: 'security-groups',
+ odl_const.ODL_SG_RULE: 'security-group-rules',
+ odl_const.ODL_NETWORK: 'networks',
+ odl_const.ODL_SUBNET: 'subnets',
+ odl_const.ODL_ROUTER: 'routers',
+ odl_const.ODL_PORT: 'ports',
+ odl_const.ODL_FLOATINGIP: 'floatingips',
+ odl_const.ODL_QOS_POLICY: 'qos/policies',
+ odl_const.ODL_TRUNK: 'trunks',
+ odl_const.ODL_BGPVPN: 'bgpvpns',
+ odl_const.ODL_SFC_FLOW_CLASSIFIER: 'sfc/flowclassifiers',
+ odl_const.ODL_SFC_PORT_PAIR: 'sfc/portpairs',
+ odl_const.ODL_SFC_PORT_PAIR_GROUP: 'sfc/portpairgroups',
+ odl_const.ODL_SFC_PORT_CHAIN: 'sfc/portchains',
+ odl_const.ODL_L2GATEWAY: 'l2-gateways',
+ odl_const.ODL_L2GATEWAY_CONNECTION: 'l2gateway-connections'}
- def test_make_url_sfc_object_in_resource_map(self):
- objs = ['flowclassifier', 'portpair', 'portpairgroup', 'portchain']
- for obj in objs:
- url_object = utils.make_url_object(obj)
- self.assertEqual('sfc/%ss' % obj, url_object)
-
- def test_make_url_object_conversion(self):
- self.assertEqual('networks', utils.make_url_object('network'))
- self.assertEqual('l2-gateways', utils.make_url_object('l2_gateway'))
+ def test_all_resources_url(self):
+ for obj, url in self._get_resources().items():
+ self.assertEqual(utils.make_url_object(obj), url)
def test_get_odl_url(self):
"""test make uri."""
|
full refactor complete
structure overhauled, changed stop logic | @@ -144,7 +144,15 @@ class TriviaNightCog(commands.Cog):
percentage *= 0.5
duration = next_question.time * percentage
- await asyncio.sleep(duration)
+ await asyncio.wait([self.question_closed.wait()], timeout=duration)
+
+ if self.question_closed.is_set():
+ await ctx.send(embed=question_view.end_question(self.scoreboard))
+ await message.edit(embed=question_embed, view=None)
+
+ self.game.end_question()
+ self.question_closed.clear()
+ return
if int(duration) > 1:
# It is quite ugly to display decimals, the delay for requests to reach Discord
@@ -153,7 +161,14 @@ class TriviaNightCog(commands.Cog):
else:
# Since each time we divide the percentage by 2 and sleep one half of the halves (then sleep a
# half, of that half) we must sleep both halves at the end.
- await asyncio.sleep(duration)
+ await asyncio.wait([self.question_closed.wait()], timeout=duration)
+ if self.question_closed.is_set():
+ await ctx.send(embed=question_view.end_question(self.scoreboard))
+ await message.edit(embed=question_embed, view=None)
+
+ self.game.end_question()
+ self.question_closed.clear()
+ return
break
await ctx.send(embed=question_view.end_question(self.scoreboard))
@@ -210,7 +225,7 @@ class TriviaNightCog(commands.Cog):
await ctx.send(embed=error_embed)
return
- self.ongoing_question = False
+ self.question_closed.set()
@trivianight.command()
@commands.has_any_role(*TRIVIA_NIGHT_ROLES)
|
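The changed stop logic boils down to waiting on an `asyncio.Event` with a timeout instead of sleeping unconditionally, so a question can be closed early. A standalone sketch of that pattern; the names only loosely mirror the cog's attributes:

```python
import asyncio

async def run_question(question_closed: asyncio.Event, duration: float) -> str:
    waiter = asyncio.create_task(question_closed.wait())
    # Returns as soon as the event is set, or after `duration` seconds.
    done, pending = await asyncio.wait({waiter}, timeout=duration)
    for task in pending:
        task.cancel()
    return "closed early" if question_closed.is_set() else "time elapsed"

async def main() -> None:
    event = asyncio.Event()
    print(await run_question(event, 0.1))  # "time elapsed"

asyncio.run(main())
```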
Skip namespace bucket creation via MCG RPC.
Skip namespace bucket creation via MCG RPC apart from Version 4.6 | @@ -12,13 +12,13 @@ from ocs_ci.ocs import constants
logger = logging.getLogger(__name__)
+@skipif_ocs_version("!=4.6")
@scale
class TestScaleNamespace(E2ETest):
"""
Test creation of a namespace scale resource
"""
- @skipif_ocs_version("!=4.6")
@pytest.mark.parametrize(
argnames=["platform"],
argvalues=[
@@ -47,7 +47,6 @@ class TestScaleNamespace(E2ETest):
read_ns_resources=[ns_resource_name],
)
- @skipif_ocs_version("<4.6")
@pytest.mark.polarion_id("OCS-2517")
@on_prem_platform_required
def test_scale_namespace_bucket_creation_with_rgw(
|
Update data_analytics_dag.py again
Need to use write_append to concatenate all of the tables, my bad. It's okay to have it keep adding the data for the sake of our demo. | @@ -113,8 +113,8 @@ with models.DAG(
ON Holidays.Date = Weather.Date;
"""
- # for demo purposes we are using WRITE_TRUNCATE
- # to reduce chance of 409 duplicate errors
+ # for demo purposes we are using WRITE_APPEND
+ # but if you run the DAG repeatedly it will continue to append
# Your use case may be different, see the Job docs
# https://cloud.google.com/bigquery/docs/reference/rest/v2/Job
# for alternative values for the writeDisposition
@@ -131,7 +131,7 @@ with models.DAG(
"datasetId": BQ_DESTINATION_DATASET_NAME,
"tableId": BQ_DESTINATION_TABLE_NAME,
},
- "writeDisposition": "WRITE_TRUNCATE",
+ "writeDisposition": "WRITE_APPEND",
}
},
location="US",
|
Remove style to show text in portico-page-container.
Fixes | @@ -593,10 +593,6 @@ a.bottom-signup-button {
padding-top: 50px !important;
}
-.portico-page-container {
- padding-top: 0px !important;
-}
-
.portico-page-header {
font-weight: 300;
font-size: 35px;
|
removed
"VirusTotal - Private API": "reached api alloted quota.",
from conf.json as it caused the circle to be green without testing | "_comment": "~~~ QUOTA ISSUES ~~~",
"AWS - Athena - Beta": "Issue 19834",
"Lastline": "issue 20323",
- "VirusTotal - Private API": "reached api alloted quota.",
"Google Resource Manager": "Cannot create projects because have reached alloted quota.",
"Looker": "Warehouse 'DEMO_WH' cannot be resumed because resource monitor 'LIMITER' has exceeded its quota."
},
|
Add type annotations to parsl.dataflow.futures
This comes from the benc-mypy branch | @@ -9,7 +9,7 @@ We have two basic types of futures:
from concurrent.futures import Future
import logging
import threading
-from typing import Sequence
+from typing import Optional, Sequence
from parsl.app.futures import DataFuture
from parsl.dataflow.taskrecord import TaskRecord
@@ -75,24 +75,24 @@ class AppFuture(Future):
self.task_def = task_def
@property
- def stdout(self):
+ def stdout(self) -> Optional[str]:
return self.task_def['kwargs'].get('stdout')
@property
- def stderr(self):
+ def stderr(self) -> Optional[str]:
return self.task_def['kwargs'].get('stderr')
@property
- def tid(self):
+ def tid(self) -> int:
return self.task_def['id']
- def cancel(self):
+ def cancel(self) -> bool:
raise NotImplementedError("Cancel not implemented")
- def cancelled(self):
+ def cancelled(self) -> bool:
return False
- def task_status(self):
+ def task_status(self) -> str:
"""Returns the status of the task that will provide the value
for this future. This may not be in-sync with the result state
of this future - for example, task_status might return 'done' but
@@ -116,5 +116,5 @@ class AppFuture(Future):
return self.task_def['status'].name
@property
- def outputs(self):
+ def outputs(self) -> Sequence[DataFuture]:
return self._outputs
|
Add packages and package_data to setup.py
Review: | # See the License for the specific language governing permissions and
# limitations under the License.
-from setuptools import setup
+from setuptools import find_packages, setup
# Read in requirements.txt
requirements = open('requirements.txt').readlines()
@@ -24,4 +24,6 @@ setup(
url='http://github.com/quantumlib/cirq',
author='The Cirq Developers',
install_requirements=requirements,
- license='Apache 2')
+ license='Apache 2',
+ packages=find_packages(),
+ package_data={'cirq.api.google.v1': ['*.proto']})
|
fix bug in
Summary:
Mistakenly created an infinite recursive call.
(Note: this ignores all push blocking failures!) | @@ -57,7 +57,7 @@ bool ConvDNNLowPAcc16Op<ReluFused>::RunOnDeviceWithOrderNHWC() {
template <bool ReluFused>
bool ConvDNNLowPAcc16Op<ReluFused>::GetQuantizationParameters_() {
- if (!this->GetQuantizationParameters_()) {
+ if (!BaseType::GetQuantizationParameters_()) {
return false;
}
|
Vagrant fixes
1)Fix glitch with vagrant and eth1 not having an active ip after first boot.
Removed unnecessary ntp install by script; will be done by openshift install. | @@ -12,10 +12,3 @@ else
echo "eth1 missing ip; restaring interface"
ifdown eth1 && ifup eth1
fi
-
-
-echo "Install ntp server to avoid to have desync issues"
-yum -y install ntp
-systemctl enable ntpd
-systemctl start ntpd
-date
|
Improve documentation and fix minor pystyle violations.
No semantic changes. | from typing import Callable, Any, Optional
-import jax
-import jax.numpy as jnp
-import numpy as np
-
from flax import linen as nn
from flax import struct
+import jax.numpy as jnp
+import numpy as np
+
@struct.dataclass
class TransformerConfig:
@@ -68,17 +67,17 @@ def sinusoidal_init(max_len=2048):
return init
+
class AddPositionEmbs(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs.
- Args:
+ Attributes:
config: TransformerConfig dataclass containing hyperparameters.
"""
config: TransformerConfig
@nn.compact
- def __call__(self,
- inputs):
+ def __call__(self, inputs):
"""Applies AddPositionEmbs module.
By default this layer uses a fixed sinusoidal embedding table. If a
@@ -112,7 +111,7 @@ class AddPositionEmbs(nn.Module):
class MlpBlock(nn.Module):
"""Transformer MLP / feed-forward block.
- Args:
+ Attributes:
config: TransformerConfig dataclass containing hyperparameters.
out_dim: optionally specify out dimension.
"""
@@ -141,17 +140,15 @@ class MlpBlock(nn.Module):
class Encoder1DBlock(nn.Module):
- """Transformer decoder layer.
+ """Transformer encoder layer.
- Args:
+ Attributes:
config: TransformerConfig dataclass containing hyperparameters.
"""
config: TransformerConfig
@nn.compact
- def __call__(self,
- inputs,
- deterministic):
+ def __call__(self, inputs, deterministic):
"""Applies Encoder1DBlock module.
Args:
@@ -193,10 +190,7 @@ class Transformer(nn.Module):
config: TransformerConfig
@nn.compact
- def __call__(self,
- *,
- inputs,
- train):
+ def __call__(self, *, inputs, train):
"""Applies Transformer model on the inputs.
Args:
@@ -204,7 +198,7 @@ class Transformer(nn.Module):
train: if it is training.
Returns:
- output of a transformer decoder.
+ output of a transformer encoder.
"""
padding_mask = jnp.where(inputs > 0, 1, 0).astype(jnp.float32)[..., None]
@@ -217,7 +211,7 @@ class Transformer(nn.Module):
x = nn.Dropout(rate=cfg.dropout_rate)(x, deterministic=not train)
x = AddPositionEmbs(cfg)(x)
- for l in range(cfg.num_layers):
+ for _ in range(cfg.num_layers):
x = Encoder1DBlock(cfg)(x, deterministic=not train)
x = nn.LayerNorm(dtype=cfg.dtype)(x)
|
Update extract_features.py
adapt pooling in extract_features.py to older PyTorch versions | @@ -64,7 +64,7 @@ class FeatureExtractor(torch.nn.Module):
def forward(self, src, seg):
emb = self.embedding(src, seg)
output = self.encoder(emb, seg)
- seg = torch.unsqueeze(seg, dim=-1)
+ seg = torch.unsqueeze(seg, dim=-1).type(torch.float)
output = output * seg
if self.pooling == "mean":
@@ -73,7 +73,7 @@ class FeatureExtractor(torch.nn.Module):
elif self.pooling == "max":
output = torch.max(output + (seg - 1) * sys.maxsize, dim=1)[0]
elif self.pooling == "last":
- output = output[torch.arange(output.shape[0]), torch.squeeze(torch.sum(seg, dim=1) - 1), :]
+ output = output[torch.arange(output.shape[0]), torch.squeeze(torch.sum(seg, dim=1).type(torch.int64) - 1), :]
else:
output = output[:, 0, :]
|
update: use flags noout and nodeep-scrub only
1. set noout and nodeep-scrub flags,
2. upgrade each OSD node, one by one, wait for active+clean pgs
3. after all osd nodes are upgraded, unset flags | name: ceph-mgr
+- name: set osd flags
+ hosts: "{{ mon_group_name | default('mons') }}[0]"
+ become: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
+ - name: set osd flags
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set {{ item }}"
+ with_items:
+ - noout
+ - nodeep-scrub
+
- name: upgrade ceph osds cluster
vars:
health_osd_check_retries: 40
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment | bool
- - name: set osd flags
- command: "{{ container_exec_cmd_update_osd | default('') }} ceph --cluster {{ cluster }} osd set {{ item }}"
- with_items:
- - noout
- - norebalance
- - norecover
- - nobackfill
- delegate_to: "{{ groups[mon_group_name][0] }}"
-
- name: stop ceph osd
systemd:
name: ceph-osd@{{ item }}
- ceph_release in ["nautilus", "octopus"]
- not containerized_deployment | bool
- - name: unset osd flags
- command: "{{ container_exec_cmd_update_osd | default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
- with_items:
- - noout
- - norebalance
- - norecover
- - nobackfill
- delegate_to: "{{ groups[mon_group_name][0] }}"
-
- name: get num_pgs - non container
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
register: ceph_pgs
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment | bool
+ - name: unset osd flags
+ command: "{{ container_exec_cmd_update_osd | default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
+ with_items:
+ - noout
+ - nodeep-scrub
+
- name: get osd versions
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
register: ceph_versions
|
make `correct` float to avoid truncating
Without float conversion the `acc` variable always becomes `0`. | @@ -402,7 +402,7 @@ Finally we can evaluate our model on the test nodes:
model.eval()
_, pred = model(data).max(dim=1)
- correct = pred[data.test_mask].eq(data.y[data.test_mask]).sum().item()
+ correct = float (pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())
acc = correct / data.test_mask.sum().item()
print('Accuracy: {:.4f}'.format(acc))
>>> Accuracy: 0.8150
|
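A worked example of the truncation the commit describes, assuming integer operands and floor-style division (as under Python 2):

```python
correct = 163
total = 200

print(correct // total)        # 0      -- integer division truncates
print(float(correct) / total)  # 0.815  -- the accuracy the tutorial expects
```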
langkit.dsl: refactor special fields filter
TN: | @@ -82,6 +82,17 @@ def check_decorator_use(decorator, expected_cls, cls):
)
+def filter_out_special_fields(dct):
+ """
+ Helper for metaclasses. Return dct without the special fields (__foo__).
+
+ :param dict[str, T] dct: Class attributes dictionnary.
+ :rtype: dict[str, T]
+ """
+ return {k: v for k, v in dct.items()
+ if not k.startswith('__') or not k.endswith('__')}
+
+
class _StructMetaclass(type):
"""
Internal metaclass for struct types, used to collect all Struct subclasses
@@ -134,9 +145,7 @@ class _StructMetaclass(type):
# Make sure all fields are AbstractField instances; assign them
# their name.
fields = []
- for f_n, f_v in dct.items():
- if f_n.startswith('__') and f_n.endswith('__'):
- continue
+ for f_n, f_v in filter_out_special_fields(dct).items():
fields.append((f_n, f_v))
with Context('in {}.{}'.format(name, f_n), location):
check_source_language(
|
Include API_HOST in URL
This makes no difference in production, but it means we can run a
development instance against a different API server and maintains
consistency with what we do elsewhere. | @@ -455,7 +455,8 @@ def measure_for_one_entity(request, measure, entity_code, entity_type):
"measure": measure,
"measure_options": measure_options,
"current_at": ImportLog.objects.latest_in_category("prescribing").current_at,
- "numerator_breakdown_url": "{}?{}".format(
+ "numerator_breakdown_url": "{}{}?{}".format(
+ settings.API_HOST,
reverse("measure_numerators_by_org"),
urlencode(
{"org": entity.code, "org_type": entity_type, "measure": measure.id}
@@ -492,7 +493,8 @@ def measure_for_all_england(request, measure):
"measure": measure,
"measure_options": measure_options,
"current_at": ImportLog.objects.latest_in_category("prescribing").current_at,
- "numerator_breakdown_url": "{}?{}".format(
+ "numerator_breakdown_url": "{}{}?{}".format(
+ settings.API_HOST,
reverse("measure_numerators_by_org"),
urlencode({"org": "", "org_type": entity_type, "measure": measure.id}),
),
|
Updated Lithuania Exchanges (to BY and RU-KGD)
used source from | "rotation": 135
},
"BY->LT": {
+ "capacity": [
+ -4553,
+ 4553
+ ],
"lonlat": [
25.756061,
54.789457
"rotation": 180
},
"LT->RU-KGD": {
+ "capacity": [
+ -2490,
+ 2490
+ ],
"lonlat": [
21.963913,
55.080726
|
[hail][ir] improve error message when unify fails in PruneDeadFields
With this added context it was completely obvious what I had done wrong. Without
this context, I just knew that somewhere I had some ints and structs that did not
unify. | @@ -138,6 +138,7 @@ object PruneDeadFields {
def unifyBaseType(base: BaseType, children: BaseType*): BaseType = unifyBaseTypeSeq(base, children)
def unifyBaseTypeSeq(base: BaseType, children: Seq[BaseType]): BaseType = {
+ try {
if (children.isEmpty)
return minimalBT(base)
base match {
@@ -188,6 +189,10 @@ object PruneDeadFields {
base
}
}
+ } catch {
+ case e: RuntimeException =>
+ throw new RuntimeException(s"failed to unify children while unifying:\n base: ${ base }\n${ children.mkString("\n") }", e)
+ }
}
def unify[T <: BaseType](base: T, children: T*): T = unifyBaseTypeSeq(base, children).asInstanceOf[T]
|
[Fix,Roofline] Fix roofline handling of multiple peak flops
In the switch to multiple possible peakflops measurement, the logic to
add all of them was skipped. Instead only the last was added. | @@ -145,6 +145,7 @@ def roofline_from_existing(
if isinstance(prim, tir.PrimFunc) and "hash" in prim.attrs.keys()
}
+ new_configuration = dict(report.configuration.items())
new_calls = []
for call in report.calls:
if "Hash" in call.keys() and call["Hash"] in all_features:
@@ -159,6 +160,10 @@ def roofline_from_existing(
loaded_bytes, peak_bandwidth, bandwidth_name = registry.estimate_peak_bandwidth(
prim, features, target, dev, remote
)
+ new_configuration[f"Estimated Peak FLOP/s ({flops_name})"] = profiling.Ratio(peak_flops)
+ new_configuration[
+ f"Estimated Peak Bandwidth ({bandwidth_name}, byte/second)"
+ ] = profiling.Ratio(peak_bandwidth)
ridge_point = peak_flops / peak_bandwidth
runtime = call["Duration (us)"].microseconds * 1e-6
@@ -180,11 +185,6 @@ def roofline_from_existing(
new_calls.append(call)
else:
new_calls.append(call)
- new_configuration = dict(report.configuration.items())
- new_configuration[f"Estimated Peak FLOP/s ({flops_name})"] = profiling.Ratio(peak_flops)
- new_configuration[
- f"Estimated Peak Bandwidth ({bandwidth_name}, byte/second)"
- ] = profiling.Ratio(peak_bandwidth)
return profiling.Report(new_calls, report.device_metrics, new_configuration)
|
[builder] get 'Variation Font Origin' from font-wide custom parameters
Fixes | @@ -173,10 +173,13 @@ def to_ufos(data, include_instances=False, family_name=None, debug=False):
result = [ufos[master_id] for master_id in master_id_order]
instances = {'defaultFamilyName': source_family_name,
'data': data.pop('instances', [])}
- for key in ("Variation Font Origin",):
- value = data.get(key)
- if value:
- instances[key] = value
+
+ # the 'Variation Font Origin' is a font-wide custom parameter, thus it is
+ # shared by all the master ufos; here we just get it from the first one
+ varfont_origin_key = "Variation Font Origin"
+ varfont_origin = first_ufo.lib.get(GLYPHS_PREFIX + varfont_origin_key)
+ if varfont_origin:
+ instances[varfont_origin_key] = varfont_origin
if debug:
return clear_data(data)
elif include_instances:
|
Update install-mmte-helm-gitlab-helm.rst
Updated config > configJSON and fixed Helm chart location in deployment command. | @@ -57,7 +57,7 @@ Deploy Mattermost Team Edition Helm Chart
Requirements:
- - Mattermost Team Edition Helm Chart Version: 1.4.0
+ - Mattermost Team Edition Helm Chart Version: 3.8.2
To deploy Mattermost Team Edition with GitLab Helm Chart, disable the running ``MySql`` chart and configure InitContainer and Environment variables in ``values.yaml``. The list below indicates the values that should be changed. Note that we assume the GitLab chart name is ``gitlab``.
@@ -87,10 +87,10 @@ To deploy Mattermost Team Edition with GitLab Helm Chart, disable the running ``
enabled: false
# Mattermost configuration:
- config:
- siteUrl: "https://<your-mattermost-domain>"
- siteName: "Mattermost"
- enableSignUpWithEmail: false
+ configJSON:
+ SiteUrl: "https://<your-mattermost-domain>"
+ SiteName: "Mattermost"
+ EnableSignUpWithEmail: false
ingress:
enabled: true
@@ -235,7 +235,7 @@ After these changes, deploy the Mattermost Team Edition Helm Chart with followin
.. code-block:: bash
- $ helm upgrade --install mattermost -f values.yaml stable/mattermost-team-edition
+ $ helm upgrade --install mattermost -f values.yaml mattermost/mattermost-team-edition
Wait for the pods to run. Then access your Mattermost server, and log in with your GitLab credentials.
|
Added commented wildcards for spam domains
Added commented wildcards for spam domains so that they may be more easily scripted for automated integration into other platforms by finding and replacing lines that start with "#.*". | # Spam domains
# If your software is able, the below domains and all subdomains should be blocked
127.0.0.1 angiemktg.com
+#*.angiemktg.com
127.0.0.1 weconfirmyou.com
+#*.weconfirmyou.com
#=====================================
|
Adding a unit test checking that if no positive words were given, the
highlighted text is the same as the original text | from collections import defaultdict
import pytest
-from DBotPredictPhishingWords import get_model_data, predict_phishing_words, main
from CommonServerPython import *
+from DBotPredictPhishingWords import get_model_data, predict_phishing_words, main
TOKENIZATION_RESULT = None
@@ -242,3 +242,30 @@ def test_main(mocker):
}
main()
assert res['Contents'] == correct_res
+
+
+def test_no_positive_words(mocker):
+ # make sure that if no positive words were found, TextTokensHighlighted output is equivalent to original text
+ global TOKENIZATION_RESULT
+ args = {'modelName': 'modelName', 'modelStoreType': 'list', 'emailSubject': 'word1', 'emailBody': 'word2 word3',
+ 'minTextLength': '0', 'labelProbabilityThreshold': '0', 'wordThreshold': '0', 'topWordsLimit': '10',
+ 'returnError': 'true'}
+ mocker.patch.object(demisto, 'args', return_value=args)
+ mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
+ mocker.patch('demisto_ml.decode_model', return_value="Model", create=True)
+ mocker.patch('demisto_ml.filter_model_words', return_value=("text", 2), create=True)
+ mocker.patch('demisto_ml.explain_model_words', return_value={"Label": 'Valid',
+ 'Probability': 0.7,
+ 'PositiveWords': [],
+ 'NegativeWords': ['word2']},
+ create=True)
+
+ TOKENIZATION_RESULT = {'originalText': '%s %s' % (args['emailSubject'], args['emailBody']),
+ 'tokenizedText': '%s %s' % (args['emailSubject'], args['emailBody']),
+ 'originalWordsToTokens': {'word1': ['word1'], 'word2': ['word2'], 'word3': ['word3']},
+ }
+
+ res = main()
+ assert res['Contents']['TextTokensHighlighted'] == TOKENIZATION_RESULT['originalText']
+
+
|
GDB helpers: update env rebindings pretty-printer after un-refcounting
TN: | @@ -417,20 +417,18 @@ class RebindingsPrinter(BasePrinter):
return 'null'
def rebinding_img(value):
- new_env = EnvGetterPrinter(value['new_env'], self.context).env
- return ASTNodePrinter(new_env['node'], self.context).sloc(
- with_end=False
- ) if new_env and new_env['node'] else '<synthetic>'
-
- rebindings = self.value
-
- size = int(rebindings['size'])
- array = rebindings['bindings'].address.cast(
- rebindings['bindings'].type.target().array(1, size).pointer()
- ).dereference()
- return '[{}]'.format(', '.join(
- rebinding_img(array[i]) for i in range(1, size + 1)
- ))
+ return ASTNodePrinter(value['new_env']['node'],
+ self.context).sloc(with_end=False)
+
+ # Gather all Env_Rebindings_Type records, parents last
+ rebindings = []
+ r = self.value
+ while r:
+ rebindings.append(r)
+ r = r['parent']
+
+ return '[{}]'.format(', '.join(rebinding_img(r)
+ for r in reversed(rebindings)))
def to_string(self):
return "<Rebindings {}>".format(self.inner)
|
Fix typo in streaming docs
Typo | @@ -141,7 +141,8 @@ When you need to remove one or more columns, give [`IterableDataset.remove_colum
```py
>>> from datasets import load_dataset
->>> dataset = load_dataset('glue', 'mrpc', split='train')features
+>>> dataset = load_dataset('glue', 'mrpc', split='train')
+>>> dataset.features
{'sentence1': Value(dtype='string', id=None),
'sentence2': Value(dtype='string', id=None),
'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None),
|
Add a couple of sanity checks so we don't break the database.
Part of | @@ -9,5 +9,16 @@ if _upper_dir not in sys.path:
import chdb
+def sanity_check():
+ sdb = chdb.init_scratch_db()
+ snippet_count = sdb.execute_with_retry_s(
+ '''SELECT COUNT(*) FROM snippets''')[0]
+ assert snippet_count > 100
+
+ article_count = sdb.execute_with_retry_s(
+ '''SELECT COUNT(*) FROM articles''')[0]
+ assert article_count > 100
+
if __name__ == '__main__':
+ sanity_check()
chdb.install_scratch_db()
|
Add ddownload.com to file storage and sharing
added ddownload.com | @@ -276,6 +276,7 @@ API | Description | Auth | HTTPS | CORS |
| [AnonFiles](https://anonfiles.com/docs/api) | Upload and share your files anonymously | No | Yes | Unknown |
| [BayFiles](https://bayfiles.com/docs/api) | Upload and share your files | No | Yes | Unknown |
| [Box](https://developer.box.com/) | File Sharing and Storage | `OAuth` | Yes | Unknown |
+| [ddownload](https://ddownload.com/api) | File Sharing and Storage | `apiKey` | Yes | Unknown |
| [Dropbox](https://www.dropbox.com/developers) | File Sharing and Storage | `OAuth` | Yes | Unknown |
| [File.io](https://www.file.io) | Super simple file sharing, convenient, anonymous and secure | No | Yes | Unknown |
| [GoFile](https://gofile.io/api) | Unlimited size file uploads for free | `apiKey` | Yes | Unknown |
|
fix(db_query): Handle permlevel check cases more clearly
Split into utility functions for clarity
Add example over code blocks
Re-arrange blocks based on priority | @@ -564,25 +564,38 @@ class DatabaseQuery:
permitted_fields = get_permitted_fields(doctype=self.doctype)
for i, field in enumerate(self.fields):
- if "distinct" in field:
+ # field: like 'name', 'published'
+ if is_plain_field(field) and field not in permitted_fields:
+ self.fields.remove(field)
+ continue
+
+ if "distinct" in field.lower():
+ # field: 'count(distinct `tabPhoto`.name) as total_count'
+ # column: 'tabPhoto.name'
self.distinct = True
- column = field.split(" ", 2)[1].replace("`", "")
+ column = field.split(" ", 2)[1].replace("`", "").replace(")", "")
else:
- column = field.split(" ", 1)[0].replace("`", "")
+ # field: 'count(`tabPhoto`.name) as total_count'
+ # column: 'tabPhoto.name'
+ column = field.split("(")[-1].split(")", 1)[0]
+ column = strip_alias(column).replace("`", "")
- if column == "*":
+ if column == "*" and not in_function("*", field):
self.fields[i : i + 1] = permitted_fields
+ # handle pseudo columns
+ elif not column:
+ continue
+
# labels / pseudo columns or frappe internals
- elif column[0] in {"'", '"', "_"} or column in permitted_fields:
+ elif column[0] in {"'", '"', "_"}:
continue
# handle child / joined table fields
elif "." in field:
- table, _column = field.split(".", 1)
- column = _column.lower().split(" ", 1)[0].replace("`", "")
+ table, column = column.split(".", 1)
- if table in self.tables:
+ if wrap_grave_quotes(table) in self.tables:
ch_doctype = table.replace("`", "").replace("tab", "", 1)
permitted_child_table_fields = get_permitted_fields(
doctype=ch_doctype, parenttype=self.doctype
@@ -592,6 +605,9 @@ class DatabaseQuery:
else:
self.fields.remove(field)
+ elif column in permitted_fields:
+ continue
+
# field inside function calls / * handles things like count(*)
elif "(" in field:
if "*" in field:
@@ -605,7 +621,6 @@ class DatabaseQuery:
not param or param in permitted_fields or param.isnumeric() or "'" in param or '"' in param
):
continue
- else:
self.fields.remove(field)
# remove if access not allowed
@@ -1230,3 +1245,30 @@ def get_permitted_fields(doctype, parenttype=None):
meta_fields.remove("idx")
return meta_fields + accessible_fields + optional_meta_fields
+
+
+def wrap_grave_quotes(table: str) -> str:
+ if table[0] != "`":
+ table = f"`{table}`"
+ return table
+
+
+def is_plain_field(field: str) -> bool:
+ for char in field:
+ if char in ("(", "`", ".", "'", '"', "*"):
+ return False
+ return True
+
+
+def in_function(substr: str, field: str) -> bool:
+ try:
+ return substr in field and field.index("(") < field.index(substr) < field.index(")")
+ except ValueError:
+ return False
+
+
+def strip_alias(field: str) -> str:
+ # Note: Currently only supports aliases that use the " AS " syntax
+ if " as " in field.lower():
+ return field.split(" as ", 1)[0]
+ return field
|
Update .travis.yml
python3 didn't work. Modifying output redirect to get more info. The previous commit may have worked for python2. Still waiting for testing to finish on Travis. | @@ -86,7 +86,7 @@ install:
- echo 'installing pyglow';
- cd ./pyglow;
- travis_wait 50 make -C src/pyglow/models source >/dev/null;
- - travis_wait 50 python setup.py install --user >/dev/null;
+ - travis_wait 50 python setup.py install --user
- cd ../pysat;
# install pysat
- "python setup.py install"
|
op-guide: add accessing Spark with Python or R
Via: | @@ -164,4 +164,13 @@ scala> spark.sql("select count(*) from lineitem").show
+--------+
```
+You can also access Spark with Python or R using the following commands:
+
+```
+docker-compose exec tispark-master /opt/spark/bin/pyspark
+docker-compose exec tispark-master /opt/spark/bin/sparkR
+```
+
+For more details about TiSpark, see [here](../tispark/tispark-quick-start-guide.md).
+
Here is [a 5-minute tutorial](https://www.pingcap.com/blog/how_to_spin_up_an_htap_database_in_5_minutes_with_tidb_tispark/) for macOS users that shows how to spin up a standard TiDB cluster using Docker Compose on your local computer.
\ No newline at end of file
|
doc: add instructions for setting up Cloud9 environment.
Added instructions that allow for a low-cost ~10min environment setup. | @@ -19,6 +19,23 @@ reported the issue. Please try to include as much information as you can. Detail
* Any modifications you've made relevant to the bug
* A description of your environment or deployment
+## Setting up your development environment [optional, but recommended]
+
+* Set up the Cloud9 environment:
+ * Instance type: You'll need at least 4 GB of RAM to avoid running into memory issues. We recommend at least a t3.medium to run the unit tests. Larger hosts will reduce the chance of encountering resource limits.
+ * Follow the instructions at [Creating a Cloud9 EC2 Environment](https://docs.aws.amazon.com/cloud9/latest/user-guide/create-environment.html#create-environment-main) to set up a Cloud9 EC2 environment
+* Expand the storage of the EC2 instance from 10GB to 20GB
+ * Because you'll need a minimum of 11GB of disk storage on the EC2 instance to run the package's unit tests, you'll need to expand your EC2 volume size. We recommend at least 20GB. A larger volume will reduce the chance of encountering resource limits.
+ * Follow the instructions at [Modifying an EBS Volume Using Elastic Volumes (Console)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/requesting-ebs-volume-modifications.html#modify-ebs-volume) to increase the EBS volume size associated with the newly created EC2 instance.
+ * Wait 5-10min for the new EBS volume increase to take effect.
+ * Allow EC2 to claim the additional space by stopping and then starting your EC2 host.
+* Create a fork of this package on GitHub. You should end up with a fork at `https://github.com/<username>/sagemaker-python-sdk`
+ * Follow the instructions at [Fork a repo](https://help.github.com/en/articles/fork-a-repo) to fork a GitHub repository.
+* In the Cloud9 UI, pull down this package by clicking on "Clone from Github" or running the following command in the Cloud9 terminal: `git clone https://github.com/<username>/sagemaker-python-sdk` where <username> is your github username.
+* Install tox using `pip install tox`
+* Install coverage using `pip install .[test]`
+* cd into the sagemaker-python-sdk package: `cd sagemaker-python-sdk` or `cd /environment/sagemaker-python-sdk`
+* Run the following tox command and verify that all unit tests pass: `tox tests/unit`
## Contributing via Pull Requests
Contributions via pull requests are much appreciated.
|
TST: make test_verbosity() pass
remove pause() and its use
remove non-existent precision argument in save() | @@ -20,15 +20,10 @@ from scipy.sparse.linalg import aslinearoperator, LinearOperator
__all__ = ['lobpcg']
-def pause():
- # Used only when verbosity level > 10.
- input()
-
-
def save(ar, fileName):
# Used only when verbosity level > 10.
from numpy import savetxt
- savetxt(fileName, ar, precision=8)
+ savetxt(fileName, ar)
def _report_nonhermitian(M, a, b, name):
@@ -530,7 +525,6 @@ def lobpcg(A, X,
if verbosityLevel > 10:
print(eigBlockVector)
- pause()
##
# Compute Ritz vectors.
@@ -559,7 +553,6 @@ def lobpcg(A, X,
print(pp)
print(app)
print(bpp)
- pause()
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
|
test-backend: Clean up leak data import files after test-suite run.
This is a simple, non-intrusive way of removing the bulk of the
clutter from `var/<uuid>/test-backend` after running `test-backend`.
Ideally, we'll replace this logic with proper tearDown methods. | @@ -16,6 +16,7 @@ import ujson
import httplib2
import httpretty
import requests
+import shutil
import django
from django.conf import settings
@@ -456,6 +457,21 @@ def main() -> None:
# an important clue as to why tests fail.
report_slow_tests()
+ # We now cleanup files leaked by certain tests that don't clean up
+ # after themselves. Ideally, this cleanup would happen in in the
+ # tearDown() methods for the relevant test classes, but this at least
+ # prevents a disk leak.
+ from scripts.lib.zulip_tools import get_or_create_dev_uuid_var_path
+ for path in glob.glob(os.path.join(get_or_create_dev_uuid_var_path('test-backend'),
+ "test-*-import-*")):
+ try:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ else:
+ os.remove(path)
+ except FileNotFoundError:
+ pass
+
# Ideally, we'd check for any leaked test databases here;
# but that needs some hackery with database names.
#
|
Provide safer command to delete sharding jobs
This ensures that people who are blindly copy-pasting commands
don't accidentally delete running reshard jobs. | @@ -717,12 +717,11 @@ good idea to remove already completed jobs. See :ref:`reshard configuration
section <config/reshard>` for the default value of ``max_jobs`` parameter and
how to adjust if needed.
-For example, if the jobs have completed, to remove all the jobs run:
+For example, to remove all the completed jobs run:
.. code-block:: bash
- $ curl -s $COUCH_URL:5984/_reshard/jobs | jq -r '.jobs[].id' |\
- while read -r jobid; do\
+ $ for jobid in $(curl -s $COUCH_URL:5984/_reshard/jobs | jq -r '.jobs[] | select (.job_state=="completed") | .id'); do \
curl -s -XDELETE $COUCH_URL:5984/_reshard/jobs/$jobid \
done
|
Fix link for common_task/target_aggregate
The link to Create a Target Aggregate found on the docsite at currently leads to a 404, this should resolve that issue. | "setup_repo": "dist/markdown/html/src/docs/setup_repo.html",
"styleguide": "dist/markdown/html/src/docs/styleguide.html",
"target_addresses": "dist/markdown/html/src/docs/target_addresses.html",
+ "target_aggregate": "dist/markdown/html/src/docs/common_tasks/target_aggregate.html",
"test": "dist/markdown/html/src/docs/common_tasks/test.html",
"test_suite": "dist/markdown/html/src/docs/common_tasks/test_suite.html",
"thrift_deps": "dist/markdown/html/examples/src/thrift/org/pantsbuild/example/README.html",
|
Fix: tutorial index headings do not match tutorial content page
Index does not match content on tutorials page. This fixes that. I haven't worked the examples myself, so whether this is the correct fix or whether the content should be changed to match the index, is beyond my ability to judge. | @@ -127,21 +127,21 @@ upper_tabs:
#### INTERMEDIATE ####
- heading: "Intermediate"
- - title: "Shor's algorithm"
- path: /cirq/tutorials/shor
-
- #### ADVANCED ####
- - heading: "Advanced"
- title: "Quantum variational algorithm"
path: /cirq/tutorials/variational_algorithm
- - title: "QAOA experiment"
+ - title: "Approximate optimization"
path: /cirq/tutorials/qaoa
- - title: "Hidden linear function problem"
- path: /cirq/tutorials/hidden_linear_function
- title: "Quantum walks"
path: /cirq/tutorials/quantum_walks
+
+ #### ADVANCED ####
+ - heading: "Advanced"
+ - title: "Hidden linear function problem"
+ path: /cirq/tutorials/hidden_linear_function
- title: "Rabi oscillation experiment"
path: /cirq/tutorials/rabi_oscillations
+ - title: "Shor's algorithm"
+ path: /cirq/tutorials/shor
#### GOOGLE HARDWARE ####
- heading: "Google hardware"
|
travis: Remove ssh loopback trick
Builds are currently failing. For example | @@ -58,11 +58,6 @@ before_script:
# TODO, only do this step for the postgres environment
- psql -c 'create database spotify;' -U postgres
- # allow ssh loopback
- - ssh-keygen -t rsa -N '' -C '' -f ~/.ssh/id_rsa
- - cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
- - ssh -o StrictHostKeyChecking=no localhost true
-
# Create mysql database if possible but fail silently if not available.
- mysql -e 'create database IF NOT EXISTS luigi_test;' -uroot || true
|
Update sensor.py
Fix Issue | @@ -329,10 +329,9 @@ class GarbageCollection(Entity):
if self.date_inside(today):
next_date = self.get_next_date(today)
if next_date is not None:
- next_date_year = next_date.year
if not self.date_inside(next_date):
if self.__first_month <= self.__last_month:
- next_year = date(next_date_year + 1, self.__first_month, 1)
+ next_year = date(year + 1, self.__first_month, 1)
next_date = self.get_next_date(next_year)
_LOGGER.debug(
"(%s) Did not find the date this year, "
@@ -340,7 +339,7 @@ class GarbageCollection(Entity):
self.__name,
)
else:
- next_year = date(next_date_year, self.__first_month, 1)
+ next_year = date(year, self.__first_month, 1)
next_date = self.get_next_date(next_year)
_LOGGER.debug(
"(%s) Arrived to the end of date range, "
|
Fix error on empty groups
When running with the ALL target, the application tries to list all
projects in all groups. But it crashes when there are groups
without any projects.
This change fixes such errors. | -from gitlabform.gitlab.core import GitLabCore
+from gitlabform.gitlab.core import GitLabCore, NotFoundException
class GitLabGroups(GitLabCore):
@@ -28,7 +28,10 @@ class GitLabGroups(GitLabCore):
returned, so if "group" (= members of this group) is also a member of some projects, they won't be
returned here.
"""
+ try:
projects = self._make_requests_to_api("groups/%s/projects", group, paginated=True)
+ except NotFoundException:
+ projects = []
all_project_and_groups = sorted(map(lambda x: x['path_with_namespace'], projects))
|
Edit wwis_weather en-US locale
Added a ForecastKeyword keyword which can be either "weather" or
"forecast" in the templates. Also added a template for "Can you
tell me the weather forcast?" | @@ -26,6 +26,10 @@ class WWISWeatherPlugin(plugin.SpeechHandlerPlugin):
'locale': {
'en-US': {
'keywords': {
+ 'ForecastKeyword':[
+ 'WEATHER',
+ 'FORECAST'
+ ],
'WeatherTypePresentKeyword': [
'SNOWING',
'RAINING',
@@ -64,17 +68,18 @@ class WWISWeatherPlugin(plugin.SpeechHandlerPlugin):
]
},
'templates': [
- "WHAT IS THE WEATHER IN {LocationKeyword}",
- "WHAT IS THE FORECAST FOR {DayKeyword}",
- "WHAT IS THE FORECAST FOR {LocationKeyword}",
- "WHAT IS THE FORECAST FOR {LocationKeyword} ON {DayKeyword}",
- "WHAT IS THE FORECAST FOR {LocationKeyword} ON {DayKeyword} {TimeKeyword}",
+ "WHAT IS THE {ForecastKeyword} IN {LocationKeyword}",
+ "WHAT IS THE {ForecastKeyword} FOR {DayKeyword}",
+ "WHAT IS THE {ForecastKeyword} FOR {LocationKeyword}",
+ "WHAT IS THE {ForecastKeyword} FOR {LocationKeyword} ON {DayKeyword}",
+ "WHAT IS THE {ForecastKeyword} FOR {LocationKeyword} ON {DayKeyword} {TimeKeyword}",
"IS IT {WeatherTypePresentKeyword} IN {LocationKeyword}",
"WILL IT {WeatherTypeFutureKeyword} THIS {TimeKeyword}",
"WILL IT {WeatherTypeFutureKeyword} {DayKeyword}",
"WILL IT {WeatherTypeFutureKeyword} {DayKeyword} {TimeKeyword}",
"WHEN WILL IT {WeatherTypeFutureKeyword}",
- "WHEN WILL IT {WeatherTypeFutureKeyword} IN {LocationKeyword}"
+ "WHEN WILL IT {WeatherTypeFutureKeyword} IN {LocationKeyword}",
+ "CAN YOU TELL ME THE {ForecastKeyword}"
]
},
'fr-FR': {
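Each `{Keyword}` placeholder multiplies out against its keyword list, so one template line covers many phrasings. A rough sketch of that expansion (the matcher in the real plugin is more involved; this only shows the combinatorics, with invented location keywords):

```python
import itertools
import re

keywords = {
    'ForecastKeyword': ['WEATHER', 'FORECAST'],
    'LocationKeyword': ['SEATTLE', 'PARIS'],
}
template = "WHAT IS THE {ForecastKeyword} IN {LocationKeyword}"

# every keyword combination the template can match
names = re.findall(r'{(\w+)}', template)
phrases = [template.format(**dict(zip(names, combo)))
           for combo in itertools.product(*(keywords[n] for n in names))]
print(phrases)
# ['WHAT IS THE WEATHER IN SEATTLE', 'WHAT IS THE WEATHER IN PARIS',
#  'WHAT IS THE FORECAST IN SEATTLE', 'WHAT IS THE FORECAST IN PARIS']
```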
|
utils/doc: Adds support for showing aliases when formatting parameters
Now displays all available local and global aliases when generating the
rst for a parameter. | @@ -274,6 +274,10 @@ def get_params_rst(parameters):
param.mandatory and '(mandatory)' or ' ')
desc = strip_inlined_text(param.description or '')
text += indent('{}\n'.format(desc))
+ if param.aliases:
+ text += indent('\naliases: {}\n'.format(', '.join(map(format_literal, param.aliases))))
+ if param.global_alias:
+ text += indent('\nglobal alias: {}\n'.format(format_literal(param.global_alias)))
if param.allowed_values:
text += indent('\nallowed values: {}\n'.format(', '.join(map(format_literal, param.allowed_values))))
elif param.constraint:
|
Update test_ops_decompositions.py
the 3 in line 212 should be an n | @@ -209,7 +209,7 @@ class TestGraphEmbed:
A = np.random.random([n, n]) + 1j * np.random.random([n, n])
A += A.T
- A -= np.trace(A) * np.identity(n) / 3
+ A -= np.trace(A) * np.identity(n) / n
sq, U = dec.graph_embed(A)
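The divisor has to be the matrix dimension for the trace subtraction to work: removing tr(A)/n from each of the n diagonal entries removes exactly tr(A). A small standalone NumPy check of that identity (variable names are illustrative, not taken from the test suite):

```python
import numpy as np

n = 6
A = np.random.random([n, n]) + 1j * np.random.random([n, n])
A += A.T                                  # symmetrise, as in the test
A -= np.trace(A) * np.identity(n) / n     # subtract tr(A)/n from each diagonal entry
print(np.isclose(np.trace(A), 0))         # True for any n; dividing by 3 only works when n == 3
```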
|
No longer need the name proxy, now that we're not mutating InstallRequirement
instances in combine_install_requirements | @@ -234,39 +234,6 @@ class Resolver:
return results
- def _get_ireq_with_name(
- self,
- ireq: InstallRequirement,
- proxy_cache: Dict[InstallRequirement, InstallRequirement],
- ) -> InstallRequirement:
- """
- Return the given ireq, if it has a name, or a proxy for the given ireq
- which has been prepared and therefore has a name.
-
- Preparing the ireq is side-effect-ful and can only be done once for an
- instance, so we use a proxy instead. combine_install_requirements may
- use the given ireq as a template for its aggregate result, mutating it
- further by combining extras, etc. In that situation, we don't want that
- aggregate ireq to be prepared prior to mutation, since its dependencies
- will be frozen with those of only a subset of extras.
-
- i.e. We both want its name early (via preparation), but we also need to
- prepare it after any mutation for combination purposes. So we use a
- proxy here for the early preparation.
- """
- if ireq.name is not None:
- return ireq
-
- if ireq in proxy_cache:
- return proxy_cache[ireq]
-
- # get_dependencies has the side-effect of assigning name to ireq
- # (so we can group by the name in _group_constraints below).
- name_proxy = copy.deepcopy(ireq)
- self.repository.get_dependencies(name_proxy)
- proxy_cache[ireq] = name_proxy
- return name_proxy
-
def _group_constraints(
self, constraints: Iterable[InstallRequirement]
) -> Iterator[InstallRequirement]:
@@ -287,30 +254,19 @@ class Resolver:
"""
constraints = list(constraints)
- cache: Dict[InstallRequirement, InstallRequirement] = {}
-
- def key_from_ireq_with_name(ireq: InstallRequirement) -> str:
- """
- See _get_ireq_with_name for context.
-
- We use a cache per call here because it should only be necessary
- the first time an ireq is passed here (later on in the round, it
- will be prepared and dependencies for it calculated), but we can
- save time by reusing the proxy between the sort and groupby calls
- below.
- """
- return key_from_ireq(self._get_ireq_with_name(ireq, cache))
+ for ireq in constraints:
+ if ireq.name is None:
+ # get_dependencies has side-effect of assigning name to ireq
+ # (so we can group by the name below).
+ self.repository.get_dependencies(ireq)
# Sort first by name, i.e. the groupby key. Then within each group,
# sort editables first.
# This way, we don't bother with combining editables, since the first
# ireq will be editable, if one exists.
for _, ireqs in groupby(
- sorted(
- constraints,
- key=(lambda x: (key_from_ireq_with_name(x), not x.editable)),
- ),
- key=key_from_ireq_with_name,
+ sorted(constraints, key=(lambda x: (key_from_ireq(x), not x.editable))),
+ key=key_from_ireq,
):
yield combine_install_requirements(ireqs)
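The sort key mirrors the groupby key on purpose: itertools.groupby only merges adjacent items, so constraints must be sorted by name first, with `not x.editable` as a tie-breaker to float editables to the front of each group. A simplified sketch of the same pattern, with plain dicts standing in for InstallRequirement objects:

```python
from itertools import groupby

reqs = [
    {"name": "flask", "editable": True},
    {"name": "django", "editable": False},
    {"name": "flask", "editable": False},
]

key = lambda r: r["name"]
# Sort by the groupby key first, then editables first within each group.
ordered = sorted(reqs, key=lambda r: (key(r), not r["editable"]))
for name, group in groupby(ordered, key=key):
    print(name, [r["editable"] for r in group])
# django [False]
# flask [True, False]   <- the editable requirement leads its group
```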
|
Add django-zero-downtime-migrations Postgres DB engine
From the repo: | @@ -21,6 +21,8 @@ DEFAULT_POSTGRESQL_ENGINES = (
'django.db.backends.postgis',
'django.contrib.gis.db.backends.postgis',
'psqlextra.backend',
+ 'django_zero_downtime_migrations.backends.postgres',
+ 'django_zero_downtime_migrations.backends.postgis',
)
SQLITE_ENGINES = getattr(settings, 'DJANGO_EXTENSIONS_RESET_DB_SQLITE_ENGINES', DEFAULT_SQLITE_ENGINES)
|
Don't yield in list comprehensions
I've tried to grep for more of this with no success. | @@ -259,11 +259,15 @@ class ApplicationServicesHandler(object):
event based on the service regex.
"""
services = self.store.get_app_services()
- interested_list = [
- s for s in services if (
- yield s.is_interested(event, self.store)
- )
- ]
+
+ # we can't use a list comprehension here. Since python 3, list
+ # comprehensions use a generator internally. This means you can't yield
+ # inside of a list comprehension anymore.
+ interested_list = []
+ for s in services:
+ if (yield s.is_interested(event, self.store)):
+ interested_list.append(s)
+
defer.returnValue(interested_list)
def _get_services_for_user(self, user_id):
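The comment in the diff is the whole story: since Python 3, a list comprehension body runs in its own implicit function scope, so a `yield` there would turn that inner scope into a generator rather than suspending the enclosing generator-based coroutine. A framework-free sketch of the replacement pattern (the tiny driver below stands in for Twisted's inlineCallbacks machinery; names are illustrative):

```python
def interested_services(services, event):
    interested = []
    for s in services:
        # `yield` suspends *this* generator; inside a comprehension it would not.
        if (yield (s, event)):
            interested.append(s)
    return interested

def drive(gen, predicate):
    """Minimal driver: answer each yielded question with predicate(...)."""
    try:
        question = next(gen)
        while True:
            question = gen.send(predicate(*question))
    except StopIteration as done:
        return done.value

print(drive(interested_services(["svc_a", "svc_b", "svc_c"], "event"),
            lambda s, e: s != "svc_b"))
# ['svc_a', 'svc_c']
```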
|
Update mkvtomp4.py
fix progress bar and isolate it to its own method | @@ -1064,12 +1064,9 @@ class MkvtoMp4:
try:
for timecode in conv:
if reportProgress:
- try:
- sys.stdout.write('\r')
- sys.stdout.write('[{0}] {1}%'.format('#' * (timecode / 10) + ' ' * (10 - (timecode / 10)), timecode))
- except:
- sys.stdout.write(str(timecode))
- sys.stdout.flush()
+ self.displayProgressBar(timecode)
+ if reportProgress:
+ self.displayProgressBar(100, newline=True)
self.log.info("%s created." % outputfile)
self.setPermissions(outputfile)
@@ -1096,6 +1093,18 @@ class MkvtoMp4:
return finaloutputfile, inputfile
+ def displayProgressBar(self, complete, width=20, newline=False):
+ try:
+ divider = 100 / width
+
+ sys.stdout.write('\r')
+ sys.stdout.write('[{0}] {1}%'.format('#' * round(complete / divider) + ' ' * round(width - (complete / divider)), complete))
+ if newline:
+ sys.stdout.write('\n')
+ sys.stdout.flush()
+ except:
+ print(complete)
+
# Break apart a file path into the directory, filename, and extension
def parseFile(self, path):
path = os.path.abspath(path)
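The extracted helper boils down to rewriting one terminal line with a carriage return. A trimmed-down, runnable version of the same idea (the 100/width arithmetic follows the diff; the demo loop is illustrative):

```python
import sys
import time

def display_progress_bar(complete, width=20, newline=False):
    filled = round(complete / (100 / width))
    # '\r' moves the cursor back to column 0 so the same line is redrawn.
    sys.stdout.write('\r[{0}] {1}%'.format('#' * filled + ' ' * (width - filled), complete))
    if newline:
        sys.stdout.write('\n')
    sys.stdout.flush()

for pct in range(0, 101, 10):
    display_progress_bar(pct)
    time.sleep(0.05)
display_progress_bar(100, newline=True)
```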
|
Add :rtype: markers to LocalVars.create* docstrings
TN: | @@ -2659,6 +2659,7 @@ class LocalVars(object):
:param str|names.Name name: The name of the variable.
:param langkit.compiled_types.CompiledType type: Type parameter. The
type of the local variable.
+ :rtype: LocalVars.LocalVar
"""
result = self.create_scopeless(name, type)
PropertyDef.get_scope().add(result)
@@ -2672,6 +2673,7 @@ class LocalVars(object):
:param str|names.Name name: The name of the variable.
:param langkit.compiled_types.CompiledType type: Type parameter. The
type of the local variable.
+ :rtype: LocalVars.LocalVar
"""
name = names.Name.get(name)
|
Change Default AoC Commands Channel
Changes the default value of the advent_of_code_commands constant to be
the same channel ID as sir-lancebot-commands. If no AoC commands channel
is set in the .env file, it'll re-direct people to sir-lancebot-commands
instead. | @@ -95,7 +95,7 @@ class Branding:
class Channels(NamedTuple):
admins = 365960823622991872
advent_of_code = int(environ.get("AOC_CHANNEL_ID", 782715290437943306))
- advent_of_code_commands = int(environ.get("AOC_COMMANDS_CHANNEL_ID", 783503267849437205))
+ advent_of_code_commands = int(environ.get("AOC_COMMANDS_CHANNEL_ID", 607247579608121354))
announcements = int(environ.get("CHANNEL_ANNOUNCEMENTS", 354619224620138496))
big_brother_logs = 468507907357409333
bot = 267659945086812160
|
doc/common/Keypad: fix name
Keypad is one word, so the class is not spelled KeyPad. | @@ -659,7 +659,7 @@ class LightMatrix:
pass
-class KeyPad:
+class Keypad:
"""Get status of buttons on a keypad layout."""
def pressed(self):
|
Remove broken Travis tests
We are working on replacing the tests carried out by Travis with GitLab | @@ -42,9 +42,9 @@ groups:
azul:
conditions:
- - "'*[Tt]ravis*' in check_runs.successful"
- "base.ref == 'master'"
- "'content' in labels"
+ # Will add a new condition for GitLab once that's set up
reviewers:
users:
- hannes-ucsc
@@ -55,9 +55,9 @@ groups:
browser:
conditions:
- - "'*[Tt]ravis*' in check_runs.successful"
- "base.ref == 'master'"
- "'content' in labels"
+ # Will add a new condition for GitLab once that's set up
reviewers:
users:
- NoopDog
@@ -68,9 +68,9 @@ groups:
data_import:
conditions:
- - "'*[Tt]ravis*' in check_runs.successful"
- "base.ref == 'master'"
- "'content' in labels"
+ # Will add a new condition for GitLab once that's set up
reviewers:
users:
- aherbst-broad
@@ -83,9 +83,9 @@ groups:
tdr:
conditions:
- - "'*[Tt]ravis*' in check_runs.successful"
- "base.ref == 'master'"
- "'content' in labels"
+ # Will add a new condition for GitLab once that's set up
reviewers:
users:
- ruchim
@@ -96,9 +96,9 @@ groups:
ingest-devs:
conditions:
- - "'*[Tt]ravis*' in check_runs.successful"
- "base.ref == 'master'"
- "'content' in labels"
+ # Will add a new condition for GitLab once that's set up
reviewers:
users:
- amnonkhen
@@ -129,9 +129,9 @@ groups:
code-review:
conditions:
- - "'*[Tt]ravis*' in check_runs.successful"
- "base.ref == 'develop'"
- "'*.py' in files or '*.js' in files or '*.yml' in files"
+ # Will add a new condition for GitLab once that's set up
reviewers:
users:
- ESapenaVentura
|
use getattr in Batch constructor
Fixes | @@ -23,7 +23,7 @@ class Batch(object):
for (name, field) in dataset.fields.items():
if field is not None:
- batch = [x.__dict__[name] for x in data]
+ batch = [getattr(x, name) for x in data]
setattr(self, name, field.process(batch, device=device, train=train))
@classmethod
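`getattr(x, name)` is the more robust lookup: unlike `x.__dict__[name]` it also resolves slotted attributes, properties and anything inherited from a parent class. A toy illustration (the `Example` class here is hypothetical, not torchtext's):

```python
class Example:
    __slots__ = ("text",)            # instances have no __dict__ at all

    def __init__(self, text):
        self.text = text

    @property
    def length(self):                # computed attribute, never stored in __dict__
        return len(self.text)

data = [Example("hello"), Example("hi")]
print([getattr(x, "text") for x in data])    # ['hello', 'hi']
print([getattr(x, "length") for x in data])  # [5, 2]
# [x.__dict__["text"] for x in data] would raise AttributeError here.
```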
|
Remove dead code
get_node_by_instance_uuid will never return None,
so the OR condition is dead code. | @@ -159,7 +159,7 @@ class NovaNotification(base.NotificationEndpoint):
try:
current_node = (
self.cluster_data_model.get_node_by_instance_uuid(
- instance.uuid) or self.get_or_create_node(node.uuid))
+ instance.uuid))
except exception.ComputeNodeNotFound as exc:
LOG.exception(exc)
# If we can't create the node,
|
Add more interchange debugging logs
These are based on practical experience debugging the interchange. | @@ -516,6 +516,7 @@ class Interchange(object):
assert 'type' in r, f"Message is missing type entry: {r}"
if r['type'] == 'result':
try:
+ logger.debug(f"Removing task {r['task_id']} from manager record {manager}")
self._ready_managers[manager]['tasks'].remove(r['task_id'])
except Exception:
# If we reach here, there's something very wrong.
@@ -529,9 +530,11 @@ class Interchange(object):
b_messages_to_send.append(b_message)
if b_messages_to_send:
+ logger.debug("Sending messages on results_outgoing")
self.results_outgoing.send_multipart(b_messages_to_send)
+ logger.debug("Sent messages on results_outgoing")
- logger.debug("Current tasks: {}".format(self._ready_managers[manager]['tasks']))
+ logger.debug(f"Current tasks on manager {manager}: {self._ready_managers[manager]['tasks']}")
if len(self._ready_managers[manager]['tasks']) == 0 and self._ready_managers[manager]['idle_since'] is None:
self._ready_managers[manager]['idle_since'] = time.time()
logger.debug("leaving results_incoming section")
@@ -540,11 +543,12 @@ class Interchange(object):
time.time() - self._ready_managers[manager]['last_heartbeat'] > self.heartbeat_threshold]
for manager in bad_managers:
logger.debug("Last: {} Current: {}".format(self._ready_managers[manager]['last_heartbeat'], time.time()))
- logger.warning("Too many heartbeats missed for manager {}".format(manager))
+ logger.warning(f"Too many heartbeats missed for manager {manager} - removing manager")
if self._ready_managers[manager]['active']:
self._ready_managers[manager]['active'] = False
self._send_monitoring_info(hub_channel, manager)
+ logger.warning(f"Raising ManagerLost for htex tasks {self._ready_managers[manager]['tasks']} on missing manager")
for tid in self._ready_managers[manager]['tasks']:
try:
raise ManagerLost(manager, self._ready_managers[manager]['hostname'])
|
Always define HPY macro when building HPy extensions
This makes it possible to share code between HPy extensions and CPython extensions
that have not been ported to HPy yet. | @@ -217,6 +217,7 @@ class build_hpy_ext_mixin:
ext.hpy_abi = self.distribution.hpy_abi
ext.include_dirs += self.hpydevel.get_extra_include_dirs()
ext.sources += self.hpydevel.get_extra_sources()
+ ext.define_macros.append(('HPY', None))
if ext.hpy_abi == 'cpython':
ext.sources += self.hpydevel.get_ctx_sources()
ext._hpy_needs_stub = False
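Appending `('HPY', None)` to `define_macros` is setuptools' way of passing `-DHPY` to the compiler, so a single C source tree can branch on `#ifdef HPY` between the HPy and plain CPython code paths. A minimal sketch of the same knob in an ordinary setup script (module and file names are made up):

```python
from setuptools import Extension, setup

ext = Extension(
    "mymodule",
    sources=["mymodule.c"],
    define_macros=[("HPY", None)],   # None means "defined with no value", i.e. -DHPY
)

setup(name="mymodule", version="0.1", ext_modules=[ext])
```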
|
STY: Fix up indentation.
[ci skip] | @@ -1109,9 +1109,7 @@ def luf(lamdaexpr, *args, **kwargs):
>>> np.compare_chararrays(a,b,">",True)
array([False,True,False])
-"""
-
- )
+ """)
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
|
Replaced uses of ansi_aware_write with appropriate wrapper functions.
Made help command format for argparse commands the same as using "command -h" | @@ -720,7 +720,7 @@ class Cmd(cmd.Cmd):
pipe_proc = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE)
pipe_proc.communicate(msg_str.encode('utf-8', 'replace'))
else:
- ansi.ansi_aware_write(self.stdout, msg_str)
+ self.poutput(msg_str, end='')
except BrokenPipeError:
# This occurs if a command's output is being piped to another process and that process closes before the
# command is finished. If you would like your application to print a warning message, then set the
@@ -2182,8 +2182,8 @@ class Cmd(cmd.Cmd):
return self.do_shell(statement.command_and_args)
else:
- err_msg = self.default_error.format(statement.command)
- ansi.ansi_aware_write(sys.stderr, "{}\n".format(err_msg))
+ # Set apply_style to False so default_error's style is not overridden
+ self.perror(self.default_error.format(statement.command), apply_style=False)
def _pseudo_raw_input(self, prompt: str) -> str:
"""Began life as a copy of cmd's cmdloop; like raw_input but
@@ -2730,12 +2730,14 @@ class Cmd(cmd.Cmd):
from .argparse_completer import AutoCompleter
completer = AutoCompleter(getattr(func, 'argparser'), self)
tokens = [args.command] + args.subcommand
- self.poutput(completer.format_help(tokens))
+
+ # Set end to blank so the help output matches how it looks when "command -h" is used
+ self.poutput(completer.format_help(tokens), end='')
# If there is no help information then print an error
elif help_func is None and (func is None or not func.__doc__):
- err_msg = self.help_error.format(args.command)
- ansi.ansi_aware_write(sys.stderr, "{}\n".format(err_msg))
+ # Set apply_style to False so help_error's style is not overridden
+ self.perror(self.help_error.format(args.command), apply_style=False)
# Otherwise delegate to cmd base class do_help()
else:
@@ -4012,15 +4014,15 @@ class Cmd(cmd.Cmd):
self.disable_command(cmd_name, message_to_print)
# noinspection PyUnusedLocal
- @staticmethod
- def _report_disabled_command_usage(*args, message_to_print: str, **kwargs) -> None:
+ def _report_disabled_command_usage(self, *args, message_to_print: str, **kwargs) -> None:
"""
Report when a disabled command has been run or had help called on it
:param args: not used
:param message_to_print: the message reporting that the command is disabled
:param kwargs: not used
"""
- ansi.ansi_aware_write(sys.stderr, "{}\n".format(message_to_print))
+ # Set apply_style to False so message_to_print's style is not overridden
+ self.perror(message_to_print, apply_style=False)
def cmdloop(self, intro: Optional[str] = None) -> int:
"""This is an outer wrapper around _cmdloop() which deals with extra features provided by cmd2.
|
setup.py: allow to override default "share/man" via environment variable
Apparently on some BSD systems man pages go to /usr/man instead of /usr/share/man.
It's too complicated to keep track of all the nuances of Linux distros so package maintainers can simply override the default via a $FONTTOOLS_MANPATH env variable
Fixes | from __future__ import print_function
import io
import sys
+import os
+from os.path import isfile, join as pjoin
+from glob import glob
from setuptools import setup, find_packages, Command
from distutils import log
+from distutils.util import convert_path
import subprocess as sp
import contextlib
@@ -259,6 +263,49 @@ class PassCommand(Command):
pass
+def find_data_files(manpath="share/man"):
+ """ Find FontTools's data_files (just man pages at this point).
+
+ By default, we install man pages to "share/man" directory relative to the
+ base installation directory for data_files. The latter can be changed with
+ the --install-data option of 'setup.py install' sub-command.
+
+ E.g., if the data files installation directory is "/usr", the default man
+ page installation directory will be "/usr/share/man".
+
+ You can override this via the $FONTTOOLS_MANPATH environment variable.
+
+ E.g., on some BSD systems man pages are installed to 'man' instead of
+ 'share/man'; you can export $FONTTOOLS_MANPATH variable just before
+ installing:
+
+ $ FONTTOOLS_MANPATH="man" pip install -v .
+ [...]
+ running install_data
+ copying Doc/man/ttx.1 -> /usr/man/man1
+
+ When installing from PyPI, for this variable to have effect you need to
+ force pip to install from the source distribution instead of the wheel
+ package (otherwise setup.py is not run), by using the --no-binary option:
+
+ $ FONTTOOLS_MANPATH="man" pip install --no-binary=fonttools fonttools
+
+ Note that you can only override the base man path, i.e. without the
+ section number (man1, man3, etc.). The latter is always implied to be 1,
+ for "general commands".
+ """
+
+ # get base installation directory for man pages
+ manpagebase = os.environ.get('FONTTOOLS_MANPATH', convert_path(manpath))
+ # all our man pages go to section 1
+ manpagedir = pjoin(manpagebase, 'man1')
+
+ manpages = [f for f in glob(pjoin('Doc', 'man', 'man1', '*.1')) if isfile(f)]
+
+ data_files = [(manpagedir, manpages)]
+ return data_files
+
+
setup(
name="fonttools",
version="3.5.0.dev0",
@@ -274,9 +321,7 @@ setup(
package_dir={'': 'Lib'},
packages=find_packages("Lib"),
include_package_data=True,
- data_files=[
- ('share/man/man1', ["Doc/ttx.1"])
- ],
+ data_files=find_data_files(),
setup_requires=pytest_runner + wheel + bumpversion,
tests_require=[
'pytest>=2.8',
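The mechanism is simply "environment variable wins, otherwise the packaged default". A tiny standalone version of that lookup (function name is illustrative):

```python
import os
from os.path import join as pjoin

def man_install_dir(default_base="share/man", section="man1"):
    # $FONTTOOLS_MANPATH overrides only the base; the section stays man1.
    base = os.environ.get("FONTTOOLS_MANPATH", default_base)
    return pjoin(base, section)

print(man_install_dir())                      # share/man/man1
os.environ["FONTTOOLS_MANPATH"] = "man"
print(man_install_dir())                      # man/man1
```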
|
Update ftcode.txt
Making generic trails harder due to info | @@ -26,7 +26,12 @@ qvo5sd7p5yazwbrgioky7rdu4vslxrcaeruhjr7ztn3t2pihp56ewlqd.onion
m1-systems.xyz
+# Reference: https://twitter.com/reecdeep/status/1179672368958058496
+
+home.goteamrob.com
+home.isdes.com
+
# Generic trails
-/?need=6ff4040&vid=docit1
-/?need=9f5b9ee&vid=docit1
+/?need=6ff4040
+/?need=9f5b9ee
|
[IMPR] Improvement for argument handling
call handle_args before local_args processing
use arg.partition(':') instead of looping over a generator with one element
rename args list to templates and do not redefine main parameter list | @@ -742,37 +742,35 @@ def main(*args):
salt = ''
force = False
calc = None
- args = []
-
- def if_arg_value(arg, name):
- if arg.startswith(name):
- yield arg[len(name) + 1:]
-
- for arg in pywikibot.handle_args(args):
- for v in if_arg_value(arg, '-file'):
- filename = v
- for v in if_arg_value(arg, '-locale'):
+ templates = []
+
+ local_args = pywikibot.handle_args(args)
+ for arg in local_args:
+ option, _, value = arg.partition(':')
+ if not option.startswith('-'):
+ templates.append(arg)
+ continue
+ option = option[1:]
+ if option in ('file', 'filename'):
+ filename = value
+ elif option == 'locale':
# Required for english month names
- locale.setlocale(locale.LC_TIME, v.encode('utf8'))
- for v in if_arg_value(arg, '-timezone'):
- os.environ['TZ'] = v.timezone
+ locale.setlocale(locale.LC_TIME, value.encode('utf8'))
+ elif option == 'timezone':
+ os.environ['TZ'] = value.timezone
# Or use the preset value
if hasattr(time, 'tzset'):
time.tzset()
- for v in if_arg_value(arg, '-calc'):
- calc = v
- for v in if_arg_value(arg, '-salt'):
- salt = v
- for v in if_arg_value(arg, '-force'):
+ elif option == 'calc':
+ calc = value
+ elif option == 'salt':
+ salt = value
+ elif option == 'force':
force = True
- for v in if_arg_value(arg, '-filename'):
- filename = v
- for v in if_arg_value(arg, '-page'):
- pagename = v
- for v in if_arg_value(arg, '-namespace'):
- namespace = v
- if not arg.startswith('-'):
- args.append(arg)
+ elif option == 'page':
+ pagename = value
+ elif option == 'namespace':
+ namespace = value
site = pywikibot.Site()
@@ -790,12 +788,12 @@ def main(*args):
pywikibot.output('key = %s' % calc_md5_hexdigest(calc, salt))
return
- if not args:
+ if not templates:
pywikibot.bot.suggest_help(
additional_text='No template was specified.')
return False
- for a in args:
+ for a in templates:
pagelist = []
a = pywikibot.Page(site, a, ns=10).title()
if not filename and not pagename:
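`str.partition(':')` always returns a 3-tuple, so every `-option:value` argument can be split in one step instead of looping over a one-element generator. A condensed sketch of the new parsing loop (the dictionary output is illustrative; the real bot assigns to individual variables):

```python
def parse_local_args(local_args):
    options, templates = {}, []
    for arg in local_args:
        option, _, value = arg.partition(':')
        if not option.startswith('-'):
            templates.append(arg)
            continue
        options[option[1:]] = value or True
    return options, templates

print(parse_local_args(['-file:pages.txt', '-force', 'Template:Done', 'Infobox']))
# ({'file': 'pages.txt', 'force': True}, ['Template:Done', 'Infobox'])
```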
|
Update eye_tracking_settings.py
Typo | # if app.platform == 'mac':
# eye_zoom_mouse.config.screen_area = Point2d(100, 75)
# eye_zoom_mouse.config.img_scale = 6
-# elif app.platformh == 'win':
+# elif app.platform == 'win':
# eye_zoom_mouse.config.screen_area = Point2d(200, 150)
# eye_zoom_mouse.config.img_scale = 4.5
|
RemoteMemoryBlock: close a process handle on cleanup
HG--
branch : issue_290 | @@ -149,10 +149,10 @@ class RemoteMemoryBlock(object):
last_error = win32api.GetLastError()
print('LastError = ', last_error, ': ', win32api.FormatMessage(last_error).rstrip())
sys.stdout.flush()
- #self._CloseHandle()
+ self._CloseHandle()
raise ctypes.WinError()
self.memAddress = 0
- #self._CloseHandle()
+ self._CloseHandle()
else:
pass #ActionLogger().log('\nWARNING: Cannot call VirtualFreeEx! process_id == 0.')
|
set warning for first local minimum in `complexity_delay`
pushing to dev because already broke it anw ^^ | @@ -202,9 +202,17 @@ def _embedding_delay_select(metric_values, algorithm="first local minimum"):
)["Peaks"]
elif algorithm == "first local minimum":
# Find reversed peaks
+ try:
optimal = signal_findpeaks(-1 * metric_values, relative_height_min=0.1, relative_max=True)[
"Peaks"
]
+ except ValueError:
+ warn(
+ "First local minimum detection failed. Try setting " +
+ "`algorithm = 'first local minimum (corrected)'` or using another method.",
+ category=NeuroKitWarning,
+ )
+
elif algorithm == "first 1/e crossing":
metric_values = metric_values - 1 / np.exp(1)
optimal = signal_zerocrossings(metric_values)
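Catching the failure and emitting a warning keeps the function usable on signals that simply have no local minimum (e.g. a monotonic autocorrelation curve). A self-contained miniature of the same behaviour, using the standard warnings module rather than NeuroKit's own warning class:

```python
import warnings

def first_local_minimum(values):
    for i in range(1, len(values) - 1):
        if values[i - 1] > values[i] < values[i + 1]:
            return i
    warnings.warn("First local minimum detection failed; "
                  "try another delay-selection algorithm.", RuntimeWarning)
    return None

print(first_local_minimum([3, 2, 1, 2, 3]))   # 2
print(first_local_minimum([5, 4, 3, 2, 1]))   # warns, prints None
```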
|
Add method SEARCH to allowed method
I found method SEARCH used in Photos | @@ -199,7 +199,7 @@ SecRule REQUEST_FILENAME "@rx /(?:remote|index|public)\.php/" \
t:none,\
nolog,\
ver:'OWASP_CRS/3.3.0',\
- setvar:'tx.allowed_methods=%{tx.allowed_methods} PUT PATCH CHECKOUT COPY DELETE LOCK MERGE MKACTIVITY MKCOL MOVE PROPFIND PROPPATCH UNLOCK REPORT TRACE jsonp'"
+ setvar:'tx.allowed_methods=%{tx.allowed_methods} PUT PATCH CHECKOUT COPY DELETE LOCK MERGE MKACTIVITY MKCOL MOVE PROPFIND PROPPATCH SEARCH UNLOCK REPORT TRACE jsonp'"
# We need to allow DAV methods for sharing files, and removing shares
|
Don't exec `celery`
If we do, flower is killed | @@ -13,4 +13,4 @@ fi
sleep 10
echo "==> $(date +%H:%M:%S) ==> Running Celery beat <=="
celery -C -A config.celery_app flower &
-exec celery -C -A config.celery_app beat -S django_celery_beat.schedulers:DatabaseScheduler --loglevel $log_level
+celery -C -A config.celery_app beat -S django_celery_beat.schedulers:DatabaseScheduler --loglevel $log_level
|
fix: Don't add currency field if it does not exist
closes | @@ -595,7 +595,9 @@ frappe.views.ReportView = class ReportView extends frappe.views.ListView {
add_currency_column(fieldname, doctype, col_index) {
// Adds dependent currency field if required
const df = frappe.meta.get_docfield(doctype, fieldname);
- if (df && df.fieldtype === 'Currency' && df.options && !df.options.includes(':')) {
+ if (df && df.fieldtype === 'Currency' && df.options &&
+ !df.options.includes(':') && frappe.meta.has_field(doctype, df.options)
+ ) {
const field = [df.options, doctype];
if (col_index === undefined) {
this.fields.push(field);
|
Project quota validation
Before project creation, the budget quota is validated | @@ -125,6 +125,7 @@ public class ProjectServiceImpl implements ProjectService {
return projectDAO.getProjectsByEndpoint(endpointName);
}
+ @BudgetLimited
@Override
public void create(UserInfo user, ProjectDTO projectDTO, String projectName) {
if (!projectDAO.get(projectDTO.getName()).isPresent()) {
|
[S1.OSV.catch] fixed bug in stopping search if no files found on page 1
the number of pages is now read directly from the URL return of page 1
and those pages are then searched | @@ -155,6 +155,8 @@ class OSV(object):
address, outdir = self._typeEvaluate(osvtype)
# a dictionary for storing the url arguments
query = {'page': 1}
+ # a list of pages to be searched; will be extended during url readout
+ pages = [1]
if sensor in ['S1A', 'S1B']:
query['sentinel1__mission'] = sensor
@@ -189,7 +191,7 @@ class OSV(object):
query['validity_start'] = '{0}..{1}'.format(date_start, date_stop)
print('searching for new {} files'.format(osvtype))
# iterate through the url pages and look for files
- while True:
+ while len(pages) > 0:
# parse the url
subaddress = urlQueryParser(address, query)
# read the remote content
@@ -198,6 +200,13 @@ class OSV(object):
print(subaddress)
except IOError as e:
raise RuntimeError(e)
+ if query['page'] == 1:
+ # read all existing pages from the url return of the first page
+ pages_str = re.findall('page=[0-9]+', response)
+ pages = list(set([int(x.strip('page=')) for x in pages_str]))
+ else:
+ # delete the page from the list of pages yet to be searched
+ del pages[pages.index(query['page'])]
# list all osv files found on the page
remotes = sorted(set(re.findall(pattern_url, response)))
# do a more accurate filtering of the time stamps
@@ -207,12 +216,10 @@ class OSV(object):
remotes = [x for x in remotes if self.date(x, 'start') <= stop]
# filter files already existing in the files collection
selection = [x for x in remotes if x not in files]
- # stop the loop if no more files are found on the current url page
- if len(selection) == 0:
- break
- else:
- # append the found files to the collection and increment the url page
+ if len(selection) >= 0:
+ # append the found files to the collection
files += selection
+ # increment the url page
query['page'] += 1
# in case the type 'RES' is selected then only return those files covering
# a time period not covered by any POE file
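The fix replaces "stop at the first empty page" with "visit exactly the pages advertised by page 1". A toy reimplementation of that loop against a stubbed fetch function (regexes and file names are invented for the demo):

```python
import re

def collect(fetch_page):
    pages = [1]                    # pages still to be searched
    page, found = 1, []
    while pages:
        html = fetch_page(page)
        if page == 1:
            # read every existing page number from the pager links on page 1
            pages = sorted({int(m.split('=')[1]) for m in re.findall(r'page=\d+', html)}) or [1]
        if page in pages:
            pages.remove(page)     # this page no longer needs to be searched
        found += re.findall(r'href="(\S+\.EOF)"', html)
        page += 1
    return found

def fake_fetch(n):                 # page 1 advertises three pages, each lists one file
    pager = 'page=1 page=2 page=3' if n == 1 else ''
    return '{} href="orbit_{}.EOF"'.format(pager, n)

print(collect(fake_fetch))         # ['orbit_1.EOF', 'orbit_2.EOF', 'orbit_3.EOF']
```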
|
docs: Add communication guidance to mentor guide.
Also link to GSoC's mentor guide. | @@ -14,6 +14,10 @@ and help make sure that everything is on track. You are also expected to help
program administrators keep an eye on your mentee's progress, and flag any
concerns you might have.
+Mentors can refer to the excellent [GSoC Mentor
+Guide](https://google.github.io/gsocguides/mentor/) for detailed guidance,
+including GSoC's expectations, and best practices that can apply to any program.
+
## Who can mentor
In order to mentor with Zulip, you need to have spent significant time working
@@ -74,6 +78,24 @@ is a good chance others would also find it confusing. Asking your
mentee for an explanation will itself be valuable feedback on what
parts of the PR need to be commented or rewritten in a more clear way.
+### Establishing communication patterns
+
+Over the course of the program, you will interact with your mentee both
+synchronously (e.g., on calls, pair programming, and/or via chat in the
+development community), and asynchronously (in the development community and on
+GitHub).
+
+It's important to ensure that there are regular opportunities for synchronous
+interactions, which are the best way to resolve some types of questions and
+concerns, and will help your mentee feel supported.
+
+Please schedule at least one synchronous interaction opportunity with your
+mentee per week. A weekly call is highly recommended, as calls can be especially
+helpful for getting a feel for how your mentee is doing, and lowering the bar
+for asking questions and sharing concerns. A good pattern might be a weekly
+call, plus 1-2 other regular time slots when you and your mentee check in in the
+development community.
+
## Managing challenges
We rely on you to proactively flag your concerns to Zulip's program
|
[tune] Remove _pause and related method in RayTrialExecutor.
Removes legacy code path that is never invoked | @@ -22,7 +22,7 @@ from ray.actor import ActorHandle
from ray.exceptions import GetTimeoutError
from ray import ray_constants
from ray._private.resource_spec import NODE_ID_PREFIX
-from ray.tune.error import AbortTrialExecution, TuneError
+from ray.tune.error import AbortTrialExecution
from ray.tune.logger import NoopLogger
from ray.tune.result import TRIAL_INFO, STDOUT_FILE, STDERR_FILE
from ray.tune.resources import Resources
@@ -175,10 +175,6 @@ class RayTrialExecutor(TrialExecutor):
wait_for_placement_group: Optional[float] = None):
super(RayTrialExecutor, self).__init__()
self._running = {}
- # Since trial resume after paused should not run
- # trial.train.remote(), thus no more new remote object ref generated.
- # We use self._paused to store paused trials here.
- self._paused = {}
force_trial_cleanup = int(
os.environ.get("TUNE_FORCE_TRIAL_CLEANUP_S", "0"))
@@ -408,12 +404,6 @@ class RayTrialExecutor(TrialExecutor):
def _train(self, trial):
"""Start one iteration of training and save remote id."""
- if self._find_item(self._paused, trial):
- raise TuneError(
- "Should not call `train` on PAUSED trial {}. "
- "This is an internal error - please file an issue "
- "on https://github.com/ray-project/ray/issues/.".format(
- str(trial)))
if self._find_item(self._running, trial):
logging.debug(
@@ -481,7 +471,6 @@ class RayTrialExecutor(TrialExecutor):
See `RayTrialExecutor.restore` for possible errors raised.
"""
- prior_status = trial.status
self.set_status(trial, Trial.PENDING)
runner = self._setup_remote_runner(trial)
if not runner:
@@ -494,12 +483,7 @@ class RayTrialExecutor(TrialExecutor):
if trial in self._staged_trials:
self._staged_trials.remove(trial)
- previous_run = self._find_item(self._paused, trial)
- if prior_status == Trial.PAUSED and previous_run:
- # If Trial was in flight when paused, self._paused stores result.
- self._paused.pop(previous_run[0])
- self._running[previous_run[0]] = trial
- elif train and not trial.is_restoring:
+ if train and not trial.is_restoring:
self._train(trial)
return True
@@ -652,17 +636,6 @@ class RayTrialExecutor(TrialExecutor):
"""Continues the training of this trial."""
self._train(trial)
- def pause_trial(self, trial: Trial) -> None:
- """Pauses the trial.
-
- If trial is in-flight, preserves return value in separate queue
- before pausing, which is restored when Trial is resumed.
- """
- trial_future = self._find_item(self._running, trial)
- if trial_future:
- self._paused[trial_future[0]] = trial
- super(RayTrialExecutor, self).pause_trial(trial)
-
def reset_trial(self,
trial: Trial,
new_config: Dict,
|
gen2-social-distancing: use latest OpenVINO,
remove `setOpenVINOVersion`, as the version was not specified to blobconverter | @@ -15,7 +15,7 @@ class DepthAI:
log.info("Creating DepthAI pipeline...")
pipeline = dai.Pipeline()
- pipeline.setOpenVINOVersion(dai.OpenVINO.Version.VERSION_2021_2)
+ #pipeline.setOpenVINOVersion(dai.OpenVINO.Version.VERSION_2021_2)
# Define sources and outputs
camRgb = pipeline.createColorCamera()
|
Update extensions.py
comments may help | @@ -7,8 +7,10 @@ from flask_sqlalchemy import SQLAlchemy as _BaseSQLAlchemy
class SQLAlchemy(_BaseSQLAlchemy):
-
def apply_pool_defaults(self, app, options):
+ """
+ Set default engine options. We enable `pool_pre_ping` to be the default value.
+ """
options = super().apply_pool_defaults(app, options)
options["pool_pre_ping"] = True
return options
|
Update developing.rst
Fixing a small typo I noticed. | @@ -40,7 +40,7 @@ example::
self.message = message
def run(self, fileStore):
- return "Hello, world!, here's a message: %s" % self.message
+ return "Hello, world! Here's a message: %s" % self.message
In the example a class, HelloWorld, is defined. The constructor requests 2
|
[flake8] Fix C407 flake8 issue
use a generator instead a list inside filter | @@ -154,8 +154,8 @@ class MWSite(object):
self.version = list(filter(
lambda x: x.startswith('MediaWiki'),
- [l.strip()
- for l in d['error']['*'].split('\n')]))[0].split()[1]
+ (l.strip()
+ for l in d['error']['*'].split('\n'))))[0].split()[1]
except Exception:
pass
else:
|
Update molecule2d app.
Add back selected atom IDs callback and switch back to modifying model data directly. | @@ -55,8 +55,6 @@ residue = {
DATAPATH = os.path.join(".", "tests", "dashbio_demos", "sample_data", "mol2d_")
-residue = read_structure(file_path='{}aspirin.json'.format(DATAPATH))
-
def header_colors():
return {}
@@ -88,17 +86,18 @@ def layout():
def callbacks(app):
@app.callback(
- Output('mol2d-container', 'children'),
+ Output('sel-atoms-output', 'children'),
+ [Input('mol2d', 'selectedAtomIds')]
+ )
+ def show_selected(ids):
+ return str(ids)
+
+ @app.callback(
+ Output('mol2d', 'modelData'),
[Input('mol-dropdown', 'value')]
)
def change_molecule(molfile):
- ok = read_structure(file_path=molfile)
- for atm in ok['nodes']:
- print(atm)
- print()
- for bnd in ok['links']:
- print(bnd)
- return dash_bio.Molecule2dViewer(id='mol2d', modelData=ok)
+ return read_structure(file_path=molfile)
# only declare app/server if the file is being run directly
|
OpenColorIOTransformUI : Fix potential issue with colorspace presets
Sorting after the transformation to names could potentially yield an order that was different to that used for the values. Also reformatted for clarity. | @@ -44,12 +44,19 @@ import GafferImage
def colorSpacePresetNames( plug ) :
- return IECore.StringVectorData( [ "None" ] + sorted( map( lambda x: "Roles/{0}".format( x.replace( "_", " ").title() ), GafferImage.OpenColorIOTransform.availableRoles() ) ) + sorted( GafferImage.OpenColorIOTransform.availableColorSpaces() ) )
-
+ return IECore.StringVectorData(
+ [ "None" ] +
+ [ "Roles/{0}".format( x.replace( "_", " ").title() ) for x in sorted( GafferImage.OpenColorIOTransform.availableRoles() ) ] +
+ sorted( GafferImage.OpenColorIOTransform.availableColorSpaces() )
+ )
def colorSpacePresetValues( plug ) :
- return IECore.StringVectorData( [ "" ] + sorted( GafferImage.OpenColorIOTransform.availableRoles() ) + sorted( GafferImage.OpenColorIOTransform.availableColorSpaces() ) )
+ return IECore.StringVectorData(
+ [ "" ] +
+ sorted( GafferImage.OpenColorIOTransform.availableRoles() ) +
+ sorted( GafferImage.OpenColorIOTransform.availableColorSpaces() )
+ )
Gaffer.Metadata.registerNode(
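The hazard the commit message describes is easy to reproduce: formatting the names (title-casing, replacing underscores) can change their sort order relative to the raw values, so sorting the two lists independently mis-pairs presets. A small demonstration with made-up role names:

```python
roles = ["data", "Default"]   # hypothetical roles whose case differs

# Before: names and values sorted independently -> indices no longer line up.
names = sorted(r.replace("_", " ").title() for r in roles)
values = sorted(roles)
print(list(zip(names, values)))   # [('Data', 'Default'), ('Default', 'data')]  mis-paired

# After: sort the raw values once, then derive the display names from that order.
values = sorted(roles)
names = ["Roles/" + v.replace("_", " ").title() for v in values]
print(list(zip(names, values)))   # [('Roles/Default', 'Default'), ('Roles/Data', 'data')]
```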
|
re-enable hetr gpu test
working locally for me now, not sure what changed | @@ -252,10 +252,9 @@ def test_simple_graph():
def test_gpu_send_and_recv():
- pytest.skip("error loading GPU driver in child process")
# First check whether do we have gputransformer available, if not, xfail
if 'gpu' not in transformer_choices():
- pytest.xfail("GPUTransformer not available")
+ pytest.skip("GPUTransformer not available")
# put x+1 on cpu numpy
with ng.metadata(device='numpy'):
|
changelog: [IMP] add BREAKING syntax
Should result in something like:
```
changelog:
- [IMP] add BREAKING syntax
BREAKING: Line1 ...
Line 2 ...
Line 3 ...
``` | @@ -59,6 +59,11 @@ class GsGenerateChangeLogCommand(WindowCommand, GitCommand):
contributors.add(entry.author)
if entry.long_hash in ancestor:
messages.append("{} (Merge {})".format(entry.summary, ancestor[entry.long_hash]))
+ elif entry.raw_body.find('BREAKING:'):
+ pos_start = entry.raw_body.find('BREAKING:')
+ key_length = len('BREAKING:')
+ indented_sub_msg = ('\n\t\t' + ' ' * key_length + ' ').join(entry.raw_body[pos_start:].split('\n'))
+ messages.append("{}\n\t\t{})".format(entry.summary, indented_sub_msg))
else:
messages.append(entry.summary)
|