Columns: message (string, length 13-484), diff (string, length 38-4.63k)
Update channel-settings.rst Fixed a typo and added a note on how to add markdown links to channel headers
@@ -26,7 +26,7 @@ Mark Channel Unread By default, channel names are bolded for all new messages in a channel. To only bold the channel name when you are mentioned, open the channel menu and click -**Notification Preferences > Mark Channel Unread > Only for mention**. +**Notification Preferences > Mark Channel Unread > Only for mentions**. Channel Header @@ -38,6 +38,14 @@ the channel topic or provide links to frequently accessed documents. Any channel member can edit this setting, unless the System Administrator has `restricted the permissions <https://docs.mattermost.com/administration/config-settings.html#enable-public-channel-renaming-for>`__. +Adding links to the Channel Header +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Add links to the channel header using markdown. + +Example: [This is an example link](www.example.com) + + Channel Purpose ---------------
Support TypeIdentifier::name() Summary: Pull Request resolved: Sometimes you have a TypeIdentifier, and no way to get to the TypeMeta. Still nice to be able to read out the name. This should be obsoleted by smessmer's patches.
@@ -61,6 +61,8 @@ class AT_CORE_API TypeIdentifier final : public at::IdWrapper<TypeIdentifier, ui return TypeIdentifier(11); } + const char* name() const noexcept; + private: constexpr explicit TypeIdentifier(uint16_t id) : IdWrapper(id) {} friend class TypeMeta; @@ -91,6 +93,11 @@ namespace caffe2 { AT_CORE_API std::unordered_map<TypeIdentifier, std::string>& gTypeNames(); AT_CORE_API std::unordered_set<std::string>& gRegisteredTypeNames(); +inline const char* TypeIdentifier::name() const noexcept { + auto it = gTypeNames().find(*this); + assert(it != gTypeNames().end()); + return it->second.c_str(); +} AT_CORE_API std::mutex& gTypeRegistrationMutex();
Update README.md Add blurb about results csv
@@ -60,6 +60,10 @@ Several (less common) features that I often utilize in my projects are included. * Mixup (as in https://arxiv.org/abs/1710.09412) - currently implementing/testing * An inference script that dumps output to CSV is provided as an example +## Results + +A CSV file containing an ImageNet-1K validation results summary for all included models with pretrained weights and default configurations is located [here](results/results-all.csv) + ### Self-trained Weights I've leveraged the training scripts in this repository to train a few of the models with missing weights to good levels of performance. These numbers are all for 224x224 training and validation image sizing with the usual 87.5% validation crop.
chore(test): Update API usages Striving for better readability * Use frappe.db.delete instead of frappe.db.sql * Use named kwargs instead of positional
@@ -24,7 +24,7 @@ emails = [ class TestNewsletter(unittest.TestCase): def setUp(self): frappe.set_user("Administrator") - frappe.db.sql("delete from `tabEmail Group Member`") + frappe.db.delete("Email Group Member") if not frappe.db.exists("Email Group", "_Test Email Group"): frappe.get_doc({"doctype": "Email Group", "title": "_Test Email Group"}).insert() @@ -66,9 +66,10 @@ class TestNewsletter(unittest.TestCase): @staticmethod def send_newsletter(published=0, schedule_send=None): - frappe.db.sql("delete from `tabEmail Queue`") - frappe.db.sql("delete from `tabEmail Queue Recipient`") - frappe.db.sql("delete from `tabNewsletter`") + frappe.db.delete("Email Queue") + frappe.db.delete("Email Queue Recipient") + frappe.db.delete("Newsletter") + newsletter = frappe.get_doc({ "doctype": "Newsletter", "subject": "_Test Newsletter", @@ -78,26 +79,26 @@ class TestNewsletter(unittest.TestCase): "published": published, "schedule_sending": bool(schedule_send), "schedule_send": schedule_send - }).insert(ignore_permissions=True) - + }) + newsletter.insert(ignore_permissions=True) newsletter.append("email_group", {"email_group": "_Test Email Group"}) newsletter.save() + if schedule_send: send_scheduled_email() - return - + else: newsletter.send_emails() return newsletter.name def test_portal(self): - self.send_newsletter(1) + self.send_newsletter(published=1) frappe.set_user("[email protected]") newsletters = get_newsletter_list("Newsletter", None, None, 0) self.assertEqual(len(newsletters), 1) def test_newsletter_context(self): context = frappe._dict() - newsletter_name = self.send_newsletter(1) + newsletter_name = self.send_newsletter(published=1) frappe.set_user("[email protected]") doc = frappe.get_doc("Newsletter", newsletter_name) doc.get_context(context)
Fix default cuda version in prebuild.sh for arm64 Authors: - Jordan Jacobelli (https://github.com/Ethyling) Approvers: - AJ Schmidt (https://github.com/ajschmidt8) URL:
#!/usr/bin/env bash +ARCH=$(arch) +if [ "${ARCH}" = "x86_64" ]; then + DEFAULT_CUDA_VER="11.0" +elif [ "${ARCH}" = "aarch64" ]; then + DEFAULT_CUDA_VER="11.2" +else + echo "Unsupported arch ${ARCH}" + exit 1 +fi + #Upload cuspatial once per PYTHON -if [[ "$CUDA" == "11.0" ]]; then +if [[ "$CUDA" == "${DEFAULT_CUDA_VER}" ]]; then export UPLOAD_CUSPATIAL=1 else export UPLOAD_CUSPATIAL=0
tables.tableextension.Table._convert_time64_() divides by zero when it's called with an empty nparr array. This happens because the __next_indexed() method tries to convert attributes even if the array is empty. Fixes
@@ -956,6 +956,7 @@ cdef class Row: # Evaluate the condition on this table fragment. iobuf = iobuf[:recout] + if len(iobuf) > 0: self.table._convert_types(iobuf, len(iobuf), 1) self.indexvalid = call_on_recarr( self.condfunc, self.condargs, iobuf, **self.condkwargs)
Fix method name for db_head_state The actual method name is 'hive.db_head_state'
@@ -45,7 +45,7 @@ $ hive server ``` ```bash -$ curl --data '{"jsonrpc":"2.0","id":0,"method":"db_head_state"}' http://localhost:8080 +$ curl --data '{"jsonrpc":"2.0","id":0,"method":"hive.db_head_state"}' http://localhost:8080 {"jsonrpc": "2.0", "result": {"db_head_block": 19930795, "db_head_time": "2018-02-16 21:35:42", "db_head_age": 10}, "id": 0} ```
Refactor: move tcc function (tcc_pos) to tcc.py. sent_tokenize() and subword_tokenize() must return something. Replace while loops with for loops in isthai() and syllable_tokenize() (faster)
@@ -62,6 +62,15 @@ def tcc_gen(w): p += n +def tcc_pos(text): + p_set = set() + p = 0 + for w in tcc_gen(text): + p += len(w) + p_set.add(p) + return p_set + + def tcc(w, sep="/"): return sep.join(tcc_gen(w))
add rule for clean * add rule for clean * Update clean rule Seems like the lib/ directory is not made by the makefile, so don't delete the directory, just its contents.
@@ -49,3 +49,5 @@ lib/cpp_deploy_pack: cpp_deploy.cc lib/test_addone_sys.o lib/libtvm_runtime_pack lib/cpp_deploy_normal: cpp_deploy.cc lib/test_addone_sys.o @mkdir -p $(@D) $(CXX) $(PKG_CFLAGS) -o $@ $^ -ltvm_runtime $(PKG_LDFLAGS) +clean: + rm lib/libtvm_runtime_pack.o lib/test_addone_sys.o lib/cpp_deploy_pack lib/cpp_deploy_normal
Update generic.txt Connected with . Currently nameless malware family.
@@ -9374,7 +9374,14 @@ lxj.vvn.mybluehost.me # Reference: https://app.any.run/tasks/5279381c-b255-482a-ae64-02ed6177bc12/ -http://savannahhoney.co.ke/wp-content/uploads/ +savannahhoney.co.ke/wp-content/uploads/ + +# Reference: https://github.com/silence-is-best/c2db#unknowns + +103.136.43.131:9998 +185.222.202.29:9998 +nicholaspring.xyz +smartwaay.xyz # Generic
make fix-flake8: parallelize autoflake8 execution ...by pip-installing a PR I provided for the autoflake package, which adds a jobs option to the tool, see
@@ -9,7 +9,7 @@ TSCRIPT = psutil/tests/runner.py # Internal. DEPS = \ - autoflake \ + git+https://github.com/PyCQA/autoflake.git@refs/pull/107/head \ autopep8 \ check-manifest \ concurrencytest \ @@ -213,7 +213,7 @@ lint-all: ## Run all linters fix-flake8: ## Run autopep8, fix some Python flake8 / pep8 issues. @git ls-files '*.py' | xargs $(PYTHON) -m autopep8 --in-place --jobs 0 --global-config=.flake8 - @git ls-files '*.py' | xargs $(PYTHON) -m autoflake --in-place --remove-all-unused-imports --remove-unused-variables + @git ls-files '*.py' | xargs $(PYTHON) -m autoflake --in-place --jobs 0 --remove-all-unused-imports --remove-unused-variables --remove-duplicate-keys fix-imports: ## Fix imports with isort. @git ls-files '*.py' | xargs $(PYTHON) -m isort --settings=.isort.cfg
MAINT: Decrease merge conflicts in release notes Merge conflicts due to the release notes have been an annoying problem requiring a rebase of otherwise good PRs. The solution here is to use the --union option when merging to those files. Closes
+# Line endings for Windows scripts * text=auto tools/win32build/nsis_scripts/*.nsi.in eol=crlf # Numerical data files numpy/lib/tests/data/*.npy binary + +# Release notes, reduce number of conflicts. +doc/release/*.rst merge=union
Links to CSV/TSV/JSON data prep The links to data preparation and to sample usage of CSV/TSV(/JSON) are not explicit in the project master README. Added them in.
@@ -12,7 +12,7 @@ KGX allows conversion to and from: * RDF serializations (read/write) and SPARQL endpoints (read) * Neo4J endpoints (read) or Neo4J dumps (write) - * CSV/TSV + * CSV/TSV and JSON (see [associated data formats](./data-preparation.md) and [example script to load CSV/TSV to Neo4j](./examples/scripts/load_csv_to_neo4j.py)) * Any format supported by networkx
llvm, tests/composition: Enable LLVMRun for multi-stimulus tests Check result values in test_transfer_mechanism_composition
@@ -1050,7 +1050,8 @@ class TestExecutionOrder: @pytest.mark.benchmark(group="Transfer") @pytest.mark.parametrize("mode", ['Python', pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('LLVMExec', marks=pytest.mark.llvm)]) + pytest.param('LLVMExec', marks=pytest.mark.llvm), + pytest.param('LLVMRun', marks=pytest.mark.llvm)]) def test_transfer_mechanism(self, benchmark, mode): # mechanisms @@ -1072,7 +1073,8 @@ class TestExecutionOrder: @pytest.mark.benchmark(group="Transfer") @pytest.mark.parametrize("mode", ['Python', pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('LLVMExec', marks=pytest.mark.llvm)]) + pytest.param('LLVMExec', marks=pytest.mark.llvm), + pytest.param('LLVMRun', marks=pytest.mark.llvm)]) def test_transfer_mechanism_split(self, benchmark, mode): # mechanisms @@ -3358,6 +3360,7 @@ class TestNestedCompositions: comp2.run(inputs={C: [1.0, 2.0, 3.0]}, bin_execute=mode) assert np.allclose(comp1.results, comp2.results) + assert np.allclose(comp2.results, [[[0.52497918747894]], [[0.5719961329315186]], [[0.6366838893983633]]]) # Does not work yet due to initial_values bug that causes first recurrent projection to pass different values # to TranfserMechanism version vs Logistic fn + AdaptiveIntegrator fn version
MAINT: removed extra newline Removed extra newline printed when there is no extra data.
@@ -495,7 +495,9 @@ def fmt_output_in_cols(out_strs, ncols=3, max_num=6, lpad=None): output += '\n' # Print out remaining variables one at a time on a single line - for i in range(sel_len - ncols * num): + extra_cols = sel_len - ncols * num + if extra_cols > 0: + for i in range(extra_cols): if middle >= 0: if i == 0 and num > 0: output += "...".center(lpad * ncols) + '\n'
Standalone: Smoother code for detecting frozen module paths * Using hasattr has no point, esp. as None for "__file__" does happen, so that code is more readable then.
@@ -233,11 +233,12 @@ def _detectImports(command, user_provided, technical): # Print statements for stuff to show, the modules loaded. if python_version >= 0x300: - command += ( - '\nprint("\\n".join(sorted("import " + module.__name__ + " # sourcefile " + ' - 'module.__file__ for module in sys.modules.values() if hasattr(module, "__file__") and ' - 'module.__file__ not in (None, "<frozen>"))), file = sys.stderr)' - ) # do not read it + command += """ +print("\\n".join(sorted( + "import %s # sourcefile %s" % (module.__name__, module.__file__) + for module in sys.modules.values() + if getattr(module, "__file__", None) not in (None, "<frozen>" +))), file = sys.stderr)""" reduced_path = [ path_element
[commands] Fix lambda converters in non-module contexts. Not sure why anyone would do this but might as well fix it.
@@ -325,7 +325,7 @@ class Command(_BaseCommand): except AttributeError: pass else: - if module.startswith('discord.') and not module.endswith('converter'): + if module is not None and (module.startswith('discord.') and not module.endswith('converter')): converter = getattr(converters, converter.__name__ + 'Converter') try:
emulating dockerspawner approach ref: populating servername into pod_name_template. Somehow the pre-existing approach of return(template.format(...)) in _expand_user_properties was failing to set a servername that could get captured in pod_name_template. see:
@@ -588,18 +588,14 @@ class KubeSpawner(Spawner): # Set servername based on whether named-server initialised temp_name = getattr(self, 'name', '') if temp_name: - servername = '-' + temp_name + server_name = '-' + temp_name else: - servername = '' + server_name = '' legacy_escaped_username = ''.join([s if s in safe_chars else '-' for s in self.user.name.lower()]) safe_username = escapism.escape(self.user.name, safe=safe_chars, escape_char='-').lower() - return template.format( - userid=self.user.id, - username=safe_username, - legacy_escape_username=legacy_escaped_username, - servername=servername - ) + d = {'username': safe_username, 'servername': server_name} + return self.pod_name_template.format(**d) def _expand_all(self, src): if isinstance(src, list): @@ -611,18 +607,6 @@ class KubeSpawner(Spawner): else: return src - # def determine_servername(self): - # """ - # Determine if server being spawned is of type 'default' or 'named-server'. - # From an API perspective, calling POST '/users/:user/server' results in a default server. - # Calling POST '/users/:user/servers/:server_name' results in a named-server - # In the case of the latter, the servername should get integrated into pod and pvc names to ensure uniqueness. - # """ - # if getattr(self, 'name', None) is None: - # return getattr(self, 'user.name') - # else: - # return getattr(self, 'name') - @gen.coroutine def get_pod_manifest(self): """
While sampling in `env_problem_utils.py` cast up to `np.float64` This seems to fix things.
@@ -112,19 +112,12 @@ def play_env_problem_with_policy(env, # Convert to probs, since we need to do categorical sampling. probs = np.exp(log_probs) - # Sometimes log_probs contains a 0, it shouldn't. This makes the - # probabilities sum up to more than 1, since the addition happens - # in float64, so just add and subtract 1.0 to zero those probabilites - # out. + # Let's cast up to float64, because that's what numpy does when sampling + # and it leads to the sum(pvals[:-1]) > 1.0 error. # - # Also testing for this is brittle. - probs += 1 - probs -= 1 - - # For some reason, sometimes, this isn't the case. - probs_sum = np.sum(probs, axis=1, keepdims=True) - if not all(probs_sum == 1.0): - probs = probs / probs_sum + # We also re-normalize when we do this. + probs = np.float64(probs) + probs /= np.sum(probs, axis=1, keepdims=True) # Now pick actions from this probs array. actions = np.apply_along_axis(multinomial_sample, 1, probs)
[Fix][Warning] tvm.target.create() deprecated Update the example with the newer API.
@@ -44,7 +44,7 @@ def prepare_graph_lib(base_path): params = {"y": np.ones((2, 2), dtype="float32")} mod = tvm.IRModule.from_expr(relay.Function([x, y], x + y)) # build a module - compiled_lib = relay.build(mod, tvm.target.create("llvm"), params=params) + compiled_lib = relay.build(mod, tvm.target.Target("llvm"), params=params) # export it as a shared library # If you are running cross compilation, you can also consider export # to tar and invoke host compiler later.
Fix cannot import DefaultConfig problem Resolves:
@@ -43,6 +43,7 @@ from .orm.pymilvus_orm.utility import ( ) from .orm.pymilvus_orm import utility +from .orm.pymilvus_orm.default_config import DefaultConfig from .orm.pymilvus_orm.search import SearchResult, Hits, Hit from .orm.pymilvus_orm.schema import FieldSchema, CollectionSchema @@ -56,7 +57,7 @@ __all__ = [ 'SearchResult', 'Hits', 'Hit', 'FieldSchema', 'CollectionSchema', 'SearchFuture', 'MutationFuture', - 'utility', + 'utility', 'DefaultConfig', # pymilvus old style APIs 'Milvus', 'Prepare', 'Status', 'DataType',
Update README.md Explaining the dryrun mode on omnibot in README.md
@@ -88,5 +88,11 @@ roslaunch soccerbot soccerbot_simulation.launch frozen:=true For omnibot, just run the omnibot launch file, replace robot.launch with simulation.launch for simulation ```bash -roslaunch soccerbot omnibot.launch +roslaunch soccerbot omnibot.launch dryrun:=true +``` + +For running in a mode where the hardware is not connected and you need to examine the output of what would be sent to hardware if the hardware was connected. + +```bash +roslaunch soccerbot omnibot.launch dryrun:=false ```
[Docs] Add getting_started chinese version * add getting_started chinese version * fix some typos * Update getting_started.md Modified some typos * Update getting_started.md * Update getting_started.md * Update getting_started.md * Update getting_started.md * Update getting_started.md * Update getting_started.md * Update getting_started.md
@@ -150,7 +150,7 @@ It is recommended that you run step d each time you pull some updates from githu find . -name "*.so" | xargs rm ``` -2. Following the above instructions, mmdetection is installed on `dev` mode, any local modifications made to the code will take effect without the need to reinstall it (unless you submit some commits and want to update the version number). +2. Following the above instructions, MMDetection3D is installed on `dev` mode, any local modifications made to the code will take effect without the need to reinstall it (unless you submit some commits and want to update the version number). 3. If you would like to use `opencv-python-headless` instead of `opencv-python`, you can install it before installing MMCV. @@ -176,7 +176,7 @@ docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmdetection3d/data mmdete ## A from-scratch setup script -Here is a full script for setting up mmdetection with conda. +Here is a full script for setting up MMdetection3D with conda. ```shell conda create -n open-mmlab python=3.7 -y @@ -227,7 +227,7 @@ python demo/pcd_demo.py demo/data/kitti/kitti_000008.bin configs/second/hv_secon ``` If you want to input a `ply` file, you can use the following function and convert it to `bin` format. Then you can use the converted `bin` file to generate demo. -Note that you need to install pandas and plyfile before using this script. This function can also be used for data preprocessing for training ```ply data```. +Note that you need to install `pandas` and `plyfile` before using this script. This function can also be used for data preprocessing for training ```ply data```. ```python import numpy as np @@ -252,7 +252,7 @@ Examples: convert_ply('./test.ply', './test.bin') ``` -If you have point clouds in other format (`off`, `obj`, etc.), you can use trimesh to convert them into `ply`. +If you have point clouds in other format (`off`, `obj`, etc.), you can use `trimesh` to convert them into `ply`. ```python import trimesh
Run regendoc over fixture docs This is the result of running: $ cd doc/en && make regen REGENDOC_FILES=fixture.rst
@@ -927,6 +927,8 @@ doesn't guarantee a safe cleanup. That's covered in a bit more detail in .. code-block:: pytest $ pytest -q test_emaillib.py + . [100%] + 1 passed in 0.12s Handling errors for yield fixture """"""""""""""""""""""""""""""""" @@ -1010,6 +1012,8 @@ does offer some nuances for when you're in a pinch. .. code-block:: pytest $ pytest -q test_emaillib.py + . [100%] + 1 passed in 0.12s .. _`safe teardowns`: @@ -1062,6 +1066,8 @@ wouldn't be compact anymore). .. code-block:: pytest $ pytest -q test_emaillib.py + . [100%] + 1 passed in 0.12s .. _`safe fixture structure`: @@ -1978,11 +1984,13 @@ Running the above tests results in the following test IDs being used: platform linux -- Python 3.x.y, pytest-6.x.y, py-1.x.y, pluggy-0.x.y cachedir: $PYTHON_PREFIX/.pytest_cache rootdir: $REGENDOC_TMPDIR - collected 10 items + collected 11 items <Module test_anothersmtp.py> <Function test_showhelo[smtp.gmail.com]> <Function test_showhelo[mail.python.org]> + <Module test_emaillib.py> + <Function test_email_received> <Module test_ids.py> <Function test_a[spam]> <Function test_a[ham]> @@ -1994,7 +2002,7 @@ Running the above tests results in the following test IDs being used: <Function test_ehlo[mail.python.org]> <Function test_noop[mail.python.org]> - ======================= 10 tests collected in 0.12s ======================== + ======================= 11 tests collected in 0.12s ======================== .. _`fixture-parametrize-marks`:
[TVM] Remove dynamic batch size dispatching Summary: Pull Request resolved: Remove dynamic batch size dispatching Set caffe2_tvm_min_ops to 8 Set caffe2_tvm_profiling_based_jit to false Rename some variable names Test Plan: buck test caffe2/caffe2/fb/tvm:test_tvm_transform
C10_DEFINE_bool( caffe2_tvm_profiling_based_jit, - true, + false, "Use profiling based jit for TVM transform"); C10_DEFINE_int32( caffe2_tvm_min_ops, - 10, + 8, "Minimal number of supported ops for the subgraph to be lowered to TVM"); namespace caffe2 { @@ -179,7 +179,7 @@ void TvmTransformer::transform( } if (opts_.debug) { - dumpNet(*pred_net, shape_hints, "debug_ssa_net.pb_txt"); + dumpNet(*pred_net, shape_hints, "debug_ssa_net.pbtxt"); } // We are ready to transform the net @@ -193,7 +193,7 @@ void TvmTransformer::transform( net_opt.mutable_device_option()->CopyFrom(pred_net->device_option()); pred_net->Swap(&net_opt); if (opts_.debug) { - dumpNet(*pred_net, shape_hints, "debug_full_opt_net.pb_txt"); + dumpNet(*pred_net, shape_hints, "debug_full_opt_net.pbtxt"); } } @@ -202,23 +202,20 @@ NetDef TvmTransformer::applyTvmTransform( const std::unordered_set<std::string>& weights, const std::unordered_set<int>& blacklisted_ops, const ShapeInfoMap& shape_hints) { - auto profiling_based_jit = opts_.profiling_based_jit; + const auto profiling_based_jit = opts_.profiling_based_jit; auto tvm_supports = [&blacklisted_ops, &shape_hints, &profiling_based_jit]( const caffe2::OperatorDef& op) { const static std::unordered_set<std::string> supported_ops{ - "Add", "Sum", - "FC", "FCTransposed", - "Flatten", "Relu", - "Sigmoid", "Softmax", - "Split", "EnsureCPUOutput", - "Reshape", "ExpandDims", - "Concat", "BatchMatMul", - "MatMul", "BatchGather", - "DotProduct", "Transpose", - "Mul", "Tanh", - "Logit", "Cast", - "Copy", "ReplaceNaN", - "Clip"}; + "Add", "BatchGather", "BatchMatMul", + "Cast", "Clip", "Concat", + "Copy", "DotProduct", "EnsureCPUOutput", + "ExpandDims", "FC", "FCTransposed", + "Flatten", "Logit", "MatMul", + "Mul", "Relu", "Reshape", + "ReplaceNaN", "Sigmoid", "Softmax", + "Split", "Sum", "Tanh", + "Transpose", + }; try { // If the op position is black listed, return false
clean up sync_concepts_from_openmrs can use a simpler approach now that concepts are saved up front rather than only after adding answers
@@ -11,25 +11,16 @@ def sync_concepts_from_openmrs(account): answers_relationships = [] for concept in api.get_all(): concept = openmrs_concept_json_from_api_json(concept) - concept, answers = openmrs_concept_from_concept_json(account, concept) - if answers: - answers_relationships.append((concept, answers)) + concept, answer_uuids = openmrs_concept_from_concept_json(account, concept) + if answer_uuids: + answers_relationships.append((concept, answer_uuids)) concept.save() - while answers_relationships: - delayed = [] - for concept, answers in answers_relationships: - answer_concepts = OpenmrsConcept.objects.filter(account=account, uuid__in=answers).all() - if set(answer_concept.uuid for answer_concept in answer_concepts) == set(answers): + for concept, answer_uuids in answers_relationships: + answer_concepts = OpenmrsConcept.objects.filter(account=account, uuid__in=answer_uuids).all() + assert set(answer_concept.uuid for answer_concept in answer_concepts) == set(answer_uuids) concept.answers = answer_concepts concept.save() - else: - delayed.append((concept, answers)) - - if len(answers_relationships) == len(delayed): - # this is going to be an infinite loop - raise Exception(delayed) - answers_relationships = delayed class OpenmrsConceptJSON(jsonobject.JsonObject):
custom_profile_fields: Display data of default external type fields. Display default values of the "name" and "hint" fields in an uneditable way while creating new default external account type profile fields.
@@ -174,18 +174,24 @@ function set_up_create_field_form() { if (Number.parseInt($("#profile_field_type").val(), 10) === field_types.EXTERNAL_ACCOUNT.id) { $field_elem.show(); - if ($("#profile_field_external_accounts_type").val() === "custom") { + const $profile_field_external_account_type = $( + "#profile_field_external_accounts_type", + ).val(); + if ($profile_field_external_account_type === "custom") { $field_url_pattern_elem.show(); - $("#profile_field_name").val("").closest(".input-group").show(); - $("#profile_field_hint").val("").closest(".input-group").show(); + $("#profile_field_name").val("").prop("disabled", false); + $("#profile_field_hint").val("").prop("disabled", false); } else { $field_url_pattern_elem.hide(); - $("#profile_field_name").closest(".input-group").hide(); - $("#profile_field_hint").closest(".input-group").hide(); + const profile_field_name = + page_params.realm_default_external_accounts[$profile_field_external_account_type] + .name; + $("#profile_field_name").val(profile_field_name).prop("disabled", true); + $("#profile_field_hint").val("").prop("disabled", true); } } else { - $("#profile_field_name").closest(".input-group").show(); - $("#profile_field_hint").closest(".input-group").show(); + $("#profile_field_name").prop("disabled", false); + $("#profile_field_hint").prop("disabled", false); $field_url_pattern_elem.hide(); $field_elem.hide(); }
Pin poetry to previous version * Pin poetry to previous version Version 1.0.0 seems to break the build. * Use poetry version 0.12.7 * Use latest poetry 0.12 minor version
@@ -20,7 +20,7 @@ ENV PYTHONUNBUFFERED 1 RUN mkdir -p /opt/poetry /app /static /opt/static /dbox/Dropbox/media RUN python -m pip install -U pip -RUN python -m pip install -U poetry +RUN python -m pip install -U poetry==0.12.17 RUN groupadd -g 2001 -r django && useradd -m -u 2001 -r -g django django RUN chown django:django /opt/poetry /app /static /opt/static /dbox/Dropbox/media
New dep-upd: compute modified_entity_ids * New dep-upd: compute modified_entity_ids There's no real reason to compute this, but we used to pass that dict to the custom workflows, so let's keep back-compat, at least for now. * flake
@@ -632,6 +632,7 @@ class InstallParameters: def __init__(self, ctx, update_params, dep_update): self._update_instances = dep_update['deployment_update_node_instances'] self.update_id = dep_update.id + self.steps = dep_update.steps for kind in ['added', 'removed', 'extended', 'reduced']: changed, related = self._split_by_modification( @@ -661,6 +662,36 @@ class InstallParameters: ]: setattr(self, param, update_params.get(param, default)) + def _modified_entity_ids(self): + modified_ids = { + 'node': [], + 'relationship': {}, + 'property': [], + 'operation': [], + 'workflow': [], + 'output': [], + 'description': [], + 'group': [], + 'policy_type': [], + 'policy_trigger': [], + 'plugin': [], + 'rel_mappings': {} + } + for step in self.steps: + entity_type = step['entity_type'] + parts = step['entity_id'].split(':') + if len(parts) < 2: + continue + entity_id = parts[1] + + if step['entity_type'] == 'relationship': + relationship = parts[3] + modified_ids[entity_type].setdefault(entity_id, []).append( + relationship) + elif entity_type in modified_ids: + modified_ids[entity_type].append(entity_id) + return modified_ids + def _split_by_modification(self, items, modification): first, second = [], [] if not items: @@ -672,15 +703,10 @@ class InstallParameters: second.append(instance) return first, second - @property - def modified_entity_ids(self): - # TODO: compute this (RD-3523) - return [] - def as_workflow_parameters(self): return { 'update_id': self.update_id, - 'modified_entity_ids': self.modified_entity_ids, + 'modified_entity_ids': self._modified_entity_ids(), 'added_instance_ids': self.added_ids, 'added_target_instances_ids': self.added_target_ids, 'removed_instance_ids': self.removed_ids,
Fix typing and missing asserts [fix-typing-in-test-autodiscovery] Summary & Motivation: While using this file for another reason I noticed typing errors and missing asserts. This fixes that. Test Plan: BK Reviewers: OwenKephart Pull Request:
@@ -26,7 +26,8 @@ def test_single_repository(): assert symbol == "single_repository" repo_def = CodePointer.from_python_file(single_repo_path, symbol, None).load_target() - isinstance(repo_def, RepositoryDefinition) + assert isinstance(repo_def, RepositoryDefinition) + assert repo_def.name == "single_repository" @@ -35,7 +36,12 @@ def test_double_repository(): file_relative_path(__file__, "double_repository.py"), ) - assert set([lr.target_definition.name for lr in loadable_repos]) == {"repo_one", "repo_two"} + found_names = set() + for lr in loadable_repos: + assert isinstance(lr.target_definition, RepositoryDefinition) + found_names.add(lr.target_definition.name) + + assert found_names == {"repo_one", "repo_two"} def test_single_pipeline(): @@ -50,7 +56,7 @@ def test_single_pipeline(): CodePointer.from_python_file(single_pipeline_path, symbol, None) ) - isinstance(repo_def, RepositoryDefinition) + assert isinstance(repo_def, RepositoryDefinition) assert repo_def.get_pipeline("a_pipeline") @@ -79,7 +85,7 @@ def test_single_graph(): CodePointer.from_python_file(single_graph_path, symbol, None) ) - isinstance(repo_def, RepositoryDefinition) + assert isinstance(repo_def, RepositoryDefinition) assert repo_def.get_pipeline("graph_one") @@ -106,7 +112,7 @@ def test_single_asset_group(): repo_def = repository_def_from_pointer(CodePointer.from_python_file(path, symbol, None)) - isinstance(repo_def, RepositoryDefinition) + assert isinstance(repo_def, RepositoryDefinition) the_job = repo_def.get_job("__ASSET_JOB") assert len(the_job.graph.node_defs) == 2 @@ -134,7 +140,7 @@ def test_multiple_assets(): repo_def = repository_def_from_pointer(CodePointer.from_python_file(path, symbol, None)) - isinstance(repo_def, RepositoryDefinition) + assert isinstance(repo_def, RepositoryDefinition) the_job = repo_def.get_job("__ASSET_JOB") assert len(the_job.graph.node_defs) == 2 @@ -158,7 +164,7 @@ def test_single_pending_repository(): assert symbol == "single_pending_repository" repo_def = CodePointer.from_python_file(single_pending_repo_path, symbol, None).load_target() - isinstance(repo_def, PendingRepositoryDefinition) + assert isinstance(repo_def, PendingRepositoryDefinition) assert repo_def.name == "single_pending_repository" @@ -174,7 +180,7 @@ def test_single_repository_in_module(): repo_def = CodePointer.from_module( "dagster.utils.test.toys.single_repository", symbol, working_directory=None ).load_target() - isinstance(repo_def, RepositoryDefinition) + assert isinstance(repo_def, RepositoryDefinition) assert repo_def.name == "single_repository" @@ -190,7 +196,7 @@ def test_single_repository_in_package(): repo_def = CodePointer.from_python_package( "dagster.utils.test.toys.single_repository", symbol, working_directory=None ).load_target() - isinstance(repo_def, RepositoryDefinition) + assert isinstance(repo_def, RepositoryDefinition) assert repo_def.name == "single_repository"
unread: Use consistent background-color for unread count pills. Change the background-color of all unread count pills in dark theme to have 1 consistent type of color in complete application, similar to how we have in light theme. Fixes
@@ -291,9 +291,7 @@ body.dark-theme { color: hsl(236, 33%, 90%); } - .recent_topics_container .unread_count, - .topic-list-item .unread_count, - .expanded_private_message .unread_count { + .unread_count { background-color: hsla(105, 2%, 50%, 0.5); }
Fix kolla-ansible unit test failures Seems like default path was changed and broke the tests:
@@ -35,7 +35,7 @@ class TestCase(unittest.TestCase): parsed_args = parser.parse_args([]) kolla_ansible.run(parsed_args, "command", "overcloud") expected_cmd = [ - "source", "ansible/kolla-venv/bin/activate", "&&", + "source", "/opt/kayobe/venvs/kolla/bin/activate", "&&", "kolla-ansible", "command", "--inventory", "/etc/kolla/inventory/overcloud", ] @@ -57,7 +57,7 @@ class TestCase(unittest.TestCase): parsed_args = parser.parse_args(args) kolla_ansible.run(parsed_args, "command", "overcloud") expected_cmd = [ - "source", "ansible/kolla-venv/bin/activate", "&&", + "source", "/opt/kayobe/venvs/kolla/bin/activate", "&&", "kolla-ansible", "command", "--inventory", "/path/to/inventory", "--configdir", "/path/to/config", @@ -84,7 +84,7 @@ class TestCase(unittest.TestCase): parsed_args = parser.parse_args(args) kolla_ansible.run(parsed_args, "command", "overcloud") expected_cmd = [ - "source", "ansible/kolla-venv/bin/activate", "&&", + "source", "/opt/kayobe/venvs/kolla/bin/activate", "&&", "kolla-ansible", "command", "--inventory", "/path/to/inventory", "--configdir", "/path/to/config", @@ -107,7 +107,7 @@ class TestCase(unittest.TestCase): parsed_args = parser.parse_args(args) kolla_ansible.run(parsed_args, "command", "overcloud") expected_cmd = [ - "source", "ansible/kolla-venv/bin/activate", "&&", + "source", "/opt/kayobe/venvs/kolla/bin/activate", "&&", "kolla-ansible", "command", "--key", "/path/to/vault/pw", "--inventory", "/etc/kolla/inventory/overcloud", @@ -130,7 +130,7 @@ class TestCase(unittest.TestCase): parsed_args = parser.parse_args([]) kolla_ansible.run(parsed_args, "command", "overcloud") expected_cmd = [ - "source", "ansible/kolla-venv/bin/activate", "&&", + "source", "/opt/kayobe/venvs/kolla/bin/activate", "&&", "kolla-ansible", "command", "--key", "/path/to/kayobe-vault-password-helper", "--inventory", "/etc/kolla/inventory/overcloud", @@ -157,7 +157,7 @@ class TestCase(unittest.TestCase): } kolla_ansible.run(parsed_args, "command", "overcloud", **kwargs) expected_cmd = [ - "source", "ansible/kolla-venv/bin/activate", "&&", + "source", "/opt/kayobe/venvs/kolla/bin/activate", "&&", "kolla-ansible", "command", "-v", "--inventory", "/etc/kolla/inventory/overcloud",
use onp not lnp in module-level scope fixes google import
@@ -696,9 +696,9 @@ class LaxBackedNumpyTests(jtu.JaxTestCase): "rng_factory": jtu.rand_default} for shape in all_shapes for dtype in number_dtypes for a_min, a_max in [(-1, None), (None, 1), (-1, 1), - (-lnp.ones(1), None), - (None, lnp.ones(1)), - (-lnp.ones(1), lnp.ones(1))])) + (-onp.ones(1), None), + (None, onp.ones(1)), + (-onp.ones(1), onp.ones(1))])) def testClipStaticBounds(self, shape, dtype, a_min, a_max, rng_factory): rng = rng_factory() onp_fun = lambda x: onp.clip(x, a_min=a_min, a_max=a_max)
Update README.md Add an entry in the list of publications
@@ -213,6 +213,7 @@ List of publications & preprints using `highway-env` (please open a pull request * [Accelerated Policy Evaluation: Learning Adversarial Environments with Adaptive Importance Sampling](https://arxiv.org/abs/2106.10566) (Jun 2021) * [Learning Interaction-aware Guidance Policies for Motion Planning in Dense Traffic Scenarios](https://arxiv.org/abs/2107.04538) (Jul 2021) * [Robust Predictable Control](https://arxiv.org/abs/2109.03214) (Sep 2021) +* [Improving Robustness of Deep Reinforcement Learning Agents: Environment Attack based on the Critic Network](https://ieeexplore.ieee.org/document/9892901) (Jul 2022) PhD theses * [Reinforcement learning for Dialogue Systems optimization with user adaptation](https://hal.inria.fr/tel-02422691/) (2019)
Start benchmark element sweep at 100 Summary: Any number of elements below this always fits in a single packet and will yield ~identical results.
@@ -121,7 +121,7 @@ void Runner::run(BenchmarkFn& fn) { } // Run sweep over number of elements - for (int i = 1; i <= 1000000; i *= 10) { + for (int i = 100; i <= 1000000; i *= 10) { std::vector<int> js = {i * 1, i * 2, i * 5}; for (auto& j : js) { run(fn, j);
Add AD/LDAP setting updates * Add AD/LDAP setting updates Added AD/LDAP updates: user filter examples reminder for customers to enable ldap synchronization if they see the sync hanging * Update config-settings.rst Fixed grammar / inserted period.
@@ -1035,7 +1035,14 @@ Password of the user given in **Bind Username**. This field is required, and ano User Filter ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -(Optional) Enter an AD/LDAP Filter to use when searching for user objects (accepts `general syntax <http://www.ldapexplorer.com/en/manual/109010000-ldap-filter-syntax.htm>`_). Only the users selected by the query will be able to access Mattermost. For Active Directory, the query to filter out disabled users is ``(&(objectCategory=Person)(!(UserAccountControl:1.2.840.113556.1.4.803:=2)))`` +(Optional) Enter an AD/LDAP Filter to use when searching for user objects (accepts `general syntax <http://www.ldapexplorer.com/en/manual/109010000-ldap-filter-syntax.htm>`_). Only the users selected by the query will be able to access Mattermost. + +Sample filters for Active Directory: + +- To filter out disabled users: ``(&(objectCategory=Person)(!(UserAccountControl:1.2.840.113556.1.4.803:=2)))`` +- To filter out by group membership, determine the distinguishedName of your group, then use the group membership general syntax format as your filter. + + * For example, if the security group distinguishedName is ``CN=group1,OU=groups,DC=example,DC=com``, then the user filter to use is: ``(memberOf=CN=group1,OU=groups,DC=example,DC=com)``. Note that the user must explicitly belong to this group for the filter to apply. This filter uses the permissions of the **Bind Username** account to execute the search. Administrators should make sure to use a specially created account for Bind Username with read-only access to the portion of the AD/LDAP tree specified in the **Base DN** field. @@ -1150,6 +1157,8 @@ This button causes AD/LDAP synchronization to occur as soon as it is pressed. Us You can monitor the status of the synchronization job in the table below this button. +Note: If synchronization **Status** displays as ``Pending`` and does not complete, make sure that the **Enable Synchronization with AD/LDAP** setting is set to ``true``. + .. figure:: ../images/ldap-sync-table.png ________
tooltips: Fix line height for non-English characters. Some non-English characters overflow when the line height is reduced for the tooltip text. This commit increases the line height of the tooltips to accommodate these non-English characters and fixes the hotkey hint margins for the same.
align-items: center; padding: 5px 10px; font-size: 14px; - line-height: 15px; + line-height: 20px; color: hsla(0, 0%, 100%, 1); } box-sizing: inherit; display: flex; align-self: flex-start; - margin: -2px -7px -2px 10px; + margin: 0 -5px 0 10px; gap: 4px; }
Updated comment section of "Training the Model" I believe "The decoder is given the ``<SOS>`` token as its first input, and the last hidden state of the encoder as its first hidden state." Minute correction on comment section.
@@ -538,7 +538,7 @@ def variablesFromPair(pair): # To train we run the input sentence through the encoder, and keep track # of every output and the latest hidden state. Then the decoder is given # the ``<SOS>`` token as its first input, and the last hidden state of the -# decoder as its first hidden state. +# encoder as its first hidden state. # # "Teacher forcing" is the concept of using the real target outputs as # each next input, instead of using the decoder's guess as the next input.
build appimage: rm importlib-metadata workaround The importlib-metadata pkg is no longer needed apparently (since we bumped the min python to 3.8).
@@ -215,12 +215,10 @@ rm -rf "$PYDIR"/site-packages/PyQt5/Qt.so # these are deleted as they were not deterministic; and are not needed anyway find "$APPDIR" -path '*/__pycache__*' -delete -# note that *.dist-info is needed by certain packages. -# e.g. see https://gitlab.com/python-devs/importlib_metadata/issues/71 -for f in "$PYDIR"/site-packages/importlib_metadata-*.dist-info; do mv "$f" "$(echo "$f" | sed s/\.dist-info/\.dist-info2/)"; done +# although note that *.dist-info might be needed by certain packages... +# e.g. importlib-metadata, see https://gitlab.com/python-devs/importlib_metadata/issues/71 rm -rf "$PYDIR"/site-packages/*.dist-info/ rm -rf "$PYDIR"/site-packages/*.egg-info/ -for f in "$PYDIR"/site-packages/importlib_metadata-*.dist-info2; do mv "$f" "$(echo "$f" | sed s/\.dist-info2/\.dist-info/)"; done find -exec touch -h -d '2000-11-11T11:11:11+00:00' {} +
[measurements] update 'LHCb KS->mumu 2019' to published version * Add inspire to 'LHCb KS->mumu 2019' measurement. * Use official result of BR(KS->mumu) by LHCb Previously, preliminary result was there, stated with one more significant digit.
@@ -7051,8 +7051,9 @@ PDG 2018 Kll: LHCb KS->mumu 2019: experiment: LHCb + inspire: Aaij:2020sbt values: - BR(KS->mumu): 0.94 +0.72 -0.64 e-10 + BR(KS->mumu): 0.9 +0.7 -0.6 e-10 ATLAS Bs->mumu 2018: experiment: ATLAS
Add CephClientConfigOverrides resource This patch adds a new resource for the CephClient service that specifies a set of default configs wanted by default in the [client] section of ceph.conf. Depends-On:
@@ -49,6 +49,15 @@ resources: value: vars: {} + CephClientConfigOverrides: + type: OS::Heat::Value + properties: + type: json + value: + vars: + client: + rbd_concurrent_management_ops: 20 + outputs: role_data: description: Role data for the Ceph Client service. @@ -73,6 +82,7 @@ outputs: - name: set ceph-ansible group vars clients set_fact: ceph_ansible_group_vars_clients: {get_attr: [CephClientAnsibleVars, value, vars]} + ceph_client_config_overrides: {get_attr: [CephClientConfigOverrides, value, vars]} - name: generate ceph-ansible group vars clients copy: dest: "{{playbook_dir}}/ceph-ansible/group_vars/clients.yml"
Consts for need_healing Followup for [1], this creates private module-level constants for the ``need_healing`` toggle used in the code, indicating whether the allocations need to be healed by creating them or by updating existing allocations. [1]
@@ -85,6 +85,10 @@ _EXTRA_DEFAULT_LOG_LEVELS = ['oslo_concurrency=INFO', 'oslo_db=INFO', 'oslo_policy=INFO'] +# Consts indicating whether allocations need to be healed by creating them or +# by updating existing allocations. +_CREATE = 'create' +_UPDATE = 'update' # Decorators for actions args = cmd_common.args @@ -2037,7 +2041,7 @@ class PlacementCommands(object): # there are no allocations for the instance if not allocations.get('allocations'): # This instance doesn't have allocations - need_healing = 'create' + need_healing = _CREATE allocations = self._heal_missing_alloc(ctxt, instance, node_cache) if (allocations.get('project_id') != instance.project_id or @@ -2047,7 +2051,7 @@ class PlacementCommands(object): # and re-put them. We don't use put_allocations here # because we don't want to mess up shared or nested # provider allocations. - need_healing = 'update' + need_healing = _UPDATE allocations = self._heal_missing_project_and_user_id( allocations, instance) @@ -2059,19 +2063,19 @@ class PlacementCommands(object): port_allocations, ports_to_update = {}, [] if port_allocations: - need_healing = need_healing or 'update' + need_healing = need_healing or _UPDATE # Merge in any missing port allocations allocations['allocations'] = self._merge_allocations( allocations['allocations'], port_allocations) if need_healing: if dry_run: - if need_healing == 'create': + if need_healing == _CREATE: output(_('[dry-run] Create allocations for instance ' '%(instance)s: %(allocations)s') % {'instance': instance.uuid, 'allocations': allocations}) - elif need_healing == 'update': + elif need_healing == _UPDATE: output(_('[dry-run] Update allocations for instance ' '%(instance)s: %(allocations)s') % {'instance': instance.uuid, @@ -2090,11 +2094,11 @@ class PlacementCommands(object): resp = placement.put_allocations(ctxt, instance.uuid, allocations) if resp: - if need_healing == 'create': + if need_healing == _CREATE: output(_('Successfully created allocations for ' 'instance %(instance)s.') % {'instance': instance.uuid}) - elif need_healing == 'update': + elif need_healing == _UPDATE: output(_('Successfully updated allocations for ' 'instance %(instance)s.') % {'instance': instance.uuid})
Made query/variables order reproducible. Python 3.5 inverted them
@@ -37,6 +37,7 @@ import copy import re import sys import os +from collections import OrderedDict if sys.version_info[0] < 3: from urllib import quote_plus else: @@ -165,7 +166,10 @@ class api_partinfo_kitspace(distributor_class): variables = '{{"input":{}}}'.format(variables) # Do the query using POST log_request(url, query_type, variables) - response = requests.post(url, {'query': query_type, 'variables': variables}) + data = OrderedDict() + data['query'] = query_type + data['variables'] = variables + response = requests.post(url, data) log_response(response.text) if response.status_code == requests.codes['ok']: # 200 results = json.loads(response.text)
fix remote combo HG-- branch : feature/microservices
@@ -22,7 +22,7 @@ Ext.define("NOC.core.LookupField", { query: {}, stateful: false, autoSelect: false, - pageSize: 25, + pageSize: true, listConfig: { minWidth: 240 }, @@ -30,13 +30,15 @@ Ext.define("NOC.core.LookupField", { restUrl: null, initComponent: function() { - var me = this, - p; + var me = this; + // Calculate restUrl - p = me.$className.split("."); - if(!me.restUrl && p[0] === "NOC" && p[3] === "LookupField") { - me.restUrl = "/" + p[1] + "/" + p[2] + "/lookup/" + if(!me.restUrl + && Ext.String.startsWith(me.$className, 'NOC.') + && Ext.String.endsWith(me.$className, 'LookupField')) { + me.restUrl = me.$className.replace('NOC', '').replace(/\./g, '/').replace('/LookupField', '/lookup/'); } + if(!me.restUrl) { throw "Cannot determine restUrl for " + me.$className; } @@ -44,6 +46,7 @@ Ext.define("NOC.core.LookupField", { Ext.apply(me, { store: Ext.create("Ext.data.Store", { fields: ["id", "label"], + pageSize: 25, proxy: { type: "rest", url: me.restUrl, @@ -66,6 +69,9 @@ Ext.define("NOC.core.LookupField", { if(me.query) { Ext.apply(me.store.proxy.extraParams, me.query); } + // Fix combobox with remote paging + me.pickerId = me.getId() + '_picker'; + // end me.callParent(); me.on("specialkey", me.onSpecialKey, me, {delay: 100}); me.on('change', function(element, newValue) {
Update investigations.spec.json Various changes for grammar, spelling, consistency, and flow.
"phantom": { "properties": { "phantom_server": { - "description": "IP address and username of the phantom server. Currently, we will ship this value as automation (hostname) and we encourage the users to modify those values according to their environment. Eg: automation (hostname)", + "description": "IP address and username of the Phantom server. Currently, we ship these values as automation (hostname) and encourage users to modify them according to their environments: for example, automation (hostname)", "type": "string" }, "playbook_name": { - "description": "Name of the playbook. This name should be the same as the name on phantom community repository on github with underscores and appended with community/<playbook_name>. The playbooks are hosted on https://github.com/phantomcyber/playbooks. Eg: community/simple_network_enrichment", + "description": "Name of the playbook. This name should be the same as the name on the Phantom community repository on github, with underscores, and appended with \"community/<playbook_name>.\" The playbooks are hosted on https://github.com/phantomcyber/playbooks. For example: community/simple_network_enrichment.", "type": "string" }, "playbook_url": { - "description": "Url of the playbook on Phantom website.", + "description": "URL of the playbook on the Phantom website", "type": "string" }, "schedule": { - "description": "Various fields to assist in scheduling the search", + "description": "Various fields used to assist in scheduling the search", "properties": { "cron_schedule": { "description": "Schedule of the search in cron format", "type": "string" }, "latest_time": { - "description": "The latest time tes search should run against in Splunk format", + "description": "The latest time the search should run again in Splunk format", "type": "string" } }, "type": "object" }, "sensitivity": { - "description": "TLP colors (White, Green, Amber or Red)", + "description": "TLP colors (White, Green, Amber, or Red)", "type": "string" }, "severity": { - "description": "Severity in phantom (High, Medium, Low)", + "description": "Severity in Phantom (High, Medium, Low)", "type": "string" } }, "type": "string" }, "latest_time": { - "description": "The latest time tes search should run against in Splunk format", + "description": "The latest time the search should run again in Splunk format", "type": "string" } }, "type": "object" }, "search": { - "description": "The search (in SPL) executed within core Splunk for investgation.", + "description": "The search (in SPL) executed within Splunk Enterprise for investigation", "type": "string" } }, "uniqueItems": true }, "how_to_implement": { - "description": "A discussion on how to implement this search, from what needs to be ingested, config files modified, and suggested per site modifications", + "description": "A discussion on how to implement this search, from what needs to be ingested, config files modified, and suggested per-site modifications", "type": "string" }, "id": { "type": "object" }, "known_false_positives": { - "description": "Scenarios in which detected behavior is benig, coupled with suggestions on how to verify the behavior", + "description": "Scenarios in which detected behavior is benign, coupled with suggestions on how to verify the behavior", "type": "string" }, "maintainers": { - "description": "An array of the current maintainers of the Analytic Story.", + "description": "An array of the current maintainers of the Analytic Story", "items": { "additionalProperties": false, "properties": { "type": 
"integer" }, "type": { - "description": "Type of product that will support this investigate object.", + "description": "Type of product that will support this investigate object", "enum": [ "phantom", "splunk",
[IMPR] use response in http.error_handling_callback() Remove some use of HttpRequest() inside http.py.
@@ -283,7 +283,7 @@ def get_authentication(uri: str) -> Optional[Tuple[str, str]]: return None -def error_handling_callback(request): +def error_handling_callback(response): """ Raise exceptions and log alerts. @@ -291,27 +291,30 @@ def error_handling_callback(request): @type request: L{threadedhttp.HttpRequest} """ # TODO: do some error correcting stuff - if isinstance(request.data, requests.exceptions.SSLError): - if SSL_CERT_VERIFY_FAILED_MSG in str(request.data): - raise FatalServerError(str(request.data)) + if isinstance(response, requests.exceptions.SSLError): + if SSL_CERT_VERIFY_FAILED_MSG in str(response): + raise FatalServerError(str(response)) - if request.status_code == 504: + if isinstance(response, Exception): + with suppress(Exception): + # request.data exception may contain response and request attribute + error('An error occurred for uri ' + response.request.url) + raise response from None + + if response.status_code == 504: raise Server504Error('Server {} timed out' - .format(urlparse(request.url).netloc)) + .format(urlparse(response.url).netloc)) - if request.status_code == 414: + if response.status_code == 414: raise Server414Error('Too long GET request') - if isinstance(request.data, Exception): - with suppress(Exception): - # request.data exception may contain response and request attribute - error('An error occurred for uri ' + request.data.request.url) - raise request.data from None + # TODO: shall it raise? this might break some code, TBC + # response.raise_for_status() # HTTP status 207 is also a success status for Webdav FINDPROP, # used by the version module. - if request.status_code not in (200, 207): - warning('Http response status {}'.format(request.status_code)) + if response.status_code not in (200, 207): + warning('Http response status {}'.format(response.status_code)) @deprecate_arg('callback', True) @@ -409,11 +412,12 @@ def fetch(uri, method='GET', params=None, body=None, headers=None, **kwargs) except Exception as e: request.data = e + response = e else: request.data = response for callback in callbacks: - callback(request) + callback(response) # if there's no data in the answer we're in trouble try:
Transfers: compress multihop if it passes through a source. Closes This may happen if source_replica_expression forces a particular source RSE, but transfer to destination from the forced source ends being a multi-hop via another source.
@@ -1192,27 +1192,35 @@ def get_dsn(scope, name, dsn): return 'other' -def __filter_unwanted_paths(candidate_paths: "Iterable[List[DirectTransferDefinition]]") -> "Generator[List[DirectTransferDefinition]]": - +def __filter_multihops_with_intermediate_tape(candidate_paths: "Iterable[List[DirectTransferDefinition]]") -> "Generator[List[DirectTransferDefinition]]": # Discard multihop transfers which contain a tape source as an intermediate hop - filtered_candidate_paths = [] for path in candidate_paths: if any(transfer.src.rse.is_tape_or_staging_required() for transfer in path[1:]): - continue - filtered_candidate_paths.append(path) - candidate_paths = filtered_candidate_paths + pass + else: + yield path - # Discard multihop transfers which contain other candidate as part of itself For example: - # if A->B->C and B->C are both candidates, discard A->B->C because it includes B->C. Doing B->C is enough. - source_rses = {path[0].src.rse.id for path in candidate_paths} - filtered_candidate_paths = [] + +def __compress_multihops( + candidate_paths: "Iterable[List[DirectTransferDefinition]]", + sources: "Iterable[TransferSource]", +) -> "Generator[List[DirectTransferDefinition]]": + # Compress multihop transfers which contain other sources as part of itself. + # For example: multihop A->B->C and B is a source, compress A->B->C into B->C + source_rses = {s.rse.id for s in sources} + seen_source_rses = set() for path in candidate_paths: - if any(hop.src.rse.id in source_rses for hop in path[1:]): - continue - filtered_candidate_paths.append(path) - candidate_paths = filtered_candidate_paths + if len(path) > 1: + # find the index of the first hop starting from the end which is also a source. Path[0] will always be a source. + last_source_idx = next((idx for idx, hop in reversed(list(enumerate(path))) if hop.src.rse.id in source_rses), (0, None)) + if last_source_idx > 0: + path = path[last_source_idx:] - yield from candidate_paths + # Deduplicate paths from same source + src_rse_id = path[0].src.rse.id + if src_rse_id not in seen_source_rses: + seen_source_rses.add(src_rse_id) + yield path def __sort_paths(candidate_paths: "Iterable[List[DirectTransferDefinition]]") -> "Generator[List[DirectTransferDefinition]]": @@ -1488,7 +1496,8 @@ def __build_transfer_paths( if len(filtered_sources) != len(candidate_paths): logger(logging.DEBUG, 'Sources after path computation for %s: %s', rws, [str(path[0].src.rse) for path in candidate_paths]) - candidate_paths = __filter_unwanted_paths(candidate_paths) + candidate_paths = __filter_multihops_with_intermediate_tape(candidate_paths) + candidate_paths = __compress_multihops(candidate_paths, rws.sources) candidate_paths = list(__sort_paths(candidate_paths)) if not candidate_paths:
Fix monkeypatch doc `delenv` is incorrectly documented.
@@ -22,7 +22,7 @@ def monkeypatch(): monkeypatch.setitem(mapping, name, value) monkeypatch.delitem(obj, name, raising=True) monkeypatch.setenv(name, value, prepend=False) - monkeypatch.delenv(name, value, raising=True) + monkeypatch.delenv(name, raising=True) monkeypatch.syspath_prepend(path) monkeypatch.chdir(path)
update descriptions of master categories to add the How-to cost saving guide to: Cost Saving, NHS Low Priority, NHS Low Priority Consultation
}, "cost": { "name": "Cost Saving", - "description": "The following measures and an accumulation of all openprescribing.net measures where changes to prescribing in this area will result in cost savings." + "description": "The following measures and an accumulation of all openprescribing.net measures where changes to prescribing in this area will result in cost savings. For more information on how OpenPrescribing can support identification of cost-saving opportunities please consult our _How to_ guide <a href=\https://ebmdatalab.net/qipp-planning-how-to/\">here</a>" }, "diabetes": { "name": "Diabetes", "description": [ "This is a list of products ", "considered a low priority for NHS Funding ", - "<a href=\"https://www.england.nhs.uk/2017/07/medicine-consultation/\">by NHS England</a>." + "<a href=\"https://www.england.nhs.uk/2017/07/medicine-consultation/\">by NHS England</a>. For more information on how OpenPrescribing can support identification of cost-saving opportunities please consult our _How to_ guide <a href=\https://ebmdatalab.net/qipp-planning-how-to/\">here</a" ] }, "lowpriorityconsultation": { "This is an <strong>experimental</strong> list of products ", "which NHS England ", "<a href=\"https://www.engage.england.nhs.uk/consultation/items-routinely-prescribed-update/\">", - "have proposed</a> to add to their Low Priority list." + "have proposed</a> to add to their Low Priority list. For more information on how OpenPrescribing can support identification of cost-saving opportunities please consult our _How to_ guide <a href=\https://ebmdatalab.net/qipp-planning-how-to/\">here</a" ] }, "mentalhealth": {
[IMPR] shorten lang['prefix'] to key detached from
@@ -108,10 +108,11 @@ class FamilyFileGenerator(object): """Load other language pages.""" print('Loading wikis... ') for lang in self.langs: - print(' * %s... ' % (lang['prefix']), end='') - if lang['prefix'] not in self.wikis: + key = lang['prefix'] + print(' * {}... '.format(key), end='') + if key not in self.wikis: try: - self.wikis[lang['prefix']] = self.Wiki(lang['url']) + self.wikis[key] = self.Wiki(lang['url']) print('downloaded') except Exception as e: print(e)
[Test] fix npy2apintstream tests by including cstddef may be a Vivado HLS version-dependent problem...
@@ -32,6 +32,7 @@ def make_npy2apintstream_testcase(ndarray, dtype): npy_type = npyt_to_ct[str(ndarray.dtype)] shape_cpp_str = str(shape).replace("(", "{").replace(")", "}") test_app_string = [] + test_app_string += ["#include <cstddef>"] test_app_string += ['#include "ap_int.h"'] test_app_string += ['#include "stdint.h"'] test_app_string += ['#include "hls_stream.h"']
[Qt 5.5+] Debug feature: improvements and bug fixes * Check if inspector exists before opening * Make inspector_port a class attribute Chrome Remote debugger supports only one debug server per app.
@@ -65,6 +65,7 @@ if _import_error: class BrowserView(QMainWindow): instances = {} + inspector_port = None # The localhost port at which the Remote debugger listens create_window_trigger = QtCore.pyqtSignal(object) set_title_trigger = QtCore.pyqtSignal(str) @@ -98,7 +99,6 @@ class BrowserView(QMainWindow): class WebView(QWebView): def __init__(self): super(BrowserView.WebView, self).__init__() - self.inspector_port = None # The localhost port at which the Remote debugger listens def contextMenuEvent(self, event): menu = self.page().createStandardContextMenu() @@ -119,9 +119,14 @@ class BrowserView(QMainWindow): # Create a new webview window pointing at the Remote debugger server def show_inspector(self): - title = 'Web Inspector - {}'.format(self.parent().title) uid = self.parent().uid + '-inspector' - url = 'http://localhost:{}'.format(self.inspector_port) + try: + # If inspector already exists, bring it to the front + BrowserView.instances[uid].raise_() + BrowserView.instances[uid].activateWindow() + except KeyError: + title = 'Web Inspector - {}'.format(self.parent().title) + url = 'http://localhost:{}'.format(BrowserView.inspector_port) inspector = BrowserView(uid, title, url, 700, 500, True, False, (300,200), False, '#fff', False, None, self.parent().webview_ready) @@ -169,8 +174,10 @@ class BrowserView(QMainWindow): self.view = BrowserView.WebView() if debug and _qt_version > [5, 5]: - self.view.inspector_port = BrowserView._get_free_port() - os.environ['QTWEBENGINE_REMOTE_DEBUGGING'] = self.view.inspector_port + # Initialise Remote debugging (need to be done only once) + if not BrowserView.inspector_port: + BrowserView.inspector_port = BrowserView._get_free_port() + os.environ['QTWEBENGINE_REMOTE_DEBUGGING'] = BrowserView.inspector_port else: self.view.setContextMenuPolicy(QtCore.Qt.NoContextMenu) # disable right click context menu
Update instructions.md the paragraph in the third part was way too dense; moved the hint to a new line and made the "hint" bold for coherence with other exercises
@@ -66,7 +66,8 @@ Players try to get as close as possible to a score of 21, without going _over_ 2 Define the `value_of_ace(<card_one>, <card_two>)` function with parameters `card_one` and `card_two`, which are a pair of cards already in the hand _before_ getting an ace card. Your function will have to decide if the upcoming ace will get a value of 1 or a value of 11, and return that value. Remember: the value of the hand with the ace needs to be as high as possible _without_ going over 21. -Hint: if we already have an ace in hand then it's value would be 11. + +**Hint**: if we already have an ace in hand then it's value would be 11. ```python >>> value_of_ace('6', `K`)
Remove AggAwc reference I found this confusing because this table is not referenced in the management command and is only used because it happens to be in the same DB.
@@ -2,6 +2,7 @@ import inspect from datetime import datetime from django.core.management.base import BaseCommand, CommandError +from django.db import connections import dateutil @@ -13,8 +14,8 @@ from corehq.apps.hqadmin.management.commands.stale_data_in_es import ( ) from corehq.apps.userreports.util import get_table_name from corehq.form_processor.utils import should_use_sql_backend +from corehq.sql_db.connections import get_icds_ucr_citus_db_alias from corehq.sql_db.util import get_db_aliases_for_partitioned_query -from custom.icds_reports.models.aggregate import AggAwc, get_cursor class Command(BaseCommand): @@ -71,7 +72,7 @@ def _get_stale_data(run_config): def _get_ucr_insertion_dates(domain, case_ids): table_name = get_table_name(domain, 'static-household_cases') - with get_cursor(AggAwc) as cursor: + with connections[get_icds_ucr_citus_db_alias()].cursor() as cursor: query = f''' SELECT doc_id,
Update PagerDuty.yml Updated the description.
@@ -117,16 +117,16 @@ script: required: false secret: false - default: false - description: Filters the results, showing only on-calls for the specified escalation - policy IDs + description: Filters the results, showing only on-call users for the specified escalation + policy IDs. isArray: true name: escalation_policy_ids required: false secret: false - default: false - description: Filters the results, showing only on-calls for the specified schedule - IDs. If null is provided, it includes permanent on-calls due to direct user - escalation targets + description: Filters the results, showing only on-call users for the specified schedule + IDs. If the value is null, permanent on-call user are included due to direct user + escalation policy targets. isArray: true name: schedule_ids required: false
comments: add comments on runtime ordering per language; re-order runtimes per comment
@@ -13,10 +13,12 @@ except ImportError: _init_path = str(pathlib.Path(os.path.dirname(__file__)).parent) _templates = os.path.join(_init_path, 'init', 'templates') + +# Note(TheSriram): The ordering of the runtimes list per language is based on the latest to oldest. RUNTIME_DEP_TEMPLATE_MAPPING = { "python": [ { - "runtimes": ["python2.7", "python3.6", "python3.7"], + "runtimes": ["python3.7", "python3.6", "python2.7"], "dependency_manager": "pip", "init_location": os.path.join(_templates, "cookiecutter-aws-sam-hello-python"), "build": True @@ -46,7 +48,7 @@ RUNTIME_DEP_TEMPLATE_MAPPING = { ], "dotnet": [ { - "runtimes": ["dotnetcore", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1"], + "runtimes": ["dotnetcore2.1", "dotnetcore2.0", "dotnetcore1.0", "dotnetcore"], "dependency_manager": "cli-package", "init_location": os.path.join(_templates, "cookiecutter-aws-sam-hello-dotnet"), "build": True
openapi: Fix reference link in `/register-queue` documentation. Fixes a reference link in `realm_enable_spectator_access` description in `/register-queue` endpoint documentation.
@@ -11165,6 +11165,9 @@ paths: setting. **Changes**: New in Zulip 5.0 (feature level 109). + + [server-settings]: https://zulip.readthedocs.io/en/stable/production/settings.html + realm_video_chat_provider: type: integer description: |
Ensure behavior of None for device_id A simple unit test to ensure that we have had the same behavior across releases WRT explicitly passing None for device_id. Related-Bug:
@@ -947,6 +947,21 @@ class TestPortsV2(NeutronDbPluginV2TestCase): self.assertIn('mac_address', port['port']) self._delete('ports', port['port']['id']) + def test_create_port_None_values(self): + with self.network() as network: + keys = ['device_owner', 'name', 'device_id'] + for key in keys: + # test with each as None and rest as '' + kwargs = {k: '' for k in keys} + kwargs[key] = None + self._create_port(self.fmt, + network['network']['id'], + webob.exc.HTTPClientError.code, + tenant_id='tenant_id', + fixed_ips=[], + set_context=False, + **kwargs) + def test_create_port_public_network_with_ip(self): with self.network(shared=True) as network: ip_net = netaddr.IPNetwork('10.0.0.0/24')
adjust method to ignore some special characters and update regex on find_label_element method. Insert words on language.py
@@ -630,6 +630,8 @@ class WebappInternal(Base): >>> self.input_value("A1_COD", "000001") """ + field = re.sub(r"(\:*)(\?*)", "", field).strip() + self.wait_element(field) success = False endtime = time.time() + 60 @@ -639,7 +641,13 @@ class WebappInternal(Base): print(f"Looking for element: {field}") + if field.lower() == self.language.From.lower(): + element = self.get_field("cDeCond", name_attr=True) + elif field.lower() == self.language.To.lower(): + element = self.get_field("cAteCond", name_attr=True) + else: element = self.get_field(field) + if not element: continue @@ -976,6 +984,7 @@ class WebappInternal(Base): >>> #Elements with class "my_class" and text "my_text" >>> elements = self.web_scrap(term="my_text", scrap_type=ScrapType.MIXED, optional_term=".my_class") """ + try: endtime = time.time() + 60 container = None @@ -2817,7 +2826,7 @@ class WebappInternal(Base): >>> self.find_label_element("User:", container_object) """ - element = next(iter(list(map(lambda x: self.find_first_div_parent(x), container.find_all(text=re.compile(f"^{re.escape(label_text)}" + r"(\*?)(\s*?)$"))))), None) + element = next(iter(list(map(lambda x: self.find_first_div_parent(x), container.find_all(text=re.compile(f"^{re.escape(label_text)}" + r"(\s*)?([\*\?]{1})?(\s*)?$"))))), None) if element is None: return []
Fix bi_id rollback HG-- branch : feature/microservices
@@ -5,15 +5,25 @@ from django.db import models class Migration: def forwards(self): - db.execute("ALTER TABLE sa_managedobject ALTER COLUMN bi_id TYPE int") - db.execute("ALTER TABLE sa_administrativedomain ALTER COLUMN bi_id TYPE int") - db.execute("ALTER TABLE sa_authprofile ALTER COLUMN bi_id TYPE int") - db.execute("ALTER TABLE sa_terminationgroup ALTER COLUMN bi_id TYPE int") - db.execute("ALTER TABLE sa_managedobjectprofile ALTER COLUMN bi_id TYPE int") + db.execute("UPDATE sa_managedobject SET bi_id=NULL") + db.execute("ALTER TABLE sa_managedobject ALTER COLUMN bi_id TYPE bigint") + db.execute("UPDATE sa_administrativedomain SET bi_id=NULL") + db.execute("ALTER TABLE sa_administrativedomain ALTER COLUMN bi_id TYPE bigint") + db.execute("UPDATE sa_authprofile SET bi_id=NULL") + db.execute("ALTER TABLE sa_authprofile ALTER COLUMN bi_id TYPE bigint") + db.execute("UPDATE sa_terminationgroup SET bi_id=NULL") + db.execute("ALTER TABLE sa_terminationgroup ALTER COLUMN bi_id TYPE bigint") + db.execute("UPDATE sa_managedobjectprofile SET bi_id=NULL") + db.execute("ALTER TABLE sa_managedobjectprofile ALTER COLUMN bi_id TYPE bigint") def backwards(self): + db.execute("UPDATE sa_managedobject SET bi_id=NULL") db.execute("ALTER TABLE sa_managedobject ALTER COLUMN bi_id TYPE decimal(20,0)") + db.execute("UPDATE sa_administrativedomain SET bi_id=NULL") db.execute("ALTER TABLE sa_administrativedomain ALTER COLUMN bi_id TYPE decimal(20,0)") + db.execute("UPDATE sa_authprofile SET bi_id=NULL") db.execute("ALTER TABLE sa_authprofile ALTER COLUMN bi_id TYPE decimal(20,0)") + db.execute("UPDATE sa_terminationgroup SET bi_id=NULL") db.execute("ALTER TABLE sa_terminationgroup ALTER COLUMN bi_id TYPE decimal(20,0)") + db.execute("UPDATE sa_managedobjectprofile SET bi_id=NULL") db.execute("ALTER TABLE sa_managedobjectprofile ALTER COLUMN bi_id TYPE decimal(20,0)")
Add test for coverage Tests creating a property with both required and default.
@@ -4,7 +4,7 @@ import pytest import pytz import stix2 -from stix2.exceptions import ExtraPropertiesError +from stix2.exceptions import ExtraPropertiesError, STIXError from stix2.properties import ( BinaryProperty, BooleanProperty, EmbeddedObjectProperty, EnumProperty, FloatProperty, HexProperty, IntegerProperty, ListProperty, Property, @@ -47,7 +47,7 @@ def test_property_default(): assert p.default() == 77 -def test_fixed_property(): +def test_property_fixed(): p = Property(fixed="2.0") assert p.clean("2.0") @@ -60,6 +60,11 @@ def test_fixed_property(): assert p.clean(p.default()) +def test_property_fixed_and_required(): + with pytest.raises(STIXError): + Property(default=lambda: 3, required=True) + + def test_list_property(): p = ListProperty(StringProperty)
[NixIO] Path->obj map for fast nix obj retrieval The _get_object_at function is called frequently, and often with the same arguments, so it creates considerable overhead on both read and write.
@@ -117,6 +117,7 @@ class NixIO(BaseIO): self._lazy_loaded = list() self._object_hashes = dict() self._block_read_counter = 0 + self._path_map = dict() def __enter__(self): return self @@ -819,6 +820,8 @@ class NixIO(BaseIO): :param path: Path string :return: The object at the location defined by the path """ + if path in self._path_map: + return self._path_map[path] if path in ("", "/"): return self.nix_file parts = path.split("/") @@ -840,6 +843,7 @@ class NixIO(BaseIO): break else: obj = parent_container[objname] + self._path_map[path] = obj return obj def _get_parent(self, path):
Fix clang-format Summary: Pull Request resolved:
@@ -423,7 +423,8 @@ void testCudaOneBlockMultiThreadGlobalReduce1() { // for t in 0..1024: // thread-idx // if t < 1: // b[0] = 0 - ExprHandle cond_t_lt_1 = CompareSelect::make(t, 1, CompareSelectOperation::kLT); + ExprHandle cond_t_lt_1 = + CompareSelect::make(t, 1, CompareSelectOperation::kLT); Cond* masked_init_b = Cond::make(cond_t_lt_1, init_store, nullptr); LoopOptions thread_idx_options; thread_idx_options.set_gpu_thread_index(0); @@ -458,8 +459,7 @@ void testCudaOneBlockMultiThreadGlobalReduce1() { float* a_dev = nullptr; cudaMalloc(&a_dev, N * sizeof(float)); - cudaMemcpy( - a_dev, a_v.data(), N * sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(a_dev, a_v.data(), N * sizeof(float), cudaMemcpyHostToDevice); float* b_dev = nullptr; cudaMalloc(&b_dev, 1 * sizeof(float)); cudaDeviceSynchronize(); @@ -467,8 +467,7 @@ void testCudaOneBlockMultiThreadGlobalReduce1() { cuda_cg(a_dev, b_dev); cudaDeviceSynchronize(); - cudaMemcpy( - b_v.data(), b_dev, 1 * sizeof(float), cudaMemcpyDeviceToHost); + cudaMemcpy(b_v.data(), b_dev, 1 * sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); ExpectAllNear(b_v, b_ref, 1e-5);
Minor updates to development page Change Foxtail to Sycamore since Foxtail is deprecated. Add some clarification that instructions are for linux.
@@ -28,12 +28,12 @@ Note that if you are using PyCharm, you might have to Restart & Invalidate Cache ```bash docker build -t cirq --target cirq_stable . - docker run -it cirq python -c "import cirq_google; print(cirq_google.Foxtail)" + docker run -it cirq python -c "import cirq_google; print(cirq_google.Sycamore23)" ``` ```bash docker build -t cirq_pre --target cirq_pre_release . - docker run -it cirq_pre python -c "import cirq_google; print(cirq_google.Foxtail)" + docker run -it cirq_pre python -c "import cirq_google; print(cirq_google.Sycamore23)" ``` If you want to contribute changes to Cirq, you will instead want to fork the repository and submit pull requests from your fork. @@ -85,6 +85,9 @@ At this point your local git master should be synced with the master from the ma ## Setting up an environment +These instructions are primarily for linux-based environments that use the apt +package manager. + 0. First clone the repository, if you have not already done so. See the previous section for instructions.
Fix auth in actions if there is no auth_uri in context Closes-Bug:
@@ -31,7 +31,7 @@ CONF = cfg.CONF def client(): ctx = context.ctx() - auth_url = ctx.auth_uri + auth_url = ctx.auth_uri or CONF.keystone_authtoken.auth_uri cl = ks_client.Client( user_id=ctx.user_id,
Tip for recomputing metadata note for recomputing metadata
@@ -100,6 +100,13 @@ To ensure a dataset is complete, [`load_dataset`] will perform a series of tests - The number of samples in each split of the generated `DatasetDict`. If the dataset doesn't pass the verifications, it is likely that the original host of the dataset made some changes in the data files. + +<Tip> + +If it is your own dataset, you'll need to recompute the information above and update the `README.md` file in your dataset repository. Take a look at this [section](dataset_script#optional-generate-dataset-metadata) to learn how to generate and update this metadata. + +</Tip> + In this case, an error is raised to alert that the dataset has changed. To ignore the error, one needs to specify `ignore_verifications=True` in [`load_dataset`]. Anytime you see a verification error, feel free to open a discussion or pull request in the corresponding dataset "Community" tab, so that the integrity checks for that dataset are updated.
Keep the front end consistent by storing in arrays but displaying strings This frontend will be redone eventually, but since this is just an API change and not changing the frontend just yet, keep things as they were
@@ -59,6 +59,9 @@ var socialRules = { var cleanByRule = function(rule) { return function(value) { + if (typeof(value) === 'object') { + value = value[0]; + } var match = value.match(rule); if (match) { return match[1]; @@ -741,6 +744,17 @@ $.extend(SocialViewModel.prototype, SerializeMixin.prototype, TrackedMixin.proto SocialViewModel.prototype.serialize = function() { var serializedData = ko.toJS(this); + // Do some extra work to store these as arrays in the DB + var arrayInDBKeys = ['twitter', 'linkedIn', 'github']; + $.each(serializedData || {}, function(key, value) { + if ($.inArray(key, arrayInDBKeys) !== -1) { + if (value === '') { + serializedData[key] = []; + } else { + serializedData[key] = [value]; + } + } + }); var profileWebsites = serializedData.profileWebsites; serializedData.profileWebsites = profileWebsites.filter( function (value) { @@ -753,11 +767,15 @@ SocialViewModel.prototype.serialize = function() { SocialViewModel.prototype.unserialize = function(data) { var self = this; var websiteValue = []; + // Do some extra work to display these as strings for now + var arrayInDBKeys = ['twitter', 'linkedIn', 'github']; $.each(data || {}, function(key, value) { if (key === 'profileWebsites') { value = value.map(function(website) { return $osf.decodeText(website); }); + } else if ($.inArray(key, arrayInDBKeys) !== -1) { + value = $osf.decodeText(value[0]); } else { value = $osf.decodeText(value); }
DOC: changelog summary for DEMETER bugfix Added a summary of the changes into the CHANGELOG.
@@ -20,6 +20,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). - Fixed implementation of utils routines in model_utils and jro_isr - Fixed error catching bug in model_utils - Fixed error introduced by upstream change in NOAA F10.7 file format + - Fixed bugs in DEMETER file reading introduced by changes in codecs ## [2.0.0] - 2019-07-11 - New Features
properly close transaction overlay after adding a transaction Close
import e from './events'; import { $, $$, handleJSON } from './helpers'; import { initInput, addPostingRow, addMetadataRow, entryFormToJSON } from './entry-forms'; +import { closeOverlay } from './overlays'; function submitTransactionForm(form, successCallback) { const jsonData = { @@ -55,9 +56,7 @@ e.on('page-init', () => { form.querySelector('#transaction-form-submit').addEventListener('click', (event) => { event.preventDefault(); - submitTransactionForm(form, () => { - $('#transaction').classList.remove('shown'); - }); + submitTransactionForm(form, closeOverlay); }); form.querySelector('#transaction-form-submit-and-new').addEventListener('click', (event) => {
fix(www): fix rate limit The API rate limit is rather high to allow the test suite to run. Technical debt is tracked in
@@ -35,10 +35,10 @@ http { # rate limits are exceeded when the 'leaky bucket' is full # set up one bucket per remote ip for general purpose - limit_req_zone $binary_remote_addr zone=perip-general:100m rate=100000r/s; + limit_req_zone $binary_remote_addr zone=perip-general:100m rate=30r/s; # set up one bucket per remote ip for (costly) API access - limit_req_zone $binary_remote_addr zone=perip-api:100m rate=300000r/m; + limit_req_zone $binary_remote_addr zone=perip-api:100m rate=30r/s; # If limit_req directives are defined here, they apply to all servers that don't have their own ones #
Fix test_mdnrnn Summary: Pull Request resolved: CircleCI and internal results are different even when we set seeds. Either result makes sense.
@@ -393,7 +393,7 @@ class TestWorldModel(HorizonTestBase): config_path=os.path.join(curr_dir, config_path), use_gpu=False, ) - TestWorldModel.verify_result(feature_importance, ["state3"]) + TestWorldModel.verify_result(feature_importance, ["state1", "state3"]) TestWorldModel.verify_result(feature_sensitivity, ["state3"]) logger.info("MDNRNN feature test passes!")
Deseasonify: add helper func to resolve current month See docstring for details.
@@ -31,6 +31,20 @@ class InMonthCheckFailure(CheckFailure): pass +def _resolve_current_month() -> Month: + """ + Helper for local decorators to determine the correct Month value. + + This interfaces with the `MONTH_OVERRIDE` env var. If tha variable was set, + current month always resolves to this value. Otherwise, the current utc month + is given. + """ + if Client.month_override is not None: + return Month(Client.month_override) + else: + return Month(datetime.utcnow().month) + + def seasonal_task(*allowed_months: Month, sleep_time: t.Union[float, int] = ONE_DAY) -> t.Callable: """ Perform the decorated method periodically in `allowed_months`. @@ -50,7 +64,7 @@ def seasonal_task(*allowed_months: Month, sleep_time: t.Union[float, int] = ONE_ log.info(f"Starting seasonal task {task_body.__qualname__} ({allowed_months})") while True: - current_month = Month(datetime.utcnow().month) + current_month = _resolve_current_month() if current_month in allowed_months: await task_body(*args, **kwargs) @@ -72,7 +86,7 @@ def in_month_listener(*allowed_months: Month) -> t.Callable: @functools.wraps(listener) async def guarded_listener(*args, **kwargs) -> None: """Wrapped listener will abort if not in allowed month.""" - current_month = Month(datetime.utcnow().month) + current_month = _resolve_current_month() if current_month in allowed_months: # Propagate return value although it should always be None @@ -90,7 +104,7 @@ def in_month_command(*allowed_months: Month) -> t.Callable: Uses the current UTC month at the time of running the predicate. """ async def predicate(ctx: Context) -> bool: - current_month = datetime.utcnow().month + current_month = _resolve_current_month() can_run = current_month in allowed_months human_months = ", ".join(m.name for m in allowed_months)
STY: changed display style Changed the display style after pysat meeting by: adding flags for displaying or omitting platform/name and inst_module values, and adding a visual separator around the tag/inst_id.
@@ -650,7 +650,8 @@ def available_instruments(inst_loc=None): return inst_info -def display_available_instruments(inst_loc=None): +def display_available_instruments(inst_loc=None, show_inst_mod=None, + show_platform_name=None): """Display basic information about instruments in a given subpackage. Parameters @@ -658,17 +659,44 @@ def display_available_instruments(inst_loc=None): inst_loc : python subpackage or NoneType The location of the instrument subpackage (e.g., pysat.instruments) or None to list all registered instruments (default=None) + show_inst_mod : boolean or NoneType + Displays the instrument module if True, does not include it if False, + and reverts to standard display based on inst_loc type if None. + (default=None) + show_platform_name : boolean or NoneType + Displays the platform and name if True, does not include it if False, + and reverts to standard display based on inst_loc type if None. + (default=None) Note ---- - Prints to standard out, a user-friendly interface for availabe_instruments + Prints to standard out, a user-friendly interface for availabe_instruments. + Defaults to including the instrument module and not the platform/name values + if inst_loc is an instrument module and to including the platform/name + values and not the instrument module if inst_loc is None (listing the + registered instruments). """ inst_info = available_instruments(inst_loc) - print("Platform Name Instrument_Module Tag Inst_ID Description") - print("-" * 80) + if show_platform_name is None and inst_loc is None: + show_platform_name = True + + if show_inst_mod is None and inst_loc is not None: + show_inst_mod = True + + if show_platform_name: + header = "Platform Name " + else: + header = "" + + if show_inst_mod: + header = "{:s}Instrument_Module".format(header) + + header = "{:s} [Tag Inst_ID] Description".format(header) + print(header) + print("-" * len(header)) for platform in inst_info.keys(): for name in inst_info[platform].keys(): mod_str = "" @@ -676,13 +704,18 @@ def display_available_instruments(inst_loc=None): for tag in inst_info[platform][name][ 'inst_ids_tags'][inst_id].keys(): if len(mod_str) == 0: - mod_str = " ".join([ - platform.__repr__(), name.__repr__(), - inst_info[platform][name]['inst_module']]) + if show_platform_name: + mod_str = "".join([platform.__repr__(), " ", + name.__repr__(), " "]) + if show_inst_mod: + mod_str = "{:s}{:s}".format( + mod_str, + inst_info[platform][name]['inst_module']) else: mod_str = " " * len(mod_str) - print(" ".join([mod_str, inst_id.__repr__(), tag.__repr__(), + print("".join([mod_str, " [", tag.__repr__(), " ", + inst_id.__repr__(), "] ", inst_info[platform][name]['inst_ids_tags'][ inst_id][tag]])) return
F.cross_entropy(y_hat, y)(y_hat, y) typo. This seems to be a typo. Throws TypeError: 'Tensor' object is not callable.
@@ -50,13 +50,13 @@ class CoolModel(pl.LightningModule): # REQUIRED x, y = batch y_hat = self.forward(x) - return {'loss': F.cross_entropy(y_hat, y)(y_hat, y)} + return {'loss': F.cross_entropy(y_hat, y)} def validation_step(self, batch, batch_nb): # OPTIONAL x, y = batch y_hat = self.forward(x) - return {'val_loss': F.cross_entropy(y_hat, y)(y_hat, y)} + return {'val_loss': F.cross_entropy(y_hat, y)} def validation_end(self, outputs): # OPTIONAL
Fixed format string mistake in Error Message While debugging an issue with creating a Kivy exe using PyInstaller I found this bug where it adds the 's' to file names in the error message.
@@ -245,7 +245,7 @@ class LabelBase(object): font = resource_find(font_type) if font is None: - raise IOError('File {0}s not found'.format(font_type)) + raise IOError('File {0} not found'.format(font_type)) else: fonts.append(font) else:
C API: add exceptions wrapping in destroy_text TN:
@@ -996,6 +996,9 @@ package body ${ada_lib_name}.Analysis.C is end Wrap; procedure ${capi.get_name('destroy_text')} (T : ${text_type}_Ptr) is + begin + Clear_Last_Exception; + declare use System; begin if T.Is_Allocated /= 0 and then T.Chars /= System.Null_Address then @@ -1009,6 +1012,10 @@ package body ${ada_lib_name}.Analysis.C is T.Chars := System.Null_Address; end if; end; + exception + when Exc : others => + Set_Last_Exception (Exc); + end; % if ctx.default_unit_provider: function ${capi.get_name('create_unit_provider')}
Fix variable interpolation by fixing bad line break Fixes
<p> {{ entity_type_human}}s are ordered by mean percentile over the past six - months. Each chart shows the results for the individual {{ - entity_type_human}}, plus deciles across all {{ entity_type_human}}s in the + months. Each chart shows the results for the individual {{ entity_type_human}}, + plus deciles across all {{ entity_type_human }}s in the NHS in England. </p>
Release: For Python3 bytecode compilation errors of inline scons copies. * There will be errors for compiling the Python2 only version of scons that is included.
%global python3_sitearch %(%{__python3} -c "import sys, distutils.sysconfig; sys.stdout.write(distutils.sysconfig.get_python_lib(0))") +%global _python_bytecompile_errors_terminate_build 0 + Name: nuitka Version: VERSION Release: 5%{?dist}
salt-api no longer forces the default timeout Conflicts: - salt/config/__init__.py
@@ -3315,15 +3315,12 @@ def api_config(path): Read in the Salt Master config file and add additional configs that need to be stubbed out for salt-api ''' - # Let's grab a copy of salt's master opts - opts = client_config(path, defaults=DEFAULT_MASTER_OPTS) - # Let's override them with salt-api's required defaults - api_opts = DEFAULT_API_OPTS - api_opts.update({ - 'pidfile': opts.get('api_pidfile', DEFAULT_API_OPTS['api_pidfile']), - 'log_file': opts.get('api_logfile', DEFAULT_API_OPTS['api_logfile']), - }) - opts.update(api_opts) + # Let's grab a copy of salt-api's required defaults + opts = DEFAULT_API_OPTS + + # Let's override them with salt's master opts + opts.update(client_config(path, defaults=DEFAULT_MASTER_OPTS)) + prepend_root_dir(opts, [ 'api_pidfile', 'api_logfile',
Fix bug Add Issue tracking to autocomplete list of External Resources Also: Remove unused entry Language pack
<option value="Development build"> <option value="Production build"> <option value="Screenshots"> - <option value="Language pack"> + <option value="Issue tracking"> </datalist> <h3>In-context localization <span class="small stress">(optional)</span></h3>
Install a handler for SIGHUP We were not handling SIGHUP previously so resources were not freed up when the terminal was killed/closed. Closes
@@ -8,6 +8,7 @@ with the agents. import atexit import os import random +import signal import subprocess import sys @@ -137,7 +138,15 @@ class HolodeckEnvironment: self._initial_reset = False self.reset() + # System event handlers for graceful exit. We may only need to handle + # SIGHUB, but I'm being a little paranoid + signal.signal(signal.SIGHUP, self.graceful_exit) + signal.signal(signal.SIGTERM, self.graceful_exit) + signal.signal(signal.SIGINT, self.graceful_exit) + def clean_up_resources(self): + """ Frees up references to mapped memory files. + """ self._command_center.clean_up_resources() if hasattr(self, "_reset_ptr"): del self._reset_ptr @@ -145,6 +154,12 @@ class HolodeckEnvironment: self.agents[key].clean_up_resources() del self.agents[key] + def graceful_exit(self, signum, frame): + """ Signal handler to gracefully exit the script + """ + self.__on_exit__() + sys.exit() + @property def action_space(self): """Gives the action space for the main agent.
Update Notification schema to preserve template_history attribute `Notification.template_history` relationship has been removed but we want to keep the `template_history` key in existing notification serializations, so we serialize it from `Notifications.template`. This keeps the data format the same, but both `template` and `template_history` keys will now contain data from the `TemplateHistory` instance.
@@ -450,7 +450,7 @@ class NotificationWithTemplateSchema(BaseSchema): class NotificationWithPersonalisationSchema(NotificationWithTemplateSchema): - template_history = fields.Nested(TemplateHistorySchema, + template_history = fields.Nested(TemplateHistorySchema, attribute="template", only=['id', 'name', 'template_type', 'content', 'subject', 'version'], dump_only=True)
docs/ fix faq market making Delete double word on faq market making
@@ -39,7 +39,7 @@ Another common risk that market makers need to be aware of is trending markets. ![Figure 4: A trending market](/assets/img/pure-mm-trending.png) -If a pure market maker set his spreads naively in such a market, e.g. equidistant bid/ask spread, there's a risk of the market maker's bid consistently being filled as prices trend down, while at the same time the market continues to move away from the market maker's ask, decreasing the probability of sells. This would result in an accumulation of inventory at exactly the time where this would reduce inventory inventory value, which is "wrong-way" risk. +If a pure market maker set his spreads naively in such a market, e.g. equidistant bid/ask spread, there's a risk of the market maker's bid consistently being filled as prices trend down, while at the same time the market continues to move away from the market maker's ask, decreasing the probability of sells. This would result in an accumulation of inventory at exactly the time where this would reduce inventory value, which is "wrong-way" risk. However, it is still possible to improve the probability of generating profits in this kind of market by skewing bid asks, i.e. setting a wider bid spread (e.g. -4%) than ask spread (e.g. +0.5%). In this way, the market maker is trying to catch price spikes in the direction of the trend and buy additional inventory only in the event of a larger moves, but sell more quickly when there is an opportunity so as to minimize the duration the inventory is held. This approach also has a mean reversion bias, i.e. buy only when there is a larger move downwards, in the hopes of stabilization or recovery after such a large move.
Fix symlink docstring Related-Change-Id:
@@ -237,8 +237,8 @@ def _check_symlink_header(req): x-symlink-target header is present in req.headers. :param req: HTTP request object - :returns: a tuple, the full versioned WSGI quoted path to the object and - the value of the X-Symlink-Target-Etag header which may be None + :returns: a tuple, the full versioned path to the object (as a WSGI string) + and the X-Symlink-Target-Etag header value which may be None :raise: HTTPPreconditionFailed if x-symlink-target value is not well formatted. :raise: HTTPBadRequest if the x-symlink-target value points to the request
Update CVE-2019-12616.yaml I don't know why the matcher was changed. The matcher phpmyadmin.net doesn't work in my test cases.
@@ -22,6 +22,7 @@ requests: - type: word words: - "phpmyadmin.net" + - "phpMyAdmin" - type: regex regex: @@ -30,4 +31,4 @@ requests: - type: status status: - 200 - - 401 + - 401 #password protected
Fix extension panel appearance Fix a bug where the extensions panel wouldn't appear
@@ -341,7 +341,6 @@ class ExportGLTF2_Base: def invoke(self, context, event): settings = context.scene.get(self.scene_key) self.will_save_settings = False - self.has_active_extenions = False if settings: try: for (k, v) in settings.items(): @@ -358,10 +357,10 @@ class ExportGLTF2_Base: try: if hasattr(sys.modules[addon_name], 'glTF2ExportUserExtension') or hasattr(sys.modules[addon_name], 'glTF2ExportUserExtensions'): extension_panel_unregister_functors.append(sys.modules[addon_name].register_panel()) - self.has_active_extenions = True except Exception: pass + self.has_active_extenions = len(extension_panel_unregister_functors) > 0 return ExportHelper.invoke(self, context, event) def save_settings(self, context):
Chunking fac2real multiprocessing in `apply_array_pars()` methods. Have also tried to remove duplication where mults are repeatedly calculated for the same file (e.g. where a constant spatial distribution is multiplied across a number of kper)
@@ -2950,6 +2950,10 @@ class PstFromFlopyModel(object): self.logger.statement("forward_run line:{0}".format(line)) self.frun_post_lines.append(line) +def _process_chunk_fac2real(chunk): + for args in chunk: + pyemu.geostats.fac2real(**args) + def _process_chunk_model_files(chunk, df): for model_file in chunk: @@ -3020,16 +3024,22 @@ def apply_array_pars(arr_par_file="arr_pars.csv"): if 'pp_file' in df.columns: print("starting fac2real",datetime.now()) - pp_args = [] - for pp_file,fac_file,mlt_file in zip(df.pp_file,df.fac_file,df.mlt_file): - if pd.isnull(pp_file): - continue - pp_args.append({"pp_file":pp_file,"factors_file":fac_file,"out_file":mlt_file,"lower_lim":1.0e-10}) - # pyemu.geostats.fac2real(pp_file=pp_file,factors_file=fac_file, - # out_file=mlt_file,lower_lim=1.0e-10) + pp_df = df.loc[df.pp_file.notna(), + ['pp_file', 'fac_file', 'mlt_file']].rename( + columns={'fac_file': 'factors_file', 'mlt_file': 'out_file'}) + pp_df.loc[:, 'lower_lim'] = 1.0e-10 + # don't need to process all (e.g. if const. mults apply across kper...) + pp_args = pp_df.drop_duplicates().to_dict('records') + num_ppargs = len(pp_args) + chunk_len = 50 + num_chunk_floor = num_ppargs // chunk_len + main_chunks = np.array(pp_args)[:num_chunk_floor * chunk_len].reshape( + [-1, chunk_len]).tolist() + remainder = np.array(pp_args)[num_chunk_floor * chunk_len:].tolist() + chunks = main_chunks + [remainder] procs = [] - for args in pp_args: - p = mp.Process(target=pyemu.geostats.fac2real,kwargs=args) + for chunk in chunks: + p = mp.Process(target=_process_chunk_fac2real, args=[chunk]) p.start() procs.append(p) for p in procs:
Fix test_slurm due to timeout Put the torque command test at the end of the test suite to avoid compute node termination timeout.
@@ -53,8 +53,6 @@ def test_slurm(region, os, pcluster_config_reader, clusters_factory, test_datadi if supports_impi: _test_mpi_job_termination(remote_command_executor, test_datadir) - _test_torque_job_submit(remote_command_executor, test_datadir) - _test_dynamic_max_cluster_size(remote_command_executor, region, cluster.asg, max_queue_size=max_queue_size) _test_cluster_limits(remote_command_executor, max_queue_size) _test_job_dependencies(remote_command_executor, region, cluster.cfn_name, scaledown_idletime, max_queue_size) @@ -64,6 +62,8 @@ def test_slurm(region, os, pcluster_config_reader, clusters_factory, test_datadi ) _test_dynamic_dummy_nodes(remote_command_executor, region, cluster.asg, max_queue_size) + _test_torque_job_submit(remote_command_executor, test_datadir) + assert_no_errors_in_logs(remote_command_executor, ["/var/log/sqswatcher", "/var/log/jobwatcher"])
Make sure `y` importance score stays 0 in test Otherwise we could have randomly failing tests
@@ -84,7 +84,10 @@ def test_switch_label_when_param_insignificant() -> None: return x ** 2 study = create_study() - study.optimize(_objective, n_trials=100) + for x in range(1, 3): + study.enqueue_trial({"x": x, "y": 0}) + + study.optimize(_objective, n_trials=2) ax = plot_param_importances(study) # Test if label for `y` param has been switched to `<0.01`.
Add batch and txn validation methods to CandidateBlock These methods check that no batch or transaction has been committed in the same chain and that all dependencies are satisfied.
@@ -101,4 +101,62 @@ impl CandidateBlock { pub fn can_add_batch(&self) -> bool { self.max_batches == 0 || self.pending_batches.len() < self.max_batches } + + fn check_batch_dependencies(&mut self, batch: &Batch) -> bool { + for txn in &batch.transactions { + if self.txn_is_already_committed(txn, &self.committed_txn_cache) { + debug!( + "Transaction rejected as it is already in the chain {}", + txn.header_signature + ); + return false; + } else if !self.check_transaction_dependencies(txn) { + self.committed_txn_cache.remove_batch(batch); + return false; + } + self.committed_txn_cache.add(txn.header_signature.clone()); + } + true + } + + fn check_transaction_dependencies(&self, txn: &Transaction) -> bool { + for dep in &txn.dependencies { + if !self.committed_txn_cache.contains(dep.as_str()) { + debug!( + "Transaction rejected due to missing dependency, transaction {} depends on {}", + txn.header_signature.as_str(), + dep.as_str() + ); + return false; + } + } + true + } + + fn txn_is_already_committed( + &self, + txn: &Transaction, + committed_txn_cache: &TransactionCommitCache, + ) -> bool { + committed_txn_cache.contains(txn.header_signature.as_str()) || { + let py = unsafe { cpython::Python::assume_gil_acquired() }; + self.block_store + .call_method(py, "has_batch", (txn.header_signature.as_str(),), None) + .expect("Blockstore has no method 'has_batch'") + .extract::<bool>(py) + .unwrap() + } + } + + fn batch_is_already_committed(&self, batch: &Batch) -> bool { + self.pending_batch_ids + .contains(batch.header_signature.as_str()) || { + let py = unsafe { cpython::Python::assume_gil_acquired() }; + self.block_store + .call_method(py, "has_batch", (batch.header_signature.as_str(),), None) + .expect("Blockstore has no method 'has_batch'") + .extract::<bool>(py) + .unwrap() + } + } }
Update get_youtube_view.py Updated the file so it contains no errors and runs the youtube videos.
@@ -23,12 +23,12 @@ refreshrate = minutes * 60 + seconds #Selecting Safari as the browser driver = webdriver.Safari() -if(url.startswith("https://"): +if url.startswith("https://"): driver.get(url) else: driver.get("https://"+url) -for _ in range(count): +for i in range(count): #Sets the page to refresh at the refreshrate. time.sleep(refreshrate) driver.refresh()
Make font in metric selector black. Currently, the default font color is used against a white background. This is bad for themes that use a light font color, such as dark themes. This commit fixes that by making the font always black.
@@ -18,6 +18,10 @@ template.innerHTML = ` <style> #metric-and-slice-selector-title { padding: 16px 16px 0 16px; + /* We set font color to black because the Fairness widget background is always white. + * Without explicitly setting it, the font color is selected by the Jupyter environment theme. + */ + color: black } paper-listbox {
Added changelog entry Updated changelog entry
@@ -76,6 +76,8 @@ astropy.io.fits - Add an ``ignore_hdus`` keyword to ``FITSDiff`` to allow ignoring HDUs by NAME when diffing two FITS files [#7538] +- All time coordinates can now be written to and read from FITS binary tables, + including those with vectorized locations. [#7430] astropy.io.registry ^^^^^^^^^^^^^^^^^^^ @@ -480,6 +482,9 @@ astropy.stats astropy.table ^^^^^^^^^^^^^ +- Added a feature to write and read vectorized location for time coordinates + to/from FITS Binary Table. [#7430] + astropy.tests ^^^^^^^^^^^^^
Last change I make to filtering generators I swear Also I added docs for filter
@@ -121,7 +121,7 @@ class Generator: if index == l: break obj = self.__getitem__(index) - ret = _safe_apply(function, [obj])[-1][-1] + ret = _safe_apply(function, obj)[-1] if ret: yield obj index += 1 @@ -543,6 +543,8 @@ def interleave(lhs, rhs): ret += list(lhs[i + 1:]) return ret def is_prime(n): + if type(n) is str: return False + if type(n) in [list, Generator]: return vectorise(is_prime, n) if n % 2 == 0 and n > 2: return False return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))
Allow any list type for .as_array, not just arrays of root nodes TN:
@@ -368,7 +368,7 @@ def as_array(self, list_expr): abstract_result = Map(list_expr, expr=collection_expr_identity) abstract_result.prepare() result = construct(abstract_result) - root_list_type = get_context().root_grammar_class.list_type() + root_list_type = get_context().generic_list_type check_source_language( issubclass(result.collection.type, root_list_type), '.as_array input must be an AST list (here: {})'.format(
RTD: use new requirements files The sphinx_bootstrap_theme requirement is now in a second file. RTD v1 only supports a single requirements file (which was previously auto-detected). With v2 both `CI` and `STRICT` can be referenced. see:
+version: 2 build: image: latest python: version: 3.6 - -requirements_file: - null + install: + - requirements: REQUIREMENTS-CI.txt + - requirements: REQUIREMENTS-STRICT.txt
perf: faster processing of black tiles for Graphene No need to talk to the graph server about tiles that are purely background color.
@@ -185,6 +185,8 @@ class CloudVolumeGraphene(CloudVolumePrecomputed): def agglomerate_cutout(self, img, timestamp=None, stop_layer=None): """Remap a graphene volume to its latest root ids. This creates a flat segmentation.""" + if np.all(img == self.image.background_color): + return img labels = fastremap.unique(img) roots = self.get_roots(labels, timestamp=timestamp, binary=True, stop_layer=stop_layer) mapping = { segid: root for segid, root in zip(labels, roots) }
[fix] irc: Allow extra config keys again. Some trackers require them. Revert "irc: Don't allow extra keys in config" This reverts commit
@@ -96,7 +96,7 @@ schema = { } ], 'required': ['port'], - 'additionalProperties': False, + 'additionalProperties': {'type': 'string'}, }, }, {'type': 'boolean', 'enum': [False]},
Add pseudo-random number generator for position noise needs testing
@@ -287,23 +287,40 @@ void StartRX(void const * argument){ } +// x^6 + x^5 + 1 with period 63 +static const uint8_t POLY_MASK = 0b0110000; + +/** + * @brief Updates the contents of the linear feedback shift register passed in. + * At any given time, its contents will contain a psuedorandom sequence + * which repeats after a period dependent on the polynomial structure. + * @param lfsr Pointer to shift register contents. Holds output prn sequence + */ +static inline void update_lfsr(uint8_t* lfsr){ + uint8_t stream_in = *lfsr & 1; + *lfsr >>= 1; + + if(stream_in == 1){ + *lfsr ^= POLY_MASK; + } +} + + void StartTX(void const * argument){ bool status; uint8_t buf[8] = {0xFF, 0xFF, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00}; Data_t data; - int8_t var = -32; // add some variance to the data + int8_t prn = 0x2F; // seed value for PRNG for(;;){ while(xQueueReceive(toBeSentQHandle, &data, portMAX_DELAY) != pdTRUE); - ++var; - if(var > 32){ - var = -32; - } + // Generate new pseudo-random number + update_lfsr(&prn); buf[2] = data.id; - buf[5] = (data.pos & 0xFF) + var; // low byte + buf[5] = (data.pos & 0xFF) + prn; // low byte with statistical noise buf[6] = (data.pos >> 8) & 0xFF; // high byte buf[7] = ~sumBytes(buf, 6);
Refactor check_tags to account for optional tags check_tags previously checked that no tags besides those specified were present. It now accounts for optional tags; tags which are not a cause for failure if present or absent, as long as they are not empty. The updated interface is backwards-compatible.
@@ -18,9 +18,11 @@ STD_WAITTIME = 15 * 60 * 1000 STD_INTERVAL = 5 * 1000 -def check_tags(tags: dict, expected_tag_names: set): +def check_tags(tags: dict, required_tag_names: set, optional_tag_names: set=set()): """Assert that tags contains only expected keys with nonempty values.""" - assert set(tags.keys()) == expected_tag_names + keys = set(tags.keys()) + assert keys & required_tag_names == required_tag_names, 'Not all required tags were set' + assert keys - required_tag_names - optional_tag_names == set(), 'Encountered unexpected tags' for tag_name, tag_val in tags.items(): assert tag_val != '', 'Value for tag "%s" must not be empty'.format(tag_name)