message (stringlengths 13–484) | diff (stringlengths 38–4.63k) |
---|---|
Remove note about needing at least one route
That's no longer true with the new deployer. | @@ -22,10 +22,6 @@ decorator. Let's look at an example.
def every_hour(event):
print(event.to_dict())
- @app.route('/')
- def index():
- return {'hello': 'world'}
-
In this example, we've updated the starter hello world app with
a scheduled event. When you run ``chalice deploy`` Chalice will create
@@ -48,8 +44,3 @@ instance of :class:`Rate` or :class:`Cron`. For example:
The function you decorate must accept a single argument,
which will be of type :class:`CloudWatchEvent`.
-
-Limitations:
-
-* You must provide at least 1 ``@app.route`` decorator. It is not
- possible to deploy only scheduled events without an API Gateway API.
|
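With that limitation gone, a Chalice app can consist of nothing but scheduled events. A minimal sketch (the app name and schedule below are arbitrary):

```python
# app.py -- a Chalice app with only a scheduled event and no @app.route
from chalice import Chalice, Rate

app = Chalice(app_name='periodic-only')

@app.schedule(Rate(1, unit=Rate.HOURS))
def every_hour(event):
    # event is a CloudWatchEvent; to_dict() returns the raw event payload
    print(event.to_dict())
```

Running `chalice deploy` on this creates the Lambda function and the CloudWatch Events rule without an API Gateway API.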
Fix Msh2MeshIO:
Skip writing data if out is None, write all digits for node coordinates,
write material ids. | @@ -2899,7 +2899,7 @@ class Msh2MeshIO(MeshIO):
if drop_z and nm.sum(coors[:, -1]) == 0.0:
coors = coors[:, :-1]
- mesh._set_io_data(coors[:,1:], nm.int32(coors[:,-1] * 0),
+ mesh._set_io_data(coors[:,1:], nm.int32(coors[:,-1] * 1),
conns0, mat_ids0, descs0)
return mesh
@@ -3009,14 +3009,6 @@ class Msh2MeshIO(MeshIO):
return mesh, [data], [time], [time_n], scheme
return [data], [time], [time_n], scheme
-
-
-
-
-
-
-
-
def write(self, filename, mesh, out=None, ts=None, **kwargs):
"""
Writes data into msh v2.0 file, handles cell_nodes data from DGField
@@ -3037,21 +3029,26 @@ class Msh2MeshIO(MeshIO):
coors, ngroups, conns, mat_ids, descs = mesh._get_io_data()
dim = mesh.dim
+ if len(descs) != 1:
+ raise ValueError("Different element types not supported.")
+
+ mat_ids = mat_ids[0]
+
fd.write("$Nodes\n")
fd.write(str(mesh.n_nod) + "\n")
- s = "{}" + dim*" {:.3f}" + (3 - dim)*" 0.0" + "\n"
+ s = "{}" + dim*" {}" + (3 - dim)*" 0.0" + "\n"
for i, node in enumerate(coors, 1):
fd.write(s.format(i, *node))
fd.write("$EndNodes\n")
fd.write("$Elements\n")
fd.write(str(sum( len(conn) for conn in conns)) + "\n") # sum number of elements acrcoss all conns
- for desc, conn in zip(descs, conns):
+ for desc, mat_id, conn in zip(descs, mat_ids, conns):
_, n_el_verts = [int(f) for f in desc.split("_")]
el_type = self.geo2msh_type[desc]
- s = "{} {} 2 0 0" + n_el_verts * " {}" + "\n"
+ s = "{} {} 2 {} 0" + n_el_verts * " {}" + "\n"
for i, element in enumerate(conn, 1):
- fd.write(s.format(i, el_type, *nm.array(element) + 1))
+ fd.write(s.format(i, el_type, mat_id, *nm.array(element) + 1))
fd.write("$EndElements\n")
def write_interpolation_scheme(fd, scheme):
@@ -3114,6 +3111,7 @@ class Msh2MeshIO(MeshIO):
fd = open(filename, 'w')
fd.writelines(self.msh20header)
write_mesh(fd, mesh)
+ if out:
write_elementnodedata(fd, out, ts)
fd.close()
return
|
Allow username detection on older Cisco IOS versions
This switches to the older `| include` syntax, and allows usernames with `password` to be detected. | @@ -3034,14 +3034,14 @@ class IOSDriver(NetworkDriver):
"""
username_regex = (
r"^username\s+(?P<username>\S+)\s+(?:privilege\s+(?P<priv_level>\S+)"
- r"\s+)?(?:secret \d+\s+(?P<pwd_hash>\S+))?$"
+ r"\s+)?(?:(password|secret) \d+\s+(?P<pwd_hash>\S+))?$"
)
pub_keychain_regex = (
r"^\s+username\s+(?P<username>\S+)(?P<keys>(?:\n\s+key-hash\s+"
r"(?P<hash_type>\S+)\s+(?P<hash>\S+)(?:\s+\S+)?)+)$"
)
users = {}
- command = "show run | section username"
+ command = "show run | include username"
output = self._send_command(command)
for match in re.finditer(username_regex, output, re.M):
users[match.groupdict()["username"]] = {
|
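To see the effect of the widened pattern, here is a small standalone check; the config lines and hashes below are made up:

```python
# Exercising the updated regex against fake "show run | include username" output
import re

username_regex = (
    r"^username\s+(?P<username>\S+)\s+(?:privilege\s+(?P<priv_level>\S+)"
    r"\s+)?(?:(password|secret) \d+\s+(?P<pwd_hash>\S+))?$"
)
sample_output = (
    "username admin privilege 15 secret 5 $1$abcd$efghijkl\n"
    "username ops password 7 08204E4D0D48\n"
)
for m in re.finditer(username_regex, sample_output, re.M):
    print(m.group("username"), m.group("priv_level"), m.group("pwd_hash"))
# The old "secret"-only pattern would not match the second line at all.
```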
Ensure topic as bytes when zmq_filtering enabled
when sending multipart messages in ZeroMQ, the first frame (the topic) should be bytes
Conflicts:
- salt/transport/zeromq.py | @@ -864,7 +864,7 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
else:
# TODO: constants file for "broadcast"
log.trace('Sending broadcasted data over publisher %s', pub_uri)
- pub_sock.send('broadcast', flags=zmq.SNDMORE)
+ pub_sock.send(b'broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
log.trace('Broadcasted data has been sent')
else:
|
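The underlying constraint: on Python 3, pyzmq only accepts bytes for message frames, and PUB/SUB filtering matches against the leading (topic) frame. A sketch outside of Salt, with an arbitrary address and payload:

```python
import zmq

ctx = zmq.Context.instance()
pub = ctx.socket(zmq.PUB)
pub.bind("tcp://127.0.0.1:5556")

# topic frame first, then the payload frame
pub.send(b"broadcast", flags=zmq.SNDMORE)
pub.send(b"minion payload")
# pub.send("broadcast", flags=zmq.SNDMORE) would raise TypeError on Python 3
```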
protocols: don't listen when using udp
forgot to correctly test the UDP part
Fixes: (protocols: support SO_REUSEPORT) | @@ -50,12 +50,12 @@ class CarbonService(service.Service):
if hasattr(socket, "SO_REUSEPORT"):
carbon_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
carbon_sock.bind((self.interface, self.port))
- carbon_sock.listen(tmp_port.backlog)
if hasattr(self.protocol, 'datagramReceived'):
self._port = reactor.adoptDatagramPort(
carbon_sock.fileno(), socket.AF_INET, self.protocol())
else:
+ carbon_sock.listen(tmp_port.backlog)
self._port = reactor.adoptStreamPort(
carbon_sock.fileno(), socket.AF_INET, self.factory)
carbon_sock.close()
|
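The reason the listen() call had to move: listen() is only valid for stream (TCP) sockets, while a datagram socket has no connection backlog. A quick standard-library illustration:

```python
import socket

udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_sock.bind(("127.0.0.1", 0))
try:
    udp_sock.listen(5)   # not meaningful for UDP
except OSError as exc:
    print("listen() rejected on a UDP socket:", exc)
finally:
    udp_sock.close()
```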
DOC: Update to clarify actual behavior real_if_(all elements)_close
Updated the description to consider all array elements
Updated the examples to use a multi-element array, to show that a single element that is not close enough prevents the whole array from being considered real
Closes | @@ -492,7 +492,8 @@ def _real_if_close_dispatcher(a, tol=None):
@array_function_dispatch(_real_if_close_dispatcher)
def real_if_close(a, tol=100):
"""
- If complex input returns a real array if complex parts are close to zero.
+ If input is complex with all imaginary parts close to zero, return
+ real parts.
"Close to zero" is defined as `tol` * (machine epsilon of the type for
`a`).
@@ -527,10 +528,10 @@ def real_if_close(a, tol=100):
>>> np.finfo(float).eps
2.2204460492503131e-16 # may vary
- >>> np.real_if_close([2.1 + 4e-14j], tol=1000)
- array([2.1])
- >>> np.real_if_close([2.1 + 4e-13j], tol=1000)
- array([2.1+4.e-13j])
+ >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000)
+ array([2.1, 5.2])
+ >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000)
+ array([2.1+4.e-13j, 5.2 + 3e-15j])
"""
a = asanyarray(a)
|
Update callback_data.py
Can't pass 0 to value, raises error 'ValueError: Value for {part} is not passed!' | @@ -55,7 +55,7 @@ class CallbackData:
for part in self._part_names:
value = kwargs.pop(part, None)
- if not value:
+ if value is None:
if args:
value = args.pop(0)
else:
|
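The distinction the patch relies on, in isolation: 0 (and other falsy values) should count as passed, and only None should mean "not passed":

```python
value = 0
if not value:
    print("old check: 0 is wrongly treated as missing")
if value is None:
    print("new check: only None is treated as missing")   # not printed for value = 0
```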
Adding background and foreground color to multilinetextinput
Adding background and foreground color support to multilinetextinput.
This makes it possible to change colors. The only color that is still fixed is the placeholder text (SystemColors.GrayText).
from toga_winforms.libs import WinForms, SystemColors
+from toga_winforms.colors import native_color
from .base import Widget
@@ -14,6 +15,7 @@ class MultilineTextInput(Widget):
self.native.Enter += self.winforms_enter
self.native.Leave += self.winforms_leave
self._placeholder = None
+ self._color = SystemColors.WindowText
def winforms_enter(self, sender, event):
if self._placeholder != '' and self.native.Text == self._placeholder:
@@ -63,7 +65,16 @@ class MultilineTextInput(Widget):
self._update_text_color()
def _update_text_color(self):
- self.native.ForeColor = SystemColors.WindowText
+ self.native.ForeColor = self._color
def _update_placeholder_color(self):
self.native.ForeColor = SystemColors.GrayText
+
+ def set_color(self, color):
+ if color:
+ self._color = native_color(color)
+ self._update_text_color()
+
+ def set_background_color(self, value):
+ if value:
+ self.native.BackColor = native_color(value)
|
Add assertion to test_consolidate_hashes_raises_exception
Trivial follow up to related change to verify logger warning
with both starting conditions.
Related-Change: | @@ -6089,7 +6089,7 @@ class TestSuffixHashes(unittest.TestCase):
open_loc = '__builtin__.open' if six.PY2 else 'builtins.open'
with mock.patch(open_loc, watch_open):
self.assertTrue(os.path.exists(inv_file))
- # no new suffixes get invalided... so no write iop
+ # no new suffixes get invalidated... so no write iop
df_mgr.get_hashes('sda1', '0', [], policy)
# each file is opened once to read
expected = {
@@ -6212,6 +6212,7 @@ class TestSuffixHashes(unittest.TestCase):
self.assertEqual(warnings, ["Unable to read %r" % hashes_file])
# repeat with pre-existing hashes.pkl
+ self.logger.clear()
with mock.patch.object(df_mgr, '_hash_suffix',
return_value='new fake hash'):
with mock.patch.object(df_mgr, 'consolidate_hashes',
@@ -6226,6 +6227,10 @@ class TestSuffixHashes(unittest.TestCase):
with open(hashes_file, 'rb') as f:
self.assertEqual(hashes, pickle.load(f))
+ # sanity check log warning
+ warnings = self.logger.get_lines_for_level('warning')
+ self.assertEqual(warnings, ["Unable to read %r" % hashes_file])
+
# invalidate_hash tests - error handling
def test_invalidate_hash_bad_pickle(self):
|
qt gui: display nice error if QR code data overflows
there is existing handler-code at e.g.
but we should make sure setData() always raises the exc when needed,
as paintEvent() is too late for nice handling.
closes
closes | from typing import Optional
import qrcode
+import qrcode.exceptions
from PyQt5.QtGui import QColor, QPen
import PyQt5.QtGui as QtGui
@@ -16,6 +17,10 @@ from electrum.simple_config import SimpleConfig
from .util import WindowModalDialog, WWLabel, getSaveFileName
+class QrCodeDataOverflow(qrcode.exceptions.DataOverflowError):
+ pass
+
+
class QRCodeWidget(QWidget):
def __init__(self, data=None, *, manual_size: bool = False):
@@ -27,20 +32,25 @@ class QRCodeWidget(QWidget):
self.setData(data)
def setData(self, data):
- if self.data != data:
- self.data = data
- if self.data:
- self.qr = qrcode.QRCode(
+ if data:
+ qr = qrcode.QRCode(
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=10,
border=0,
)
- self.qr.add_data(self.data)
+ try:
+ qr.add_data(data)
+ qr_matrix = qr.get_matrix() # test that data fits in QR code
+ except (ValueError, qrcode.exceptions.DataOverflowError) as e:
+ raise QrCodeDataOverflow() from e
+ self.qr = qr
+ self.data = data
if not self._manual_size:
- k = len(self.qr.get_matrix())
+ k = len(qr_matrix)
self.setMinimumSize(k * 5, k * 5)
else:
self.qr = None
+ self.data = None
self.update()
|
Fix SSLError caused by not passing the cafile
When tls is enabled, the cafile needs to be passed in the session.
Closes-Bug: | @@ -23,6 +23,7 @@ PASSWORD_PLUGIN = 'password'
TRUSTEE_CONF_GROUP = 'trustee'
KEYSTONE_AUTHTOKEN_GROUP = 'keystone_authtoken'
loading.register_auth_conf_options(cfg.CONF, TRUSTEE_CONF_GROUP)
+loading.register_session_conf_options(cfg.CONF, TRUSTEE_CONF_GROUP)
loading.register_auth_conf_options(cfg.CONF, KEYSTONE_AUTHTOKEN_GROUP)
_ZAQAR_ENDPOINTS = {}
@@ -38,18 +39,21 @@ def get_trusted_token(trust_id):
auth_plugin = loading.load_auth_from_conf_options(
cfg.CONF, TRUSTEE_CONF_GROUP, trust_id=trust_id)
- trust_session = session.Session(auth=auth_plugin)
+ trust_session = loading.load_session_from_conf_options(
+ cfg.CONF, TRUSTEE_CONF_GROUP, auth=auth_plugin)
return trust_session.auth.get_access(trust_session).auth_token
def _get_admin_session(conf_group):
auth_plugin = loading.load_auth_from_conf_options(
cfg.CONF, conf_group)
- return session.Session(auth=auth_plugin)
+ return session.Session(
+ auth=auth_plugin, verify=getattr(cfg.CONF, conf_group).cafile)
def _get_user_client(auth_plugin):
- sess = session.Session(auth=auth_plugin)
+ sess = loading.load_session_from_conf_options(
+ cfg.CONF, TRUSTEE_CONF_GROUP, auth=auth_plugin)
return client.Client(session=sess)
|
Clarify environment reset after configuration change
See | @@ -66,6 +66,11 @@ For example, the number of lanes can be changed with:
plt.imshow(env.render(mode="rgb_array"))
plt.show()
+.. warning::
+
+ The environment must be :py:meth:`~highway_env.envs.common.abstract.AbstractEnv.reset` before the change of configuration
+ is effective.
+
Training an agent
-------------------
|
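For reference, the workflow the warning describes looks roughly like this (gym-era API to match the snippet above; the lanes_count value is arbitrary):

```python
import gym
import highway_env  # noqa: F401  -- registers the highway-v0 environment

env = gym.make("highway-v0")
env.configure({"lanes_count": 2})  # change the configuration...
obs = env.reset()                  # ...which only takes effect after reset()
```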
GafferOSL::ShadingEngine : Get shading context from OSL
The way we were previously caching these in thread local storage
seems to have been creating some sort of weird, hard-to-reproduce
crash. | @@ -622,6 +622,23 @@ OSL::ShadingSystem *shadingSystem()
return g_shadingSystem;
}
+// This just exists to ensure that release is called
+struct ShadingContextWrapper
+{
+
+ ShadingContextWrapper()
+ : shadingContext( ::shadingSystem()->get_context() )
+ {
+ }
+
+ ~ShadingContextWrapper()
+ {
+ ::shadingSystem()->release_context( shadingContext );
+ }
+
+ ShadingContext *shadingContext;
+};
+
} // namespace
@@ -1156,41 +1173,17 @@ IECore::CompoundDataPtr ShadingEngine::shade( const IECore::CompoundData *points
// Iterate over the input points, doing the shading as we go
- struct ThreadContext
- {
-
- ThreadContext()
- : m_shadingSystem( ::shadingSystem() ), m_shadingContext( m_shadingSystem->get_context() )
- {
- }
-
- ~ThreadContext()
- {
- m_shadingSystem->release_context( m_shadingContext );
- }
-
- ShadingResults::DebugResultsMap results;
-
- ShadingContext *shadingContext() const { return m_shadingContext; }
-
- private :
-
- ShadingSystem *m_shadingSystem;
- ShadingContext *m_shadingContext;
-
- };
-
- typedef tbb::enumerable_thread_specific<ThreadContext> ThreadContextType;
- ThreadContextType contexts;
+ typedef tbb::enumerable_thread_specific<ShadingResults::DebugResultsMap> ThreadLocalDebugResults;
+ ThreadLocalDebugResults debugResultsCache;
const IECore::Canceller *canceller = context->canceller();
ShadingSystem *shadingSystem = ::shadingSystem();
ShaderGroup &shaderGroup = **static_cast<ShaderGroupRef *>( m_shaderGroupRef );
- auto f = [&shadingSystem, &renderState, &results, &shaderGlobals, &p, &u, &v, &uv, &n, &shaderGroup, &contexts, canceller]( const tbb::blocked_range<size_t> &r )
+ auto f = [&shadingSystem, &renderState, &results, &shaderGlobals, &p, &u, &v, &uv, &n, &shaderGroup, &debugResultsCache, canceller]( const tbb::blocked_range<size_t> &r )
{
- ThreadContextType::reference context = contexts.local();
+ ThreadLocalDebugResults::reference resultCache = debugResultsCache.local();
ThreadRenderState threadRenderState( renderState );
@@ -1198,6 +1191,7 @@ IECore::CompoundDataPtr ShadingEngine::shade( const IECore::CompoundData *points
threadShaderGlobals.renderstate = &threadRenderState;
+ ShadingContextWrapper contextWrapper;
for( size_t i = r.begin(); i < r.end(); ++i )
{
IECore::Canceller::check( canceller );
@@ -1229,9 +1223,9 @@ IECore::CompoundDataPtr ShadingEngine::shade( const IECore::CompoundData *points
threadShaderGlobals.Ci = nullptr;
threadRenderState.pointIndex = i;
- shadingSystem->execute( context.shadingContext(), shaderGroup, threadShaderGlobals );
+ shadingSystem->execute( contextWrapper.shadingContext, shaderGroup, threadShaderGlobals );
- results.addResult( i, threadShaderGlobals.Ci, context.results );
+ results.addResult( i, threadShaderGlobals.Ci, resultCache );
}
};
|
Gameboy : Rename gb_boot.bin to gb_bios.bin
Since gambatte, mgba and vbam use it as gb_bios.bin, shouldn't we rename it to be in conformity?
Thanks | @@ -143,7 +143,7 @@ game (
comment "Nintendo - Gameboy"
rom ( name dmg_boot.bin size 256 crc 59c8598e md5 32fbbd84168d3482956eb3c5051637f5 sha1 4ed31ec6b0b175bb109c0eb5fd3d193da823339f )
- rom ( name gb_boot.bin size 256 crc 59c8598e md5 32fbbd84168d3482956eb3c5051637f5 sha1 4ed31ec6b0b175bb109c0eb5fd3d193da823339f )
+ rom ( name gb_bios.bin size 256 crc 59c8598e md5 32fbbd84168d3482956eb3c5051637f5 sha1 4ed31ec6b0b175bb109c0eb5fd3d193da823339f )
comment "Nintendo - Game Boy Advance"
rom ( name gba_bios.bin size 16384 crc 81977335 md5 a860e8c0b6d573d191e4ec7db1b1e4f6 sha1 300c20df6731a33952ded8c436f7f186d25d3492 )
|
Test reading empty input with multiple cores
See | @@ -31,9 +31,9 @@ def test_small(run):
run('-a TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq')
-def test_empty(run):
+def test_empty(run, cores):
"""empty input"""
- run('-a TTAGACATATCTCCGTCG', 'empty.fastq', 'empty.fastq')
+ run("--cores {} -a TTAGACATATCTCCGTCG".format(cores), "empty.fastq", "empty.fastq")
def test_newlines(run):
|
Update gozi.txt
[0]
[1]
```Involved Domains``` ([1]). | @@ -65,3 +65,20 @@ yraco.cn/wpapi
em2eddryi6ptkcnh.onion/wpapi
nap7zb4gtnzwmxsv.onion/wpapi
t7yz3cihrrzalznq.onion/wpapi
+
+# Reference: https://twitter.com/campuscodi/status/1039531511144431616
+# Reference: https://marcoramilli.blogspot.com/2018/08/hacking-hacker-stopping-big-botnet.html
+
+1000numbers.com
+batterygator.com
+beard-style.com
+englandlistings.com
+next.gardenforyou.org
+pomidom.com
+pool.jfklandscape.com
+pool.thefutureiskids.com
+romanikustop.space
+securitytransit.site
+sssloop.host
+sssloop.space
+upsvarizones.space
|
Closes netbox-community#9762
Added nat_outside to IPAddressTable class | @@ -369,6 +369,11 @@ class IPAddressTable(TenancyColumnsMixin, NetBoxTable):
orderable=False,
verbose_name='NAT (Inside)'
)
+ nat_outside = tables.Column(
+ linkify=True,
+ orderable=False,
+ verbose_name='NAT (Outside)'
+ )
assigned = columns.BooleanColumn(
accessor='assigned_object_id',
linkify=True,
@@ -381,7 +386,7 @@ class IPAddressTable(TenancyColumnsMixin, NetBoxTable):
class Meta(NetBoxTable.Meta):
model = IPAddress
fields = (
- 'pk', 'id', 'address', 'vrf', 'status', 'role', 'tenant', 'tenant_group', 'nat_inside', 'assigned', 'dns_name', 'description',
+ 'pk', 'id', 'address', 'vrf', 'status', 'role', 'tenant', 'tenant_group', 'nat_inside', 'nat_outside', 'assigned', 'dns_name', 'description',
'tags', 'created', 'last_updated',
)
default_columns = (
|
Fix, do not pass actual locals to import calls.
* This was wasteful as generating the locals dictionary means a lot
of C code potentially.
* This only affects imports inside functions and classes, which are
relatively rare. It is, however, going to be slightly faster of course.
def generateImportModuleCode(to_name, expression, emit, context):
- provider = expression.getParentVariableProvider()
-
globals_name = context.allocateTempName("import_globals")
getLoadGlobalsCode(
@@ -182,19 +180,6 @@ def generateImportModuleCode(to_name, expression, emit, context):
context = context
)
- if provider.isCompiledPythonModule():
- locals_name = globals_name
- else:
- locals_name = context.allocateTempName("import_locals")
-
- getLoadLocalsCode(
- to_name = locals_name,
- provider = expression.getParentVariableProvider(),
- mode = "updated",
- emit = emit,
- context = context
- )
-
old_source_ref = context.setCurrentSourceCodeReference(expression.getSourceReference())
getBuiltinImportCode(
@@ -203,7 +188,9 @@ def generateImportModuleCode(to_name, expression, emit, context):
constant = expression.getModuleName()
),
globals_name = globals_name,
- locals_name = locals_name,
+ locals_name = context.getConstantCode(
+ constant = None
+ ),
import_list_name = context.getConstantCode(
constant = expression.getImportList()
),
|
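The removal is safe because the builtin __import__ documents that its locals argument is unused, so passing None instead of a generated locals dictionary cannot change behaviour:

```python
# __import__(name, globals=None, locals=None, fromlist=(), level=0)
# CPython never reads the locals argument, so None works just as well.
mod = __import__("json", globals(), None, [])
print(mod.dumps({"ok": True}))
```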
Reorder some stuff to make it a little easier to follow
This does mean a second pass through the list of control nodes, but the
separation of concerns is worth it IMO | @@ -990,9 +990,6 @@ class XForm(WrappedNode):
return []
questions = []
- repeat_contexts = set()
- group_contexts = set()
- excluded_paths = set() # prevent adding the same question twice
# control_nodes will contain all nodes in question tree (the <h:body> of an xform)
# The question tree doesn't contain every question - notably, it's missing hidden values - so
@@ -1004,15 +1001,6 @@ class XForm(WrappedNode):
for cnode in control_nodes:
node = cnode.node
path = cnode.path
- excluded_paths.add(path)
-
- repeat = cnode.repeat
- if repeat is not None:
- repeat_contexts.add(repeat)
-
- group = cnode.group
- if group is not None:
- group_contexts.add(group)
if not cnode.is_leaf and not include_groups:
continue
@@ -1029,7 +1017,7 @@ class XForm(WrappedNode):
"label_ref": self._get_label_ref(node),
"tag": node.tag_name,
"value": path,
- "repeat": repeat,
+ "repeat": cnode.repeat,
"group": cnode.group,
"type": cnode.data_type,
"relevant": cnode.relevant,
@@ -1047,6 +1035,15 @@ class XForm(WrappedNode):
questions.append(question)
+ repeat_contexts = set()
+ group_contexts = set()
+ excluded_paths = set() # prevent adding the same question twice
+ for cnode in control_nodes:
+ excluded_paths.add(cnode.path)
+ if cnode.repeat is not None:
+ repeat_contexts.add(cnode.repeat)
+ if cnode.group is not None:
+ group_contexts.add(cnode.group)
repeat_contexts = sorted(repeat_contexts, reverse=True)
group_contexts = sorted(group_contexts, reverse=True)
|
Update mediaprocessor.py
extra sort if streams were purged to clean things up | @@ -496,6 +496,7 @@ class MediaProcessor:
options.remove(p)
except:
self.log.debug("Unable to purge stream, may already have been removed.")
+ return len(purge) > 0
def sublistIndexes(self, x, y):
indexes = []
@@ -1062,7 +1063,8 @@ class MediaProcessor:
}
attachments.append(attachment)
- self.purgeDuplicateStreams(acombinations, audio_settings, info)
+ if self.purgeDuplicateStreams(acombinations, audio_settings, info):
+ self.sortStreams(audio_settings, awl)
# Collect all options
options = {
|
Tests: Added support for testing installed version of Nuitka
* We normally carefully try to avoid using the import path to find
Nuitka to compile and test with, but we might also precisely want
to use the installed version to compare against. | @@ -340,6 +340,8 @@ def compareWithCPython(
"""
+ # Many cases to consider here, pylint: disable=too-many-branches
+
if dirname is None:
path = filename
else:
@@ -351,12 +353,20 @@ def compareWithCPython(
else:
converted = False
+ if os.getenv("NUITKA_TEST_INSTALLED", "") == "1":
command = [
sys.executable,
- os.path.join("..", "..", "bin", "compare_with_cpython"),
+ "-m",
+ "nuitka.tools.testing.compare_with_cpython",
path,
"silent",
]
+ else:
+ compare_with_cpython = os.path.join("..", "..", "bin", "compare_with_cpython")
+ if os.path.exists(compare_with_cpython):
+ command = [sys.executable, compare_with_cpython, path, "silent"]
+ else:
+ sys.exit("Error, cannot find Nuitka comparison runner.")
if extra_flags is not None:
command += extra_flags
|
Update survey with new IDs for Q4
Fix bug | @@ -32,10 +32,18 @@ SURVEYS = {
'exit_survey_campaign_id': 2208951,
'active': False, # allows cron job to skip this survey
},
+ # bug 1416244
'general-q3-2018': { # General survey for en-US users enabled in Q3 of 2018
'email_collection_survey_id': 4494159,
'exit_survey_id': 4456859,
'exit_survey_campaign_id': 7259518,
+ 'active': False,
+ },
+ # bug 1510201
+ 'general-q4-2018': { # General survey for en-US users enabled in Q4 of 2018
+ 'email_collection_survey_id': 4494159,
+ 'exit_survey_id': 4669267,
+ 'exit_survey_campaign_id': 7259518,
'active': True,
},
}
|
Added BlackHosts link to the "Interesting Applications" section
Hope someone gets good use of this! | @@ -444,6 +444,8 @@ devices under a variety of operating systems.
* [dnsmasq conversion script](https://gist.github.com/erlepereira/c11f4f7a3f60cd2071e79018e895fc8a#file-dnsmasq-antimalware) This GitHub gist has a short shell script (bash, will work on any 'nix) and uses `wget` & `awk` present in most distros, to fetch a specified hosts file and convert it the format required by dnsmasq. Supports IPv4 and IPv6. Designed to be used as either a shell script, or can be dropped into /etc/cron.weekly (or wherever suits). The script is short and easily edited, also has a short document attached with notes on dnsmasq setup.
+* [BlackHosts - Command Line Installer/Updater](https://github.com/Lateralus138/blackhosts) This is a cross-platform command line utility to help install/update hosts files found at this repository.
+
## Contribute!
Please read our [Contributing Guide](https://github.com/StevenBlack/hosts/blob/master/contributing.md). Among other things, this explains how we organize files and folders in this repository.
|
Add methods removed in 3.x to the changes doc.
Refs | @@ -77,6 +77,7 @@ instead use :py:func:`prefetch` to achieve the same result.
* The ``naive()`` method is now :py:meth:`~BaseQuery.objects`, which defaults
to using the model class as the constructor, but accepts any callable to use
as an alternate constructor.
+* The ``annotate()`` query method is no longer supported.
The :py:func:`Case` helper has moved from the ``playhouse.shortcuts`` module
into the main peewee module.
@@ -87,6 +88,9 @@ a method on all column-like objects.
The ``InsertQuery.return_id_list()`` method has been replaced by a more general
pattern of using :py:meth:`_WriteQuery.returning`.
+The ``InsertQuery.upsert()`` method has been replaced by the more general and
+flexible :py:meth:`Insert.on_conflict` method.
+
When using :py:func:`prefetch`, the collected instances will be stored in the
same attribute as the foreign-key's ``backref``. Previously, you would access
joined instances using ``(backref)_prefetch``.
|
Fix Slicer matplotlib-warning.
Closes | @@ -1344,7 +1344,9 @@ class Slicer(object):
the shown range.
clim : None or list of [min, max]
- For pcolormesh (vmin, vmax).
+ For `pcolormesh` (`vmin`, `vmax`).
+
+ Note that this overrules `vmin`/`vmax` provided in `pcolor_opts`.
xlim, ylim, zlim : None or list of [min, max]
Axis limits.
@@ -1363,7 +1365,13 @@ class Slicer(object):
Number of cells occupied by x, y, and z dimension on plt.subplot2grid.
pcolor_opts : dictionary
- Passed to pcolormesh.
+ Passed to `pcolormesh`.
+
+ Note 1: `vmin`/`vmax` provided in `pcolor_opts` are overruled by the
+ `clim`.
+
+ Note 2: If a `norm` is provided, then `vmin` and `vmax` should be
+ defined in the norm.
"""
@@ -1378,6 +1386,9 @@ class Slicer(object):
pcolor_opts = kwargs["pcolorOpts"]
warnings.warn("pcolorOpts has been deprecated, please use pcolor_opts", DeprecationWarning)
+ # Add pcolor_opts to self
+ self.pc_props = pcolor_opts if pcolor_opts is not None else {}
+
# (a) Mesh dimensionality
if mesh.dim != 3:
err = 'Must be a 3D mesh. Use plotImage instead.'
@@ -1459,14 +1470,35 @@ class Slicer(object):
else:
aspect3 = 1.0/aspect2
- # Store min and max of all data
+ # Get color limits.
+
+ # Alternatively, look in pc_props.
+ vmin = self.pc_props.pop('vmin', None)
+ vmax = self.pc_props.pop('vmax', None)
+
+ # Check if there is a norm and if it has color limits.
+ if 'norm' in self.pc_props:
+ vmin = self.pc_props['norm'].vmin
+ vmax = self.pc_props['norm'].vmax
+
+ # If nowhere defined, get it from the data.
if clim is None:
- clim = [np.nanmin(self.v), np.nanmax(self.v)]
- # In the case of a homogeneous fullspace provide a small range to avoid
- # problems with colorbar and the three subplots.
+
+ clim = [np.nanmin(self.v) if vmin is None else vmin,
+ np.nanmax(self.v) if vmax is None else vmax]
+
+ # In the case of a homogeneous fullspace provide a small range to
+ # avoid problems with colorbar and the three subplots.
if clim[0] == clim[1]:
clim = [0.99*clim[0], 1.01*clim[1]]
- self.pc_props = {'vmin': clim[0], 'vmax': clim[1]}
+
+ # clim overrules any of them.
+ if 'norm' in self.pc_props:
+ self.pc_props['norm'].vmin = clim[0]
+ self.pc_props['norm'].vmax = clim[1]
+ else:
+ self.pc_props['vmin'] = clim[0]
+ self.pc_props['vmax'] = clim[1]
# 2. Start populating figure
@@ -1536,10 +1568,6 @@ class Slicer(object):
self.clpropsw = {'c': 'w', 'lw': 2, 'zorder': 10}
self.clpropsk = {'c': 'k', 'lw': 1, 'zorder': 11}
- # Add pcolor_opts
- if pcolor_opts is not None:
- self.pc_props.update(pcolor_opts)
-
# Initial draw
self.update_xy()
self.update_xz()
|
Fix bug [FTL] Copying from Machinery to Source editor results in a broken string
In editor.js->componentDidUpdate add else if to check the content copied from machinery tab.
If the content parsed as a Fluent message is Junk, convert it to complex.
) {
this.analyzeFluentMessage(this.props.editor.translation);
}
+ // If translation changes from machinery tab,
+ // check if it's valid and convert syntax to comlex if no.
+ else if (
+ this.props.entity &&
+ this.state.forceSource &&
+ this.props.editor.translation !== prevProps.editor.translation &&
+ this.props.editor.changeSource === 'machinery' &&
+ typeof(this.props.editor.translation) === 'string'
+ ) {
+ const message = fluent.parser.parseEntry(this.props.editor.translation);
+ if (message.type === 'Junk') {
+ this.updateEditorContent(
+ this.props.editor.translation,
+ 'simple',
+ 'complex',
+ );
+ }
+ }
}
/**
|
Add the decorator to the reactor integration test
This test occasionally fails on the develop branch and I cannot
reproduce it. recommended to add the flaky decorator
to the test until we can circle back around and look at this more
closely. | @@ -12,6 +12,7 @@ from __future__ import absolute_import
# Import Salt testing libs
import tests.integration as integration
+from tests.support.helpers import flaky
# Import Salt libs
import salt.utils.event
@@ -22,6 +23,7 @@ class ReactorTest(integration.ModuleCase, integration.SaltMinionEventAssertsMixI
Test Salt's reactor system
'''
+ @flaky()
def test_ping_reaction(self):
'''
Fire an event on the master and ensure
|
Fix response handling in tenant
Response object from requests doesn't have status attribute,
some code path will hit this:
AttributeError: 'Response' object has no attribute 'status'
Response doesn't have read() method either, changed to use text
attribute. | @@ -514,7 +514,7 @@ class Tenant():
keylime_logging.log_http_response(
logger, logging.ERROR, response.json())
logger.error(
- f"POST command response: {response.status} Unexpected response from Cloud Verifier: {response.read()}")
+ f"POST command response: {response.status_code} Unexpected response from Cloud Verifier: {response.text}")
sys.exit()
def do_cvstatus(self, listing=False):
@@ -550,7 +550,7 @@ class Tenant():
if response.status_code != 200:
logger.error(
- f"Status command response: {response.status}. Unexpected response from Cloud Verifier.")
+ f"Status command response: {response.status_code}. Unexpected response from Cloud Verifier.")
sys.exit()
else:
response_json = response.json()
@@ -844,7 +844,7 @@ class Tenant():
if response.status_code == 200:
if "results" not in response_body or 'hmac' not in response_body['results']:
logger.critical(
- f"Error: unexpected http response body from Cloud Agent: {response.status}")
+ f"Error: unexpected http response body from Cloud Agent: {response.status_code}")
break
mac = response_body['results']['hmac']
|
lnutil: add rationale for MIN_FUNDING_SAT
related: | @@ -309,6 +309,12 @@ REDEEM_AFTER_DOUBLE_SPENT_DELAY = 30
CHANNEL_OPENING_TIMEOUT = 24*60*60
+# Small capacity channels are problematic for many reasons. As the onchain fees start to become
+# significant compared to the capacity, things start to break down. e.g. the counterparty
+# force-closing the channel costs much of the funds in the channel.
+# Closing a channel uses ~200 vbytes onchain, feerates could spike to 100 sat/vbyte or even higher;
+# that in itself is already 20_000 sats. This mining fee is reserved and cannot be used for payments.
+# The value below is chosen arbitrarily to be one order of magnitude higher than that.
MIN_FUNDING_SAT = 200_000
##### CLTV-expiry-delta-related values
|
Added optional random seed for transition model of SwitchMultiTargetGroundTruthSimulator
Removed random state argument from property. Now it just uses self.random_state | @@ -144,14 +144,18 @@ class SwitchMultiTargetGroundTruthSimulator(MultiTargetGroundTruthSimulator):
The element in the ith row and the jth column is the probability of\
switching from the ith transition model in :attr:`transition_models`\
to the jth")
+ seed: Optional[int] = Property(default=None, doc="Seed for random number generation."
+ " Default None")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.index = 0
+ self.random_state = np.random.RandomState(self.seed)
@property
- def transition_model(self):
- self.index = np.random.choice(range(0, len(self.transition_models)),
+ def transition_model(self, random_state=None):
+ random_state = random_state if random_state is not None else self.random_state
+ self.index = random_state.choice(range(0, len(self.transition_models)),
p=self.model_probs[self.index])
return self.transition_models[self.index]
|
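The pattern being introduced is the usual one for reproducible stochastic behaviour: keep a seeded RandomState on the object and draw from it instead of the global numpy RNG. A standalone sketch with made-up probabilities:

```python
import numpy as np

rng = np.random.RandomState(seed=7)
model_probs = [[0.9, 0.1],
               [0.2, 0.8]]
index = 0
for _ in range(5):
    index = rng.choice(len(model_probs), p=model_probs[index])
    print(index, end=" ")
# re-running with the same seed reproduces the same sequence of model switches
```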
Fixed docstrings typo
is_single, to as_single | @@ -294,7 +294,7 @@ def iterable_like(target, reference, fillvalue=None, as_single=False):
Object taken as departure point.
fillvalue : object, optional
Fill value. Defaults to `None`.
- is_single : bool, optional
+ as_single : bool, optional
Reference should be regarded as a single entry. Defaults to `True`.
Returns
|
Prevent viewbox auto-scaling to items that are not in the same scene.
This can happen when an item that was previously added to the viewbox
is then removed using scene.removeItem(). | @@ -1280,7 +1280,7 @@ class ViewBox(GraphicsWidget):
## First collect all boundary information
itemBounds = []
for item in items:
- if not item.isVisible():
+ if not item.isVisible() or not item.scene() is self.scene():
continue
useX = True
|
Phrasing updates to new vendor doc
Product suggested a slight change in phrasing
for this page. This only changes a few sentences in
the RFC page about new vendor integrations. | @@ -110,13 +110,12 @@ An approved RFC is not a commitment to implementation on any sort of timeline. T
## New hardware integrations
-Several hardware vendors already have integrations with cirq. We are not
-currently soliciting additional vendors. However, if you are considering
-integrating with cirq, we would highly encourage you to engage with the
-cirq-maintainer team through attending the weekly cirq cync and submitting
-an RFC as specified above. Everyone benefits from a well-maintained, user
-friendly interface with a high reliability, which is the goal of having this
-RFC process.
+Several hardware vendors already have integrations with cirq.
+If you are considering integrating with cirq, please engage with us through the
+cirq-maintainer team by attending the weekly cirq cync meeting and submitting
+an RFC to get feedback on your design proposal (as specified above). The goal of the RFC
+review process is to ensure all integrations end up with a well-maintained, user
+friendly interface with a high reliability.
Examples of other integrations can be found on the
[Hardware page](/cirq/hardware).
|
Silence: persist silenced channels
Can be used to support rescheduling. | @@ -2,6 +2,7 @@ import asyncio
import json
import logging
from contextlib import suppress
+from datetime import datetime, timedelta
from typing import Optional
from discord import TextChannel
@@ -63,6 +64,10 @@ class Silence(commands.Cog):
# Overwrites are stored as JSON.
muted_channel_perms = RedisCache()
+ # Maps muted channel IDs to POSIX timestamps of when they'll be unsilenced.
+ # A timestamp equal to -1 means it's indefinite.
+ muted_channel_times = RedisCache()
+
def __init__(self, bot: Bot):
self.bot = bot
self.scheduler = Scheduler(self.__class__.__name__)
@@ -90,16 +95,21 @@ class Silence(commands.Cog):
"""
await self._get_instance_vars_event.wait()
log.debug(f"{ctx.author} is silencing channel #{ctx.channel}.")
+
if not await self._silence(ctx.channel, persistent=(duration is None), duration=duration):
await ctx.send(f"{Emojis.cross_mark} current channel is already silenced.")
return
+
if duration is None:
await ctx.send(f"{Emojis.check_mark} silenced current channel indefinitely.")
+ await self.muted_channel_times.set(ctx.channel.id, -1)
return
await ctx.send(f"{Emojis.check_mark} silenced current channel for {duration} minute(s).")
self.scheduler.schedule_later(duration * 60, ctx.channel.id, ctx.invoke(self.unsilence))
+ unsilence_time = (datetime.utcnow() + timedelta(minutes=duration))
+ await self.muted_channel_times.set(ctx.channel.id, unsilence_time.timestamp())
@commands.command(aliases=("unhush",))
async def unsilence(self, ctx: Context) -> None:
@@ -178,6 +188,7 @@ class Silence(commands.Cog):
self.scheduler.cancel(channel.id)
self.notifier.remove_channel(channel)
await self.muted_channel_perms.delete(channel.id)
+ await self.muted_channel_times.delete(channel.id)
if prev_overwrites is None:
await self._mod_alerts_channel.send(
|
Python3.7: Fixup static linking with Anaconda on Linux
* This may fix a whole class of errors, also with pyenv.
* Starting with 3.7 it's no longer an option to remove the
"main.o" from the link library, so this is a workaround,
which also means the function "Py_GetArgcArgv" will give a wrong
result. | @@ -608,6 +608,7 @@ extern "C" {
#if PYTHON_VERSION >= 300
#if defined(__GNUC__)
+__attribute__((weak))
__attribute__((visibility("default")))
#endif
void Py_GetArgcArgv(int *argc, wchar_t ***argv) {
@@ -617,6 +618,7 @@ void Py_GetArgcArgv(int *argc, wchar_t ***argv) {
#else
#if defined(__GNUC__)
+__attribute__((weak))
__attribute__((visibility("default")))
#endif
void Py_GetArgcArgv(int *argc, char ***argv) {
@@ -628,5 +630,4 @@ void Py_GetArgcArgv( int *argc, char ***argv ) {
#ifdef __cplusplus
}
#endif
-
#endif
|
Amt of candy gained added to event
Since candy gained is no longer fixed at 3, it's good to show the user how many candies were gained instead of just the total
self.event_manager.register_event('skip_evolve')
self.event_manager.register_event('threw_berry_failed', parameters=('status_code',))
self.event_manager.register_event('vip_pokemon')
- self.event_manager.register_event('gained_candy', parameters=('quantity', 'type'))
+ self.event_manager.register_event('gained_candy', parameters=('gained_candy', 'quantity', 'type'))
self.event_manager.register_event('catch_limit')
self.event_manager.register_event('spin_limit')
self.event_manager.register_event('show_best_pokemon', parameters=('pokemons'))
|
Make modpings rescheduling robust to unfilled cache
Additionally, this adds a check which will remove entries in the redis cache of former moderators. | @@ -13,6 +13,7 @@ from bot.constants import Colours, Emojis, Guild, Icons, MODERATION_ROLES, Roles
from bot.converters import Expiry
from bot.log import get_logger
from bot.utils import time
+from bot.utils.members import get_or_fetch_member
log = get_logger(__name__)
@@ -57,18 +58,29 @@ class ModPings(Cog):
log.trace("Applying the moderators role to the mod team where necessary.")
for mod in mod_team.members:
- if mod in pings_on: # Make sure that on-duty mods aren't in the cache.
+ if mod in pings_on: # Make sure that on-duty mods aren't in the redis cache.
if mod.id in pings_off:
await self.pings_off_mods.delete(mod.id)
continue
- # Keep the role off only for those in the cache.
+ # Keep the role off only for those in the redis cache.
if mod.id not in pings_off:
await self.reapply_role(mod)
else:
expiry = isoparse(pings_off[mod.id])
self._role_scheduler.schedule_at(expiry, mod.id, self.reapply_role(mod))
+ # At this stage every entry in `pings_off` is expected to have a scheduled task, but that might not be the case
+ # if the discord.py cache is missing members, or if the ID belongs to a former moderator.
+ for mod_id, expiry_iso in pings_off.items():
+ if mod_id not in self._role_scheduler:
+ mod = await get_or_fetch_member(self.guild, mod_id)
+ # Make sure the member is still a moderator and doesn't have the pingable role.
+ if mod is None or mod.get_role(Roles.mod_team) is None or mod.get_role(Roles.moderators) is not None:
+ await self.pings_off_mods.delete(mod_id)
+ else:
+ self._role_scheduler.schedule_at(isoparse(expiry_iso), mod_id, self.reapply_role(mod))
+
async def reschedule_modpings_schedule(self) -> None:
"""Reschedule moderators schedule ping."""
await self.bot.wait_until_guild_available()
|
tooling: Make test-js-with-puppeteer fail fast on passing invalid file names.
fixes: | @@ -106,15 +106,19 @@ def add_provision_check_override_param(parser: ArgumentParser) -> None:
def find_js_test_files(test_dir: str, files: Iterable[str]) -> List[str]:
test_files = []
for file in files:
- file = min(
+ relative_file_path = min(
(
os.path.join(test_dir, file_name)
for file_name in os.listdir(test_dir)
if file_name.startswith(file)
),
- default=file,
+ default=None,
)
- test_files.append(os.path.abspath(file))
+
+ if relative_file_path is None:
+ raise Exception(f"Cannot find a matching file for '{file}' in '{test_dir}'")
+
+ test_files.append(os.path.abspath(relative_file_path))
if not test_files:
test_files = sorted(
|
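The fix hinges on min() with the default= keyword: on an empty iterable it returns the fallback instead of raising ValueError, so the caller can fail fast with its own message. In isolation:

```python
names = ["activity.test.js", "compose.test.js"]
match = min((n for n in names if n.startswith("widgets")), default=None)
print(match)   # None -> the caller can now raise a clear "cannot find a matching file" error
```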
Update worst_asns.txt
Related to | # AS36352 (ColoCrossing)
107.172.0.0/16,colocrossing
+
+# Reference: https://twitter.com/malwrhunterteam/status/1248226241527844865
+# Reference: https://www.virustotal.com/gui/ip-address/213.176.32.0/details
+
+# AS22769
+213.176.32.0/19,ddosing network
|
MAINT: Tweaks to progress display.
Factor out methods for starting/stopping display.
Clear output after closing widget. | @@ -260,7 +260,7 @@ except ImportError:
HAVE_WIDGETS = False
try:
- from IPython.display import display, HTML as IPython_HTML
+ from IPython.display import clear_output, display, HTML as IPython_HTML
HAVE_IPYTHON = True
except ImportError:
HAVE_IPYTHON = False
@@ -336,42 +336,52 @@ class IPythonWidgetProgressPublisher(object):
if model.state == 'init':
self._heading.value = '<b>Analyzing Pipeline...</b>'
self._set_progress(0.0)
+ self._ensure_displayed()
elif model.state in ('loading', 'computing'):
+
term_list = self._render_term_list(model.current_work)
if model.state == 'loading':
details_heading = '<b>Loading Inputs:</b>'
else:
details_heading = '<b>Computing Expression:</b>'
-
self._details_body.value = details_heading + term_list
+
chunk_start, chunk_end = model.current_chunk_bounds
self._heading.value = (
"<b>Running Pipeline</b>: Chunk Start={}, Chunk End={}"
.format(chunk_start.date(), chunk_end.date())
)
+
self._set_progress(model.percent_complete)
+ self._ensure_displayed()
+
elif model.state == 'success':
# Replace widget layout with html that can be persisted.
- self._layout.close()
+ self._stop_displaying()
display(
IPython_HTML("<b>Pipeline Execution Time:</b> {}".format(
self._format_execution_time(model.execution_time)
- ))
+ )),
)
elif model.state == 'error':
self._bar.bar_style = 'danger'
- self._layout.close()
-
+ self._stop_displaying()
else:
+ self._layout.close()
raise ValueError('Unknown display state: {!r}'.format(model.state))
+ def _ensure_displayed(self):
if not self._displayed:
display(self._layout)
self._displayed = True
+ def _stop_displaying(self):
+ self._layout.close()
+ clear_output()
+
@staticmethod
def _render_term_list(terms):
list_elements = ''.join([
|
Check in forgotten entity definition [skip appveyor]
Added a plural of build signature to scons.mod and forgot to
check in the change to that file, so validation failed on the CI run. | <!ENTITY contentsig "<phrase xmlns='http://www.scons.org/dbxsd/v1.0'>content signature</phrase>">
<!ENTITY contentsigs "<phrase xmlns='http://www.scons.org/dbxsd/v1.0'>content signatures</phrase>">
<!ENTITY buildsig "<phrase xmlns='http://www.scons.org/dbxsd/v1.0'>build signature</phrase>">
+<!ENTITY buildsigs "<phrase xmlns='http://www.scons.org/dbxsd/v1.0'>build signatures</phrase>">
<!ENTITY true "<literal xmlns='http://www.scons.org/dbxsd/v1.0'>true</literal>">
<!ENTITY false "<literal xmlns='http://www.scons.org/dbxsd/v1.0'>false</literal>">
|
Update ASP README to highlight default recipe
The Recipe was presented after some non-standard API calls, so moving the suggested usage up, giving it its own section, and reinforcing the suggested usage in the non-standard section. | # Introduction to ASP
-This page documents the API for ASP (Automatic Sparsity), a tool that enables sparse training and inference for PyTorch models by adding 2 lines of Python.
+This serves as a quick-start for ASP (Automatic SParsity), a tool that enables sparse training and inference for PyTorch models by adding 2 lines of Python.
## Importing ASP
```
@@ -14,7 +14,7 @@ Apart from the import statement, it is sufficient to add just the following line
ASP.prune_trained_model(model, optimizer)
```
-In a typical PyTorch training loop, it might look like this:
+In the context of a typical PyTorch training loop, it might look like this:
```
ASP.prune_trained_model(model, optimizer)
@@ -27,21 +27,14 @@ for epoch in range(epochs):
torch.save(...)
```
-The `prune_trained_model` calculates the sparse mask and applies it to the weights. This is done once, i.e., sparse locations in the weights matrix remain fixed after this step. In order to recompute the sparse mask in between training, say after an epoch, use the following method:
-
-```
-ASP.compute_sparse_masks()
-```
-
-A more thorough example can be found in `./test/toy_problem.py`.
+The `prune_trained_model` step calculates the sparse mask and applies it to the weights. This is done once, i.e., sparse locations in the weights matrix remain fixed after this step.
+## Generate a Sparse Network
-
-
-The following approach serves as a guiding example on how to generate a pruned model that can use Sparse Tensor Core in NVIDIA Ampere Architecture. This approach generates a model for deployment, i.e. inference mode.
+The following approach serves as a guiding example on how to generate a pruned model that can use Sparse Tensor Cores in the NVIDIA Ampere Architecture. This approach generates a model for deployment, i.e. inference mode.
```
-(1) Given a fully trained (dense) network, prune parameter values in 2:4 sparsepattern.
+(1) Given a fully trained (dense) network, prune parameter values in a 2:4 sparse pattern.
(2) Fine-tune the pruned model with optimization method and hyper-parameters (learning-rate, schedule, number of epochs, etc.) exactly as those used to obtain the trained model.
(3) (If required) Quantize the model.
```
@@ -68,3 +61,18 @@ for epoch in range(epochs): # train the pruned model for the same number of epoc
torch.save(...) # saves the pruned checkpoint with sparsity masks
```
+
+## Non-Standard Usage
+
+If your goal is to easily perpare a network for accelerated inference, please follow the recipe above. However, ASP can also be used to perform experiments in advanced techniques like training with sparsity from initialization. For example, in order to recompute the sparse mask in between training steps, use the following method:
+
+```
+ASP.compute_sparse_masks()
+```
+
+A more thorough example can be found in `./test/toy_problem.py`.
+
+
+
+
+
|
Switch pypi & github release to do pypi last.
It allows retagging in case of error, without being blocked by PyPI because the release was already published. It is also not recommended at all to delete a PyPI release, while deleting a GitHub release is doable
on_failure: always
on_start: never
deploy:
- - provider: pypi
- user: JonathanHuot
- password:
- secure: "OozNM16flVLvqDoNzmoTENchhS1w0/dEJZvXBQK2KWmh8fyGj2UZus1vkl6bA5V3Yu9MZLYFpDcltl/qraY3Up6iXQpwKz4q+ICygAudYM2kJ5l8ZEe+wy2FikWbD6LkXf5uKIJJnPNSC8AI86ZyxM/XZxbYjj/+jXyJ1YFZwwQ="
- distributions: sdist bdist_wheel
+ - provider: releases
+ api_key:
+ secure: "eqEWOzKWZCuvd1a77CA03OX/HCrsYlsu1/Sz/RhXQIEhKz6tKp10KGw9zr57bHAIl0OfJFK9k63lI2HOctAmwkKeeQ4HdNqw4pHFa8Gk3liGp31KSmshVtHX8Rtn0DuFA028Wm7w5n+fOVc8tJVU/UsKjsfsAzRHnQjMamckoXU="
+ skip_cleanup: true
on:
tags: true
all_branches: true
condition: $TOXENV = py36
repo: oauthlib/oauthlib
- - provider: releases
- api_key:
- secure: "eqEWOzKWZCuvd1a77CA03OX/HCrsYlsu1/Sz/RhXQIEhKz6tKp10KGw9zr57bHAIl0OfJFK9k63lI2HOctAmwkKeeQ4HdNqw4pHFa8Gk3liGp31KSmshVtHX8Rtn0DuFA028Wm7w5n+fOVc8tJVU/UsKjsfsAzRHnQjMamckoXU="
- skip_cleanup: true
+ - provider: pypi
+ user: JonathanHuot
+ password:
+ secure: "OozNM16flVLvqDoNzmoTENchhS1w0/dEJZvXBQK2KWmh8fyGj2UZus1vkl6bA5V3Yu9MZLYFpDcltl/qraY3Up6iXQpwKz4q+ICygAudYM2kJ5l8ZEe+wy2FikWbD6LkXf5uKIJJnPNSC8AI86ZyxM/XZxbYjj/+jXyJ1YFZwwQ="
+ distributions: sdist bdist_wheel
on:
tags: true
all_branches: true
|
Update coords.py
Added new unit possibilities for velocity
Reduced the amount of time spent in initial for-loop | @@ -176,40 +176,60 @@ def scale_units(out_unit, in_unit):
'rad': ['rad', 'radian', 'radians'],
'h': ['h', 'hr', 'hrs', 'hours'],
'm': ['m', 'km', 'cm'],
- 'm/s': ['m/s', 'cm/s', 'km/s']}
+ 'm/s': ['m/s', 'cm/s', 'km/s', 'm s$^{-1}$',
+ 'cm s$^{-1}$', 'km s$^{-1}$', 'm s-1', 'cm s-1',
+ 'km s-1']}
+ replace_str = {'/s': [' s$^{-1}$', ' s-1']}
scales = {'deg': 180.0, 'rad': np.pi, 'h': 12.0,
'm': 1.0, 'km': 0.001, 'cm': 100.0,
'm/s': 1.0, 'cm/s': 100.0, 'km/s': 0.001}
# Test input and determine transformation type
- out_key = None
- in_key = None
+ out_key = out_unit.lower()
+ in_key = in_unit.lower()
for kk in accepted_units.keys():
- if out_unit.lower() in accepted_units[kk]:
+ if out_key in accepted_units.keys() and in_key in accepted_units.keys():
+ break
+
+ if(out_key not in accepted_units.keys() and
+ out_unit.lower() in accepted_units[kk]):
out_key = kk
- if in_unit.lower() in accepted_units[kk]:
+ if(in_key not in accepted_units.keys() and
+ in_unit.lower() in accepted_units[kk]):
in_key = kk
- if out_key is None and in_key is None:
- raise ValueError(''.join(['Cannot scale {:s} and '.format(in_unit)
+ if(out_key not in accepted_units.keys() and
+ in_key not in accepted_units.keys()):
+ raise ValueError(''.join(['Cannot scale {:s} and '.format(in_unit),
'{:s}, unknown units'.format(out_unit)]))
- if out_key is None:
+ if out_key not in accepted_units.keys():
raise ValueError('Unknown output unit {:}'.format(out_unit))
- if in_key is None:
+ if in_key not in accepted_units.keys():
raise ValueError('Unknown input unit {:}'.format(in_unit))
if out_key == 'm' or out_key == 'm/s' or in_key == 'm' or in_key == 'm/s':
if in_key != out_key:
raise ValueError('Cannot scale {:s} and {:s}'.format(out_unit,
in_unit))
- # Recast units as keys for the scales dictionary
- out_key = out_unit
- in_key = in_unit
-
- unit_scale = scales[out_key.lower()] / scales[in_key.lower()]
+ # Recast units as keys for the scales dictionary and ensure that
+ # the format is consistent
+ rkey = ''
+ for rr in replace_str.keys():
+ if out_key.find(rr):
+ rkey = rr
+
+ out_key = out_unit.lower()
+ in_key = in_unit.lower()
+
+ if rkey in replace_str.keys():
+ for rval in replace_str[rkey]:
+ out_key = out_key.replace(rval, rkey)
+ in_key = in_key.replace(rval, rkey)
+
+ unit_scale = scales[out_key] / scales[in_key]
return unit_scale
|
$.Lexer.Extract_Tokens: add contracts for the TDH formal
TN: | @@ -55,7 +55,9 @@ package ${ada_lib_name}.Lexer is
Tab_Stop : Positive := ${ctx.default_tab_stop};
With_Trivia : Boolean;
TDH : in out Token_Data_Handler;
- Diagnostics : in out Diagnostics_Vectors.Vector);
+ Diagnostics : in out Diagnostics_Vectors.Vector)
+ with Pre => Initialized (TDH) and then not Has_Source_Buffer (TDH),
+ Post => Has_Source_Buffer (TDH);
-- Extract tokens out of the given ``Input`` and store them into ``TDH``.
--
-- ``Tab_Stop`` is a positive number to describe the effect of tabulation
|
Update pantheon-takeover.yaml
Correct link to issue.
Medium article (written by me)
author: pdteam
severity: high
reference:
- - https://github.com/EdOverflow/can-i-take-over-xyz
+ - https://github.com/EdOverflow/can-i-take-over-xyz/issues/24
+ - https://medium.com/bug-bounty/how-i-took-over-several-stanford-subdomains-also-let-me-explain-you-the-pain-to-report-it-d84b08704be8
tags: takeover
requests:
|
Bugfix use the websocket context in the websocket method
Rather than the request context | @@ -1696,7 +1696,7 @@ class Quart(Scaffold):
for function in functions:
response = await self.ensure_async(function)(response)
- session_ = (websocket_context or _request_ctx_stack.top).session
+ session_ = (websocket_context or _websocket_ctx_stack.top).session
if not self.session_interface.is_null_session(session_):
if response is None and isinstance(session_, SecureCookieSession) and session_.modified:
self.logger.exception(
|
lint: Improve data-tippy-allowHTML error message.
The error message a user gets from the linter when using the
data-tippy-allowHTML attribute now conveys the fact that the
<template> tag is supposed to hold the tooltip content. This
might make understanding the correct workflow easier for
someone who encounters this error. | @@ -738,7 +738,7 @@ html_rules: List["Rule"] = [
},
{
"pattern": r"(?i:data-tippy-allowHTML)",
- "description": "Never use data-tippy-allowHTML; for an HTML tooltip, set data-tooltip-template-id to the id of a <template>.",
+ "description": "Never use data-tippy-allowHTML; for an HTML tooltip, set data-tooltip-template-id to the id of a <template> containing the tooltip content.",
},
]
|
remove cardiac from docstring
since intervals are meant to be more general (can be breath-to-breath) | @@ -15,7 +15,7 @@ def intervals_to_peaks(intervals, intervals_time=None, sampling_rate=1000):
intervals_time : list or array, optional
List or numpy array of timestamps corresponding to intervals, in seconds.
sampling_rate : int, optional
- Sampling rate (Hz) of the continuous cardiac signal in which the peaks occur.
+ Sampling rate (Hz) of the continuous signal in which the peaks occur.
Returns
-------
|
Calories
correct issue by replacing `is` with `==`
return None
gender_no = 0
- if(gender is "male" or gender is "man" or gender is "m"):
+ if(gender == "male" or gender == "man" or gender == "m"):
gender_no = 5
- elif(gender is "female" or gender is 'woman' or gender is "f"):
+ elif(gender == "female" or gender == 'woman' or gender == "f"):
gender_no = -161
if gender_no != 0 and age > 14 and height > 0.0 and weight > 0.0 and level > 0 and level < 5:
|
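The bug class is worth spelling out: `is` tests object identity while `==` tests value equality, and strings built at runtime are generally not interned, so identity checks on strings are unreliable:

```python
expected = "male"
gender = "".join(["ma", "le"])   # same value, typically a distinct object (CPython detail)
print(gender == expected)        # True  -> the correct comparison
print(gender is expected)        # typically False -> why the original code misbehaved
```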
Run 'nosetests tests' for all Python versions except PyPy3
Also: make sure all tests, in whatever subdirectory, are run on PyPy3 | @@ -53,4 +53,6 @@ jobs:
install:
- if [[ "$TRAVIS_PYTHON_VERSION" == "2"* ]] || [[ "$TRAVIS_PYTHON_VERSION" == "pypy"* ]]; then pip install -r requirements-py2.txt; else pip3 install -r requirements-py3.txt; fi
# command to run tests
-script: for s in tests/*.py; do nosetests -v "$s" || exit $?; done
+script:
+ # pypy3 segfaults if running all tests in the same process
+ - if [[ "$TRAVIS_PYTHON_VERSION" == "pypy3"* ]]; then find tests -name "*.py" -print0 | xargs -0 -n1 nosetests -v; else nosetests tests; fi
|
update to use a single line if statement when dealing with prepended
text
add comment on using +2 | @@ -3094,17 +3094,11 @@ def _getAdmlPresentationRefId(adml_data, ref_id):
else:
if etree.QName(p_item.tag).localname == 'text':
if prepended_text:
- if getattr(p_item, 'text', ''):
- prepended_text = ' '.join([prepended_text, getattr(p_item, 'text', '').rstrip()])
+ prepended_text = ' '.join((text for text in (prepended_text, getattr(p_item, 'text', '').rstrip()) if text))
else:
- prepended_text = ''
- else:
- if getattr(p_item, 'text', ''):
prepended_text = getattr(p_item, 'text', '').rstrip()
else:
prepended_text = ''
- else:
- prepended_text = ''
if prepended_text.endswith('.'):
prepended_text = ''
if the_localname == 'textBox' \
@@ -4185,6 +4179,8 @@ def _regexSearchKeyValueCombo(policy_data, policy_regpath, policy_regkey):
b'\00;'])
match = re.search(_thisSearch, policy_data, re.IGNORECASE)
if match:
+ # add 2 so we get the ']' and the \00
+ # to return the full policy entry
return policy_data[match.start():(policy_data.index(b']', match.end())) + 2]
return None
|
[Hexagon] Add default constructor to struct Optional in session.cc
It's needed for older compilers. | @@ -62,6 +62,7 @@ struct Optional : public dmlc::optional<T> {
using dmlc::optional<T>::optional;
using dmlc::optional<T>::operator=;
Optional(const T& val) : dmlc::optional<T>(val) {} // NOLINT(*)
+ Optional() = default;
T* operator->() { return &this->operator*(); }
const T* operator->() const { return &this->operator*(); }
|
Disco Pane content update - Oct. 2017
* Disco Pane content update - Oct. 2017
Fixes
* Fix ublock origin | @@ -19,13 +19,13 @@ class TestDiscoveryViewList(TestCase):
# Represents a dummy version of `olympia.discovery.data`
self.addons = OrderedDict([
- (696234, addon_factory(id=696234, type=amo.ADDON_PERSONA)),
- (626810, addon_factory(id=626810, type=amo.ADDON_EXTENSION)),
- (511962, addon_factory(id=511962, type=amo.ADDON_EXTENSION)),
- (265123, addon_factory(id=265123, type=amo.ADDON_PERSONA)),
- (708770, addon_factory(id=708770, type=amo.ADDON_EXTENSION)),
+ (44686, addon_factory(id=44686, type=amo.ADDON_PERSONA)),
+ (607454, addon_factory(id=607454, type=amo.ADDON_EXTENSION)),
(700308, addon_factory(id=700308, type=amo.ADDON_EXTENSION)),
- (644254, addon_factory(id=644254, type=amo.ADDON_PERSONA)),
+ (376685, addon_factory(id=376685, type=amo.ADDON_PERSONA)),
+ (455926, addon_factory(id=455926, type=amo.ADDON_EXTENSION)),
+ (511962, addon_factory(id=511962, type=amo.ADDON_EXTENSION)),
+ (208568, addon_factory(id=208568, type=amo.ADDON_PERSONA)),
])
def test_reverse(self):
|
Use -e and -x options when running bash
Some errors were being masked before | @@ -34,7 +34,10 @@ cat << EOF | {{ docker.executable }} run -i \
{{ docker.image }} \
{{ docker.command }} || exit 1
+set -e
+set +x
export BINSTAR_TOKEN=${BINSTAR_TOKEN}
+set -x
export PYTHONUNBUFFERED=1
echo "$config" > ~/.condarc
|
Update .travis.yml for 3.4 workaround
We'll drop 3.4 support in our next major release but until then this should keep travis builds working. | @@ -11,6 +11,8 @@ python:
- "3.8"
install:
- pip install -U pip setuptools
+ # remove pyyaml line when we drop py3.4 support
+ - pip install "pyyaml<5.3"
- pip install tox-travis pre-commit
- pip install codecov
script:
|
Tag the git clone version for penguin example during TFX 1.0 release period.
During the release period, we change the API exported folders, examples and tutorials, to keep user experience for playing the example during the releasing period, tag the version for now and will update to head after release finished. | @@ -37,7 +37,7 @@ pip install -U tfx[examples]
Then, clone the tfx repo and copy penguin/ folder to home directory:
<pre class="devsite-terminal devsite-click-to-copy">
-git clone https://github.com/tensorflow/tfx ~/tfx-source && pushd ~/tfx-source
+git clone https://github.com/tensorflow/tfx/releases/tag/v0.29.0 ~/tfx-source && pushd ~/tfx-source
cp -r ~/tfx-source/tfx/examples/penguin ~/
</pre>
|
Add "KeyError" to bare except in azurearm
Fixes pylint, refs | @@ -390,7 +390,7 @@ def list_nodes(conn=None, call=None): # pylint: disable=unused-argument
try:
provider, driver = __active_provider_name__.split(':')
active_resource_group = __opts__['providers'][provider][driver]['resource_group']
- except:
+ except KeyError:
pass
for node in nodes:
|
Fix invalid alembic revision comment
closes
closes | """Add track principal
Revision ID: 2496c4adc7e9
-Revises: eefba82b42c5
+Revises: 4e459d27adab
Create Date: 2019-10-02 18:20:33.866458
"""
|
Rally must not log clear text user passwords
Relates | @@ -12,7 +12,12 @@ class EsClientFactory:
Abstracts how the Elasticsearch client is created. Intended for testing.
"""
def __init__(self, hosts, client_options):
- logger.info("Creating ES client connected to %s with options [%s]" % (hosts, client_options))
+ masked_client_options = dict(client_options)
+ if "basic_auth_password" in masked_client_options:
+ masked_client_options["basic_auth_password"] = "*****"
+ if "http_auth" in masked_client_options:
+ masked_client_options["http_auth"] = (client_options["http_auth"][0], "*****")
+ logger.info("Creating ES client connected to %s with options [%s]", hosts, masked_client_options)
self.hosts = hosts
self.client_options = client_options
|
lemonwhale: try to find the ID of the video in a new way.
fixes | @@ -4,7 +4,6 @@ from __future__ import absolute_import
import re
import json
-from svtplay_dl.utils.urllib import unquote_plus
from svtplay_dl.service import Service
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hls import hlsparse
@@ -12,40 +11,56 @@ from svtplay_dl.utils import decode_html_entities
class Lemonwhale(Service):
# lemonwhale.com is just bogus for generic
- supported_domains = ['svd.se', 'vk.se', 'lemonwhale.com']
+ supported_domains = ['vk.se', 'lemonwhale.com']
def get(self):
- vid = None
- data = self.get_urldata()
-
if self.exclude():
yield ServiceError("Excluding video")
return
- match = re.search(r'video url-([^"]+)', data)
- if not match:
- match = re.search(r'embed.jsp\?([^"]+)"', self.get_urldata())
- if not match:
- yield ServiceError("Can't find video id")
- return
- vid = match.group(1)
+ vid = self.get_vid()
if not vid:
- path = unquote_plus(match.group(1))
- data = self.http.request("get", "http://www.svd.se%s" % path).content
- match = re.search(r'embed.jsp\?([^"]+)', data)
- if not match:
yield ServiceError("Can't find video id")
return
- vid = match.group(1)
url = "http://ljsp.lwcdn.com/web/public/item.json?type=video&%s" % decode_html_entities(vid)
data = self.http.request("get", url).text
jdata = json.loads(data)
- videos = jdata["videos"][0]["media"]["streams"]
- for i in videos:
- if i["name"] == "auto":
- hls = "%s%s" % (jdata["videos"][0]["media"]["base"], i["url"])
- streams = hlsparse(self.options, self.http.request("get", hls), hls)
+ if "videos" in jdata:
+ streams = self.get_video(jdata)
if streams:
for n in list(streams.keys()):
yield streams[n]
+
+ url = "http://ljsp.lwcdn.com/web/public/video.json?id={0}&delivery=hls".format(decode_html_entities(vid))
+ data = self.http.request("get", url).text
+ jdata = json.loads(data)
+ if "videos" in jdata:
+ streams = self.get_video(jdata)
+ if streams:
+ for n in list(streams.keys()):
+ yield streams[n]
+
+ def get_vid(self):
+ match = re.search(r'video url-([^"]+)', self.get_urldata())
+ if match:
+ return match.group(1)
+
+ match = re.search(r'__INITIAL_STATE__ = ({.*})</script>', self.get_urldata())
+ if match:
+ janson = json.loads(match.group(1))
+ vid = janson["content"]["current"]["data"]["templateData"]["pageData"]["video"]["id"]
+ return vid
+
+ match = re.search(r'embed.jsp\?([^"]+)"', self.get_urldata())
+ if match:
+ return match.group(1)
+ return None
+
+ def get_video(self, janson):
+ videos = janson["videos"][0]["media"]["streams"]
+ for i in videos:
+ if i["name"] == "auto":
+ hls = "%s%s" % (janson["videos"][0]["media"]["base"], i["url"])
+ streams = hlsparse(self.options, self.http.request("get", hls), hls)
+ return streams
|
sync: switch to multiprocessing.Event
We've switched most of this command over to multiprocessing and off
of _threading, so do the Event object too. The APIs are the same
between the modules, so we shouldn't need to update anything else.
Tested-by: Mike Frysinger | @@ -850,7 +850,7 @@ later is required to fix a server side protocol bug.
print('error: failed to remove existing smart sync override manifest: %s' %
e, file=sys.stderr)
- err_event = _threading.Event()
+ err_event = multiprocessing.Event()
rp = self.manifest.repoProject
rp.PreSync()
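The message's claim that the two `Event` implementations expose the same interface can be spot-checked with a quick sketch (not part of the change itself):

```python
import threading
import multiprocessing

for factory in (threading.Event, multiprocessing.Event):
    ev = factory()
    ev.set()
    assert ev.is_set()
    ev.clear()
    # wait() returns False when the timeout expires with the flag still unset
    assert not ev.wait(timeout=0.01)
```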
|
Replace 'pluginfooter' block with 'footer' and 'footer_links' blocks
'footer' block represents the <footer> html tag
'footer_links' are the anchor tags inside nav | {# Page footer #}
<footer class="footer container-fluid">
- {# Plugin Custom Footer #}
- {% block pluginfooter %}{% endblock %}
-
+ {% block footer %}
<div class="row align-items-center justify-content-between mx-0">
- {# Docs & Community Links #}
<div class="col-sm-12 col-md-auto fs-4 noprint">
<nav class="nav justify-content-center justify-content-lg-start">
+ {% block footer_links %}
{# Documentation #}
<a type="button" class="nav-link" href="{% static 'docs/' %}" target="_blank">
<i title="Docs" class="mdi mdi-book-open-variant text-primary" data-bs-placement="top" data-bs-toggle="tooltip"></i>
<a type="button" class="nav-link" href="https://netdev.chat/" target="_blank">
<i title="Community" class="mdi mdi-slack text-primary" data-bs-placement="top" data-bs-toggle="tooltip"></i>
</a>
+ {% endblock footer_links %}
</nav>
</div>
- {# System Info #}
<div class="col-sm-12 col-md-auto text-center text-lg-end text-muted">
<span class="d-block d-md-inline">{% annotated_now %} {% now 'T' %}</span>
<span class="ms-md-3 d-block d-md-inline">{{ settings.HOSTNAME }} (v{{ settings.VERSION }})</span>
</div>
</div>
+ {% endblock footer %}
</footer>
</div>
|
docs: Remove unnecessary type annotations from tutorial.
In we removed the need
to explicitly declare types for Django model fields. Here, we update that detail
in our documentation. | @@ -192,9 +192,9 @@ boolean field, `mandatory_topics`, to the Realm model in
class Realm(models.Model):
# ...
- emails_restricted_to_domains: bool = models.BooleanField(default=True)
- invite_required: bool = models.BooleanField(default=False)
-+ mandatory_topics: bool = models.BooleanField(default=False)
+ emails_restricted_to_domains = models.BooleanField(default=True)
+ invite_required = models.BooleanField(default=False)
++ mandatory_topics = models.BooleanField(default=False)
```
The Realm model also contains an attribute, `property_types`, which
|
Update README formatting at README: Usage.
fixed missing newlines.
upadted indentation. | @@ -179,6 +179,7 @@ Magic shell completions are now enabled!
Usage Examples:
+
Create a new project using Python 3.7, specifically:
$ pipenv --python 3.7
@@ -204,6 +205,7 @@ Magic shell completions are now enabled!
$ pipenv run pip freeze
Commands:
+
check Checks for PyUp Safety security vulnerabilities and against
PEP 508 markers provided in Pipfile.
clean Uninstalls all packages not specified in Pipfile.lock.
|
Fixes failing wildcard for ZMQ
Resolves: | @@ -15,9 +15,8 @@ tls_check_hostnames = False
ca_implementation = openssl
# Revocation IP & Port used by either the cloud_agent or keylime_ca to receive
-# revocation events from the verifier. A wildcard can be used to listen on all
-# interfaces, or likewise a specific IP (e.g 192.168.0.1)
-receive_revocation_ip= *
+# revocation events from the verifier.
+receive_revocation_ip = 127.0.0.1
receive_revocation_port = 8992
#=============================================================================
|
Update map signals for 1.9.2
Use the from_claims variant for Hospital Admissions | @@ -38,7 +38,7 @@ map](https://covidcast.cmu.edu/):
| Early Indicators | COVID-Related Doctor Visits | [`doctor-visits`](covidcast-signals/doctor-visits.md) | `smoothed_adj_cli` |
| Early Indicators | COVID Indicator Combination | [`indicator-combination`](covidcast-signals/indicator-combination.md) | `nmf_day_doc_fbc_fbs_ght` |
| Late Indicators | COVID Antigen Test Positivity (Quidel) | [`quidel`](covidcast-signals/quidel.md) | `covid_ag_smoothed_pct_positive` |
-| Late Indicators | COVID Hospital Admissions | [`hospital-admissions`](covidcast-signals/hospital-admissions.md) | `smoothed_adj_covid19` |
+| Late Indicators | COVID Hospital Admissions | [`hospital-admissions`](covidcast-signals/hospital-admissions.md) | `smoothed_adj_covid19_from_claims` |
| Late Indicators | Cases | [`indicator-combination`](covidcast-signals/indicator-combination.md) | `confirmed_7dav_incidence_num` |
| Late Indicators | Cases per 100,000 People | [`indicator-combination`](covidcast-signals/indicator-combination.md) | `confirmed_7dav_incidence_prop` |
| Late Indicators | Deaths | [`indicator-combination`](covidcast-signals/indicator-combination.md) | `deaths_7dav_incidence_num` |
|
document `regex_replace` Jinja filter
Fixes | @@ -405,6 +405,29 @@ Returns:
None
+.. jinja_ref:: regex_replace
+
+``regex_replace``
+-----------------
+
+.. versionadded:: 2017.7.0
+
+Searches for a pattern and replaces with a sequence of characters.
+
+Example:
+
+.. code-block:: jinja
+
+ {% set my_text = 'yes, this is a TEST' %}
+ {{ my_text | regex_replace(' ([a-z])', '__\\1', ignorecase=True) }}
+
+Returns:
+
+.. code-block:: text
+
+ yes,__this__is__a__TEST
+
+
.. jinja_ref:: uuid
``uuid``
|
Partial revert "Updated docstrings"
This reverts commit | @@ -53,7 +53,10 @@ def make_new_dset(parent, shape=None, dtype=None, data=None, name=None,
fletcher32=None, maxshape=None, compression_opts=None,
fillvalue=None, scaleoffset=None, track_times=None,
external=None, track_order=None, dcpl=None):
- """ Return a new low-level dataset identifier """
+ """ Return a new low-level dataset identifier
+
+ Only creates anonymous datasets.
+ """
# Convert data to a C-contiguous ndarray
if data is not None and not isinstance(data, Empty):
@@ -169,8 +172,11 @@ def make_new_dset(parent, shape=None, dtype=None, data=None, name=None,
def make_new_virtual_dset(parent, shape, sources, dtype=None,
maxshape=None, fillvalue=None):
- """ Return a new low-level dataset identifier for a virtual dataset """
+ """Return a new low-level dataset identifier for a virtual dataset
+ Like make_new_dset(), this creates an anonymous dataset, which can be given
+ a name later.
+ """
# create the creation property list
dcpl = h5p.create(h5p.DATASET_CREATE)
if fillvalue is not None:
|
ENH: composable apply_to takes advantage of tinydb based data stores
[NEW] more efficient determination of data remaining to be processed | @@ -378,8 +378,11 @@ class Composable(ComposableType):
process.output = None
self.input = None
+ # with a tinydb dstore, this also excludes data that failed to complete
+ todo = [m for m in dstore if not self.job_done(m)]
+
for result in ui.imap(
- process, dstore, parallel=parallel, par_kw=par_kw, mininterval=mininterval
+ process, todo, parallel=parallel, par_kw=par_kw, mininterval=mininterval
):
outcome = self(result)
results.append(outcome)
@@ -387,13 +390,18 @@ class Composable(ComposableType):
member = dstore[i]
LOGGER.log_message(member, label="input")
LOGGER.log_message(member.md5, label="input md5sum")
- if outcome:
mem_id = self.data_store.make_relative_identifier(member.name)
+ if outcome:
member = self.data_store.get_member(mem_id)
LOGGER.log_message(member, label="output")
LOGGER.log_message(member.md5, label="output md5sum")
else:
# we have a NotCompletedResult
+ try:
+ # tinydb supports storage
+ self.data_store.write_incomplete(mem_id, outcome.to_rich_dict())
+ except AttributeError:
+ pass
LOGGER.log_message(
f"{outcome.origin} : {outcome.message}", label=outcome.type
)
@@ -406,6 +414,7 @@ class Composable(ComposableType):
LOGGER.log_message(f"{taken}", label="TIME TAKEN")
LOGGER.shutdown()
self.data_store.add_file(str(log_file_path), cleanup=cleanup)
+ self.data_store.close()
# now reconnect input
if process is not self:
|
Add reproduce_failure failure to hypothesis tests
Add it by default. This makes it easier to replay an error. | @@ -28,6 +28,7 @@ from io import StringIO
from typing import Iterable
from gaphas.connector import Handle
+from hypothesis import reproduce_failure # noqa
from hypothesis.control import assume, cleanup
from hypothesis.errors import UnsatisfiedAssumption
from hypothesis.stateful import (
|
finalize backend JuMP.Model in http.jl
should fix our memory leak! | @@ -10,8 +10,7 @@ function job(req::HTTP.Request)
m = xpress_model(timeout, tol)
@info "Starting REopt with timeout of $(timeout) seconds..."
results = reopt(m, d)
- # fix our memory leak? https://github.com/jump-dev/CPLEX.jl/issues/185
- m = nothing
+ finalize(backend(m))
GC.gc()
@info "REopt model solved with status $(results["status"])."
return HTTP.Response(200, JSON.json(results))
|
Update README.md
Correcting link for Enterprise Ethical Hacking | @@ -8,7 +8,7 @@ The following are the different video courses that will be part of the Art of Ha
* [Security Penetration Testing (The Art of Hacking Series)](https://www.safaribooksonline.com/library/view/security-penetration-testing/9780134833989)
* [Wireless Networks, IoT, and Mobile Devices Hacking (The Art of Hacking Series)](https://www.safaribooksonline.com/library/view/wireless-networks-iot/9780134854632/)
-* [Enterprise Penetration Testing and Continuous Monitoring (The Art of Hacking Series)](https://www.safaribooksonline.com/library/view/enterprise-penetration-testing/9780134854748)
+* [Enterprise Penetration Testing and Continuous Monitoring (The Art of Hacking Series)](https://www.safaribooksonline.com/videos/enterprise-penetration-testing/9780134854779)
* Hacking Web Applications (The Art of Hacking Series) LiveLessons - Coming Soon!
These courses serve as comprehensive guide for any network and security professional who is starting a career in ethical hacking and penetration testing. It also can help individuals preparing for the [Offensive Security Certified Professional (OSCP)](https://www.offensive-security.com/information-security-certifications/), the [Certified Ethical Hacker (CEH)](https://www.eccouncil.org/programs/certified-ethical-hacker-ceh/), [CompTIA PenTest+](https://certification.comptia.org/certifications/pentest) and any other ethical hacking certification. This course helps any cyber security professional that want to learn the skills required to becoming a professional ethical hacker or that want to learn more about general hacking methodologies and concepts.
|
qt5: Fixup CC/CXX environment variables usage in test recipe
The environment variables `CC`/`CXX` may contain spaces, e.g. if you use
a Yocto SDK for cross-compiling. Therefore, we should enclose the values
in quotes to prevent word splitting.
Use same syntax as in `recipes/qt/5.x.x/conanfile.py` to be consist. | @@ -43,17 +43,17 @@ class TestPackageConan(ConanFile):
os.environ[var] = val
return val
- value = _getenvpath('CC')
+ value = _getenvpath("CC")
if value:
- args += ['QMAKE_CC=' + value,
- 'QMAKE_LINK_C=' + value,
- 'QMAKE_LINK_C_SHLIB=' + value]
+ args += ['QMAKE_CC="' + value + '"',
+ 'QMAKE_LINK_C="' + value + '"',
+ 'QMAKE_LINK_C_SHLIB="' + value + '"']
value = _getenvpath('CXX')
if value:
- args += ['QMAKE_CXX=' + value,
- 'QMAKE_LINK=' + value,
- 'QMAKE_LINK_SHLIB=' + value]
+ args += ['QMAKE_CXX="' + value + '"',
+ 'QMAKE_LINK="' + value + '"',
+ 'QMAKE_LINK_SHLIB="' + value + '"']
self.run("qmake %s" % " ".join(args), run_environment=True)
if tools.os_info.is_windows:
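A small sketch of the word-splitting problem the added quotes avoid (the compiler value below is hypothetical and unrelated to Conan):

```python
import shlex

cc = "ccache arm-linux-gnueabi-gcc"     # hypothetical CC containing a space

print(shlex.split("qmake QMAKE_CC=" + cc))
# ['qmake', 'QMAKE_CC=ccache', 'arm-linux-gnueabi-gcc']   -- value split in two

print(shlex.split('qmake QMAKE_CC="' + cc + '"'))
# ['qmake', 'QMAKE_CC=ccache arm-linux-gnueabi-gcc']      -- value kept intact
```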
|
CI/CD: Update base image `nvidia/cuda` from 11.1 to 11.1.1
update cuda_version 11.1 -> 11.1.1 | @@ -88,8 +88,8 @@ jobs:
# the config used in '.azure-pipelines/gpu-tests.yml'
- {python_version: "3.9", pytorch_version: "1.12", cuda_version: "11.3.1", ubuntu_version: "20.04"}
# latest (used in Tutorials)
- - {python_version: "3.8", pytorch_version: "1.9", cuda_version: "11.1", ubuntu_version: "20.04"}
- - {python_version: "3.9", pytorch_version: "1.10", cuda_version: "11.1", ubuntu_version: "20.04"}
+ - {python_version: "3.8", pytorch_version: "1.9", cuda_version: "11.1.1", ubuntu_version: "20.04"}
+ - {python_version: "3.9", pytorch_version: "1.10", cuda_version: "11.1.1", ubuntu_version: "20.04"}
- {python_version: "3.9", pytorch_version: "1.11", cuda_version: "11.3.1", ubuntu_version: "20.04"}
steps:
- uses: actions/checkout@v2
@@ -126,8 +126,8 @@ jobs:
fail-fast: false
matrix:
include:
- - {python_version: "3.8", pytorch_version: "1.9", cuda_version: "11.1"}
- - {python_version: "3.8", pytorch_version: "1.10", cuda_version: "11.1"}
+ - {python_version: "3.8", pytorch_version: "1.9", cuda_version: "11.1.1"}
+ - {python_version: "3.8", pytorch_version: "1.10", cuda_version: "11.1.1"}
- {python_version: "3.9", pytorch_version: "1.11", cuda_version: "11.3.1"}
# nightly: add when there's a release candidate
# - {python_version: "3.9", pytorch_version: "1.12"}
|
Fix noc-1459
Fix reportobjectsserial
EXCEPTION: <class 'AttributeError'> 'NoneType' object has no attribute 'full_name' | @@ -92,15 +92,15 @@ class ReportFilterApplication(SimpleReport):
platform = Platform.get_by_id(mo["platform"]) if mo.get("platform") else None
vendor = Vendor.get_by_id(mo["vendor"]) if mo.get("vendor") else None
version = Firmware.get_by_id(mo["version"]) if mo.get("version") else None
- sn, hw = ra[mo["id"]][:2]
+ sn, hw = ra[mo["id"]][:2] if mo["id"] in ra else (None, None)
if mo["id"] in objects_serials:
sn = objects_serials[mo["id"]]
data += [
[
mo["name"],
mo["address"],
- vendor or None,
- platform.full_name,
+ vendor,
+ platform,
hw,
version,
sn,
|
Add the missing documentation for mariadb recovery
This change is a missing part for the new Kayobe functionality
introduced in | @@ -250,6 +250,27 @@ Further information on backing up and restoring the database is available in
the :kolla-ansible-doc:`Kolla Ansible documentation
<admin/mariadb-backup-and-restore.html>`.
+Performing Database Recovery
+============================
+
+Recover a completely stopped MariaDB cluster using the underlying support in
+Kolla Ansible.
+
+To perform recovery run the following command:
+
+.. code-block:: console
+
+ kayobe overcloud database recover
+
+Or to perform recovery on specified host, run the following command:
+
+.. code-block:: console
+
+ kayobe overcloud database recover --force-recovery-host <host>
+
+By default the underlying kolla-ansible will automatically determine which
+host to use, and this option should not be used.
+
Gathering Facts
===============
|
fix graphql log message
Summary: The GraphQL interface says this should not be null - so don't set it to None
Test Plan: existing snapshot tests
Reviewers: prha, max, schrockn | @@ -813,8 +813,9 @@ def construct_basic_params(graphene_info, event_record, execution_plan):
check.opt_inst_param(execution_plan, 'execution_plan', ExecutionPlan)
return {
'runId': event_record.run_id,
- 'message': event_record.user_message
- or (event_record.dagster_event.message if event_record.dagster_event else None),
+ 'message': event_record.dagster_event.message
+ if event_record.dagster_event
+ else event_record.user_message,
'timestamp': int(event_record.timestamp * 1000),
'level': DauphinLogLevel.from_level(event_record.level),
'step': create_dauphin_step(graphene_info, event_record, execution_plan)
|
update kramdown
security alert related to older kramdown version | @@ -68,7 +68,7 @@ GEM
jekyll-theme-time-machine (= 0.1.1)
jekyll-titles-from-headings (= 0.5.3)
jemoji (= 0.12.0)
- kramdown (= 2.3.0)
+ kramdown (= 2.3.1)
kramdown-parser-gfm (= 1.1.0)
liquid (= 4.0.3)
mercenary (~> 0.3)
|
Add flag to disable gossip compression - Self-Managed
Documentation for:
Updated:
Self-Managed Admin Guide > Configure Mattermost > Configuration Settings > High Availability > Enable Gossip Compression
- Removed Cloud-first disclaimer note | @@ -1020,10 +1020,6 @@ Enable Gossip Compression
**False**: All communication using the gossip protocol remains uncompressed. Once all servers in a cluster are upgraded to Mattermost v5.33 or later, we recommend that you disable this configuration setting for better performance.
-.. note::
-
- This configuration setting is available in Mattermost Cloud. It will be available in Mattermost Server v5.33 on March 16, 2021.
-
+--------------------------------------------------------------------------------------------------------------------------------+
| This feature's ``config.json`` setting is ``"EnableGossipCompression": true`` with options ``true`` and ``false``. |
+--------------------------------------------------------------------------------------------------------------------------------+
|
Remove unused method from metadata app
_get_ovsdb_connection_string is not used anywhere
in the project. | @@ -485,9 +485,6 @@ class DFMetadataProxyHandler(BaseMetadataProxyHandler):
self.conf = conf
self.nb_api = nb_api
- def _get_ovsdb_connection_string(self):
- return 'tcp:{}:6640'.format(cfg.CONF.df.management_ip)
-
def get_headers(self, req):
remote_addr = req.remote_addr
if not remote_addr:
|
Bug fix for stock item tracking list
Query was returning ALL stock tracking objects!
Now filter by StockItem ID | sortable: true,
search: true,
method: 'get',
+ queryParams: function(p) {
+ return {
+ item: {{ item.pk }},
+ }
+ },
columns: [
{
field: 'date',
|
revent: Ignore name prefixes in add_listener
When add_listener is given just a function and it uses the function
name to infer the event type, it will now ignore things that look
like a prefix. In other words, if you have a function like:
def _handle_BLAH_SomeEvent (event): ...
.. it now ignores the BLAH. | @@ -398,7 +398,8 @@ class EventMixin (object):
if (not event_type) and not (event_name):
if not handler.func_name.startswith("_handle_"):
raise RuntimeError("Could not infer event type")
- event_name = handler.func_name[8:]
+ #event_name = handler.func_name[8:]
+ event_name = handler.func_name.rsplit('_', 1)[-1]
by_name = True if event_name else False
t = event_name if by_name else event_type
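The difference between the old slice and the new `rsplit` is easy to see in isolation (standalone sketch, not POX code):

```python
name = "_handle_BLAH_SomeEvent"

old = name[8:]                 # 'BLAH_SomeEvent' -- keeps the prefix
new = name.rsplit("_", 1)[-1]  # 'SomeEvent'      -- prefix ignored

print(old, new)
```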
|
Update instructions for switching to team edition
Since the enterprise branch and team branch are merged into master, the instruction to switch edition has changed. | @@ -51,4 +51,4 @@ Production Docker Setup on Mac OS X
You can run a production deployment on Mac OS X by `installing Docker Compose using the online guide <http://docs.docker.com/installation/mac/>`_ then following the above instructions.
-**Other options:** To install a feature-equivalent version of Mattermost that does not upgrade to enterprise features using a license key, Mattermost Team Edition, repeat steps above excluding ``-b enterprise`` from ``git clone`` command.
+**Other options:** To install a feature-equivalent version of Mattermost that does not upgrade to enterprise features using a license key, Mattermost Team Edition, please see follow this `instruction. <https://github.com/mattermost/mattermost-docker#choose-edition-to-install>`__
|
Cleanup unused components in error-page.
Remove display conditionals from template. | <h1>{{ $tr('selectLearners', { className }) }}</h1>
<p>{{ $tr('showingAllUnassigned') }}</p>
- <p v-if="facilityUsers.length === 0">{{ $tr('noUsersExist') }}</p>
-
- <p v-else-if="usersNotInClass.length === 0">{{ $tr('allUsersAlready') }}</p>
-
- <div v-else>
<div class="actions-header">
<!-- TODO align right -->
/>
</div>
-
<user-table
v-model="selectedUsers"
:users="visibleFilteredUsers"
/>
</nav>
</div>
- </div>
-
- <user-create-modal v-if="showCreateUserModal" />
<!-- TODO align right -->
<k-button
import responsiveWindow from 'kolibri.coreVue.mixins.responsiveWindow';
import orderBy from 'lodash/orderBy';
import kButton from 'kolibri.coreVue.components.kButton';
- import kRouterLink from 'kolibri.coreVue.components.kRouterLink';
import kCheckbox from 'kolibri.coreVue.components.kCheckbox';
import uiIconButton from 'keen-ui/src/UiIconButton';
- import uiIcon from 'keen-ui/src/UiIcon';
import kFilterTextbox from 'kolibri.coreVue.components.kFilterTextbox';
- import userCreateModal from '../user-page/user-create-modal';
import confirmEnrollmentModal from './confirm-enrollment-modal';
- import userRole from '../user-role';
export default {
name: 'managementClassEnroll',
components: {
kButton,
- kRouterLink,
kCheckbox,
uiIconButton,
- uiIcon,
kFilterTextbox,
kGrid,
kGridItem,
- userCreateModal,
confirmEnrollmentModal,
- userRole,
userTable,
},
mixins: [responsiveWindow],
);
},
emptyMessage() {
+ if (this.usersNotInClass.length === 0) {
+ return this.$tr('allUsersAlready');
+ }
+ if (this.facilityUsers.length === 0) {
+ return this.$tr('noUsersExist');
+ }
if (this.filteredUsers.length === 0 && this.filterInput !== '') {
// TODO internationalize this
return `${this.$tr('noUsersMatch')}: '${this.filterInput}'`;
}
- return this.$tr('noUsersExist');
+
+ return '';
},
},
watch: {
|
Show that the enum values are unimportant
Show that the numerical enum values are unimportant by assigning them
with enum.auto(). | @@ -29,24 +29,22 @@ Base = declarative_base(metadata=meta)
class PhoneStatus(enum.Enum):
- # unverified
- unverified = 1
- # verified
- verified = 2
+ unverified = enum.auto()
+ verified = enum.auto()
class HostingStatus(enum.Enum):
- can_host = 1
- maybe = 2
- difficult = 3
- cant_host = 4
+ can_host = enum.auto()
+ maybe = enum.auto()
+ difficult = enum.auto()
+ cant_host = enum.auto()
class SmokingLocation(enum.Enum):
- yes = 1
- window = 2
- outside = 3
- no = 4
+ yes = enum.auto()
+ window = enum.auto()
+ outside = enum.auto()
+ no = enum.auto()
class User(Base):
@@ -218,10 +216,10 @@ class User(Base):
class FriendStatus(enum.Enum):
- pending = 1
- accepted = 2
- rejected = 3
- cancelled = 4
+ pending = enum.auto()
+ accepted = enum.auto()
+ rejected = enum.auto()
+ cancelled = enum.auto()
class FriendRelationship(Base):
@@ -374,9 +372,9 @@ class UserSession(Base):
class ReferenceType(enum.Enum):
- FRIEND = 1
- SURFED = 2 # The "from" user have surfed at the "to" user
- HOSTED = 3 # The "from" user have hosted the "to" user
+ FRIEND = enum.auto()
+ SURFED = enum.auto() # The "from" user have surfed at the "to" user
+ HOSTED = enum.auto() # The "from" user have hosted the "to" user
class Reference(Base):
@@ -442,8 +440,8 @@ class GroupChat(Base):
class GroupChatRole(enum.Enum):
- admin = 1
- participant = 2
+ admin = enum.auto()
+ participant = enum.auto()
class GroupChatSubscription(Base):
@@ -488,26 +486,26 @@ class GroupChatSubscription(Base):
class MessageType(enum.Enum):
- text = 0
+ text = enum.auto()
# e.g.
# image =
# emoji =
# ...
- chat_created = 1
- chat_edited = 2
- user_invited = 3
- user_left = 4
- user_made_admin = 5
- user_removed_admin = 6
- host_request_status_changed = 7
+ chat_created = enum.auto()
+ chat_edited = enum.auto()
+ user_invited = enum.auto()
+ user_left = enum.auto()
+ user_made_admin = enum.auto()
+ user_removed_admin = enum.auto()
+ host_request_status_changed = enum.auto()
class HostRequestStatus(enum.Enum):
- pending = 0
- accepted = 1
- rejected = 2
- confirmed = 3
- cancelled = 4
+ pending = enum.auto()
+ accepted = enum.auto()
+ rejected = enum.auto()
+ confirmed = enum.auto()
+ cancelled = enum.auto()
class Message(Base):
@@ -763,8 +761,8 @@ class NodeClusterAssociation(Base):
class ClusterRole(enum.Enum):
- member = 1
- admin = 2
+ member = enum.auto()
+ admin = enum.auto()
class ClusterSubscription(Base):
@@ -809,9 +807,9 @@ class ClusterPageAssociation(Base):
class PageType(enum.Enum):
- main_page = 1
- place = 2
- guide = 3
+ main_page = enum.auto()
+ place = enum.auto()
+ guide = enum.auto()
class Page(Base):
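A quick sketch, independent of this schema, of what `enum.auto()` gives you: the numbers are assigned automatically, and code should compare members rather than rely on specific values.

```python
import enum

class HostingStatus(enum.Enum):
    can_host = enum.auto()
    maybe = enum.auto()
    cant_host = enum.auto()

assert HostingStatus.maybe.value == 2                  # auto() simply counts up from 1
assert HostingStatus["maybe"] is HostingStatus.maybe   # look up / compare by member, not number
```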
|
remove matrixGroup surfaces
it's better to handle each element surfaces seperatly, because not all elements have surfaces which creates empty gaps when drawing. | @@ -35,19 +35,6 @@ class MatrixGroup(Matrix):
self._lastRayToBeTraced = None
self._lastRayTrace = None
- @property
- def surfaces(self):
- """ A list of interfaces that represents the element for drawing purposes
-
- We combine all interfaces into a single list of interfaces
- """
-
- allSurfaces = []
- for element in self.elements:
- allSurfaces.extend(element.surfaces)
-
- return allSurfaces
-
def append(self, matrix):
r"""This function adds an element at the end of the path.
|
Update integrate_new.py
return removed family | @@ -81,7 +81,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"assembly",
"fbx",
"textures",
- "action"
+ "action",
+ "harmony.template"
]
exclude_families = ["clip"]
db_representation_context_keys = [
|
travis-ci: fix pip install typos and run lint under py2.7 only from lint branch
When more of the pylint failures are fixed it might be run by default. | @@ -31,23 +31,29 @@ after_success:
- codecov
stages:
- - test
+ - name: test
+ if: NOT branch = lint
- lint
+ if: branch = lint
- name: deploy
if: branch = master
if: type IN (push, api)
if: tag IS present
jobs:
+ fast_finish: true
+ allow_failures:
+ - stage: lint
+
include:
# do various lint scans
- stage: lint
- python: 3.6
+ python: 2.7
before_script:
- git clone https://github.com/pkgcore/pkgdist.git
- cp -av pkgdist/requirements/* requirements/
- - pip install requirements/lint.txt
- script: ./pkgdist/pylint src
+ - pip install -r requirements/lint.txt
+ script: ./pkgdist/pylint src/pkgcore
# push tarball and wheels to pypi
- stage: deploy
@@ -72,7 +78,7 @@ jobs:
before_script:
- git clone https://github.com/pkgcore/pkgdist.git
- cp -av pkgdist/requirements/* requirements/
- - pip install requirements/docs.txt
+ - pip install -r requirements/docs.txt
script: python setup.py sdist
deploy:
provider: releases
|
Warn on conditions that can trigger cuBLAS sgemm bug
Summary:
The sgemm in cuBLAS 9.0 has some issues with sizes above 2M on Maxwell and Pascal architectures. Warn in this case.
Pull Request resolved: | #include <ATen/cuda/CUDABlas.h>
#include <algorithm>
+#include <mutex>
float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy)
{
@@ -189,6 +190,26 @@ void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, i
}
+// Check https://github.com/pytorch/pytorch/issues/22078
+// for information about the bug. We don't know the exact conditions that trigger it,
+// but using Sgemm or Hgemm on Maxwell or Pascal seems to be a
+// necessary condition.
+static void checkCuda90Bug(int i_m, int i_n, int i_k)
+{
+#if CUDA_VERSION < 9200 && CUDA_VERSION >= 9000
+ static std::once_flag alreadyWarned;
+ const int LIMIT = 1 << 21;
+ if (i_m > LIMIT || i_n > LIMIT || i_k > LIMIT) {
+ cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
+ if (prop->major == 5 || prop->major == 6) {
+ std::call_once(alreadyWarned, []() {
+ TORCH_WARN("Matrix multiplication for dimensions larger than 2^21 has known bugs on your combination of CUDA version and device type. Please consider upgrading to CUDA 9.2 or later.");
+ });
+ }
+ }
+#endif
+}
+
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
|
Change the name of kubernetes-dashboard deployment.
Related-Bug: | @@ -99,7 +99,7 @@ do
done
#echo check for existence of kubernetes-dashboard deployment
-/usr/bin/kubectl get deployment kube-dashboard --namespace=kube-system
+/usr/bin/kubectl get deployment kubernetes-dashboard --namespace=kube-system
if [ "\$?" != "0" ]; then
/usr/bin/kubectl create -f /srv/kubernetes/manifests/kube-dash-deploy.yaml --namespace=kube-system
|
Description improvement for Google Cloud installs.
Google Cloud allows port 587 for smtp.gmail.com and 2525 for other services such as SparkPost and Mailgun. Took a long time to figure this out. | "collapsible": 0,
"columns": 0,
"depends_on": "eval:!doc.domain && doc.enable_outgoing",
- "description": "If non standard port (e.g. 587)",
+ "description": "If non standard port (e.g. 587). If on Google Cloud, try port 2525.",
"fieldname": "smtp_port",
"fieldtype": "Data",
"hidden": 0,
|
fix: Start week from Sunday instead of Monday
Like we do in client-side | @@ -246,7 +246,7 @@ def get_quarter_start(dt, as_str=False):
def get_first_day_of_week(dt, as_str=False):
dt = getdate(dt)
- date = dt - datetime.timedelta(days=dt.weekday())
+ date = dt - datetime.timedelta(days=(dt.weekday() + 1) % 7)
return date.strftime(DATE_FORMAT) if as_str else date
def get_year_start(dt, as_str=False):
|
Typos and dataset properties.
We are moving forward in the investigation of the Federal Senate dataset. Soon we will send more news about it. | import pandas as pd
-data = pd.read_csv('../data/2017-05-02-senado_2017.csv',sep=';',encoding = "ISO-8859-1", skiprows=1)
+data = pd.read_csv('../data/senate_2017.csv',sep=';',encoding = "ISO-8859-1", skiprows=1)
data.columns = map(str.lower, data.columns)
data.shape
@@ -36,12 +36,12 @@ data.rename(columns={
'fornecedor': 'supplier',
'documento': 'document_id',
'data': 'date',
- 'detalhamento': 'expense_detais',
+ 'detalhamento': 'expense_details',
'valor_reembolsado': 'reimbursement_value',
}, inplace=True)
-# # Expense types translation
+# ## Expense types translation
# In[5]:
@@ -72,7 +72,11 @@ data.head()
data.iloc[0]
-# In[ ]:
-
-
-
+# ## Dataset properties
+# The Federal Senate datasets are divided by years, we have data from the year `2008 - 2013`. It had experienced a few changes through time. So I'll be telling this dataset properties below:
+#
+# * Until 2013 there wasn't a expense details field, but the other older dataset already have this field, but empty.
+# * Until 2010 there wasn't the `National air, water and land transport` and `Private Security Services` categories of expense type, so when we start translating all the data we need to check if the dataset has those categories.
+# * Studying the datasets to what we are doing by now, we can start using the `cnpj_cpf` classifier from the begining, since the data is pretty good to use.
+#
+# This is a `work in progress` we are aiming to be adding it soon to our project.
|
solve ambiguous variable name 'l'
this fix is far from ideal, it just fixes the flake8 style error
for now. Solving in a more proper way would require further
refactoring of this code. Preferably also reducing number of nested
blocks. | @@ -91,10 +91,10 @@ def dia2django(archivo):
myname = "\nclass %s(models.Model) :\n" % actclas
clases[actclas] = [[], myid, myname, 0]
if j.getAttribute("name") == "attributes":
- for l in j.getElementsByTagName("dia:composite"):
- if l.getAttribute("type") == "umlattribute":
+ for ll in j.getElementsByTagName("dia:composite"):
+ if ll.getAttribute("type") == "umlattribute":
# Look for the attribute name and type
- for k in l.getElementsByTagName("dia:attribute"):
+ for k in ll.getElementsByTagName("dia:attribute"):
if k.getAttribute("name") == "name":
nc = k.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
elif k.getAttribute("name") == "type":
|
[doc] a better doc string for site.code and site.lang properties
give a better explanation for the differences | @@ -811,7 +811,7 @@ class BaseSite(ComparableMixin):
@property
def code(self):
"""
- The identifying code for this Site.
+ The identifying code for this Site equal to the wiki prefix.
By convention, this is usually an ISO language code, but it does
not have to be.
@@ -822,7 +822,7 @@ class BaseSite(ComparableMixin):
def lang(self):
"""The ISO language code for this Site.
- Presumed to be equal to the wiki prefix, but this can be overridden.
+ Presumed to be equal to the site code, but this can be overridden.
"""
return self.__code
|
Fixing the incremental build to correctly normalize.
Also switched to use the `match_dependents` method.
Fixes | @@ -96,6 +96,13 @@ class Pod(object):
raise ValueError('.. not allowed in file paths.')
return os.path.join(self.root, pod_path.lstrip('/'))
+ def _normalize_pod_path(self, pod_path):
+ if '..' in pod_path:
+ raise ValueError('.. not allowed in pod paths.')
+ if not pod_path.startswith('/'):
+ pod_path = '/{}'.format(pod_path)
+ return pod_path
+
def _parse_cache_yaml(self):
podcache_file_name = '/{}'.format(self.FILE_PODCACHE)
if not self.file_exists(podcache_file_name):
@@ -226,8 +233,8 @@ class Pod(object):
# the docs that are dependent based on the dependecy graph.
def _gen_docs(pod_paths):
for pod_path in pod_paths:
- for dep_path in self.podcache.dependency_graph.get_dependents(
- self._normalize_path(pod_path)):
+ for dep_path in self.podcache.dependency_graph.match_dependents(
+ self._normalize_pod_path(pod_path)):
yield self.get_doc(dep_path)
routes = grow_routes.Routes.from_docs(self, _gen_docs(pod_paths))
else:
|
set_user_status: Fix the alignment of the selected emoji widget.
This was previously hackily centered; we now center it properly. | .status_emoji_wrapper {
height: 20px;
width: 22px;
- padding: 4px 8px 4px 8px;
+ padding: 6px 8px 2px 8px;
border-right: 1px solid;
border-color: inherit;
cursor: pointer;
.selected_emoji {
width: 18px;
height: 18px;
- top: 4px;
+ top: 50%;
+ transform: translateY(-50%);
cursor: pointer;
}
.smiley_icon {
|
Fix a variable shadowing bug
TODO: Update unit tests to catch this; current graph in testdata/sync_pipeline.pbtxt is too simple (with only 3 components). | @@ -209,14 +209,13 @@ class SyncPipelineTaskGenerator(task_gen.TaskGenerator):
execution.id),
pipeline=self._pipeline)
- def _upstream_nodes_executed(self, node: pipeline_pb2.PipelineNode) -> bool:
+ def _upstream_nodes_executed(self,
+ cur_node: pipeline_pb2.PipelineNode) -> bool:
"""Returns `True` if all the upstream nodes have been successfully executed."""
upstream_nodes = [
node for node_id, node in self._node_map.items()
- if node_id in set(node.upstream_nodes)
+ if node_id in set(cur_node.upstream_nodes)
]
- if not upstream_nodes:
- return True
for node in upstream_nodes:
if self._service_job_manager.is_pure_service_node(self._pipeline_state,
node.node_info.id):
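A stripped-down illustration of the same shadowing mistake (toy code, unrelated to TFX): rebinding the parameter name inside a comprehension makes the filter compare each candidate against itself.

```python
def count_larger(values, threshold):
    # BUG: the comprehension rebinds "threshold", shadowing the parameter,
    # so the condition is always "threshold > threshold" -> False.
    return len([threshold for threshold in values if threshold > threshold])

def count_larger_fixed(values, cur_threshold):
    return len([v for v in values if v > cur_threshold])

print(count_larger([1, 5, 9], 4))        # 0  (wrong)
print(count_larger_fixed([1, 5, 9], 4))  # 2  (expected)
```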
|
Mark configuration file as required for both image and cluster creation
The image builder process was using the default cli configuration path
when not specified. | @@ -39,7 +39,9 @@ class CliCommand(ABC):
if region_arg:
parser.add_argument("-r", "--region", help="AWS Region to use.", choices=SUPPORTED_REGIONS)
if config_arg:
- parser.add_argument("-c", "--config", dest="config_file", help="Defines an alternative config file.")
+ parser.add_argument(
+ "-c", "--config", dest="config_file", help="Defines an alternative config file.", required=True
+ )
if nowait_arg:
parser.add_argument(
"-nw",
|