Dataset columns: code (string, 4–4.48k chars), docstring (string, 1–6.45k chars), _id (string, 24 chars).
def get(node_key: str, property_name: str, default=None):
    node_names = split_node_key(node_key)
    node = root
    try:
        property = node.properties[property_name]
    except KeyError:
        property = default
    for node_name in node_names:
        try:
            node = node.nodes[node_name]
        except KeyError:
            break
        try:
            property = node.properties[property_name]
        except KeyError:
            pass
    return property
Lookup value of a property in the hierarchy.

Args:
    node_key: dotted name, typically a module's __name__ attribute.
    property_name: the global variable's name, e.g. 'lut' for pyqtgraph colormap lookup table default.
    default: default value to return if property not found.

Returns:
    property value
625941bade87d2750b85fc2a
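A minimal usage sketch for the get() above; Node, root and split_node_key here are hypothetical stand-ins for the module's own globals, shown only to illustrate the nearest-ancestor lookup.

class Node:
    def __init__(self):
        self.nodes = {}        # child nodes keyed by name
        self.properties = {}   # property values keyed by name

def split_node_key(node_key):
    return node_key.split('.')

root = Node()
root.properties['lut'] = 'default-lut'
root.nodes['pkg'] = Node()
root.nodes['pkg'].properties['lut'] = 'pkg-lut'

# With these globals in place, get() walks as deep as the key resolves
# and returns the property from the deepest node that defines it:
print(get('pkg.module', 'lut'))  # 'pkg-lut'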
def cleanup(self):
    LOGGER.info('Cleaning up after observation %s!' % self.archive_name)
    self.arch_files_remote = None
    self.arch_files_local = []
    self.arch_urls = None
    self.ar_files = None
    self.psr_archive = None
    rmtree(self.local_archive_path)
short fix, use multiprocessing
625941ba32920d7e50b28069
def freq_handler(freqdict_entry, site, ads):
    perfect_matches = []
    partial_matches = []
    if self.frequency_surface_names is None:
        self.frequency_surface_names = []
    for entry in freqdict_entry:
        masked = [entry[0] in self.frequency_surface_names,
                  entry[1] in self.species_definitions.get(site, {'site_names': []})['site_names'],
                  entry[2]]
        if not self.frequency_surface_names:
            if site in self._gas_sites and entry[0] == 'None':
                masked[0] = True
            elif site not in self._gas_sites:
                masked[0] = True
        else:
            if site in self._gas_sites and entry[0] == 'None':
                masked[0] = True
        if all(masked):
            perfect_matches.append(masked[-1])
        elif masked[0] and site not in self._gas_sites:
            if entry[1] != 'gas':
                partial_matches.append(masked[-1])

    def match_handler(perfect_matches):
        if len(perfect_matches) == 1:
            return perfect_matches[0]
        elif len(perfect_matches) > 1:
            if len(set([len(pm) for pm in perfect_matches])) > 1:
                raise ValueError('Frequency vectors have different ' +
                                 'lengths for ' + str(ads))
            matcharray = np.array(perfect_matches)
            freqout = matcharray.mean(0)
            return list(freqout)
        else:
            return []

    if len(perfect_matches) > 0:
        return match_handler(perfect_matches)
    elif self.estimate_frequencies:
        return match_handler(partial_matches)
    else:
        return []
Returns a single list of frequencies from a freqdict_entry, which is a list of all frequency data for a given species. Entries matching both site and surface (if specified in self.frequency_surface_names) are preferred over those that only match surface. If more than one match of the highest validity is found, the mean of those frequencies is returned.
625941bae8904600ed9f1dc5
def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors='k,',
                       colorbar=False, res=256, size=1, show=True):
    import matplotlib.pyplot as plt
    if layout is None:
        from .layouts import read_layout
        layout = read_layout('Vectorview-all')
    if not isinstance(layout, list):
        layout = [layout]
    n_projs = len(projs)
    nrows = math.floor(math.sqrt(n_projs))
    ncols = math.ceil(n_projs / nrows)
    fig = plt.gcf()
    fig.clear()
    for k, proj in enumerate(projs):
        ch_names = _clean_names(proj['data']['col_names'])
        data = proj['data']['data'].ravel()
        idx = []
        for l in layout:
            is_vv = l.kind.startswith('Vectorview')
            if is_vv:
                from .layouts.layout import _pair_grad_sensors_from_ch_names
                grad_pairs = _pair_grad_sensors_from_ch_names(ch_names)
                if grad_pairs:
                    ch_names = [ch_names[i] for i in grad_pairs]
            idx = [l.names.index(c) for c in ch_names if c in l.names]
            if len(idx) == 0:
                continue
            pos = l.pos[idx]
            if is_vv and grad_pairs:
                from .layouts.layout import _merge_grad_data
                # integer division keeps the reshape tuple integral (Python 3)
                shape = (len(idx) // 2, 2, -1)
                pos = pos.reshape(shape).mean(axis=1)
                data = _merge_grad_data(data[grad_pairs]).ravel()
            break
        ax = plt.subplot(nrows, ncols, k + 1)
        ax.set_title(proj['desc'])
        if len(idx):
            plot_topomap(data, pos, vmax=None, cmap=cmap,
                         sensors=sensors, res=res)
            if colorbar:
                plt.colorbar()
        else:
            raise RuntimeError('Cannot find a proper layout for projection %s'
                               % proj['desc'])
    fig = ax.get_figure()
    if show and plt.get_backend() != 'agg':
        fig.show()
    return fig
Plot topographic maps of SSP projections.

Parameters
----------
projs : list of Projection
    The projections.
layout : None | Layout | list of Layout
    Layout instance specifying sensor positions (does not need to be
    specified for Neuromag data). Or a list of Layout if projections are
    from different sensor types.
cmap : matplotlib colormap
    Colormap.
sensors : bool | str
    Add markers for sensor locations to the plot. Accepts matplotlib plot
    format string (e.g., 'r+' for red plusses).
colorbar : bool
    Plot a colorbar.
res : int
    The resolution of the topomap image (n pixels along each side).
size : scalar
    Side length of the topomaps in inches (only applies when plotting
    multiple topomaps at a time).
show : bool
    Show figures if True.

Returns
-------
fig : instance of matplotlib figure
    Figure distributing one image per channel across sensor topography.
625941ba6fece00bbac2d5d8
def get_schema_validation_errors(action_data, schema, allow_unknown_fields=False):
    errors = []
    for name, field in getFieldsInOrder(schema):
        try:
            value = action_data[name]
        except KeyError:
            if not field.required:
                continue
            errors.append((name, RequiredMissing(name)))
        else:
            try:
                field.bind(action_data).validate(value)
            except ValidationError as e:
                errors.append((name, e))
    if not allow_unknown_fields:
        errors.extend(get_unknown_fields(action_data, schema))
    return errors
Validate a dict against a schema.

Return a list of basic schema validation errors (required fields,
constraints, but doesn't check invariants yet).

Loosely based on zope.schema.getSchemaValidationErrors, but:
- Processes fields in schema order
- Handles dict subscription access instead of object attribute access
- Respects required / optional fields
- Raises RequiredMissing instead of SchemaNotFullyImplemented
625941baa79ad161976cbfe1
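A usage sketch under the assumption that the schema is a zope.schema interface; get_unknown_fields is the module's own helper, so allow_unknown_fields=True sidesteps it here.

from zope.interface import Interface
from zope.schema import Int, TextLine

class IPerson(Interface):
    name = TextLine(title=u'Name', required=True)
    age = Int(title=u'Age', required=False)

# 'name' is required but absent, so one (name, RequiredMissing) pair is
# returned; the optional 'age' validates cleanly.
errors = get_schema_validation_errors(
    {'age': 30}, IPerson, allow_unknown_fields=True)
print(errors)  # [('name', RequiredMissing('name'))]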
def defSix(my, ny):
    if type(my) == tuple or type(ny) == tuple:
        if type(my) == tuple:
            my = list(my)
        if type(ny) == tuple:
            ny = list(ny)
    else:
        if type(my) != frozenset:
            my = {my}
        if type(ny) != frozenset:
            ny = {ny}
    total = list(my) + list(ny)
    doubleNeg = False
    for i in total:
        for j in total:
            if (i.getName() == j.getName() and i.getNeg() != j.getNeg()):
                doubleNeg = True
    if (list(my)[0].getName() == 'ff' or list(ny)[0].getName() == 'ff'):
        return lFormula('ff')
    elif (doubleNeg is True):
        return lFormula('ff')
    else:
        solution = set()
        for x in list(my):
            solution.add(x)
        for x in list(ny):
            solution.add(x)
        return frozenset(solution)
Check whether calculation of the linear factor is allowed. If there are contradictory parts, or a part is ff, reject and return false (ff). Input: two LTL formulas. Output: union of both, or false.
625941ba63f4b57ef0000fbe
def _file_archive_hash_paths(self, named_paths=None):
    if named_paths is None:
        named_paths = sorted(
            self._working_dir_mgr.name_to_path('archive').items())
    for name, path in named_paths:
        if not name:
            name = self._working_dir_mgr.name('archive', path)
        archive_file_name = self._working_dir_mgr.name(
            'archive_file', path)
        uri = self._dest_in_wd_mirror(path, archive_file_name) or path
        yield uri
Helper function for the *upload_args methods. The names of archives to pass to the ``--files`` switch of ``spark-submit``, since we can't use ``--archives``. The names in *named_paths* should be of the archive destination (the 'archive' type in WorkingDirManager) not of the filename we're going to copy the archive to before unpacking it into its destination (the 'archive_file' type).
625941bae64d504609d746dd
def make_payment(self, id, amount):
    if id in self.accounts:
        return self.accounts[id].make_payment(amount, self.current_date)
    else:
        print('Unknown account id')
        return 0
Make payment on specified account
625941ba1f5feb6acb0c49f1
def format(self, *args, **kwargs):
    name = self.formatter_name
    cache = getattr(self, '_formatter_cache', None)
    if not cache or name != cache[0]:
        formatter = formatters.registry.get(name)(self)
        self._formatter_cache = (name, formatter)
    else:
        formatter = cache[1]
    return formatter(*args, **kwargs)
Convenience method for formatting data relative to this concept's associated formatter. To prevent redundant initializations (say, in a tight loop) the formatter instance is cached until the formatter name changes.
625941ba4428ac0f6e5ba68e
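A self-contained sketch of the same cache-until-the-name-changes idea; the registry of formatter factories is hypothetical, and the original's formatters.registry.get(name)(self) call is simplified to a plain lookup.

registry = {'upper': lambda: str.upper, 'title': lambda: str.title}

class Concept:
    formatter_name = 'upper'

    def format(self, value):
        cache = getattr(self, '_formatter_cache', None)
        if not cache or self.formatter_name != cache[0]:
            # (re)build only when the name changed since the last call
            formatter = registry[self.formatter_name]()
            self._formatter_cache = (self.formatter_name, formatter)
        else:
            formatter = cache[1]
        return formatter(value)

c = Concept()
print(c.format('hello'))  # 'HELLO'; builds and caches the formatter
print(c.format('world'))  # 'WORLD'; reuses the cached instance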
def to_pair_of_standard_tableaux(self):
    from sage.combinat.tableau import Tableau
    n = self.semilength()
    if n == 0:
        return (Tableau([]), Tableau([]))
    elif self.height() == n:
        T = Tableau([list(range(1, n + 1))])
        return (T, T)
    else:
        left = [[], []]
        right = [[], []]
        for pos in range(n):
            if self[pos] == open_symbol:
                left[0].append(pos + 1)
            else:
                left[1].append(pos + 1)
            if self[-pos - 1] == close_symbol:
                right[0].append(pos + 1)
            else:
                right[1].append(pos + 1)
        return (Tableau(left), Tableau(right))
Convert ``self`` to a pair of standard tableaux of the same shape and of
length less than or equal to two.

EXAMPLES::

    sage: DyckWord([1,0,1,0]).to_pair_of_standard_tableaux()
    ([[1], [2]], [[1], [2]])
    sage: DyckWord([1,1,0,0]).to_pair_of_standard_tableaux()
    ([[1, 2]], [[1, 2]])
    sage: DyckWord([1,1,0,1,0,0,1,1,0,1,0,1,0,0]).to_pair_of_standard_tableaux()
    ([[1, 2, 4, 7], [3, 5, 6]], [[1, 2, 4, 6], [3, 5, 7]])
625941ba7c178a314d6ef2f6
def interval_cardinality(self):
    return len(list(self.lower_contained_intervals()))
Return the cardinality of the interval, i.e., the number of elements
(binary trees or Dyck words) in the interval represented by ``self``.

Not to be confused with :meth:`size` which is the number of vertices.

EXAMPLES::

    sage: TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)]).interval_cardinality()
    4
    sage: TamariIntervalPoset(4,[]).interval_cardinality()
    14
    sage: TamariIntervalPoset(4,[(1,2),(2,3),(3,4)]).interval_cardinality()
    1
625941ba287bf620b61d390a
def run_Vagrant(node, command, input):
    return run_Command(
        "vagrant ssh %s -c \"%s\"" % (node, command,),
        input,
    )
Run a command using ``vagrant ssh <node> -c <command>``.

:param node: The name of the vagrant node to run the command on.
:type node: ``bytes``
:param command: The command to run via vagrant ssh on the node.
:type command: ``bytes``
:return: stdout as ``bytes``
625941ba4f88993c3716bf10
def __iter__(self):
    raise NotImplementedError()
Abstract iterator must be implemented in a subclass.
625941ba6e29344779a624b1
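A minimal sketch of a concrete subclass satisfying this abstract __iter__; the Base/Numbers names are illustrative only.

class Base:
    def __iter__(self):
        raise NotImplementedError()

class Numbers(Base):
    def __init__(self, items):
        self._items = list(items)

    def __iter__(self):
        # concrete implementation replaces the NotImplementedError stub
        return iter(self._items)

print(list(Numbers([1, 2, 3])))  # [1, 2, 3]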
@view_config(context=APIResource, name='fetch', renderer='json',
             permission=NO_PERMISSION_REQUIRED)
@argify(wheel=bool, prerelease=bool)
def fetch_requirements(request, requirements, wheel=True, prerelease=False):
    if not request.access.can_update_cache():
        return HTTPForbidden()
    locator = BetterScrapingLocator(request.registry.fallback_url, wheel=wheel)
    packages = []
    for line in requirements.splitlines():
        dist = locator.locate(line, prerelease)
        if dist is not None:
            try:
                packages.append(fetch_dist(request, dist))
            except ValueError:
                pass
    return {
        'pkgs': packages,
    }
Fetch packages from the fallback_url.

Parameters
----------
requirements : str
    Requirements in the requirements.txt format (with newlines).
wheel : bool, optional
    If True, will prefer wheels (default True).
prerelease : bool, optional
    If True, will allow prerelease versions (default False).

Returns
-------
pkgs : list
    List of Package objects.
625941ba7d847024c06be15d
def _priority(self, key):
    return self.attribute_definition[key].priority
get priority of attribute key
625941bae5267d203edcdb3d
def get_driver_status(self):
    self.check_validity()
    return GetDriverStatus(*self.ipcon.send_request(
        self, BrickSilentStepper.FUNCTION_GET_DRIVER_STATUS, (), '', 16,
        'B B B ! B ! B B'))
Returns the current driver status.

* Open Load: Indicates if an open load is present on phase A, B or both.
  This could mean that there is a problem with the wiring of the motor.
  False detection can occur in fast motion as well as during stand still.

* Short To Ground: Indicates if a short to ground is present on phase A, B
  or both. If this is detected the driver automatically becomes disabled
  and stays disabled until it is enabled again manually.

* Over Temperature: The over temperature indicator switches to "Warning" if
  the driver IC warms up. The warning flag is expected during long duration
  stepper uses. If the temperature limit is reached the indicator switches
  to "Limit". In this case the driver becomes disabled until it cools down
  again.

* Motor Stalled: Is true if a motor stall was detected.

* Actual Motor Current: Indicates the actual current control scaling as
  used in Coolstep mode. It represents a multiplier of 1/32 to 32/32 of the
  ``Motor Run Current`` as set by :func:`Set Basic Configuration`. Example:
  If a ``Motor Run Current`` of 1000mA was set and the returned value is
  15, the ``Actual Motor Current`` is 16/32*1000mA = 500mA.

* Stallguard Result: Indicates the load of the motor. A lower value signals
  a higher load. Per trial and error you can find out which value
  corresponds to a suitable torque for the velocity used in your
  application. After that you can use this threshold value to find out if a
  motor stall becomes probable and react on it (e.g. decrease velocity).
  During stand still this value can not be used for stall detection, it
  shows the chopper on-time for motor coil A.

* Stealth Voltage Amplitude: Shows the actual PWM scaling. In Stealth mode
  it can be used to detect motor load and stall if autoscale is enabled
  (see :func:`Set Stealth Configuration`).
625941ba31939e2706e4cd0c
def host_exists(host, port):
    try:
        socket.getaddrinfo(host, port)
    except socket.gaierror:
        return False
    return True
Determine whether or not the given host exists. Return True if the host exists, False otherwise.
625941ba56ac1b37e6264072
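A self-contained demonstration of the same check: socket.getaddrinfo raises socket.gaierror for unresolvable names, which is translated into False.

import socket

def host_exists(host, port):
    try:
        socket.getaddrinfo(host, port)
    except socket.gaierror:
        return False
    return True

print(host_exists('localhost', 80))             # True
print(host_exists('no-such-host.invalid', 80))  # False (.invalid never resolves)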
def __init__(self, method_type, route, template, dictionary):
    super().__init__()
    self.method_type = method_type
    self.route_url = route
    self.template = template
    self.dictionary = dictionary
    self._compiled_url = self.compile_route_to_regex()
Class used for view routes.

This class should be returned when a view is called on an HTTP route.
This is useful when returning a view that doesn't need any special logic
and only needs a dictionary.

Arguments:
    method_type {string} -- The method type (GET, POST, PUT etc)
    route {string} -- The current route (/test/url)
    template {string} -- The template to use (dashboard/user)
    dictionary {dict} -- The dictionary to use to render the template.
625941ba004d5f362079a1d3
def __init__(self, obj, parent=None):
    print("######## STEREO '%s' INITIALIZING ########" % obj.name)
    super(self.__class__, self).__init__(obj, parent)
    self.num_cameras = 0
    self.camera_list = []
    for child in obj.children:
        try:
            child['Component_Tag']
        except KeyError as detail:
            continue
        camera_name = child.name
        self.camera_list.append(camera_name)
        self.num_cameras += 1
    print("PTU has %d cameras" % self.num_cameras)
    print('######## STEREO INITIALIZED ########')
Constructor method. Receives the reference to the Blender object. The second parameter should be the name of the object's parent.
625941ba99fddb7c1c9de22f
def remove_chat_context(self, ctx):
    self.manager.remove_chat_context(ctx)
Remove a chat context
625941ba15fb5d323cde09a7
def _X_ik_star(self, i: v.Species, k: v.Species):
    X_ik = self._n_ik_star(i, k) / sum(self._n_ik_star(a, x) for a, x in self._pairs)
    return X_ik
Return the endmember fraction, X^*_i/k, for the pair i/k. Follows Lambotte JCT 2011 Eq. A.5, but this is simply Poschmann Eq. 6 with n^*_ik instead of n_ik. This term is only used in the pair part of the entropy and as a sub-expression of F_i (also part of the pair part of the entropy).
625941ba8c3a87329515825b
def test_set(filler_set, target_set, switch_set, index):
    display_set = []
    for word in filler_set[index]:
        display_set.append(word)
    display_set.append(target_set[index])
    display_set.append(switch_set[index])
    random.shuffle(display_set)
    clrscr()
    for word in display_set:
        print(word, end=" ")
    print("\n")
    time.sleep(NSECONDS)
    for word in display_set:
        if word == target_set[index]:
            display_set.remove(word)
    if index == (NSETS - 1):
        display_set.append(switch_set[0])
    else:
        display_set.append(switch_set[index + 1])
    random.shuffle(display_set)
    clrscr()
    for word in display_set:
        print(word, end=" ")
    print("\n")
    return input("Which word was removed? ")
Combine the filler, target and switch words into one set (at the specified index), shuffle and display it for n seconds, then clear the screen. Replace the target word with the switch word of the next set up (index + 1, or 0 if index is equal to the last set), ask the player to enter the target word (i.e. the word that was replaced), and return the answer as a string.
625941ba30bbd722463cbc60
@Moduleloader.setup
def setup_quoter(ts3bot):
    global bot, dont_send
    bot = ts3bot
    ts3conn = bot.ts3conn
    for g in ts3conn.servergrouplist():
        if g.get('name', '') in ["Guest", "Admin Server Query"]:
            dont_send.append(int(g.get('sgid', 0)))
Set up the quoter and define the server groups not to send quotes to.
625941ba9c8ee82313fbb611
def select_output_file(self):
    filename = QFileDialog.getSaveFileName(self.dlg, "Select output file ", "", '*.csv')
    self.dlg.lineEdit.setText(filename)
Open a file browser to choose where to save the output.
625941ba66656f66f7cbc047
def test_deny_list(self):
    option = ImageClassifierOptions(label_deny_list=_DENY_LIST)
    classifier = ImageClassifier(_MODEL_FILE, options=option)
    categories = classifier.classify(self.image)
    for category in categories:
        label = category.label
        self.assertNotIn(label, _DENY_LIST,
                         'Label "{0}" found but in deny list.'.format(label))
Test the label_deny_list option.
625941ba925a0f43d2549d10
def main():
    f = open('password_v3.0.txt', 'r')
    for line in f:
        print('read:{}'.format(line))
    f.close()
Main function.
625941babf627c535bc13073
def nettoie_mieux(ZZ, factor=2.0):
    ZZr = ZZ.copy()
    for i in range(ZZ.shape[1]):
        iZZ = ZZ[:, i]
        thresh = factor * np.median(iZZ)
        ZZr[iZZ < thresh, i] = 1.0
    return ZZr
Clean noise in a matrix by hard thresholding, columnwise.
625941bad164cc6175782beb
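A self-contained demonstration on random data: entries below factor times their column's median are replaced by 1.0.

import numpy as np

def nettoie_mieux(ZZ, factor=2.0):
    ZZr = ZZ.copy()
    for i in range(ZZ.shape[1]):
        iZZ = ZZ[:, i]
        thresh = factor * np.median(iZZ)
        ZZr[iZZ < thresh, i] = 1.0   # hard threshold, columnwise
    return ZZr

ZZ = np.abs(np.random.randn(5, 3)) + 1.0
print(nettoie_mieux(ZZ, factor=1.0))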
def _makeSybCstr(self, addr, db, uid, pwd, **kwds):
    if not (addr and db):
        return None
    cs = self._getrawcs("sybase")
    if not cs:
        return None
    miscs = []
    if "autoCP" in kwds:
        if "charset" not in kwds:
            miscs.append("charset=%s" % lc.getdefaultlocale()[1])
        del kwds["autoCP"]
    if kwds:
        miscs.extend(["=".join(x) for x in kwds.items()])
    miscs = ";".join(miscs) if miscs else ""
    return cs % {
        "app": gv(kwds, "app", "python"),
        "addr": addr,
        "db": db,
        "uid": uid,
        "pwd": pwd,
        "port": gv(kwds, "port", "5000"),
        "miscs": miscs
    }
Make a connect string for a sybase connection. If you want a direct connect, call getSybConn() instead.

@param addr: IP of the server
@param uid & pwd: the userId and password
@param kwds: key=value pairs joined with ; as delimiter
@param autoCP: get the code page automatically
625941ba50812a4eaa59c1c1
def md_cmd(self, f, c, p, o, po, out, name='MDRUN', num_threads=6,
           t='', *a, **kw):
    rc = self.grompp(f, c, p, o, po, t=t, **kw)
    cmd = [MDRUN, '-s', o, '-deffnm', out]
    return self.proc_cmd(cmd=cmd, cmd_name=name, pc=rc, nt=num_threads,
                         *a, **kw)
For generating MDRUN commands
625941bab57a9660fec3371d
@plugin(native="systemctl")
def hybridsleep(jarvis, s):
    os.system("sudo systemctl hybrid-sleep")
Hybrid sleep: performs both suspend AND hibernate. Will quickly wake up but also survive a power cut.
625941baac7a0e7691ed3f76
def tag_parser(element, default):
    tags = []
    all_tags = element.findall('tag')
    for tag in all_tags:
        key = tag.attrib['k']
        val = tag.attrib['v']
        tag_dict = {'id': element.attrib['id']}
        tag_dict['value'] = fc_cleaner.process(key, val)
        if ':' in key:
            first = re.compile(r"^[a-zA-Z_]+")
            second = re.compile(r":+?.+")
            tag_dict['type'] = first.search(key).group()
            tag_dict['key'] = second.search(key).group()[1:]
        else:
            tag_dict['type'] = default
            tag_dict['key'] = key
        tags.append(tag_dict)
    return tags
Adds an attribute dictionary for each child element to a list
625941baadb09d7d5db6c630
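A usage sketch to pair with the tag_parser above; fc_cleaner is a module-level dependency of the original, stubbed out here as a pass-through.

import xml.etree.ElementTree as ET

class fc_cleaner:                      # stand-in for the real cleaner
    @staticmethod
    def process(key, val):
        return val

element = ET.fromstring(
    '<node id="42"><tag k="addr:street" v="Main St"/></node>')
print(tag_parser(element, default='regular'))
# [{'id': '42', 'value': 'Main St', 'type': 'addr', 'key': 'street'}]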
def list(self, start, end, metadata=None):
    if metadata is None:
        metadata = {}
    opts = {
        'start': start.isoformat(),
        'end': end.isoformat()
    }
    if metadata:
        opts['metadata'] = metadata
    qparams = {}
    for opt, val in opts.items():
        if val:
            if isinstance(val, six.text_type):
                val = val.encode('utf-8')
            qparams[opt] = val
    query_string = '?%s' % parse.urlencode(qparams)
    resp = self._list(
        "/usages%s" % (query_string),
        'tenant_usages'
    )
    return self.to_dict(resp)
List volume usages.

List volume usages between start and end that also have the provided
metadata.

:param start: Datetime
:param end: Datetime
:param metadata: json
625941ba76d4e153a657e9cd
def main():
    updater = Updater("487836253:AAEQRmd6SaK3U22XYnsFzPWqLJEAnBACHRY")
    dp = updater.dispatcher
    dp.add_handler(CommandHandler("find", find))
    updater.start_polling()
    updater.idle()
Start the bot.
625941bab57a9660fec3371e
def get_transform(self):
    return self._transform
Return the `.Transform` associated with this scale.
625941bae1aae11d1e749b51
def _Enter(data, frame_name, is_constant=False, parallel_iterations=10,
           use_ref=True, use_input_shape=True, name=None):
    data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
    if isinstance(data, ops.Tensor):
        if data.dtype._is_ref_dtype and use_ref:
            result = gen_control_flow_ops._ref_enter(
                data, frame_name, is_constant, parallel_iterations, name=name)
        else:
            result = gen_control_flow_ops._enter(
                data, frame_name, is_constant, parallel_iterations, name=name)
        if use_input_shape:
            result.set_shape(data.get_shape())
        return result
    else:
        if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
            raise TypeError("Type %s not supported" % type(data))
        values = _Enter(
            data.values, frame_name, is_constant,
            parallel_iterations=parallel_iterations,
            use_input_shape=use_input_shape, name=name)
        indices = gen_control_flow_ops._enter(
            data.indices, frame_name, is_constant, parallel_iterations,
            name="indices")
        if use_input_shape:
            indices.set_shape(data.indices.get_shape())
        if isinstance(data, ops.IndexedSlices):
            dense_shape = data.dense_shape
            if dense_shape is not None:
                dense_shape = gen_control_flow_ops._enter(
                    dense_shape, frame_name, is_constant, parallel_iterations,
                    name="dense_shape")
                if use_input_shape:
                    dense_shape.set_shape(data.dense_shape.get_shape())
            return ops.IndexedSlices(values, indices, dense_shape)
        else:
            dense_shape = gen_control_flow_ops._enter(
                data.dense_shape, frame_name, is_constant, parallel_iterations,
                name="dense_shape")
            if use_input_shape:
                dense_shape.set_shape(data.dense_shape.get_shape())
            return sparse_tensor.SparseTensor(indices, values, dense_shape)
Creates or finds a child frame, and makes `data` available to it.

The unique `frame_name` is used by the `Executor` to identify frames.
If `is_constant` is true, `data` is a constant in the child frame;
otherwise it may be changed in the child frame. At most
`parallel_iterations` iterations are run in parallel in the child frame.

Args:
  data: The tensor to be made available to the child frame.
  frame_name: The name of the child frame.
  is_constant: If true, the output is constant within the child frame.
  parallel_iterations: The number of iterations allowed to run in parallel.
  use_ref: If true, use ref_enter if data is of ref type.
  name: A name for this operation (optional).

Returns:
  The same tensor as `data`.
625941ba090684286d50eb7e
def define_song_inputs(self):
    lengthsatisfy = False
    while lengthsatisfy == False:
        try:
            songlength = int(input("How long in seconds do you want your song to be?\n"))
            if songlength > 0:
                lengthsatisfy = True
            else:
                raise Exception
        except:
            print("Invalid song length")
    self.songinmilli = songlength * 1000
    keysatisfy = False
    while keysatisfy == False:
        inputkey = input("Enter a Key, i.e. 'asharpminor'\n")
        if inputkey.lower() in ['cmajor', 'dmajor', 'emajor', 'fmajor', 'gmajor',
                                'amajor', 'bmajor', 'csharpmajor', 'dsharpmajor',
                                'fsharpmajor', 'gsharpmajor', 'asharpmajor',
                                'cminor', 'dminor', 'eminor', 'fminor', 'gminor',
                                'aminor', 'bminor', 'csharpminor', 'dsharpminor',
                                'fsharpminor', 'gsharpminor', 'asharpminor']:
            keysatisfy = True
            self.key += inputkey
        else:
            print("Invalid Key")
    assert self.key in ['self.cmajor', 'self.dmajor', 'self.emajor',
                        'self.fmajor', 'self.gmajor', 'self.amajor',
                        'self.bmajor', 'self.csharpmajor', 'self.dsharpmajor',
                        'self.fsharpmajor', 'self.gsharpmajor',
                        'self.asharpmajor', 'self.cminor', 'self.dminor',
                        'self.eminor', 'self.fminor', 'self.gminor',
                        'self.aminor', 'self.bminor', 'self.csharpminor',
                        'self.dsharpminor', 'self.fsharpminor',
                        'self.gsharpminor', 'self.asharpminor'], \
        "Nothing should print here"
    self.songname = input("Give your song a name!\n")
Takes all inputs necessary to generate the song. Side Effects: Stores 'songlength' input Stores 'inputkey' input Stores 'songname' input Prints "Invalid song length" if input is less than 1. Prints "Invalid key" if the key input is incorrect.
625941ba23849d37ff7b2f2f
def insert(self, head, data):
    temp = Node(data)
    if head is None:
        head = temp
    else:
        last = head
        while last.next is not None:
            last = last.next
        last.next = temp
    return head
Insert a node to the end of the list.
625941ba23e79379d52ee405
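A self-contained sketch of the singly linked list this insert() assumes, with a short traversal to confirm insertion order.

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

class LinkedList:
    def insert(self, head, data):
        temp = Node(data)
        if head is None:
            head = temp
        else:
            last = head
            while last.next is not None:
                last = last.next
            last.next = temp
        return head

head = None
for value in (1, 2, 3):
    head = LinkedList().insert(head, value)
node = head
while node:
    print(node.data, end=' ')  # 1 2 3
    node = node.next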
def test_months_with_28_29():
    for item in MONTHS_WITH_28_or_29:
        assert days_in_month(item) == "28 or 29"
Test months with 28 or 29 days
625941ba2eb69b55b151c749
def save(self, *args, **kwargs):
    self = tag_presave(self)
    super(Tag, self).save()
Autopopulates the hash_id.
625941baa4f1c619b28afede
def get_sheetnames(self):
    return list(self._book.dict.keys())
return a list with the names of all sheets in this book
625941ba9f2886367277a72e
def get_serialnumber(self) -> str:
    if self._current_cert == None:
        return ""
    serialstring = ""
    serialnumber = "{0:x}".format(self._current_cert.serial_number)
    for i in range(0, len(serialnumber), 2):
        serialstring += serialnumber[i:i + 2] + ":"
    return serialstring[:-1]
Return serialnumber of current certificate
625941bab7558d58953c4db8
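A standalone sketch of the colon-separated hex formatting used above; note that, like the original, it does not zero-pad odd-length serial numbers.

def format_serial(serial_number: int) -> str:
    hexstr = '{0:x}'.format(serial_number)
    # group the hex digits into byte-sized pairs joined by colons
    return ':'.join(hexstr[i:i + 2] for i in range(0, len(hexstr), 2))

print(format_serial(0x0123456789ab))  # '12:34:56:78:9a:b' (trailing odd nibble)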
def set_move_to_get_here(self, move):
    self.move_to_get_here = move
Set which operator is required to get to this node.
625941ba7b25080760e392f8
def delete_all_content(self):
    self.delete_medias()
    self.delete_pages()
    self.delete_widgets()
Delete all WordPress content.
625941ba5fc7496912cc3823
def __init__(self, max_size=8, max_waiting_num=8):
    DictDataContainer.__init__(self, max_waiting_num)
    DataContainerWithMaxSize.__init__(self, max_size, max_waiting_num)
Initialize data container instance.
625941ba1d351010ab8559ba
def _get_resp_body_errors(self):
    if self._resp_body_errors and len(self._resp_body_errors) > 0:
        return self._resp_body_errors
    errors = []
    warnings = []
    resp_codes = []
    if self.verb is None:
        return errors
    dom = self.response_dom()
    if dom is None:
        return errors
    for e in dom.getElementsByTagName("Errors"):
        eSeverity = None
        eClass = None
        eShortMsg = None
        eLongMsg = None
        eCode = None
        if e.getElementsByTagName('SeverityCode'):
            eSeverity = getNodeText(e.getElementsByTagName('SeverityCode')[0])
        if e.getElementsByTagName('ErrorClassification'):
            eClass = getNodeText(e.getElementsByTagName('ErrorClassification')[0])
        if e.getElementsByTagName('ErrorCode'):
            eCode = getNodeText(e.getElementsByTagName('ErrorCode')[0])
            if int(eCode) not in resp_codes:
                resp_codes.append(int(eCode))
        if e.getElementsByTagName('ShortMessage'):
            eShortMsg = getNodeText(e.getElementsByTagName('ShortMessage')[0])
        if e.getElementsByTagName('LongMessage'):
            eLongMsg = getNodeText(e.getElementsByTagName('LongMessage')[0])
        msg = "Class: %s, Severity: %s, Code: %s, %s%s" % (
            eClass, eSeverity, eCode, eShortMsg, eLongMsg)
        if eSeverity == 'Warning':
            warnings.append(msg)
        else:
            errors.append(msg)
    self._resp_body_warnings = warnings
    self._resp_body_errors = errors
    self._resp_codes = resp_codes
    if self.config.get('warnings') and len(warnings) > 0:
        log.warn("%s: %s\n\n" % (self.verb, "\n".join(warnings)))
    if self.response_dict().Ack == 'Failure':
        if self.config.get('errors'):
            log.error("%s: %s\n\n" % (self.verb, "\n".join(errors)))
        return errors
    return []
Parses the response content to pull errors. Child classes should override this method based on what the errors in the XML response body look like. They can choose to look at the 'ack', 'Errors', 'errorMessage' or whatever other fields the service returns. The implementation below is the original code that was part of error().
625941bacb5e8a47e48b794c
def test_command_line_interface(self):
    runner = CliRunner()
    result = runner.invoke(cli.main)
    assert result.exit_code == 0
    assert 'ticketshow.cli.main' in result.output
    help_result = runner.invoke(cli.main, ['--help'])
    assert help_result.exit_code == 0
    assert '--help Show this message and exit.' in help_result.output
Test the CLI.
625941ba099cdd3c635f0afa
def get_core(self):
    if self.gluecard and self.status == False:
        return pysolvers.gluecard3_core(self.gluecard)
Get an unsatisfiable core if the formula was previously unsatisfied.
625941baaad79263cf3908d9
def plot_S(S, per=False, fs=(15, 15), save='', title='', show=True):
    colormap = 'hot'
    nph = np.max(S)
    ndim = np.ndim(S)
    if ndim == 1:
        plt.figure()
        if not per:
            plt.matshow(np.array([S]), cmap=plt.get_cmap(colormap, nph),
                        vmin=0.5, vmax=nph + 0.5)
        else:
            plt.matshow(periodic(np.array([S])),
                        cmap=plt.get_cmap(colormap, nph),
                        vmin=0.5, vmax=nph + 0.5)
        plt.yticks([])
        plt.colorbar(ticks=np.arange(1, nph + 1))
        if len(save) > 0:
            plt.savefig('./' + save + '.png')
    if ndim == 2:
        plt.figure()
        if not per:
            plt.matshow(S, cmap=plt.get_cmap(colormap, nph),
                        vmin=0.5, vmax=nph + 0.5)
        else:
            plt.matshow(periodic(S), cmap=plt.get_cmap(colormap, nph),
                        vmin=0.5, vmax=nph + 0.5)
        plt.colorbar(ticks=np.arange(1, nph + 1))
        if len(save) > 0:
            plt.savefig('./' + save + '.png')
    if ndim == 3:
        plt.figure()
        base = plt.cm.get_cmap('hot')
        colors = base(np.linspace(0, 1, np.max(S)))
        ax = plt.gca(projection='3d')
        for i in range(1, nph):
            if not per:
                ax.voxels(S == i, facecolors=colors[i - 1], edgecolor='gray')
            else:
                ax.voxels(periodic(S == i), facecolors=colors[i - 1],
                          edgecolor='gray')
        ax.set_xlabel('$p_1$')
        ax.set_ylabel('$p_2$')
        ax.set_zlabel('$p_3$')
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_zticks([])
        mylegend = []
        for i in range(1, nph):
            mylegend.append(Patch(label=str(i), facecolor=colors[i - 1],
                                  edgecolor='black'))
        mylegend.append(Patch(label=str(nph), facecolor='white',
                              edgecolor='black'))
        plt.legend(handles=mylegend)
        plt.tight_layout()
        if len(title) > 0:
            plt.title(title)
        if len(save) > 0:
            plt.savefig('./' + save)
    if show:
        plt.show()
    else:
        plt.close()
    return 0
Plot structure.
625941babde94217f3682c99
def calc_index_of_coincidence(cipher_text):
    freqs = freq_count(cipher_text)
    numpy_arr = np.array(list(freqs.values()))
    n = sum(numpy_arr)
    return (sum((numpy_arr * (numpy_arr - 1))) / (n * (n - 1)), n)
Get all the frequencies from cipher_text and convert them to a numpy array for faster computation (vectorization). Formula: sum(n_i * (n_i - 1)) / (n * (n - 1)), where n_i is the frequency of each letter (e.g. n('a') = 5, n('b') = 8, ...) and n is the total frequency.
625941baa17c0f6771cbdef1
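A worked example of the formula, with a stand-in freq_count that tallies letters; English-like text tends toward an index of coincidence near 0.067, flat random text toward 1/26 ≈ 0.038.

from collections import Counter
import numpy as np

def freq_count(text):
    return Counter(c for c in text.lower() if c.isalpha())

def calc_index_of_coincidence(cipher_text):
    freqs = freq_count(cipher_text)
    counts = np.array(list(freqs.values()))
    n = counts.sum()
    # sum of n_i*(n_i-1) over letters, normalized by n*(n-1)
    return (counts * (counts - 1)).sum() / (n * (n - 1)), n

ic, n = calc_index_of_coincidence('ATTACKATDAWN')
print(round(ic, 4), n)  # (0.1364, 12): tiny samples overshoot badly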
def testUserSamlProviderResponseModel(self):
    pass
Test UserSamlProviderResponseModel
625941ba507cdc57c6306b71
def _load_config(self):
    source_dir = os.path.dirname(os.path.abspath(__file__))
    config_file = os.path.abspath('config.py')
    if not os.path.exists(config_file):
        config_file = os.path.join(source_dir, 'config.py')
    if not os.path.exists(config_file):
        configure = os.path.join(source_dir, 'configure')
        configure = os.path.relpath(configure)
        raise ConfigurationError("Configuration file not found. Forgot to run '{}'?"
                                 .format(configure))
    config = Variables()
    config.set_from_file(config_file)
    try:
        global_variables = BuiltIn().get_variables()
        for name in config.as_dict():
            if name in global_variables:
                config[name] = global_variables[name]
    except RobotNotRunningError:
        pass
    return DotDict(config.as_dict(decoration=False))
Load config.py as a variable file in a way that the individual variables can be overridden on the command line, as if it was loaded explicitly as a variables file.
625941baa8ecb033257d2f74
def test_no_epochs(tmpdir):
    raw, events = _get_data()[:2]
    reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
    raw.info['bads'] = ['MEG 2443', 'EEG 053']
    epochs = mne.Epochs(raw, events, reject=reject)
    epochs.save(op.join(str(tmpdir), 'sample-epo.fif'), overwrite=True)
    assert 0 not in epochs.selection
    assert len(epochs) > 0
    raw.info['bads'] = []
    epochs = mne.Epochs(raw, events, reject=reject)
    with pytest.warns(RuntimeWarning, match='no data'):
        epochs.save(op.join(str(tmpdir), 'sample-epo.fif'), overwrite=True)
    assert len(epochs) == 0
Test that having the first epoch bad does not break writing.
625941ba3c8af77a43ae363b
def _distribute_homeless_shares(mappings, homeless_shares, peers_to_shares):
    servermap_peerids = set([key for key in peers_to_shares])
    servermap_shareids = set()
    for key in sorted(peers_to_shares.keys()):
        for share in peers_to_shares[key]:
            servermap_shareids.add(share)
    to_distribute = set()
    for share in homeless_shares:
        if share in servermap_shareids:
            for peerid in peers_to_shares:
                if share in peers_to_shares[peerid]:
                    mappings[share] = set([peerid])
                    break
        else:
            to_distribute.add(share)
    priority = {}
    pQueue = PriorityQueue()
    for peerid in servermap_peerids:
        priority.setdefault(peerid, 0)
    for share in mappings:
        if mappings[share] is not None:
            for peer in mappings[share]:
                if peer in servermap_peerids:
                    priority[peer] += 1
    if priority == {}:
        return
    for peerid in priority:
        pQueue.put((priority[peerid], peerid))
    for share in to_distribute:
        peer = pQueue.get()
        mappings[share] = set([peer[1]])
        pQueue.put((peer[0] + 1, peer[1]))
Shares which are not mapped to a peer in the maximum spanning graph still need to be placed on a server. This function attempts to distribute those homeless shares as evenly as possible over the available peers. If possible a share will be placed on the server it was originally on, signifying the lease should be renewed instead.
625941ba21a7993f00bc7b88
def __iadd__(self, x):
    if isinstance(x, NoddyOutput):
        self.block += x.block
    else:
        self.block += x
    return self
Augmented assignment addition: add value to all grid blocks.

**Arguments**:
    - *x*: can be either a numerical value (int, float, ...) *or* another
      NoddyOutput object! Note that, in both cases, the own block is
      updated and no new object is created (compare to the overwritten
      addition operator!)

Note: This method is changing the object *in place*!
625941bae5267d203edcdb3e
def deactivate_guider_decenter(cmd, cmdState, actorState, stageName):
    multiCmd = SopMultiCommand(cmd, actorState.timeout, '')
    prep_guider_decenter_off(multiCmd)
    if not handle_multiCmd(multiCmd, cmd, cmdState, stageName,
                           'failed to disable decentered guide mode.',
                           finish=False):
        return False
    return True
Prepare for non-MaNGA observations by disabling guider decenter mode.
625941ba3539df3088e2e1e9
def get_total_background_gen_binning(self):
    total_bg_hist = None
    for name, hist in self.backgrounds_gen_binning.items():
        if total_bg_hist is None:
            total_bg_hist = hist.Clone()
        else:
            total_bg_hist.Add(hist)
    return total_bg_hist
Get total cumulative background with generator binning
625941ba07f4c71912b11325
def __call__(self):
    return self._rot
Returns complex rotation vector
625941ba57b8e32f5248333e
def append_line(self, linespec):
    retval = self.ConfigObjs.append(linespec)
    return self.ConfigObjs[-1]
Unconditionally insert linespec (a text line) at the end of the configuration
625941ba6aa9bd52df036c40
def ord_dh(csp, var=None):
    vars = csp.get_all_unasgn_vars()
    maxVar = None
    max = -1
    for var in vars:
        count = 0
        if var.assignedValue == None:
            for scopeVar in csp.get_cons_with_var(var):
                for othervar in scopeVar.scope:
                    if othervar.name != var.name and othervar.assignedValue == None:
                        count += 1
            if max < 0 or max < count:
                max = count
                maxVar = var
    return maxVar
ord_dh(csp): A var_ordering function that takes CSP object csp and returns Variable object var, according to the Degree Heuristic (DH), as covered in lecture. Given the constraint graph for the CSP, where each variable is a node, and there exists an edge from two variable nodes v1, v2 iff there exists at least one constraint that includes both v1 and v2, DH returns the variable whose node has highest degree.
625941ba796e427e537b0460
def testPullGoodRdf(self):
    data = ('<?xml version="1.0" encoding="utf-8"?>\n'
            '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">'
            '<channel><my header="data"/></channel>'
            '<item><guid>1</guid><updated>123</updated>wooh</item>'
            '</rdf:RDF>')
    topic = 'http://example.com/my-topic'
    callback = 'http://example.com/my-subscriber'
    self.assertTrue(Subscription.insert(callback, topic, 'token', 'secret'))
    FeedToFetch.insert([topic])
    urlfetch_test_stub.instance.expect('get', topic, 200, data)
    self.run_fetch_task()
    feed = FeedToFetch.get_by_key_name(get_hash_key_name(topic))
    self.assertTrue(feed is None)
    event = EventToDeliver.all().get()
    self.assertEquals(data.replace('\n', ''), event.payload.replace('\n', ''))
    self.assertEquals('application/rdf+xml', event.content_type)
    self.assertEquals('rss', FeedRecord.all().get().format)
Tests when the RDF (RSS 1.0) XML can parse just fine.
625941bafbf16365ca6f605b
def get_csci_salt() -> bytes:
    load_dotenv()
    salt = bytes.fromhex(os.environ["CSCI_SALT"])
    return salt
Returns the appropriate salt for CSCI E-29.

:return: bytes representation of the CSCI salt
625941bae64d504609d746de
def __eq__(self, other):
    if type(self) != type(other):
        return False
    if len(self.packages) != len(other.packages):
        return False
    for pkg in self.packages:
        if not pkg in other.packages:
            return False
    return True
Compare one profile to another to determine if anything has changed.
625941ba99cbb53fe6792a85
def save(self, *args, **kwargs):
    if self.rank is None:
        self.rank = 0
    super(OCtype, self).save(*args, **kwargs)
saves a manifest item with a good slug
625941baa8370b771705273f
def getCatIds(self, catNms=[], catIds=[]):
    catNms = catNms if type(catNms) == list else [catNms]
    catIds = catIds if type(catIds) == list else [catIds]
    # supNms was referenced here but is not a parameter of this variant,
    # so only the two documented filters are checked
    if len(catNms) == len(catIds) == 0:
        cats = self.dataset['categories']
    else:
        cats = self.dataset['categories']
        cats = cats if len(catNms) == 0 else [
            cat for cat in cats if cat['name'] in catNms
        ]
        cats = cats if len(catIds) == 0 else [
            cat for cat in cats if cat['id'] in catIds
        ]
    ids = [cat['id'] for cat in cats]
    return ids
Filtering parameters. Default skips that filter.

:param catNms (str array) : get cats for given cat names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array)  : integer array of cat ids
625941ba6fb2d068a760ef38
def test_4_update(self):
    modified = self.updater.update([os.path.join(self.testdir, "4_update.update")])
    assert modified
    entries = self.ld.get_entries(
        self.user_dn, self.ld.SCOPE_BASE, 'objectclass=*', ['*'])
    assert len(entries) == 1
    entry = entries[0]
    assert entry.single_value['gecos'] == 'Test User New2'
Test the updater adding a new value to a single-valued attribute (test_4_update)
625941ba0a50d4780f666d2d
def update_weights(self, weights):
    self.weights = weights
Update weights.

This method updates the values of the weights.

Parameters
----------
weights : np.ndarray
    Array of weights.
625941bafb3f5b602dac352d
def check_last_run_table(self, component):
    logging.info("Getting the last run time in seconds for component: {0}".format(component))
    last_record_time = '2000-01-01 00:00:00'
    last_run = LastRun.objects.filter(component=component).values('last_run')
    for last_run in last_run:
        last_record_time = (timezone.now() - last_run['last_run']).total_seconds()
    return last_record_time
Get the date/time of the last run for the given component.
625941ba498bea3a759b994f
def extractFeatures(source, where=None, geom=None, srs=None, onlyGeom=False,
                    onlyAttr=False, asPandas=True, indexCol=None, **kwargs):
    if not asPandas:
        return _extractFeatures(source=source, geom=geom, where=where,
                                srs=srs, onlyGeom=onlyGeom, onlyAttr=onlyAttr)
    else:
        fields = defaultdict(list)
        fields["geom"] = []
        for g, a in _extractFeatures(source=source, geom=geom, where=where,
                                     srs=srs, onlyGeom=False, onlyAttr=False):
            fields["geom"].append(g.Clone())
            for k, v in a.items():
                fields[k].append(v)
        df = pd.DataFrame(fields)
        if not indexCol is None:
            df.set_index(indexCol, inplace=True, drop=False)
        if onlyGeom:
            return df["geom"]
        elif onlyAttr:
            return df.drop("geom", axis=1)
        else:
            return df
Creates a generator which extracts the features contained within the source.

* Iteratively returns (feature-geometry, feature-fields)

Note:
-----
Be careful when filtering by a geometry, as the extracted features may not
necessarily be IN the given shape
* Sometimes they may only overlap
* Sometimes they are only in the geometry's envelope
* To be sure an extracted geometry fits the selection criteria, you may
  still need to do further processing

Parameters:
-----------
source : Anything acceptable by loadVector()
    The vector data source to read from

geom : ogr.Geometry; optional
    The geometry to search with
    * All features are extracted which touch this geometry

where : str; optional
    An SQL-like where statement to apply to the source
    * Feature attribute names do not need quotes
    * String values should be wrapped in 'single quotes'
    Example: If the source vector has a string attribute called "ISO" and
    an integer attribute called "POP", you could use:

        where = "ISO='DEU' AND POP>1000"

srs : Anything acceptable to geokit.srs.loadSRS(); optional
    The srs of the geometries to extract
    * If not given, the source's inherent srs is used
    * If srs does not match the inherent srs, all geometries will be
      transformed

onlyGeom : bool; optional
    If True, only feature geometries will be returned

onlyAttr : bool; optional
    If True, only feature attributes will be returned

asPandas : bool; optional
    Whether or not the result should be returned as a pandas.DataFrame
    (when onlyGeom is False) or pandas.Series (when onlyGeom is True)

indexCol : str; optional
    The feature identifier to use as the DataFrame's index
    * Only useful when asPandas is True

Returns:
--------
* If asPandas is True: pandas.DataFrame or pandas.Series
* If asPandas is False: generator
625941bae8904600ed9f1dc7
def get_logger(module_name, level='INFO', to_file=True, to_console=True):
    path = os.path.join(os.path.expanduser("~"), ".pygtktalog", "app.log")
    log = logging.getLogger(module_name)
    log.setLevel(LEVEL[level])
    if to_console:
        console_handler = logging.StreamHandler(sys.stderr)
        console_formatter = ColoredFormatter("%(filename)s:%(lineno)s - "
                                             "%(levelname)s - %(message)s")
        console_handler.setFormatter(console_formatter)
        log.addHandler(console_handler)
    elif to_file:
        file_handler = logging.FileHandler(path)
        file_formatter = logging.Formatter("%(asctime)s %(levelname)6s "
                                           "%(filename)s: %(lineno)s - "
                                           "%(message)s")
        file_handler.setFormatter(file_formatter)
        file_handler.setLevel(LEVEL[level])
        log.addHandler(file_handler)
    else:
        devnull = open(os.devnull, "w")
        dummy_handler = logging.StreamHandler(devnull)
        dummy_formatter = DummyFormater("")
        dummy_handler.setFormatter(dummy_formatter)
        log.addHandler(dummy_handler)
    return log
Prepare and return log object. Standard formatting is used for all logs.

Arguments:
    @module_name - String name for Logger object.
    @level - Log level (as string), one of DEBUG, INFO, WARN, ERROR and
        CRITICAL.
    @to_file - If True, additionally stores full log in file inside
        .pygtktalog config directory and to stderr, otherwise log is only
        redirected to stderr.

Returns:
    object of logging.Logger class
625941bac4546d3d9de728cf
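A short usage sketch, assuming the get_logger above (with its LEVEL map and ColoredFormatter) is importable from its module.

log = get_logger(__name__, level='DEBUG', to_file=False, to_console=True)
log.debug('cache warmed in %.2fs', 0.42)
log.error('could not open %s', '/tmp/missing')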
def addCnsCurve(parent, name, centers, degree=1):
    if degree == 3:
        if len(centers) == 2:
            centers.insert(0, centers[0])
            centers.append(centers[-1])
        elif len(centers) == 3:
            centers.append(centers[-1])
    points = [datatypes.Vector() for center in centers]
    node = addCurve(parent, name, points, False, degree)
    applyop.gear_curvecns_op(node, centers)
    return node
Create a curve attached to given centers. One point per center.

Arguments:
    parent (dagNode): Parent object.
    name (str): Name.
    centers (list of dagNode): Objects that will drive the curve.
    degree (int): 1 for linear curve, 3 for cubic.

Returns:
    dagNode: The newly created curve.
625941ba96565a6dacc8f573
def _sampling_concrete(self, args):
    return sampling_concrete(args, (self.batch_size, self.latent_disc_dim))
Sampling from a concrete distribution
625941baa4f1c619b28afedf
def error(self, token):
    if token:
        lineno = getattr(token, 'lineno', 0)
        if lineno:
            sys.stderr.write(f'yacc: Syntax error at line {lineno}, token={token.type}\n')
        else:
            sys.stderr.write(f'yacc: Syntax error, token={token.type}')
    else:
        sys.stderr.write('yacc: Parse error in input. EOF\n')
Default error handling function. This may be subclassed.
625941ba711fe17d82542210
def check_pip_version(min_version='6.0.0'): <NEW_LINE> <INDENT> if StrictVersion(pip.__version__) < StrictVersion(min_version): <NEW_LINE> <INDENT> print("Upgrade pip, your version '{0}' " "is outdated. Minimum required version is '{1}':\n{2}".format(pip.__version__, min_version, GET_PIP)) <NEW_LINE> sys.exit(1)
Ensure that a minimum supported version of pip is installed.
625941ba6fb2d068a760ef39
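A sketch of the same check using packaging.version instead of the deprecated distutils StrictVersion; StrictVersion raises on many modern version strings (e.g. pre-releases like '22.0.dev0'), while packaging parses them fine. The packaging dependency is an assumption here.

import sys
import pip
from packaging.version import Version

def check_pip_version(min_version='6.0.0'):
    # Compare parsed versions rather than strings, so '10.0.0' > '9.0.3'.
    if Version(pip.__version__) < Version(min_version):
        print("Upgrade pip, your version '{0}' is outdated. "
              "Minimum required version is '{1}'".format(pip.__version__, min_version))
        sys.exit(1)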
def save(self, url, addr, clues): <NEW_LINE> <INDENT> assert url and addr <NEW_LINE> urldir = self._mkdir(os.path.join(self.root, self._sanitize(url))) <NEW_LINE> filename = self._sanitize(addr) + os.extsep + self.ext <NEW_LINE> cluefile = os.path.join(urldir, filename) <NEW_LINE> Halberd.clues.file.save(cluefile, clues)
Hierarchically write clues. @param url: URL scanned (will be used as a directory name). @type url: C{str} @param addr: Address of the target. @type addr: C{str} @param clues: Clues to be stored. @type clues: C{list} @raise OSError: If the directories can't be created. @raise IOError: If the file can't be stored successfully.
625941ba187af65679ca4fbb
def XPLMIsFeatureEnabled(inFeature): <NEW_LINE> <INDENT> pass
This returns 1 if a feature is currently enabled for your plugin, or 0 if it is not enabled. It is an error to call this routine with an unsupported feature.
625941ba45492302aab5e15f
def publish_event( self, event_name: int, event_payload: Dict[str, Any], publisher_id: Optional[str] = None, ) -> None: <NEW_LINE> <INDENT> if not self.flags.enable_events: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> assert self.event_queue <NEW_LINE> self.event_queue.publish( self.uid, event_name, event_payload, publisher_id, )
Convenience method provided to publish events into the global event queue.
625941ba9f2886367277a72f
def check_link(self): <NEW_LINE> <INDENT> item = self.gui.tree_url.currentItem() <NEW_LINE> url = item.text(1) <NEW_LINE> x = self.gui.webView.page() <NEW_LINE> self.x = x.mainFrame() <NEW_LINE> self.x.load(QtCore.QUrl(url))
Load the selected tree item's url in the embedded web view
625941bad7e4931a7ee9ddba
def itest_deltafactor(fwp, tvars): <NEW_LINE> <INDENT> pseudo = abidata.pseudo("Si.GGA_PBE-JTH-paw.xml") <NEW_LINE> flow = abilab.AbinitFlow(workdir=fwp.workdir, manager=fwp.manager) <NEW_LINE> kppa = 20 <NEW_LINE> ecut = 2 <NEW_LINE> pawecutdg = ecut * 2 if pseudo.ispaw else None <NEW_LINE> from pseudo_dojo.dojo.dojo_workflows import DeltaFactory <NEW_LINE> work = DeltaFactory().work_for_pseudo(pseudo, kppa=kppa, ecut=ecut, pawecutdg=pawecutdg, paral_kgb=tvars.paral_kgb) <NEW_LINE> flow.register_work(work) <NEW_LINE> flow.allocate() <NEW_LINE> flow.build_and_pickle_dump() <NEW_LINE> for task in flow[0]: <NEW_LINE> <INDENT> task.start_and_wait() <NEW_LINE> <DEDENT> flow.check_status() <NEW_LINE> flow.show_status() <NEW_LINE> assert flow.all_ok <NEW_LINE> assert all(work.finalized for work in flow) <NEW_LINE> results = flow[0].get_results()
Test the flow used for the computation of the deltafactor.
625941bae64d504609d746df
def resolveFrame(frame, who=''): <NEW_LINE> <INDENT> if not isinstance(frame, Frame): <NEW_LINE> <INDENT> if frame not in Frame.Names: <NEW_LINE> <INDENT> raise excepting.ResolveError("ResolveError: Bad frame link name", frame, who) <NEW_LINE> <DEDENT> frame = Frame.Names[frame] <NEW_LINE> <DEDENT> return frame
Returns resolved frame instance from frame. frame may be the name of a frame or a Frame instance. The Frame.Names registry must be set up.
625941ba38b623060ff0ac8d
def fill_region(self, x, y, union_find): <NEW_LINE> <INDENT> R, C = union_find.R, union_find.C <NEW_LINE> q = [(int(x), int(y))] <NEW_LINE> visited = set(union_find.id.keys()) | {(int(x), int(y))} <NEW_LINE> while q: <NEW_LINE> <INDENT> next_level = [] <NEW_LINE> for node in q: <NEW_LINE> <INDENT> for neighbor in self.get_neighbors(*node): <NEW_LINE> <INDENT> if neighbor not in visited and 0 <= neighbor[0] < C-1 and 0 <= neighbor[1] < R-1: <NEW_LINE> <INDENT> visited.add(neighbor) <NEW_LINE> next_level.append(neighbor) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> q = next_level <NEW_LINE> <DEDENT> self.nodes |= visited
Creates a shape by spreading out from the location (x, y) until blocked by already-claimed nodes in union_find or by the grid boundary
625941ba4f88993c3716bf12
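A self-contained sketch of the same breadth-first flood fill on a plain grid, without the union-find dependency; 'blocked' stands in for the already-claimed keys of union_find.id, and a 4-neighborhood is assumed where the original delegates to get_neighbors.

from collections import deque

def flood_fill(x, y, R, C, blocked):
    visited = set(blocked) | {(x, y)}
    q = deque([(x, y)])
    while q:
        cx, cy = q.popleft()
        for nx, ny in ((cx + 1, cy), (cx - 1, cy), (cx, cy + 1), (cx, cy - 1)):
            # Same bounds convention as the original: strictly inside C-1 x R-1.
            if (nx, ny) not in visited and 0 <= nx < C - 1 and 0 <= ny < R - 1:
                visited.add((nx, ny))
                q.append((nx, ny))
    return visited

print(sorted(flood_fill(0, 0, 4, 4, blocked={(1, 1)})))  # region around the block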
def kthSmallest(self, root, k): <NEW_LINE> <INDENT> self.rv = None <NEW_LINE> def inorder(node, known_smaller = 0): <NEW_LINE> <INDENT> if node is None: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> assert known_smaller < k <NEW_LINE> left_tree_size = inorder(node.left, known_smaller) <NEW_LINE> if self.rv is not None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if left_tree_size + known_smaller + 1 == k: <NEW_LINE> <INDENT> self.rv = node.val <NEW_LINE> return <NEW_LINE> <DEDENT> right_tree_size = inorder(node.right, known_smaller + left_tree_size + 1) <NEW_LINE> if self.rv is not None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> return left_tree_size + right_tree_size + 1 <NEW_LINE> <DEDENT> inorder(root) <NEW_LINE> return self.rv
:type root: TreeNode :type k: int :rtype: int
625941ba4e696a04525c92eb
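An equivalent iterative in-order traversal for comparison: it pops nodes in sorted order and stops after the k-th, avoiding the instance-attribute plumbing of the recursive version.

class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

def kth_smallest(root, k):
    stack, node = [], root
    while stack or node:
        while node:                  # descend to the leftmost unvisited node
            stack.append(node)
            node = node.left
        node = stack.pop()           # next value in ascending order
        k -= 1
        if k == 0:
            return node.val
        node = node.right

#      2
#     / \      quick sanity check
#    1   3
root = TreeNode(2); root.left = TreeNode(1); root.right = TreeNode(3)
assert kth_smallest(root, 2) == 2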
def test_active_lstar_05(self): <NEW_LINE> <INDENT> q0 = automaton.State('0') <NEW_LINE> q1 = automaton.State('1') <NEW_LINE> expected_dfa = automaton.DFA({'a'}, start_state=q0) <NEW_LINE> expected_dfa.add_transition(q0, q1, 'a') <NEW_LINE> expected_dfa.add_transition(q1, q0, 'a') <NEW_LINE> expected_dfa.accept_states.add(q1) <NEW_LINE> teacher = oracle.ActiveOracle(expected_dfa) <NEW_LINE> lstar = algorithms.LSTAR({'a'}, teacher) <NEW_LINE> dfa = lstar.learn() <NEW_LINE> self.assertEqual(2, len(dfa.states)) <NEW_LINE> self.assertEqual(1, len(dfa.accept_states)) <NEW_LINE> s_plus = set() <NEW_LINE> s_minus = set() <NEW_LINE> for i in range(1, 21, 2): <NEW_LINE> <INDENT> s_plus.add('a' * i) <NEW_LINE> s_minus.add('a' * (i - 1)) <NEW_LINE> <DEDENT> for s in s_plus: <NEW_LINE> <INDENT> self.assertTrue(dfa.parse_string(s)[1]) <NEW_LINE> <DEDENT> for s in s_minus: <NEW_LINE> <INDENT> self.assertFalse(dfa.parse_string(s)[1])
Try to let L* learn the regular language A. A is a language over the alphabet sigma = {a} that accepts all strings with an odd number of a's.
625941ba4e4d5625662d427b
def display_batch(img_array_batch, nrows=2, ncols=2, title=''): <NEW_LINE> <INDENT> if img_array_batch is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if (utils.image.is_float_image(img_array_batch[0])): <NEW_LINE> <INDENT> max_value = 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> max_value = 255 <NEW_LINE> <DEDENT> fig = plt.figure(random.randint(1, sys.maxint)) <NEW_LINE> fig.suptitle(title, fontsize=12, fontweight='semibold') <NEW_LINE> for i in xrange(min(nrows * ncols, len(img_array_batch))): <NEW_LINE> <INDENT> current_img = img_array_batch[i] <NEW_LINE> if len(current_img.shape) > 2 and current_img.shape[2] == 3: <NEW_LINE> <INDENT> cmap = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if len(current_img.shape) > 2: <NEW_LINE> <INDENT> current_img=np.squeeze(current_img) <NEW_LINE> <DEDENT> cmap = plt.cm.gray <NEW_LINE> <DEDENT> ax = plt.subplot(nrows,ncols,i + 1) <NEW_LINE> plt.imshow(current_img, cmap=cmap, vmin=0, vmax=max_value) <NEW_LINE> ax.get_xaxis().set_visible(False) <NEW_LINE> ax.get_yaxis().set_visible(False)
Display a batch of images given as a 4D numpy array. Remarks: Some RGB images might be displayed with changed colors. Parameters ---------- img_array_batch : numpy.ndarray The image numpy data in format [batch_size, height, width, channels] or a list of numpy arrays in format [height, width, channels], which can have 1 or 3 color channels. nrows : uint, optional The number of rows. ncols : uint, optional The number of columns. title: str, optional The title of the figure.
625941ba004d5f362079a1d5
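A hypothetical call, assuming the function's module (with its matplotlib pyplot import, plt) is available and an interactive backend is configured:

import numpy as np

batch = np.random.rand(4, 64, 64, 3)   # four float RGB images in [0, 1]
display_batch(batch, nrows=2, ncols=2, title='Random noise')
plt.show()                             # plt comes from the module's own import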
def xueqiu(code,path=None,price=True,textv=False,n=1): <NEW_LINE> <INDENT> driver.get("https://xueqiu.com") <NEW_LINE> waitForLoad(driver) <NEW_LINE> driver.find_element_by_xpath('//div[@class="nav__login__btn"]/span').click() <NEW_LINE> usename=driver.find_element_by_xpath('//input[@name="username"]') <NEW_LINE> usename.clear() <NEW_LINE> usename.send_keys("[email protected]") <NEW_LINE> password = driver.find_element_by_xpath('//input[@type="password"]') <NEW_LINE> password.clear() <NEW_LINE> password.send_keys("chen&801019") <NEW_LINE> driver.find_element_by_css_selector("div.modal__login__btn").click() <NEW_LINE> waitForLoad(driver) <NEW_LINE> print('logon sucess') <NEW_LINE> code=_set_code(code) <NEW_LINE> url='https://xueqiu.com/S/{}'.format(code) <NEW_LINE> driver.get(url) <NEW_LINE> waitForLoad(driver) <NEW_LINE> print(driver.current_url) <NEW_LINE> if price: <NEW_LINE> <INDENT> d=driver.find_element_by_xpath('//table[@class="topTable"]').text.split() <NEW_LINE> x=[i.split(':')[1] for i in d] <NEW_LINE> print(d) <NEW_LINE> <DEDENT> d={1:'//div[@stockNews]/ul[1]/li[1]/a', 2:'//div[@stockNews]/ul[1]/li[2]/a', 3:'//div[@stockNews]/ul[1]/li[3]/a', 4:'//div[@stockNews]/ul[1]/li[4]/a', 5:'//div[@stockNews]/ul[1]/li[5]/a', 6:'//div[@stockNews]/ul[1]/li[6]/a', 7:'//div[@stockNews]/ul[1]/li[7]/a'} <NEW_LINE> if textv: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> driver.find_element_by_xpath(d[n]).click() <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> _get_text(driver,code,path,n) <NEW_LINE> pageno=1 <NEW_LINE> while True: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> driver.find_element_by_xpath("//li[@class='next']/a").click() <NEW_LINE> waitForLoad(driver) <NEW_LINE> print("Getting Page %s"%pageno) <NEW_LINE> pageno +=1 <NEW_LINE> _get_text(driver,code,path,n) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return
code: stock ticker code. n: index (1-7) selecting which text tab (e.g. discussions) below the Xueqiu quote page to fetch.
625941bae5267d203edcdb3f
def url_traceback(self, url): <NEW_LINE> <INDENT> if '..' not in url: <NEW_LINE> <INDENT> return url <NEW_LINE> <DEDENT> data = url.split('/') <NEW_LINE> new_data = [] <NEW_LINE> for p in data: <NEW_LINE> <INDENT> if p=='..': <NEW_LINE> <INDENT> new_data.pop(-1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> new_data.append(p) <NEW_LINE> <DEDENT> <DEDENT> new_url = '/'.join(new_data) <NEW_LINE> return new_url
Collapse '..' path-backtracking segments in a URL
625941baab23a570cc25001e
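The same '..'-collapsing logic as a standalone function with a worked example; like the original, it assumes the path never backtracks past its first segment (pop() on an empty list would raise).

def collapse_dotdot(url):
    parts = []
    for p in url.split('/'):
        if p == '..':
            parts.pop()        # drop the segment the '..' cancels out
        else:
            parts.append(p)
    return '/'.join(parts)

assert collapse_dotdot('static/css/../img/logo.png') == 'static/img/logo.png'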
def deserialize(self, data): <NEW_LINE> <INDENT> values = [int(value) if value != ' ' else None for value in data.split('*')] <NEW_LINE> if not (values and values[0] is not None): <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> root = TreeNode(values[0]) <NEW_LINE> node_queue = collections.deque([root]) <NEW_LINE> value_queue = collections.deque(values[1:]) <NEW_LINE> while node_queue: <NEW_LINE> <INDENT> node = node_queue.popleft() <NEW_LINE> if node is None: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> left_check, right_check = False, False <NEW_LINE> while value_queue and not (left_check and right_check): <NEW_LINE> <INDENT> value = value_queue.popleft() <NEW_LINE> if not left_check: <NEW_LINE> <INDENT> node.left = TreeNode(value) if value is not None else None <NEW_LINE> node_queue.append(node.left) <NEW_LINE> left_check = True <NEW_LINE> <DEDENT> elif not right_check: <NEW_LINE> <INDENT> node.right = TreeNode(value) if value is not None else None <NEW_LINE> node_queue.append(node.right) <NEW_LINE> right_check = True <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return root
Decodes your encoded data to tree. :type data: str :rtype: TreeNode
625941ba8c3a87329515825d
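A hedged sketch of the matching encoder implied by this decoder: node values joined with '*' in BFS order, with ' ' standing in for absent children. The actual serialize method is not shown in this entry, so the format is inferred from the parsing code.

import collections

def serialize(root):
    if root is None:
        return ''
    out, queue = [], collections.deque([root])
    while queue:
        node = queue.popleft()
        if node is None:
            out.append(' ')            # decoder maps ' ' back to None
            continue
        out.append(str(node.val))
        queue.append(node.left)        # always emit both child slots
        queue.append(node.right)
    return '*'.join(out)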
def getIcon(self): <NEW_LINE> <INDENT> return os.path.join(iconPath, 'EM_FHSolver.svg')
Return the icon which will appear in the tree view. This method is optional and if not defined a default icon is shown.
625941bade87d2750b85fc2d
def declutter(obj): <NEW_LINE> <INDENT> out_parts = [] <NEW_LINE> for element in obj: <NEW_LINE> <INDENT> headings = element.filter_headings() <NEW_LINE> for heading in headings: <NEW_LINE> <INDENT> element.remove(heading) <NEW_LINE> <DEDENT> table_tags = element.filter_tags(element.RECURSE_OTHERS, matches=lambda n: n.tag == "table") <NEW_LINE> for tag in table_tags: <NEW_LINE> <INDENT> element.remove(tag) <NEW_LINE> <DEDENT> thumbnail_wikilinks = element.filter_wikilinks(element.RECURSE_OTHERS, matches="thumb") <NEW_LINE> for link in thumbnail_wikilinks: <NEW_LINE> <INDENT> element.remove(link) <NEW_LINE> <DEDENT> external_links = element.filter_external_links(element.RECURSE_OTHERS) <NEW_LINE> for exlink in external_links: <NEW_LINE> <INDENT> element.remove(exlink) <NEW_LINE> <DEDENT> elements = element.strip_code().split('\n') <NEW_LINE> cleaned = [] <NEW_LINE> for e in elements: <NEW_LINE> <INDENT> e = re.sub(r"^\;\s*$", "", e) <NEW_LINE> e = re.sub(r"^\s+$", "", e) <NEW_LINE> if e != '': <NEW_LINE> <INDENT> cleaned.append(e) <NEW_LINE> <DEDENT> <DEDENT> out_parts.append('\n'.join(cleaned)) <NEW_LINE> <DEDENT> return '\n'.join(out_parts)
Removes elements from the wikitext that would otherwise not be fully removed by the strip_code() function alone. :param obj: Wikicode object from get_sections() :return: the wikitext stripped of all headings, tags (the table tag was the tricky one causing problems), thumbnail wikilinks, and external URL links, with the remaining statements joined into a single string
625941ba627d3e7fe0d68ced
def get_build_sha1(binary): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with open(binary, "rb") as fp: <NEW_LINE> <INDENT> contents = fp.read() <NEW_LINE> <DEDENT> <DEDENT> except IOError as e: <NEW_LINE> <INDENT> raise HighLevelConfigurationError("Cannot calculate the SHA1 of the high-level extractor binary: {}".format(e)) <NEW_LINE> <DEDENT> return hashlib.sha1(contents).hexdigest()
Calculate the SHA1 of the binary we're using.
625941ba30bbd722463cbc62
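A streaming variant of the same hash, reading the binary in chunks so a large extractor binary is never held in memory all at once:

import hashlib

def file_sha1(path, chunk_size=1 << 20):
    digest = hashlib.sha1()
    with open(path, 'rb') as fp:
        # iter() with a sentinel yields chunks until read() returns b''.
        for chunk in iter(lambda: fp.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()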
def sign_request(self, signature_method, consumer, token): <NEW_LINE> <INDENT> if not self.is_form_encoded: <NEW_LINE> <INDENT> self['oauth_body_hash'] = base64.b64encode(sha(self.body.encode('utf-8')).digest()).decode() <NEW_LINE> <DEDENT> if 'oauth_consumer_key' not in self: <NEW_LINE> <INDENT> self['oauth_consumer_key'] = consumer.key <NEW_LINE> <DEDENT> if token and 'oauth_token' not in self: <NEW_LINE> <INDENT> self['oauth_token'] = token.key <NEW_LINE> <DEDENT> self['oauth_signature_method'] = signature_method.name <NEW_LINE> self['oauth_signature'] = signature_method.sign(self, consumer, token)
Set the signature parameter to the result of sign.
625941baa934411ee3751539
def record_property(Property:"Property"): <NEW_LINE> <INDENT> properties = get_properties() <NEW_LINE> state = Property.state <NEW_LINE> county = Property.county <NEW_LINE> if not properties.get(state): <NEW_LINE> <INDENT> properties[state] = {} <NEW_LINE> <DEDENT> state_properties = properties.get(state) <NEW_LINE> if not state_properties.get(county): <NEW_LINE> <INDENT> state_properties[county] = [] <NEW_LINE> <DEDENT> county_properties = state_properties.get(Property.county) <NEW_LINE> if Property.json() not in county_properties: <NEW_LINE> <INDENT> county_properties.append(Property.json())
Loads properties, then adds provided property to the dict
625941ba9c8ee82313fbb614
def extend_context(start, end): <NEW_LINE> <INDENT> return ''.join(' {}\n'.format(line.rstrip()) for line in self.a[start:end])
Add context lines.
625941bacad5886f8bd26e81
def to_crs(self, crs): <NEW_LINE> <INDENT> if self.crs == crs: <NEW_LINE> <INDENT> return self <NEW_LINE> <DEDENT> transform = osr.CoordinateTransformation(self.crs._crs, crs._crs) <NEW_LINE> return GeoPolygon([p[:2] for p in transform.TransformPoints(self.points)], crs)
Duplicates polygon while transforming to a new CRS :param CRS crs: Target CRS :return: new GeoPolygon with CRS specified by crs :rtype: GeoPolygon
625941bad6c5a10208143ee6
def select_shapes(self, select_function, **kwargs): <NEW_LINE> <INDENT> self.shapes_to_draw.append( {'shapes': self.shapes[self.shapes.apply(select_function, axis=1)]['path'].values, 'args': kwargs} )
Selects shapes based on an arbitrary function such as: lambda shape: shape['highway'] == 'motorway' :param select_function: boolean function to include a shape or not :param kwargs: arguments for the drawing :return:
625941ba7047854f462a12ac
def workdue(self, task): <NEW_LINE> <INDENT> wv = (self.workview(task) and task.get_due_date() and task.get_days_left() < 2) <NEW_LINE> return wv
Filter for tasks due within the next day
625941ba91f36d47f21ac392
def test015_get_process_details_in_container(self): <NEW_LINE> <INDENT> self.lg.info('Choose one random container of list of running nodes') <NEW_LINE> container_name = self.create_contaienr(self.node_id) <NEW_LINE> self.assertTrue(container_name) <NEW_LINE> self.lg.info('Choose one random process of list of process.') <NEW_LINE> response = self.containers_api.post_containers_containerid_jobs(self.node_id, container_name, self.job_body) <NEW_LINE> self.assertEqual(response.status_code, 202) <NEW_LINE> job_id = response.headers['Location'].split('/')[6] <NEW_LINE> self.assertTrue(self.g8core.wait_on_container_job_update(container_name, job_id, 100, False)) <NEW_LINE> response = self.containers_api.get_containers_containerid_processes(self.node_id, container_name) <NEW_LINE> self.assertEqual(response.status_code, 200) <NEW_LINE> processes_list = response.json() <NEW_LINE> process_id = None <NEW_LINE> while not process_id or process_id == 1: <NEW_LINE> <INDENT> random_number = random.randint(0, len(processes_list)-1) <NEW_LINE> process_id = processes_list[random_number]['pid'] <NEW_LINE> <DEDENT> self.lg.info('Send get nodes/{nodeid}/containers/containerid/processes/processid api request.') <NEW_LINE> response = self.containers_api.get_containers_containerid_processes_processid(self.node_id, container_name, str(process_id)) <NEW_LINE> self.assertEqual(response.status_code, 200) <NEW_LINE> process = response.json() <NEW_LINE> container_id = int(list(self.g8core.client.container.find(container_name).keys())[0]) <NEW_LINE> container = self.g8core.client.container.client(container_id) <NEW_LINE> golden_value = container.process.list(process_id)[0] <NEW_LINE> self.lg.info(' Compare results with golden value.') <NEW_LINE> skip_keys = ['cpu', 'vms', 'rss'] <NEW_LINE> for key in process: <NEW_LINE> <INDENT> if key in skip_keys: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if key in golden_value.keys(): <NEW_LINE> <INDENT> self.assertEqual(golden_value[key], process[key])
GAT-036 *get:/node/{nodeid}/containers/containerid/processes/processid * **Test Scenario:** #. Choose one random node from the list of running nodes. #. Choose one random container from the list of running containers. #. Choose one random process from the list of processes. #. Send get nodes/{nodeid}/containers/containerid/processes/processid request. #. Compare results with golden value.
625941bafbf16365ca6f605c
def test_get_property(self): <NEW_LINE> <INDENT> def get_values(db): <NEW_LINE> <INDENT> ans = {} <NEW_LINE> for label, loc in iteritems(db.FIELD_MAP): <NEW_LINE> <INDENT> if isinstance(label, numbers.Integral): <NEW_LINE> <INDENT> label = '#'+db.custom_column_num_map[label]['label'] <NEW_LINE> <DEDENT> label = unicode_type(label) <NEW_LINE> ans[label] = tuple(db.get_property(i, index_is_id=True, loc=loc) for i in db.all_ids()) <NEW_LINE> if label in ('id', 'title', '#tags'): <NEW_LINE> <INDENT> with self.assertRaises(IndexError): <NEW_LINE> <INDENT> db.get_property(9999, loc=loc) <NEW_LINE> <DEDENT> with self.assertRaises(IndexError): <NEW_LINE> <INDENT> db.get_property(9999, index_is_id=True, loc=loc) <NEW_LINE> <DEDENT> <DEDENT> if label in {'tags', 'formats'}: <NEW_LINE> <INDENT> ans[label] = tuple(set(x.split(',')) if x else x for x in ans[label]) <NEW_LINE> <DEDENT> if label == 'series_sort': <NEW_LINE> <INDENT> ans[label] = None <NEW_LINE> <DEDENT> <DEDENT> return ans <NEW_LINE> <DEDENT> db = self.init_legacy() <NEW_LINE> new_vals = get_values(db) <NEW_LINE> db.close() <NEW_LINE> old = self.init_old() <NEW_LINE> old_vals = get_values(old) <NEW_LINE> old.close() <NEW_LINE> old = None <NEW_LINE> self.assertEqual(old_vals, new_vals)
Test the get_property interface for reading data
625941ba851cf427c661a3b1
def low(balance, annualInterestRate): <NEW_LINE> <INDENT> monthlyInterestRate = annualInterestRate/12.0 <NEW_LINE> low = balance / 12 <NEW_LINE> high = (balance * (1+monthlyInterestRate)**12) /12.0 <NEW_LINE> payment = (high+low)/2 <NEW_LINE> debt = balance <NEW_LINE> while low <= (high - .1): <NEW_LINE> <INDENT> debt = balance <NEW_LINE> for i in range(12): <NEW_LINE> <INDENT> debt -= payment <NEW_LINE> debt *= (monthlyInterestRate + 1) <NEW_LINE> <DEDENT> if debt > 0: <NEW_LINE> <INDENT> low = payment <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> high = payment <NEW_LINE> <DEDENT> payment = (high+low)/2.0 <NEW_LINE> <DEDENT> return payment
INPUT: balance = 320000 annualInterestRate = .2 OUTPUT: 29157.09
625941bac432627299f04ae3
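Checking the documented example: the bisection narrows [balance/12, compounded-balance/12] until the fixed monthly payment clears the debt within a year.

payment = low(320000, 0.2)
print(round(payment, 2))   # ~29157.09, matching the docstring's OUTPUT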
def derivation1(data): <NEW_LINE> <INDENT> length = len(data) <NEW_LINE> der = [float('nan') for i in range(length)] <NEW_LINE> last = length - 1 <NEW_LINE> for i in range(1, last): <NEW_LINE> <INDENT> der[i] = (data[i + 1] - data[i - 1]) / 2 <NEW_LINE> <DEDENT> return der
Compute the first-order derivative of a spectrum @param data: spectral data @return: first-order derivative of the spectrum
625941ba30dc7b7665901809
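A quick check of the central-difference behaviour: for a linear series the interior derivative is constant, and the two endpoints stay NaN because no centered difference exists there.

data = [0, 2, 4, 6, 8]
print(derivation1(data))   # [nan, 2.0, 2.0, 2.0, nan]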
def export_rows_analysisID_sqlalchemyModel_table_js(self, model_I, analysis_id_I, data1_keys, data1_nestkeys, data1_keymap, used__I=True, tabletype_I='responsivecrosstable_01', data_dir_I='tmp'): <NEW_LINE> <INDENT> data_O = []; <NEW_LINE> queryselect = sbaas_base_query_select(session_I=self.session,engine_I=self.engine,settings_I=self.settings,data_I=self.data); <NEW_LINE> data_O = queryselect.get_rows_analysisID_sqlalchemyModel(analysis_id_I,used__I); <NEW_LINE> ddttable = ddt_container_table() <NEW_LINE> ddttable.make_container_table(data_O,data1_keys,data1_nestkeys,data1_keymap,tabletype=tabletype_I); <NEW_LINE> if data_dir_I=='tmp': <NEW_LINE> <INDENT> filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js' <NEW_LINE> <DEDENT> elif data_dir_I=='data_json': <NEW_LINE> <INDENT> data_json_O = ddtutilities.get_allObjects_js(); <NEW_LINE> return data_json_O; <NEW_LINE> <DEDENT> with open(filename_str,'w') as file: <NEW_LINE> <INDENT> file.write(ddttable.get_allObjects());
Export a tabular representation of the data INPUT: model_I = sqlalchemy model object analysis_id_I = string,
625941babe383301e01b532b