code: string (lengths 75 – 104k)
docstring: string (lengths 1 – 46.9k)
def cells_rt_meta(workbook, sheet, row, col): """ Traverse all cells in a row. If you find new data in a cell, add it to the list. :param obj workbook: :param str sheet: :param int row: :param int col: :return list: Cell data for a specific row """ logger_excel.info("enter cells_rt_meta") col_loop = 0 cell_data = [] temp_sheet = workbook.sheet_by_name(sheet) while col_loop < temp_sheet.ncols: col += 1 col_loop += 1 try: if temp_sheet.cell_value(row, col) != xlrd.empty_cell and temp_sheet.cell_value(row, col) != '': cell_data.append(temp_sheet.cell_value(row, col)) except IndexError as e: logger_excel.warning("cells_rt_meta: IndexError: sheet: {}, row: {}, col: {}, {}".format(sheet, row, col, e)) logger_excel.info("exit cells_rt_meta") return cell_data
Traverse all cells in a row. If you find new data in a cell, add it to the list. :param obj workbook: :param str sheet: :param int row: :param int col: :return list: Cell data for a specific row
def render_html(self, obj, context=None): """ Generate the 'html' attribute of an oembed resource using a template. Sort of a corollary to the parser's render_oembed method. By default, the current mapping will be passed in as the context. OEmbed templates are stored in: oembed/provider/[app_label]_[model].html -- or -- oembed/provider/media_video.html """ provided_context = context or Context() context = RequestContext(mock_request()) context.update(provided_context) context.push() context[self._meta.context_varname] = obj rendered = render_to_string(self._meta.template_name, context) context.pop() return rendered
Generate the 'html' attribute of an oembed resource using a template. Sort of a corollary to the parser's render_oembed method. By default, the current mapping will be passed in as the context. OEmbed templates are stored in: oembed/provider/[app_label]_[model].html -- or -- oembed/provider/media_video.html
def lreshape(data, groups, dropna=True, label=None): """ Reshape wide-format data to long. Generalized inverse of DataFrame.pivot Parameters ---------- data : DataFrame groups : dict {new_name : list_of_columns} dropna : boolean, default True Examples -------- >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], ... 'team': ['Red Sox', 'Yankees'], ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team year hr 0 Red Sox 2007 514 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526 Returns ------- reshaped : DataFrame """ if isinstance(groups, dict): keys = list(groups.keys()) values = list(groups.values()) else: keys, values = zip(*groups) all_cols = list(set.union(*[set(x) for x in values])) id_cols = list(data.columns.difference(all_cols)) K = len(values[0]) for seq in values: if len(seq) != K: raise ValueError('All column lists must be same length') mdata = {} pivot_cols = [] for target, names in zip(keys, values): to_concat = [data[col].values for col in names] import pandas.core.dtypes.concat as _concat mdata[target] = _concat._concat_compat(to_concat) pivot_cols.append(target) for col in id_cols: mdata[col] = np.tile(data[col].values, K) if dropna: mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool) for c in pivot_cols: mask &= notna(mdata[c]) if not mask.all(): mdata = {k: v[mask] for k, v in mdata.items()} return data._constructor(mdata, columns=id_cols + pivot_cols)
Reshape wide-format data to long. Generalized inverse of DataFrame.pivot Parameters ---------- data : DataFrame groups : dict {new_name : list_of_columns} dropna : boolean, default True Examples -------- >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], ... 'team': ['Red Sox', 'Yankees'], ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team year hr 0 Red Sox 2007 514 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526 Returns ------- reshaped : DataFrame
def face_function(script, function='(fi == 0)'): """Boolean function using muparser lib to perform face selection over current mesh. See help(mlx.muparser_ref) for muparser reference documentation. It's possible to use parenthesis, per-vertex variables and boolean operator: (, ), and, or, <, >, = It's possible to use per-face variables like attributes associated to the three vertices of every face. Variables (per face): x0, y0, z0 for first vertex; x1,y1,z1 for second vertex; x2,y2,z2 for third vertex nx0, ny0, nz0, nx1, ny1, nz1, etc. for vertex normals r0, g0, b0, a0, etc. for vertex color q0, q1, q2 for quality wtu0, wtv0, wtu1, wtv1, wtu2, wtv2 (per wedge texture coordinates) ti for face texture index (>= ML2016.12) vsel0, vsel1, vsel2 for vertex selection (1 yes, 0 no) (>= ML2016.12) fr, fg, fb, fa for face color (>= ML2016.12) fq for face quality (>= ML2016.12) fnx, fny, fnz for face normal (>= ML2016.12) fsel face selection (1 yes, 0 no) (>= ML2016.12) Args: script: the FilterScript object or script filename to write the filter] to. function (str): a boolean function that will be evaluated in order to select a subset of faces. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA """ filter_xml = ''.join([ ' <filter name="Conditional Face Selection">\n', ' <Param name="condSelect" ', 'value="{}" '.format(str(function).replace('&', '&amp;').replace('<', '&lt;')), 'description="boolean function" ', 'type="RichString" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
Boolean function using muparser lib to perform face selection over current mesh. See help(mlx.muparser_ref) for muparser reference documentation. It's possible to use parenthesis, per-vertex variables and boolean operator: (, ), and, or, <, >, = It's possible to use per-face variables like attributes associated to the three vertices of every face. Variables (per face): x0, y0, z0 for first vertex; x1,y1,z1 for second vertex; x2,y2,z2 for third vertex nx0, ny0, nz0, nx1, ny1, nz1, etc. for vertex normals r0, g0, b0, a0, etc. for vertex color q0, q1, q2 for quality wtu0, wtv0, wtu1, wtv1, wtu2, wtv2 (per wedge texture coordinates) ti for face texture index (>= ML2016.12) vsel0, vsel1, vsel2 for vertex selection (1 yes, 0 no) (>= ML2016.12) fr, fg, fb, fa for face color (>= ML2016.12) fq for face quality (>= ML2016.12) fnx, fny, fnz for face normal (>= ML2016.12) fsel face selection (1 yes, 0 no) (>= ML2016.12) Args: script: the FilterScript object or script filename to write the filter] to. function (str): a boolean function that will be evaluated in order to select a subset of faces. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def fun_inverse(fun=None, y=0, x0=None, args=(), disp=False, method='Nelder-Mead', **kwargs): r"""Find the threshold level that accomplishes the desired specificity Call indicated function repeatedly to find answer to the inverse function evaluation Arguments: fun (function): function to be calculate an inverse for y (float): desired output of fun x0 (float): initial guess at input to fun, the fun arg that will be adjusted args (list or tuple): constants arguments to fun which will not be adjusted constraints (tuple): dictionary of optimizer constraints (see scipy.optimize.minimize) disp (bool): whether to display incremental results during optimization method (str): one of the scipy.optimize.minimize methods additional kwargs are passed along to the minimize function fun_inverse can be used to calculate a trivial square root: >>> round(fun_inverse(fun=lambda x: x**2, y=9, x0=0), 6) 3.0 """ fun_inverse.fun = cost_fun.fun = fun if fun is not None else getattr(fun_inverse, 'fun', lambda x: x) fun_inverse.target = cost_fun.target = y or 0 fun_inverse.verbose = verbose = cost_fun.verbose = kwargs.pop( 'verbose', getattr(cost_fun, 'verbose', getattr(fun_inverse, 'verbose', False))) fun_inverse.x0 = x0 = x0 if x0 is not None else getattr(fun_inverse, 'x0', 0) or 0 if verbose: print(' x0: {}\ntarget: {}\n'.format(fun_inverse.x0, fun_inverse.target)) res = minimize(cost_fun, x0=x0, args=args, options=kwargs.pop('options', {}), method=method, **kwargs ) if isinstance(x0, NUMERIC_TYPES): return res.x[0] return res.x
r"""Find the threshold level that accomplishes the desired specificity Call indicated function repeatedly to find answer to the inverse function evaluation Arguments: fun (function): function to be calculate an inverse for y (float): desired output of fun x0 (float): initial guess at input to fun, the fun arg that will be adjusted args (list or tuple): constants arguments to fun which will not be adjusted constraints (tuple): dictionary of optimizer constraints (see scipy.optimize.minimize) disp (bool): whether to display incremental results during optimization method (str): one of the scipy.optimize.minimize methods additional kwargs are passed along to the minimize function fun_inverse can be used to calculate a trivial square root: >>> round(fun_inverse(fun=lambda x: x**2, y=9, x0=0), 6) 3.0
def create_ar (archive, compression, cmd, verbosity, interactive, filenames): """Create an AR archive.""" opts = 'rc' if verbosity > 1: opts += 'v' cmdlist = [cmd, opts, archive] cmdlist.extend(filenames) return cmdlist
Create an AR archive.
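A quick trace of create_ar with made-up arguments (archive name and object files are placeholders; compression and interactive are accepted but unused by this backend):

cmdlist = create_ar('libdemo.a', None, 'ar', verbosity=2, interactive=False,
                    filenames=['a.o', 'b.o'])
# verbosity > 1 adds the 'v' flag, so this yields:
# ['ar', 'rcv', 'libdemo.a', 'a.o', 'b.o']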
def available_composite_ids(self, available_datasets=None): """Get names of compositors that can be generated from the available datasets. Returns: sorted list of available compositors' names """ if available_datasets is None: available_datasets = self.available_dataset_ids(composites=False) else: if not all(isinstance(ds_id, DatasetID) for ds_id in available_datasets): raise ValueError( "'available_datasets' must all be DatasetID objects") all_comps = self.all_composite_ids() # recreate the dependency tree so it doesn't interfere with the user's # wishlist comps, mods = self.cpl.load_compositors(self.attrs['sensor']) dep_tree = DependencyTree(self.readers, comps, mods) dep_tree.find_dependencies(set(available_datasets + all_comps)) available_comps = set(x.name for x in dep_tree.trunk()) # get rid of modified composites that are in the trunk return sorted(available_comps & set(all_comps))
Get names of compositors that can be generated from the available datasets. Returns: sorted list of available compositors' names
def _hijack_gtk(self): """Hijack a few key functions in GTK for IPython integration. Modifies pyGTK's main and main_quit with a dummy so user code does not block IPython. This allows us to use %run to run arbitrary pygtk scripts from a long-lived IPython session, and when they attempt to start or stop the GTK main loop, nothing actually happens. Returns ------- The original functions that have been hijacked: - gtk.main - gtk.main_quit """ def dummy(*args, **kw): pass # save and trap main and main_quit from gtk orig_main, gtk.main = gtk.main, dummy orig_main_quit, gtk.main_quit = gtk.main_quit, dummy return orig_main, orig_main_quit
Hijack a few key functions in GTK for IPython integration. Modifies pyGTK's main and main_quit with a dummy so user code does not block IPython. This allows us to use %run to run arbitrary pygtk scripts from a long-lived IPython session, and when they attempt to start or stop the GTK main loop, nothing actually happens. Returns ------- The original functions that have been hijacked: - gtk.main - gtk.main_quit
def _detect_sse41(self): "Does this compiler support SSE4.1 intrinsics?" self._print_support_start('SSE4.1') result = self.hasfunction( '__m128 v; _mm_round_ps(v,0x00)', include='<smmintrin.h>', extra_postargs=['-msse4']) self._print_support_end('SSE4.1', result) return result
Does this compiler support SSE4.1 intrinsics?
def newton(self): """ Newton power flow routine Returns ------- (bool, int) success flag, number of iterations """ dae = self.system.dae while True: inc = self.calc_inc() dae.x += inc[:dae.n] dae.y += inc[dae.n:dae.n + dae.m] self.niter += 1 max_mis = max(abs(inc)) self.iter_mis.append(max_mis) self._iter_info(self.niter) if max_mis < self.config.tol: self.solved = True break elif self.niter > 5 and max_mis > 1000 * self.iter_mis[0]: logger.warning('Blown up in {0} iterations.'.format(self.niter)) break if self.niter > self.config.maxit: logger.warning('Reached maximum number of iterations.') break return self.solved, self.niter
Newton power flow routine Returns ------- (bool, int) success flag, number of iterations
def type_converter(text): """ I convert strings into integers, floats, and strings! """ if text.isdigit(): return int(text), int try: return float(text), float except ValueError: return text, STRING_TYPE
I convert strings into integers, floats, and strings!
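Illustrative calls; STRING_TYPE is a module-level constant that is not shown here, so the last result is described in those terms:

type_converter("7")      # -> (7, int)
type_converter("3.14")   # -> (3.14, float)
type_converter("hello")  # -> ("hello", STRING_TYPE), whatever string type the module defines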
def jsonresolver_loader(url_map): """Jsonresolver hook for funders resolving.""" def endpoint(doi_code): pid_value = "10.13039/{0}".format(doi_code) _, record = Resolver(pid_type='frdoi', object_type='rec', getter=Record.get_record).resolve(pid_value) return record pattern = '/10.13039/<doi_code>' url_map.add(Rule(pattern, endpoint=endpoint, host='doi.org')) url_map.add(Rule(pattern, endpoint=endpoint, host='dx.doi.org'))
Jsonresolver hook for funders resolving.
def b_pathInTree(self, astr_path): """ Converts a string <astr_path> specifier to a list-based *absolute* lookup, i.e. "/node1/node2/node3" is converted to ['/' 'node1' 'node2' 'node3']. The method also understands a paths that start with: '..' or combination of '../../..' and is also aware that the root node is its own parent. If the path list conversion is valid (i.e. exists in the space of existing paths, l_allPaths), return True and the destination path list; else return False and the current path list. """ if astr_path == '/': return True, ['/'] al_path = astr_path.split('/') # Do we have a trailing '/' and not doing a '../'? If so, strip it..! if astr_path != '../' and al_path[-1] == '': al_path = al_path[0:-2] # Check for absolute path if not len(al_path[0]): al_path[0] = '/' # print "returning %s : %s" % (self.b_pathOK(al_path), al_path) return self.b_pathOK(al_path), al_path # Here we are in relative mode... # First, resolve any leading '..' l_path = self.l_cwd[:] if al_path[0] == '..': while al_path[0] == '..' and len(al_path): l_path = l_path[0:-1] if len(al_path) >= 2: al_path = al_path[1:] else: al_path[0] = '' # print "l_path = %s" % l_path # print "al_path = %s (%d)" % (al_path, len(al_path[0])) if len(al_path[0]): # print "extending %s with %s" % (l_path, al_path) l_path.extend(al_path) else: l_path = self.l_cwd l_path.extend(al_path) # print "final path list = %s (%d)" % (l_path, len(l_path)) if len(l_path)>=1 and l_path[0] != '/': l_path.insert(0, '/') if len(l_path)>1: l_path[0] = '' if not len(l_path): l_path = ['/'] str_path = '/'.join(l_path) # print "final path str = %s" % str_path b_valid, al_path = self.b_pathInTree(str_path) return b_valid, al_path
Converts a string <astr_path> specifier to a list-based *absolute* lookup, i.e. "/node1/node2/node3" is converted to ['/' 'node1' 'node2' 'node3']. The method also understands paths that start with '..' or a combination of '../../..' and is also aware that the root node is its own parent. If the path list conversion is valid (i.e. exists in the space of existing paths, l_allPaths), return True and the destination path list; else return False and the current path list.
def get_model_indexes(model, add_reserver_flag=True): """ Creating indexes suit for model_config. """ import uliweb.orm as orm from sqlalchemy.engine.reflection import Inspector indexes = [] engine = model.get_engine().engine insp = Inspector.from_engine(engine) for index in insp.get_indexes(model.tablename): d = {} d['name'] = index['name'] d['unique'] = index['unique'] d['fields'] = index['column_names'] if add_reserver_flag: d['_reserved'] = True indexes.append(d) return indexes
Creating indexes suit for model_config.
def parse_mini(memory_descriptor, buff): """ memory_descriptor: MINIDUMP_MEMORY_DESCRIPTOR buff: file_handle """ mms = MinidumpMemorySegment() mms.start_virtual_address = memory_descriptor.StartOfMemoryRange mms.size = memory_descriptor.Memory.DataSize mms.start_file_address = memory_descriptor.Memory.Rva mms.end_virtual_address = mms.start_virtual_address + mms.size return mms
memory_descriptor: MINIDUMP_MEMORY_DESCRIPTOR buff: file_handle
def obfn_dfd(self): r"""Compute data fidelity term :math:`(1/2) \sum_k \| W (\sum_m \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2` """ Ef = self.eval_Rf(self.Xf) E = sl.irfftn(Ef, self.cri.Nv, self.cri.axisN) return (np.linalg.norm(self.W * E)**2) / 2.0
r"""Compute data fidelity term :math:`(1/2) \sum_k \| W (\sum_m \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2`
def create_argparser(): """Instantiate an `argparse.ArgumentParser`. Adds all basic cli options including default values. """ parser = argparse.ArgumentParser() arg_defaults = { "daemon": False, "loop": False, "listpresets": False, "config": None, "debug": False, "sleeptime": 300, "version": False, "verbose_count": 0 } # add generic client options to the CLI: parser.add_argument("-c", "--config", dest="config", help="config file", default=arg_defaults["config"]) parser.add_argument("--list-presets", dest="listpresets", help="list all available presets", action="store_true", default=arg_defaults["listpresets"]) parser.add_argument("-d", "--daemon", dest="daemon", help="go into daemon mode (implies --loop)", action="store_true", default=arg_defaults["daemon"]) parser.add_argument("--debug", dest="debug", help="increase logging level to DEBUG (DEPRECATED, please use -vvv)", action="store_true", default=arg_defaults["debug"]) parser.add_argument("--loop", dest="loop", help="loop forever (default is to update once)", action="store_true", default=arg_defaults["loop"]) parser.add_argument("--sleeptime", dest="sleeptime", help="how long to sleep between checks in seconds", default=arg_defaults["sleeptime"]) parser.add_argument("--version", dest="version", help="show version and exit", action="store_true", default=arg_defaults["version"]) parser.add_argument("-v", "--verbose", dest="verbose_count", action="count", default=arg_defaults["verbose_count"], help="increases log verbosity for each occurrence") return parser, arg_defaults
Instantiate an `argparse.ArgumentParser`. Adds all basic cli options including default values.
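A minimal usage sketch; the config filename and flag combination below are made-up values:

parser, defaults = create_argparser()
args = parser.parse_args(["-c", "client.conf", "--loop", "-vv"])
# args.config == "client.conf", args.loop is True, args.verbose_count == 2,
# everything else keeps the values from arg_defaults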
def get_block_info(self): """ Get the retrieved block information. Return [(height, [txs])] on success, ordered on height Raise if not finished downloading """ if not self.finished: raise Exception("Not finished downloading") ret = [] for (block_hash, block_data) in self.block_info.items(): ret.append( (block_data['height'], block_data['txns']) ) return ret
Get the retrieved block information. Return [(height, [txs])] on success, ordered on height Raise if not finished downloading
def request_forward_agent(self, handler): """ Request for a forward SSH Agent on this channel. This is only valid for an ssh-agent from OpenSSH !!! :param function handler: a required handler to use for incoming SSH Agent connections :return: True if we are ok, else False (at that time we always return ok) :raises: SSHException in case of channel problem. """ if self.closed or self.eof_received or self.eof_sent or not self.active: raise SSHException('Channel is not open') m = Message() m.add_byte(cMSG_CHANNEL_REQUEST) m.add_int(self.remote_chanid) m.add_string('[email protected]') m.add_boolean(False) self.transport._send_user_message(m) self.transport._set_forward_agent_handler(handler) return True
Request for a forward SSH Agent on this channel. This is only valid for an ssh-agent from OpenSSH !!! :param function handler: a required handler to use for incoming SSH Agent connections :return: True if we are ok, else False (at that time we always return ok) :raises: SSHException in case of channel problem.
def guest_delete_disks(self, userid, disk_vdev_list): """Delete disks from an existing guest vm. :param userid: (str) the userid of the vm to be deleted :param disk_vdev_list: (list) the vdev list of disks to be deleted, for example: ['0101', '0102'] """ action = "delete disks '%s' from guest '%s'" % (str(disk_vdev_list), userid) with zvmutils.log_and_reraise_sdkbase_error(action): self._vmops.delete_disks(userid, disk_vdev_list)
Delete disks from an existing guest vm. :param userid: (str) the userid of the vm to be deleted :param disk_vdev_list: (list) the vdev list of disks to be deleted, for example: ['0101', '0102']
def _extensions(self, line): """ Extract the extension from the given line. :param line: The line from the official public suffix repository. :type line: str """ # We strip the parsed line. line = line.strip() if not line.startswith("//") and "." in line: # * The parsed line is not a commented line. # and # * There is a dot in the parsed line. line = line.encode("idna").decode("utf-8") if line.startswith("*."): # The parsed line starts with `*.`. # We remove the first two characters. line = line[2:] # We split on the dots and get the last element. # Explanation: The idea behind this action is to # always get the extension. extension = line.split(".")[-1] if extension in self.public_suffix_db: # The extension is already in our database. # We update the content of the first-level TLD with # the content of the suffix. # In between, we format so as to ensure that there are no # duplicates in the database index content. self.public_suffix_db[extension] = List( self.public_suffix_db[extension] + [line] ).format() else: # The extension is not already in our database. # We append the currently formatted extension and the line content. self.public_suffix_db.update({extension: [line]})
Extract the extension from the given line. :param line: The line from the official public suffix repository. :type line: str
def _set_request_referer_metric(self, request): """ Add metric 'request_referer' for http referer. """ if 'HTTP_REFERER' in request.META and request.META['HTTP_REFERER']: monitoring.set_custom_metric('request_referer', request.META['HTTP_REFERER'])
Add metric 'request_referer' for http referer.
def get_effect_class(self, effect_name: str, package_name: str = None) -> Type['Effect']: """ Get an effect class by the class name Args: effect_name (str): Name of the effect class Keyword Args: package_name (str): The package the effect belongs to. This is optional and only needed when effect class names are not unique. Returns: :py:class:`Effect` class """ return self._project.get_effect_class(effect_name, package_name=package_name)
Get an effect class by the class name Args: effect_name (str): Name of the effect class Keyword Args: package_name (str): The package the effect belongs to. This is optional and only needed when effect class names are not unique. Returns: :py:class:`Effect` class
def open_required(func): """ Use this decorator to raise an error if the project is not opened """ def wrapper(self, *args, **kwargs): if self._status == "closed": raise aiohttp.web.HTTPForbidden(text="The project is not opened") return func(self, *args, **kwargs) return wrapper
Use this decorator to raise an error if the project is not opened
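A sketch of how the decorator might be applied; the class and method below are hypothetical and only assume a `_status` attribute, as the decorator itself does:

class Project:
    def __init__(self):
        self._status = "closed"   # flipped to "opened" elsewhere in the real class

    @open_required
    def add_node(self, node):
        # only reachable while self._status != "closed";
        # otherwise the decorator raises aiohttp.web.HTTPForbidden
        return node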
def convert_upsample_bilinear(params, w_name, scope_name, inputs, layers, weights, names): """ Convert upsample_bilinear2d layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers """ print('Converting upsample...') if names == 'short': tf_name = 'UPSL' + random_string(4) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) output_size = params['output_size'] align_corners = params['align_corners'] > 0 def target_layer(x, size=output_size, align_corners=align_corners): import tensorflow as tf x = tf.transpose(x, [0, 2, 3, 1]) x = tf.image.resize_images(x, size, align_corners=align_corners) x = tf.transpose(x, [0, 3, 1, 2]) return x lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert upsample_bilinear2d layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def hoverEnterEvent(self, event): """ Processes when this hotspot is entered. :param event | <QHoverEvent> :return <bool> | processed """ self._hovered = True if self.toolTip(): QToolTip.showText(QCursor.pos(), self.toolTip()) return True return self.style() == XNodeHotspot.Style.Icon
Processes when this hotspot is entered. :param event | <QHoverEvent> :return <bool> | processed
def derivatives_factory(cls, coef, degree, knots, ext, **kwargs): """ Given some coefficients, return the derivative of a B-spline. """ return cls._basis_spline_factory(coef, degree, knots, 1, ext)
Given some coefficients, return the derivative of a B-spline.
def voltage_delta_vde(v_nom, s_max, r, x, cos_phi): """ Estimate voltage drop/increase The VDE [#]_ proposes a simplified method to estimate voltage drop or increase in radial grids. Parameters ---------- v_nom : int Nominal voltage s_max : float Apparent power r : float Short-circuit resistance from node to HV/MV substation (in ohm) x : float Short-circuit reactance from node to HV/MV substation (in ohm). Must be a signed number indicating (+) inductive reactive consumer (load case) or (-) inductive reactive supplier (generation case) cos_phi : float Returns ------- :any:`float` Voltage drop or increase References ---------- .. [#] VDE Anwenderrichtlinie: Erzeugungsanlagen am Niederspannungsnetz – Technische Mindestanforderungen für Anschluss und Parallelbetrieb von Erzeugungsanlagen am Niederspannungsnetz, 2011 """ delta_v = (s_max * ( r * cos_phi + x * math.sin(math.acos(cos_phi)))) / v_nom ** 2 return delta_v
Estimate voltage drop/increase The VDE [#]_ proposes a simplified method to estimate voltage drop or increase in radial grids. Parameters ---------- v_nom : int Nominal voltage s_max : float Apparent power r : float Short-circuit resistance from node to HV/MV substation (in ohm) x : float Short-circuit reactance from node to HV/MV substation (in ohm). Must be a signed number indicating (+) inductive reactive consumer (load case) or (-) inductive reactive supplier (generation case) cos_phi : float Returns ------- :any:`float` Voltage drop or increase References ---------- .. [#] VDE Anwenderrichtlinie: Erzeugungsanlagen am Niederspannungsnetz – Technische Mindestanforderungen für Anschluss und Parallelbetrieb von Erzeugungsanlagen am Niederspannungsnetz, 2011
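A worked example with made-up network values (400 V nominal, 30 kVA apparent power, r = 0.2 ohm, x = 0.1 ohm, cos_phi = 0.95):

delta_v = voltage_delta_vde(v_nom=400, s_max=30e3, r=0.2, x=0.1, cos_phi=0.95)
# = (30e3 * (0.2 * 0.95 + 0.1 * sin(acos(0.95)))) / 400**2
# ≈ 0.041, i.e. roughly a 4 % voltage drop relative to nominal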
def open(filename, mode="r", iline = 189, xline = 193, strict = True, ignore_geometry = False, endian = 'big'): """Open a segy file. Opens a segy file and tries to figure out its sorting, inline numbers, crossline numbers, and offsets, and enables reading and writing to this file in a simple manner. For reading, the access mode `r` is preferred. All write operations will raise an exception. For writing, the mode `r+` is preferred (as `rw` would truncate the file). Any mode with `w` will raise an error. The modes used are standard C file modes; please refer to that documentation for a complete reference. Open should be used together with python's ``with`` statement. Please refer to the examples. When the ``with`` statement is used the file will automatically be closed when the routine completes or an exception is raised. By default, segyio tries to open in ``strict`` mode. This means the file will be assumed to represent a geometry with consistent inline, crosslines and offsets. If strict is False, segyio will still try to establish a geometry, but it won't abort if it fails. When in non-strict mode is opened, geometry-dependent modes such as iline will raise an error. If ``ignore_geometry=True``, segyio will *not* try to build iline/xline or other geometry related structures, which leads to faster opens. This is essentially the same as using ``strict=False`` on a file that has no geometry. Parameters ---------- filename : str Path to file to open mode : {'r', 'r+'} File access mode, read-only ('r', default) or read-write ('r+') iline : int or segyio.TraceField Inline number field in the trace headers. Defaults to 189 as per the SEG-Y rev1 specification xline : int or segyio.TraceField Crossline number field in the trace headers. Defaults to 193 as per the SEG-Y rev1 specification strict : bool, optional Abort if a geometry cannot be inferred. Defaults to True. ignore_geometry : bool, optional Opt out on building geometry information, useful for e.g. shot organised files. Defaults to False. endian : {'big', 'msb', 'little', 'lsb'} File endianness, big/msb (default) or little/lsb Returns ------- file : segyio.SegyFile An open segyio file handle Raises ------ ValueError If the mode string contains 'w', as it would truncate the file Notes ----- .. versionadded:: 1.1 .. versionchanged:: 1.8 endian argument When a file is opened non-strict, only raw traces access is allowed, and using modes such as ``iline`` raise an error. Examples -------- Open a file in read-only mode: >>> with segyio.open(path, "r") as f: ... print(f.ilines) ... [1, 2, 3, 4, 5] Open a file in read-write mode: >>> with segyio.open(path, "r+") as f: ... f.trace = np.arange(100) Open two files at once: >>> with segyio.open(path) as src, segyio.open(path, "r+") as dst: ... dst.trace = src.trace # copy all traces from src to dst Open a file little-endian file: >>> with segyio.open(path, endian = 'little') as f: ... f.trace[0] """ if 'w' in mode: problem = 'w in mode would truncate the file' solution = 'use r+ to open in read-write' raise ValueError(', '.join((problem, solution))) endians = { 'little': 256, # (1 << 8) 'lsb': 256, 'big': 0, 'msb': 0, } if endian not in endians: problem = 'unknown endianness {}, expected one of: ' opts = ' '.join(endians.keys()) raise ValueError(problem.format(endian) + opts) from . 
import _segyio fd = _segyio.segyiofd(str(filename), mode, endians[endian]) fd.segyopen() metrics = fd.metrics() f = segyio.SegyFile(fd, filename = str(filename), mode = mode, iline = iline, xline = xline, endian = endian, ) try: dt = segyio.tools.dt(f, fallback_dt = 4000.0) / 1000.0 t0 = f.header[0][segyio.TraceField.DelayRecordingTime] samples = metrics['samplecount'] f._samples = (numpy.arange(samples) * dt) + t0 except: f.close() raise if ignore_geometry: return f return infer_geometry(f, metrics, iline, xline, strict)
Open a segy file. Opens a segy file and tries to figure out its sorting, inline numbers, crossline numbers, and offsets, and enables reading and writing to this file in a simple manner. For reading, the access mode `r` is preferred. All write operations will raise an exception. For writing, the mode `r+` is preferred (as `rw` would truncate the file). Any mode with `w` will raise an error. The modes used are standard C file modes; please refer to that documentation for a complete reference. Open should be used together with python's ``with`` statement. Please refer to the examples. When the ``with`` statement is used the file will automatically be closed when the routine completes or an exception is raised. By default, segyio tries to open in ``strict`` mode. This means the file will be assumed to represent a geometry with consistent inline, crosslines and offsets. If strict is False, segyio will still try to establish a geometry, but it won't abort if it fails. When in non-strict mode is opened, geometry-dependent modes such as iline will raise an error. If ``ignore_geometry=True``, segyio will *not* try to build iline/xline or other geometry related structures, which leads to faster opens. This is essentially the same as using ``strict=False`` on a file that has no geometry. Parameters ---------- filename : str Path to file to open mode : {'r', 'r+'} File access mode, read-only ('r', default) or read-write ('r+') iline : int or segyio.TraceField Inline number field in the trace headers. Defaults to 189 as per the SEG-Y rev1 specification xline : int or segyio.TraceField Crossline number field in the trace headers. Defaults to 193 as per the SEG-Y rev1 specification strict : bool, optional Abort if a geometry cannot be inferred. Defaults to True. ignore_geometry : bool, optional Opt out on building geometry information, useful for e.g. shot organised files. Defaults to False. endian : {'big', 'msb', 'little', 'lsb'} File endianness, big/msb (default) or little/lsb Returns ------- file : segyio.SegyFile An open segyio file handle Raises ------ ValueError If the mode string contains 'w', as it would truncate the file Notes ----- .. versionadded:: 1.1 .. versionchanged:: 1.8 endian argument When a file is opened non-strict, only raw traces access is allowed, and using modes such as ``iline`` raise an error. Examples -------- Open a file in read-only mode: >>> with segyio.open(path, "r") as f: ... print(f.ilines) ... [1, 2, 3, 4, 5] Open a file in read-write mode: >>> with segyio.open(path, "r+") as f: ... f.trace = np.arange(100) Open two files at once: >>> with segyio.open(path) as src, segyio.open(path, "r+") as dst: ... dst.trace = src.trace # copy all traces from src to dst Open a file little-endian file: >>> with segyio.open(path, endian = 'little') as f: ... f.trace[0]
def _unmount_devicemapper(self, cid): """ Devicemapper unmount backend. """ mountpoint = self.mountpoint Mount.unmount_path(mountpoint) cinfo = self.client.inspect_container(cid) dev_name = cinfo['GraphDriver']['Data']['DeviceName'] Mount.remove_thin_device(dev_name) self._cleanup_container(cinfo)
Devicemapper unmount backend.
def text_ui(self): """ Start Text UI main loop """ self.logger.info("Starting command line interface") self.help() try: self.ipython_ui() except ImportError: self.fallback_ui() self.system.cleanup()
Start Text UI main loop
def _get_name(self, name): """ Find a team's name and abbreviation. Given the team's HTML name tag, determine their name, abbreviation, and whether or not they compete in Division-I. Parameters ---------- name : PyQuery object A PyQuery object of a team's HTML name tag in the boxscore. Returns ------- tuple Returns a tuple containing the name, abbreviation, and whether or not the team participates in Division-I. Tuple is in the following order: Team Name, Team Abbreviation, boolean which evaluates to True if the team does not participate in Division-I. """ team_name = name.text() abbr = self._parse_abbreviation(name) non_di = False if not abbr: abbr = team_name non_di = True return team_name, abbr, non_di
Find a team's name and abbreviation. Given the team's HTML name tag, determine their name, abbreviation, and whether or not they compete in Division-I. Parameters ---------- name : PyQuery object A PyQuery object of a team's HTML name tag in the boxscore. Returns ------- tuple Returns a tuple containing the name, abbreviation, and whether or not the team participates in Division-I. Tuple is in the following order: Team Name, Team Abbreviation, boolean which evaluates to True if the team does not participate in Division-I.
def save_code(self, title, addr, _bytes): """ Saves the given bytes as code. If bytes are strings, its chars will be converted to bytes """ self.standard_bytes_header(title, addr, len(_bytes)) _bytes = [self.BLOCK_TYPE_DATA] + [(int(x) & 0xFF) for x in _bytes] # & 0xFF truncates to bytes self.standard_block(_bytes)
Saves the given bytes as code. If bytes are strings, its chars will be converted to bytes
def nvmlDeviceGetCurrPcieLinkWidth(handle): r""" /** * Retrieves the current PCIe link width * * For Fermi &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param currLinkWidth Reference in which to return the current PCIe link generation * * @return * - \ref NVML_SUCCESS if \a currLinkWidth has been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkWidth is null * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkWidth """ fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrPcieLinkWidth") width = c_uint() ret = fn(handle, byref(width)) _nvmlCheckReturn(ret) return bytes_to_str(width.value)
r""" /** * Retrieves the current PCIe link width * * For Fermi &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param currLinkWidth Reference in which to return the current PCIe link generation * * @return * - \ref NVML_SUCCESS if \a currLinkWidth has been populated * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkWidth is null * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkWidth
def image(self, well_row, well_column, field_row, field_column): """Get path of specified image. Parameters ---------- well_row : int Starts at 0. Same as --U in files. well_column : int Starts at 0. Same as --V in files. field_row : int Starts at 0. Same as --Y in files. field_column : int Starts at 0. Same as --X in files. Returns ------- string Path to image or empty string if image is not found. """ return next((i for i in self.images if attribute(i, 'u') == well_column and attribute(i, 'v') == well_row and attribute(i, 'x') == field_column and attribute(i, 'y') == field_row), '')
Get path of specified image. Parameters ---------- well_row : int Starts at 0. Same as --U in files. well_column : int Starts at 0. Same as --V in files. field_row : int Starts at 0. Same as --Y in files. field_column : int Starts at 0. Same as --X in files. Returns ------- string Path to image or empty string if image is not found.
def modified_Wilson_Vc(zs, Vcs, Aijs): r'''Calculates critical volume of a mixture according to mixing rules in [1]_ with parameters. Equation .. math:: V_{cm} = \sum_i x_i V_{ci} + C\sum_i x_i \ln \left(x_i + \sum_j x_j A_{ij}\right)V_{ref} For a binary mxiture, this simplifies to: .. math:: V_{cm} = x_1 V_{c1} + x_2 V_{c2} + C[x_1 \ln(x_1 + x_2A_{12}) + x_2\ln(x_2 + x_1 A_{21})] Parameters ---------- zs : float Mole fractions of all components Vcs : float Critical volumes of all components, [m^3/mol] Aijs : matrix Interaction parameters, [cm^3/mol] Returns ------- Vcm : float Critical volume of the mixture, [m^3/mol] Notes ----- The equation and original article has been reviewed. All parameters, even if zero, must be given to this function. C = -2500 All parameters, even if zero, must be given to this function. nu parameters are in cm^3/mol, but are converted to m^3/mol inside the function Examples -------- 1-butanol/benzene 0.4271/0.5729 mixture, Vcm = 268.096 mL/mol. >>> modified_Wilson_Vc([0.4271, 0.5729], [0.000273, 0.000256], ... [[0, 0.6671250], [1.3939900, 0]]) 0.0002664335032706881 References ---------- .. [1] Teja, Amyn S., Kul B. Garg, and Richard L. Smith. "A Method for the Calculation of Gas-Liquid Critical Temperatures and Pressures of Multicomponent Mixtures." Industrial & Engineering Chemistry Process Design and Development 22, no. 4 (1983): 672-76. .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati. "Prediction of True Critical Temperature of Multi-Component Mixtures: Extending Fast Estimation Methods." Fluid Phase Equilibria 392 (April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001. ''' if not none_and_length_check([zs, Vcs]): # check same-length inputs raise Exception('Function inputs are incorrect format') C = -2500 Vcm = sum(zs[i]*Vcs[i] for i in range(len(zs))) for i in range(len(zs)): Vcm += C*zs[i]*log(zs[i] + sum(zs[j]*Aijs[i][j] for j in range(len(zs))))/1E6 return Vcm
r'''Calculates critical volume of a mixture according to mixing rules in [1]_ with parameters. Equation .. math:: V_{cm} = \sum_i x_i V_{ci} + C\sum_i x_i \ln \left(x_i + \sum_j x_j A_{ij}\right)V_{ref} For a binary mxiture, this simplifies to: .. math:: V_{cm} = x_1 V_{c1} + x_2 V_{c2} + C[x_1 \ln(x_1 + x_2A_{12}) + x_2\ln(x_2 + x_1 A_{21})] Parameters ---------- zs : float Mole fractions of all components Vcs : float Critical volumes of all components, [m^3/mol] Aijs : matrix Interaction parameters, [cm^3/mol] Returns ------- Vcm : float Critical volume of the mixture, [m^3/mol] Notes ----- The equation and original article has been reviewed. All parameters, even if zero, must be given to this function. C = -2500 All parameters, even if zero, must be given to this function. nu parameters are in cm^3/mol, but are converted to m^3/mol inside the function Examples -------- 1-butanol/benzene 0.4271/0.5729 mixture, Vcm = 268.096 mL/mol. >>> modified_Wilson_Vc([0.4271, 0.5729], [0.000273, 0.000256], ... [[0, 0.6671250], [1.3939900, 0]]) 0.0002664335032706881 References ---------- .. [1] Teja, Amyn S., Kul B. Garg, and Richard L. Smith. "A Method for the Calculation of Gas-Liquid Critical Temperatures and Pressures of Multicomponent Mixtures." Industrial & Engineering Chemistry Process Design and Development 22, no. 4 (1983): 672-76. .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati. "Prediction of True Critical Temperature of Multi-Component Mixtures: Extending Fast Estimation Methods." Fluid Phase Equilibria 392 (April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001.
def __startOpenThread(self): """start OpenThread stack Returns: True: successful to start OpenThread stack and thread interface up False: fail to start OpenThread stack """ print 'call startOpenThread' try: if self.hasActiveDatasetToCommit: if self.__sendCommand('dataset commit active')[0] != 'Done': raise Exception('failed to commit active dataset') else: self.hasActiveDatasetToCommit = False # restore whitelist/blacklist address filter mode if rejoin after reset if self.isPowerDown: if self._addressfilterMode == 'whitelist': if self.__setAddressfilterMode('whitelist'): for addr in self._addressfilterSet: self.addAllowMAC(addr) elif self._addressfilterMode == 'blacklist': if self.__setAddressfilterMode('blacklist'): for addr in self._addressfilterSet: self.addBlockedMAC(addr) if self.deviceRole in [Thread_Device_Role.Leader, Thread_Device_Role.Router, Thread_Device_Role.REED]: self.__setRouterSelectionJitter(1) if self.__sendCommand('ifconfig up')[0] == 'Done': if self.__sendCommand('thread start')[0] == 'Done': self.isPowerDown = False return True else: return False except Exception, e: ModuleHelper.WriteIntoDebugLogger("startOpenThread() Error: " + str(e))
start OpenThread stack Returns: True: successful to start OpenThread stack and thread interface up False: fail to start OpenThread stack
def _close_stdout_stderr_streams(self): """Close output-capturing stuff. This also flushes anything left in the buffers. """ # we don't have tee_file's in headless mode if self._stdout_tee.tee_file is not None: self._stdout_tee.tee_file.close() if self._stderr_tee.tee_file is not None: self._stderr_tee.tee_file.close() # TODO(adrian): we should close these even in headless mode # but in python 2 the read thread doesn't stop on its own # for some reason self._stdout_tee.close_join() self._stderr_tee.close_join() if self._cloud: # not set in dry run mode self._stdout_stream.close() self._stderr_stream.close() self._output_log.f.close() self._output_log = None
Close output-capturing stuff. This also flushes anything left in the buffers.
def expected_log_joint_probability(self): """ Compute E_{q(z) q(x)} [log p(z) + log p(x | z) + log p(y | x, z)] """ # E_{q(z)}[log p(z)] from pyslds.util import expected_hmm_logprob elp = expected_hmm_logprob( self.pi_0, self.trans_matrix, (self.expected_states, self.expected_transcounts, self._normalizer)) # E_{q(x)}[log p(y, x | z)] is given by aBl # To get E_{q(x)}[ aBl ] we multiply and sum elp += np.sum(self.expected_states * self.vbem_aBl) return elp
Compute E_{q(z) q(x)} [log p(z) + log p(x | z) + log p(y | x, z)]
def _run_configure_script(self, script): """Run the script to install the Juju agent on the target machine. :param str script: The script returned by the ProvisioningScript API :raises: :class:`paramiko.ssh_exception.AuthenticationException` if the upload fails """ _, tmpFile = tempfile.mkstemp() with open(tmpFile, 'w') as f: f.write(script) try: # get ssh client ssh = self._get_ssh_client( self.host, "ubuntu", self.private_key_path, ) # copy the local copy of the script to the remote machine sftp = paramiko.SFTPClient.from_transport(ssh.get_transport()) sftp.put( tmpFile, tmpFile, ) # run the provisioning script stdout, stderr = self._run_command( ssh, "sudo /bin/bash {}".format(tmpFile), ) except paramiko.ssh_exception.AuthenticationException as e: raise e finally: os.remove(tmpFile) ssh.close()
Run the script to install the Juju agent on the target machine. :param str script: The script returned by the ProvisioningScript API :raises: :class:`paramiko.ssh_exception.AuthenticationException` if the upload fails
def run(suite, stream, args, testing=False): """ Run the given test case or test suite with the specified arguments. Any args.stream passed in will be wrapped in a GreenStream """ if not issubclass(GreenStream, type(stream)): stream = GreenStream(stream, disable_windows=args.disable_windows, disable_unidecode=args.disable_unidecode) result = GreenTestResult(args, stream) # Note: Catching SIGINT isn't supported by Python on windows (python # "WONTFIX" issue 18040) installHandler() registerResult(result) with warnings.catch_warnings(): if args.warnings: # pragma: no cover # if args.warnings is set, use it to filter all the warnings warnings.simplefilter(args.warnings) # if the filter is 'default' or 'always', special-case the # warnings from the deprecated unittest methods to show them # no more than once per module, because they can be fairly # noisy. The -Wd and -Wa flags can be used to bypass this # only when args.warnings is None. if args.warnings in ['default', 'always']: warnings.filterwarnings('module', category=DeprecationWarning, message='Please use assert\w+ instead.') result.startTestRun() pool = LoggingDaemonlessPool(processes=args.processes or None, initializer=InitializerOrFinalizer(args.initializer), finalizer=InitializerOrFinalizer(args.finalizer)) manager = multiprocessing.Manager() targets = [(target, manager.Queue()) for target in toParallelTargets(suite, args.targets)] if targets: for index, (target, queue) in enumerate(targets): if args.run_coverage: coverage_number = index + 1 else: coverage_number = None debug("Sending {} to runner {}".format(target, poolRunner)) pool.apply_async( poolRunner, (target, queue, coverage_number, args.omit_patterns, args.cov_config_file)) pool.close() for target, queue in targets: abort = False while True: msg = queue.get() # Sentinel value, we're done if not msg: break else: # Result guaranteed after this message, we're # currently waiting on this test, so print out # the white 'processing...' version of the output result.startTest(msg) proto_test_result = queue.get() result.addProtoTestResult(proto_test_result) if result.shouldStop: abort = True break if abort: break pool.close() pool.join() result.stopTestRun() removeResult(result) return result
Run the given test case or test suite with the specified arguments. Any args.stream passed in will be wrapped in a GreenStream
def data(self, data, part=False, dataset=''): """ Parameters ---------- data : `PIL.Image` Image to parse. part : `bool`, optional True if data is partial (default: `False`). dataset : `str`, optional Dataset key prefix (default: ''). """ #if self.parser is None: # raise ValueError('no parser') imgs = self.imgtype.convert(data) for channel, data in zip(self.imgtype.channels, imgs): key = dataset + channel data = self.scanner(data, part) if isinstance(self.parser, LevelParser): self.storage.add_links(self.parser(data, part, key)) else: for level, level_data in enumerate(data): level_key = key + level_dataset(level) level_data = self.parser(level_data, part, level_key) self.storage.add_links(level_data)
Parameters ---------- data : `PIL.Image` Image to parse. part : `bool`, optional True if data is partial (default: `False`). dataset : `str`, optional Dataset key prefix (default: '').
def get_object(self, subject=None, predicate=None): """Eliminates some of the glue code for searching RDF. Pass in a URIRef object (generated by the `uri` function above or a BNode object (returned by this function) for either of the parameters.""" # Get the result of the search results = self.rdf.objects(subject, predicate) as_list = list(results) # Don't raise exceptions, value test! if not as_list: return None return as_list[0]
Eliminates some of the glue code for searching RDF. Pass in a URIRef object (generated by the `uri` function above or a BNode object (returned by this function) for either of the parameters.
def gcs_get_url(url, altexts=None, client=None, service_account_json=None, raiseonfail=False): """This gets a single file from a Google Cloud Storage bucket. This uses the gs:// URL instead of a bucket name and key. Parameters ---------- url : str GCS URL to download. This should begin with 'gs://'. altexts : None or list of str If not None, this is a list of alternate extensions to try for the file other than the one provided in `filename`. For example, to get anything that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to strip the .gz. client : google.cloud.storage.Client instance The instance of the Client to use to perform the download operation. If this is None, a new Client will be used. If this is None and `service_account_json` points to a downloaded JSON file with GCS credentials, a new Client with the provided credentials will be used. If this is not None, the existing Client instance will be used. service_account_json : str Path to a downloaded GCS credentials JSON file. raiseonfail : bool If True, will re-raise whatever Exception caused the operation to fail and break out immediately. Returns ------- str Path to the downloaded filename or None if the download was unsuccessful. """ bucket_item = url.replace('gs://','') bucket_item = bucket_item.split('/') bucket = bucket_item[0] filekey = '/'.join(bucket_item[1:]) return gcs_get_file(bucket, filekey, bucket_item[-1], altexts=altexts, client=client, service_account_json=service_account_json, raiseonfail=raiseonfail)
This gets a single file from a Google Cloud Storage bucket. This uses the gs:// URL instead of a bucket name and key. Parameters ---------- url : str GCS URL to download. This should begin with 'gs://'. altexts : None or list of str If not None, this is a list of alternate extensions to try for the file other than the one provided in `filename`. For example, to get anything that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to strip the .gz. client : google.cloud.storage.Client instance The instance of the Client to use to perform the download operation. If this is None, a new Client will be used. If this is None and `service_account_json` points to a downloaded JSON file with GCS credentials, a new Client with the provided credentials will be used. If this is not None, the existing Client instance will be used. service_account_json : str Path to a downloaded GCS credentials JSON file. raiseonfail : bool If True, will re-raise whatever Exception caused the operation to fail and break out immediately. Returns ------- str Path to the downloaded filename or None if the download was unsuccessful.
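A hypothetical call; the bucket, object path, and credentials file below are placeholders:

local_path = gcs_get_url(
    'gs://my-bucket/lightcurves/object-1234.sqlite.gz',
    service_account_json='/path/to/gcs-credentials.json',
)
# returns the downloaded file's local path, or None if the download failed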
def prime(self): """ Prime the stored values based on default values found in property definitions. @return: self @rtype: L{Properties} """ for d in self.definitions.values(): self.defined[d.name] = d.default return self
Prime the stored values based on default values found in property definitions. @return: self @rtype: L{Properties}
def _verify_inputs(inputs, channel_index, data_format): """Verifies `inputs` is semantically correct. Args: inputs: An input tensor provided by the user. channel_index: The index of the channel dimension. data_format: The format of the data in `inputs`. Raises: base.IncompatibleShapeError: If the shape of `inputs` doesn't match `data_format`. base.UnderspecifiedError: If the channel dimension of `inputs` isn't defined. TypeError: If input Tensor dtype is not compatible with either `tf.float16`, `tf.bfloat16` or `tf.float32`. """ # Check shape. input_shape = tuple(inputs.get_shape().as_list()) if len(input_shape) != len(data_format): raise base.IncompatibleShapeError(( "Input Tensor must have rank {} corresponding to " "data_format {}, but instead was {} of rank {}.").format( len(data_format), data_format, input_shape, len(input_shape))) # Check type. if not (tf.float16.is_compatible_with(inputs.dtype) or tf.bfloat16.is_compatible_with(inputs.dtype) or tf.float32.is_compatible_with(inputs.dtype)): raise TypeError( "Input must have dtype tf.float16, tf.bfloat16 or tf.float32, " "but dtype was {}".format(inputs.dtype)) # Check channel dim. input_channels = input_shape[channel_index] if input_channels is None: raise base.UnderspecifiedError( "Number of input channels must be known at module build time")
Verifies `inputs` is semantically correct. Args: inputs: An input tensor provided by the user. channel_index: The index of the channel dimension. data_format: The format of the data in `inputs`. Raises: base.IncompatibleShapeError: If the shape of `inputs` doesn't match `data_format`. base.UnderspecifiedError: If the channel dimension of `inputs` isn't defined. TypeError: If input Tensor dtype is not compatible with either `tf.float16`, `tf.bfloat16` or `tf.float32`.
async def get_protocol_version(self): """ This method retrieves the Firmata protocol version. JSON command: {"method": "get_protocol_version", "params": ["null"]} :returns: {"method": "protocol_version_reply", "params": [PROTOCOL_VERSION]} """ value = await self.core.get_protocol_version() if value: reply = json.dumps({"method": "protocol_version_reply", "params": value}) else: reply = json.dumps({"method": "protocol_version_reply", "params": "Unknown"}) await self.websocket.send(reply)
This method retrieves the Firmata protocol version. JSON command: {"method": "get_protocol_version", "params": ["null"]} :returns: {"method": "protocol_version_reply", "params": [PROTOCOL_VERSION]}
def fixed_inputs(model, non_fixed_inputs, fix_routine='median', as_list=True, X_all=False): """ Convenience function for returning back fixed_inputs where the other inputs are fixed using fix_routine :param model: model :type model: Model :param non_fixed_inputs: dimensions of non fixed inputs :type non_fixed_inputs: list :param fix_routine: fixing routine to use, 'mean', 'median', 'zero' :type fix_routine: string :param as_list: if true, will return a list of tuples with (dimension, fixed_val) otherwise it will create the corresponding X matrix :type as_list: boolean """ from ...inference.latent_function_inference.posterior import VariationalPosterior f_inputs = [] if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs(): X = model.X.mean.values.copy() elif isinstance(model.X, VariationalPosterior): X = model.X.values.copy() else: if X_all: X = model.X_all.copy() else: X = model.X.copy() for i in range(X.shape[1]): if i not in non_fixed_inputs: if fix_routine == 'mean': f_inputs.append( (i, np.mean(X[:,i])) ) elif fix_routine == 'median': f_inputs.append( (i, np.median(X[:,i])) ) else: # set to zero f_inputs.append( (i, 0) ) if not as_list: X[:,i] = f_inputs[-1][1] if as_list: return f_inputs else: return X
Convenience function for returning back fixed_inputs where the other inputs are fixed using fix_routine :param model: model :type model: Model :param non_fixed_inputs: dimensions of non fixed inputs :type non_fixed_inputs: list :param fix_routine: fixing routine to use, 'mean', 'median', 'zero' :type fix_routine: string :param as_list: if true, will return a list of tuples with (dimension, fixed_val) otherwise it will create the corresponding X matrix :type as_list: boolean
def refreshButtons(self): """ Refreshes the buttons for building this sql query. """ last = self._last first = self._first joiner = self._containerWidget.currentJoiner() # the first button set can contain the toggle options if first: self.uiJoinSBTN.setActionTexts(['AND', 'OR']) elif joiner == QueryCompound.Op.And: self.uiJoinSBTN.setActionTexts(['AND']) else: self.uiJoinSBTN.setActionTexts(['OR']) # the last option should not show an active action if last: self.uiJoinSBTN.setCurrentAction(None) # otherwise, highlight the proper action else: act = self.uiJoinSBTN.findAction(QueryCompound.Op[joiner].upper()) self.uiJoinSBTN.setCurrentAction(act) enable = QueryCompound.typecheck(self._query) or self.isChecked() self.uiEnterBTN.setEnabled(enable)
Refreshes the buttons for building this sql query.
def display(self): """ Get screen width and height """ w, h = self.session.window_size() return Display(w*self.scale, h*self.scale)
Get screen width and height
def expQt(self, t):
    '''
    Parameters
    ----------

     t : float
        Time to propagate

    Returns
    --------

     expQt : numpy.array
        Matrix exponential exp(Qt)
    '''
    eLambdaT = np.diag(self._exp_lt(t)) # vector length = a

    Qs = self.v.dot(eLambdaT.dot(self.v_inv))   # This is P(nuc1 | given nuc_2)

    return np.maximum(0,Qs)
Parameters
----------

 t : float
    Time to propagate

Returns
--------

 expQt : numpy.array
    Matrix exponential exp(Qt)
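The propagation above relies on a precomputed eigendecomposition of the rate matrix Q. A self-contained numpy sketch of the same trick, using a made-up 2x2 generator matrix instead of the class attributes `self.v` / `self.v_inv`:

import numpy as np

Q = np.array([[-1.0, 1.0],
              [0.5, -0.5]])   # toy generator matrix; rows sum to zero
t = 0.3

lam, V = np.linalg.eig(Q)                      # eigenvalues / right eigenvectors
P = V.dot(np.diag(np.exp(lam * t))).dot(np.linalg.inv(V))
P = np.maximum(0.0, P.real)                    # clip tiny negative round-off, as above
print(P)                                       # each row sums to ~1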
def codigo_ibge_uf(sigla): """Retorna o código do IBGE para a UF informada.""" idx = [s for s, i, n, r in UNIDADES_FEDERACAO].index(sigla) return UNIDADES_FEDERACAO[idx][_UF_CODIGO_IBGE]
Retorna o código do IBGE para a UF informada.
def ColorLuminance(color): """Compute the brightness of an sRGB color using the formula from http://www.w3.org/TR/2000/WD-AERT-20000426#color-contrast. Args: color: a string of 6 hex digits in the format verified by IsValidHexColor(). Returns: A floating-point number between 0.0 (black) and 255.0 (white). """ r = int(color[0:2], 16) g = int(color[2:4], 16) b = int(color[4:6], 16) return (299*r + 587*g + 114*b) / 1000.0
Compute the brightness of an sRGB color using the formula from http://www.w3.org/TR/2000/WD-AERT-20000426#color-contrast. Args: color: a string of 6 hex digits in the format verified by IsValidHexColor(). Returns: A floating-point number between 0.0 (black) and 255.0 (white).
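Assuming the function is importable as defined above, a few spot checks of the 299/587/114 weighting it implements:

print(ColorLuminance('000000'))   # 0.0
print(ColorLuminance('ffffff'))   # 255.0
print(ColorLuminance('808080'))   # 128.0
print(ColorLuminance('ff0000'))   # 76.245 -> pure red is much darker than white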
def entity_to_protobuf(entity): """Converts an entity into a protobuf. :type entity: :class:`google.cloud.datastore.entity.Entity` :param entity: The entity to be turned into a protobuf. :rtype: :class:`.entity_pb2.Entity` :returns: The protobuf representing the entity. """ entity_pb = entity_pb2.Entity() if entity.key is not None: key_pb = entity.key.to_protobuf() entity_pb.key.CopyFrom(key_pb) for name, value in entity.items(): value_is_list = isinstance(value, list) value_pb = _new_value_pb(entity_pb, name) # Set the appropriate value. _set_protobuf_value(value_pb, value) # Add index information to protobuf. if name in entity.exclude_from_indexes: if not value_is_list: value_pb.exclude_from_indexes = True for sub_value in value_pb.array_value.values: sub_value.exclude_from_indexes = True # Add meaning information to protobuf. _set_pb_meaning_from_entity( entity, name, value, value_pb, is_list=value_is_list ) return entity_pb
Converts an entity into a protobuf. :type entity: :class:`google.cloud.datastore.entity.Entity` :param entity: The entity to be turned into a protobuf. :rtype: :class:`.entity_pb2.Entity` :returns: The protobuf representing the entity.
def connect(self, factory): """Attempts to connect using a given factory. This will find the requested factory and use it to build a protocol as if the AMP protocol's peer was making the connection. It will create a transport for the protocol and connect it immediately. It will then store the protocol under a unique identifier, and return that identifier. """ try: factory = self._factories[factory] except KeyError: raise NoSuchFactory() remote = self.getProtocol() addr = remote.transport.getPeer() proto = factory.buildProtocol(addr) if proto is None: raise ConnectionRefused() identifier = uuid4().hex transport = MultiplexedTransport(identifier, remote) proto.makeConnection(transport) self._protocols[identifier] = proto return {"connection": identifier}
Attempts to connect using a given factory. This will find the requested factory and use it to build a protocol as if the AMP protocol's peer was making the connection. It will create a transport for the protocol and connect it immediately. It will then store the protocol under a unique identifier, and return that identifier.
def error_redirect(self, errormsg='', errorlog=''): ''' Shortcut for redirecting Django view to LTI Consumer with errors ''' from django.shortcuts import redirect self.lti_errormsg = errormsg self.lti_errorlog = errorlog return redirect(self.build_return_url())
Shortcut for redirecting Django view to LTI Consumer with errors
def add_eval(self, agent, e, fr=None): """Add or change agent's evaluation of the artifact with given framing information. :param agent: Name of the agent which did the evaluation. :param float e: Evaluation for the artifact. :param object fr: Framing information for the evaluation. """ self._evals[agent.name] = e self._framings[agent.name] = fr
Add or change agent's evaluation of the artifact with given framing information. :param agent: Name of the agent which did the evaluation. :param float e: Evaluation for the artifact. :param object fr: Framing information for the evaluation.
def XYZ_to_galcenrect(X,Y,Z,Xsun=1.,Zsun=0.,_extra_rot=True): """ NAME: XYZ_to_galcenrect PURPOSE: transform XYZ coordinates (wrt Sun) to rectangular Galactocentric coordinates INPUT: X - X Y - Y Z - Z Xsun - cylindrical distance to the GC Zsun - Sun's height above the midplane _extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition OUTPUT: (Xg, Yg, Zg) HISTORY: 2010-09-24 - Written - Bovy (NYU) 2016-05-12 - Edited to properly take into account the Sun's vertical position; dropped Ysun keyword - Bovy (UofT) 2018-04-18 - Tweaked to be consistent with astropy's Galactocentric frame - Bovy (UofT) """ if _extra_rot: X,Y,Z= nu.dot(galcen_extra_rot,nu.array([X,Y,Z])) dgc= nu.sqrt(Xsun**2.+Zsun**2.) costheta, sintheta= Xsun/dgc, Zsun/dgc return nu.dot(nu.array([[costheta,0.,-sintheta], [0.,1.,0.], [sintheta,0.,costheta]]), nu.array([-X+dgc,Y,nu.sign(Xsun)*Z])).T
NAME: XYZ_to_galcenrect PURPOSE: transform XYZ coordinates (wrt Sun) to rectangular Galactocentric coordinates INPUT: X - X Y - Y Z - Z Xsun - cylindrical distance to the GC Zsun - Sun's height above the midplane _extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition OUTPUT: (Xg, Yg, Zg) HISTORY: 2010-09-24 - Written - Bovy (NYU) 2016-05-12 - Edited to properly take into account the Sun's vertical position; dropped Ysun keyword - Bovy (UofT) 2018-04-18 - Tweaked to be consistent with astropy's Galactocentric frame - Bovy (UofT)
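For intuition, a hand-worked numpy sketch of the same rotation with the extra astropy-alignment rotation omitted; the solar position and input point are made-up values, not recommended defaults:

import numpy as np

Xsun, Zsun = 1.0, 0.02                      # assumed solar position (same units as X, Y, Z)
dgc = np.sqrt(Xsun**2 + Zsun**2)
costheta, sintheta = Xsun / dgc, Zsun / dgc
R = np.array([[costheta, 0., -sintheta],
              [0.,       1.,        0.],
              [sintheta, 0.,  costheta]])

X, Y, Z = 0.5, 0.1, 0.05                    # heliocentric rectangular coordinates
Xg, Yg, Zg = R.dot([-X + dgc, Y, np.sign(Xsun) * Z])
print(Xg, Yg, Zg)                           # rectangular Galactocentric coordinates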
def value_splitter(self, reference, prop, value, mode):
    """
    Split a string into a list of items.

    Default behavior is to split on white spaces.

    Arguments:
        reference (string): Reference name used when raising possible error.
        prop (string): Property name used when raising possible error.
        value (string): Property value to split.
        mode (string): Splitter mode. Default should come from
            ``ManifestSerializer._DEFAULT_SPLITTER``. Available splitters are:

            * ``white-space``: Simply split a string on white spaces;
            * ``json-list``: Assume the string is a JSON list to parse;

    Returns:
        list:
    """
    items = []

    if mode == 'json-list':
        try:
            items = json.loads(value)
        except json.JSONDecodeError as e:
            msg = ("Reference '{ref}' raised JSON decoder error when "
                   "splitting values from '{prop}': {err}")
            raise SerializerError(msg.format(ref=reference, prop=prop, err=e))
    else:
        if len(value) > 0:
            items = value.split(" ")

    return items
Split a string into a list of items.

Default behavior is to split on white spaces.

Arguments:
    reference (string): Reference name used when raising possible error.
    prop (string): Property name used when raising possible error.
    value (string): Property value to split.
    mode (string): Splitter mode. Default should come from
        ``ManifestSerializer._DEFAULT_SPLITTER``. Available splitters are:

        * ``white-space``: Simply split a string on white spaces;
        * ``json-list``: Assume the string is a JSON list to parse;

Returns:
    list:
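A minimal standalone illustration of the two splitter modes; the function name and the dropped reference/property bookkeeping are simplifications, not the serializer's real code:

import json

def split_value(value, mode='white-space'):
    """Simplified splitter: no custom errors, no reference/prop handling."""
    if mode == 'json-list':
        return json.loads(value)
    return value.split(" ") if value else []

print(split_value("small medium large"))                 # ['small', 'medium', 'large']
print(split_value('["small", "medium"]', 'json-list'))   # ['small', 'medium']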
def t_string_NGRAPH(t): r"\\[ '.:][ '.:]" global __STRING P = {' ': 0, "'": 2, '.': 8, ':': 10} N = {' ': 0, "'": 1, '.': 4, ':': 5} __STRING += chr(128 + P[t.value[1]] + N[t.value[2]])
r"\\[ '.:][ '.:]
def writeImageToFile(self, filename, _format="PNG"):
    '''
    Write the View image to the specified filename in the specified format.

    @type filename: str
    @param filename: Absolute path and optional filename receiving the image. If this points to
                     a directory, then the filename is determined by this View unique ID and
                     format extension.
    @type _format: str
    @param _format: Image format (default format is PNG)
    '''

    filename = self.device.substituteDeviceTemplate(filename)
    if not os.path.isabs(filename):
        raise ValueError("writeImageToFile expects an absolute path (filename='%s')" % filename)
    if os.path.isdir(filename):
        filename = os.path.join(filename, self.variableNameFromId() + '.' + _format.lower())
    if DEBUG:
        print >> sys.stderr, "writeImageToFile: saving image to '%s' in %s format" % (filename, _format)
    #self.device.takeSnapshot().getSubImage(self.getPositionAndSize()).writeToFile(filename, _format)
    # crop:
    # im.crop(box) ⇒ image
    # Returns a copy of a rectangular region from the current image.
    # The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate.
    ((l, t), (r, b)) = self.getCoords()
    box = (l, t, r, b)
    if DEBUG:
        print >> sys.stderr, "writeImageToFile: cropping", box, " reconnect=", self.device.reconnect
    if self.uiAutomatorHelper:
        if DEBUG_UI_AUTOMATOR_HELPER:
            print >> sys.stderr, "Taking screenshot using UiAutomatorHelper"
        received = self.uiAutomatorHelper.takeScreenshot()
        stream = StringIO.StringIO(received)
        try:
            from PIL import Image
            image = Image.open(stream)
        except ImportError as ex:
            # FIXME: this method should be global
            self.pilNotInstalledWarning()
            sys.exit(1)
        except IOError as ex:
            print >> sys.stderr, ex
            print repr(stream)
            sys.exit(1)
    else:
        image = self.device.takeSnapshot(reconnect=self.device.reconnect)
    image.crop(box).save(filename, _format)
Write the View image to the specified filename in the specified format. @type filename: str @param filename: Absolute path and optional filename receiving the image. If this points to a directory, then the filename is determined by this View unique ID and format extension. @type _format: str @param _format: Image format (default format is PNG)
def _num_players(self): """Compute number of players, both human and computer.""" self._player_num = 0 self._computer_num = 0 for player in self._header.scenario.game_settings.player_info: if player.type == 'human': self._player_num += 1 elif player.type == 'computer': self._computer_num += 1
Compute number of players, both human and computer.
def _iter_path_collection(paths, path_transforms, offsets, styles): """Build an iterator over the elements of the path collection""" N = max(len(paths), len(offsets)) if not path_transforms: path_transforms = [np.eye(3)] edgecolor = styles['edgecolor'] if np.size(edgecolor) == 0: edgecolor = ['none'] facecolor = styles['facecolor'] if np.size(facecolor) == 0: facecolor = ['none'] elements = [paths, path_transforms, offsets, edgecolor, styles['linewidth'], facecolor] it = itertools return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
Build an iterator over the elements of the path collection
def ecn(ns=None, cn=None, di=None): # pylint: disable=redefined-outer-name """ This function is a wrapper for :meth:`~pywbem.WBEMConnection.EnumerateClassNames`. Enumerate the names of subclasses of a class, or of the top-level classes in a namespace. Parameters: ns (:term:`string`): Name of the CIM namespace to be used (case independent). If `None`, defaults to the namespace of the `cn` parameter if specified as a `CIMClassName`, or to the default namespace of the connection. cn (:term:`string` or :class:`~pywbem.CIMClassName`): Name of the class whose subclasses are to be enumerated (case independent). `None` will enumerate the top-level classes. If specified as a `CIMClassName` object, its `host` attribute will be ignored. di (:class:`py:bool`): DeepInheritance flag: Include also indirect subclasses. `None` will cause the server default of `False` to be used. Returns: list of :term:`unicode string`: The enumerated class names. """ return CONN.EnumerateClassNames(ns, ClassName=cn, DeepInheritance=di)
This function is a wrapper for :meth:`~pywbem.WBEMConnection.EnumerateClassNames`. Enumerate the names of subclasses of a class, or of the top-level classes in a namespace. Parameters: ns (:term:`string`): Name of the CIM namespace to be used (case independent). If `None`, defaults to the namespace of the `cn` parameter if specified as a `CIMClassName`, or to the default namespace of the connection. cn (:term:`string` or :class:`~pywbem.CIMClassName`): Name of the class whose subclasses are to be enumerated (case independent). `None` will enumerate the top-level classes. If specified as a `CIMClassName` object, its `host` attribute will be ignored. di (:class:`py:bool`): DeepInheritance flag: Include also indirect subclasses. `None` will cause the server default of `False` to be used. Returns: list of :term:`unicode string`: The enumerated class names.
def index(self, index, doc_type, body, id=None, **query_params): """ Adds or updates a typed JSON document in a specific index, making it searchable. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_ :param index: The name of the index :param doc_type: The type of the document :param body: The document :param id: Document ID :arg consistency: Explicit write consistency setting for the operation, valid choices are: 'one', 'quorum', 'all' :arg op_type: Explicit operation type, default 'index', valid choices are: 'index', 'create' :arg parent: ID of the parent document :arg refresh: Refresh the index after performing the operation :arg routing: Specific routing value :arg timeout: Explicit operation timeout :arg timestamp: Explicit timestamp for the document :arg ttl: Expiration time for the document :arg version: Explicit version number for concurrency control :arg version_type: Specific version type, valid choices are: 'internal', 'external', 'external_gte', 'force' """ self._es_parser.is_not_empty_params(index, doc_type, body) method = HttpMethod.POST if id in NULL_VALUES else HttpMethod.PUT path = self._es_parser.make_path(index, doc_type, id) result = yield self._perform_request(method, path, body, params=query_params) returnValue(result)
Adds or updates a typed JSON document in a specific index, making it searchable. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_ :param index: The name of the index :param doc_type: The type of the document :param body: The document :param id: Document ID :arg consistency: Explicit write consistency setting for the operation, valid choices are: 'one', 'quorum', 'all' :arg op_type: Explicit operation type, default 'index', valid choices are: 'index', 'create' :arg parent: ID of the parent document :arg refresh: Refresh the index after performing the operation :arg routing: Specific routing value :arg timeout: Explicit operation timeout :arg timestamp: Explicit timestamp for the document :arg ttl: Expiration time for the document :arg version: Explicit version number for concurrency control :arg version_type: Specific version type, valid choices are: 'internal', 'external', 'external_gte', 'force'
def encrypted_gradient(self, sum_to=None): """Compute and encrypt gradient. When `sum_to` is given, sum the encrypted gradient to it, assumed to be another vector of the same size """ gradient = self.compute_gradient() encrypted_gradient = encrypt_vector(self.pubkey, gradient) if sum_to is not None: return sum_encrypted_vectors(sum_to, encrypted_gradient) else: return encrypted_gradient
Compute and encrypt gradient. When `sum_to` is given, sum the encrypted gradient to it, assumed to be another vector of the same size
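The helpers called above are not shown here; with the `phe` (python-paillier) package they would typically look like the following sketch, where the helper bodies are assumptions that mirror the call sites rather than the project's actual implementations:

from phe import paillier

pubkey, privkey = paillier.generate_paillier_keypair(n_length=1024)

def encrypt_vector(pub, vec):
    # Assumed helper: element-wise Paillier encryption of a plain vector.
    return [pub.encrypt(v) for v in vec]

def sum_encrypted_vectors(a, b):
    # Paillier ciphertexts add homomorphically, so `+` sums the plaintexts.
    return [x + y for x, y in zip(a, b)]

enc = sum_encrypted_vectors(encrypt_vector(pubkey, [0.5, -1.0]),
                            encrypt_vector(pubkey, [1.5, 2.0]))
print([privkey.decrypt(c) for c in enc])    # [2.0, 1.0]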
def validate(self, ip, **kwargs): """Check to see if this is a valid ip address.""" if ip is None: return False ip = stringify(ip) if self.IPV4_REGEX.match(ip): try: socket.inet_pton(socket.AF_INET, ip) return True except AttributeError: # no inet_pton here, sorry try: socket.inet_aton(ip) except socket.error: return False return ip.count('.') == 3 except socket.error: # not a valid address return False if self.IPV6_REGEX.match(ip): try: socket.inet_pton(socket.AF_INET6, ip) except socket.error: # not a valid address return False return True
Check to see if this is a valid ip address.
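A standalone approximation of the same check using only the standard library; the class above additionally applies regex pre-filters and an `inet_aton` fallback for platforms without `inet_pton`:

import socket

def is_valid_ip(ip):
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(family, ip)
            return True
        except (OSError, socket.error):
            continue
    return False

print(is_valid_ip('192.168.0.1'))   # True
print(is_valid_ip('::1'))           # True
print(is_valid_ip('999.1.1.1'))     # False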
def decorate_with_checker(func: CallableT) -> CallableT: """Decorate the function with a checker that verifies the preconditions and postconditions.""" assert not hasattr(func, "__preconditions__"), \ "Expected func to have no list of preconditions (there should be only a single contract checker per function)." assert not hasattr(func, "__postconditions__"), \ "Expected func to have no list of postconditions (there should be only a single contract checker per function)." assert not hasattr(func, "__postcondition_snapshots__"), \ "Expected func to have no list of postcondition snapshots (there should be only a single contract checker " \ "per function)." sign = inspect.signature(func) param_names = list(sign.parameters.keys()) # Determine the default argument values. kwdefaults = dict() # type: Dict[str, Any] # Add to the defaults all the values that are needed by the contracts. for param in sign.parameters.values(): if param.default != inspect.Parameter.empty: kwdefaults[param.name] = param.default def wrapper(*args, **kwargs): """Wrap func by checking the preconditions and postconditions.""" preconditions = getattr(wrapper, "__preconditions__") # type: List[List[Contract]] snapshots = getattr(wrapper, "__postcondition_snapshots__") # type: List[Snapshot] postconditions = getattr(wrapper, "__postconditions__") # type: List[Contract] resolved_kwargs = _kwargs_from_call(param_names=param_names, kwdefaults=kwdefaults, args=args, kwargs=kwargs) if postconditions: if 'result' in resolved_kwargs: raise TypeError("Unexpected argument 'result' in a function decorated with postconditions.") if 'OLD' in resolved_kwargs: raise TypeError("Unexpected argument 'OLD' in a function decorated with postconditions.") # Assert the preconditions in groups. This is necessary to implement "require else" logic when a class # weakens the preconditions of its base class. violation_err = None # type: Optional[ViolationError] for group in preconditions: violation_err = None try: for contract in group: _assert_precondition(contract=contract, resolved_kwargs=resolved_kwargs) break except ViolationError as err: violation_err = err if violation_err is not None: raise violation_err # pylint: disable=raising-bad-type # Capture the snapshots if postconditions: old_as_mapping = dict() # type: MutableMapping[str, Any] for snap in snapshots: # This assert is just a last defense. # Conflicting snapshot names should have been caught before, either during the decoration or # in the meta-class. assert snap.name not in old_as_mapping, "Snapshots with the conflicting name: {}" old_as_mapping[snap.name] = _capture_snapshot(a_snapshot=snap, resolved_kwargs=resolved_kwargs) resolved_kwargs['OLD'] = _Old(mapping=old_as_mapping) # Execute the wrapped function result = func(*args, **kwargs) if postconditions: resolved_kwargs['result'] = result # Assert the postconditions as a conjunction for contract in postconditions: _assert_postcondition(contract=contract, resolved_kwargs=resolved_kwargs) return result # type: ignore # Copy __doc__ and other properties so that doctests can run functools.update_wrapper(wrapper=wrapper, wrapped=func) assert not hasattr(wrapper, "__preconditions__"), "Expected no preconditions set on a pristine contract checker." assert not hasattr(wrapper, "__postcondition_snapshots__"), \ "Expected no postcondition snapshots set on a pristine contract checker." assert not hasattr(wrapper, "__postconditions__"), "Expected no postconditions set on a pristine contract checker." 
# Precondition is a list of condition groups (i.e. disjunctive normal form): # each group consists of AND'ed preconditions, while the groups are OR'ed. # # This is necessary in order to implement "require else" logic when a class weakens the preconditions of # its base class. setattr(wrapper, "__preconditions__", []) setattr(wrapper, "__postcondition_snapshots__", []) setattr(wrapper, "__postconditions__", []) return wrapper
Decorate the function with a checker that verifies the preconditions and postconditions.
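The precondition groups stored on the wrapper form a disjunctive normal form: groups are OR'ed while the contracts inside a group are AND'ed. A stripped-down sketch of that evaluation order, independent of the decorator machinery above:

def check_precondition_groups(groups, kwargs):
    """Return None if any group is fully satisfied, else the last violation."""
    violation = None
    for group in groups:
        violation = None
        for description, condition in group:
            if not condition(**kwargs):
                violation = "precondition violated: " + description
                break
        if violation is None:
            return None           # this group passed, so the whole check passes
    return violation

# require (x > 0 and x < 10) -- or else -- (x == -1)
groups = [
    [("x > 0", lambda x: x > 0), ("x < 10", lambda x: x < 10)],
    [("x == -1", lambda x: x == -1)],
]
assert check_precondition_groups(groups, {"x": 5}) is None
assert check_precondition_groups(groups, {"x": -1}) is None
assert check_precondition_groups(groups, {"x": 42}) is not None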
def changeSubMenu(self,submenu): """ Changes the submenu that is displayed. :raises ValueError: if the name was not previously registered """ if submenu not in self.submenus: raise ValueError("Submenu %s does not exist!"%submenu) elif submenu == self.activeSubMenu: return # Ignore double submenu activation to prevent bugs in submenu initializer old = self.activeSubMenu self.activeSubMenu = submenu if old is not None: self.submenus[old].on_exit(submenu) self.submenus[old].doAction("exit") self.submenu.on_enter(old) self.submenu.doAction("enter")
Changes the submenu that is displayed. :raises ValueError: if the name was not previously registered
def add_model_name_to_payload(cls, payload):
    """
    Checks whether the model name in question is in the payload. If not, the entire payload
    is set as a value of a key by the name of the model.  This method is useful when some
    server-side Rails API calls expect the parameters to include the parameterized model name.
    For example, server-side endpoints that handle the updating of a biosample record or the
    creation of a new biosample record will expect the payload to be of the form::

        { "biosample": {
            "name": "new biosample",
            "donor": 3,
            ...
           }
        }

    Args:
        payload: `dict`. The data to send in an HTTP request.

    Returns:
        `dict`.
    """
    if cls.MODEL_NAME not in payload:
        payload = {cls.MODEL_NAME: payload}
    return payload
Checks whether the model name in question is in the payload. If not, the entire payload
is set as a value of a key by the name of the model.  This method is useful when some
server-side Rails API calls expect the parameters to include the parameterized model name.
For example, server-side endpoints that handle the updating of a biosample record or the
creation of a new biosample record will expect the payload to be of the form::

    { "biosample": {
        "name": "new biosample",
        "donor": 3,
        ...
       }
    }

Args:
    payload: `dict`. The data to send in an HTTP request.

Returns:
    `dict`.
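A toy illustration of the wrapping behaviour, assuming a class whose `MODEL_NAME` is "biosample"; the class here is hypothetical, not the library's real model class:

class Biosample:
    MODEL_NAME = "biosample"     # hypothetical model class for illustration

    @classmethod
    def add_model_name_to_payload(cls, payload):
        if cls.MODEL_NAME not in payload:
            payload = {cls.MODEL_NAME: payload}
        return payload

print(Biosample.add_model_name_to_payload({"name": "new biosample", "donor": 3}))
# {'biosample': {'name': 'new biosample', 'donor': 3}}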
def add(path=None, force=False, quiet=False):
    """Add that path to git's staging area (default current dir)
    so that it will be included in the next commit
    """
    option = '-f' if force else ''
    return run('add %s %s' % (option, path or '.'), quiet=quiet)
Add that path to git's staging area (default current dir)
so that it will be included in the next commit
def algorithm(G, method_name, **kwargs): """ Apply a ``method`` from NetworkX to all :ref:`networkx.Graph <networkx:graph>` objects in the :class:`.GraphCollection` ``G``. For options, see the `list of algorithms <http://networkx.github.io/documentation/networkx-1.9/reference/algorithms.html>`_ in the NetworkX documentation. Not all of these have been tested. Parameters ---------- G : :class:`.GraphCollection` The :class:`.GraphCollection` to analyze. The specified method will be applied to each graph in ``G``. method_name : string Name of a method in NetworkX to execute on graph collection. **kwargs A list of keyword arguments that should correspond to the parameters of the specified method. Returns ------- results : dict Indexed by element (node or edge) and graph index (e.g. ``date``). Raises ------ ValueError If no such method exists. Examples -------- *Betweenness centrality:* (``G`` is a :class:`.GraphCollection`\) .. code-block:: python >>> from tethne.analyze import collection >>> BC = collection.algorithm(G, 'betweenness_centrality') >>> print BC[0] {1999: 0.010101651117889644, 2000: 0.0008689093723107329, 2001: 0.010504898852426189, 2002: 0.009338654511194512, 2003: 0.007519105636349891} """ warnings.warn("To be removed in 0.8. Use GraphCollection.analyze instead.", DeprecationWarning) return G.analyze(method_name, **kwargs)
Apply a ``method`` from NetworkX to all :ref:`networkx.Graph <networkx:graph>` objects in the :class:`.GraphCollection` ``G``. For options, see the `list of algorithms <http://networkx.github.io/documentation/networkx-1.9/reference/algorithms.html>`_ in the NetworkX documentation. Not all of these have been tested. Parameters ---------- G : :class:`.GraphCollection` The :class:`.GraphCollection` to analyze. The specified method will be applied to each graph in ``G``. method_name : string Name of a method in NetworkX to execute on graph collection. **kwargs A list of keyword arguments that should correspond to the parameters of the specified method. Returns ------- results : dict Indexed by element (node or edge) and graph index (e.g. ``date``). Raises ------ ValueError If no such method exists. Examples -------- *Betweenness centrality:* (``G`` is a :class:`.GraphCollection`\) .. code-block:: python >>> from tethne.analyze import collection >>> BC = collection.algorithm(G, 'betweenness_centrality') >>> print BC[0] {1999: 0.010101651117889644, 2000: 0.0008689093723107329, 2001: 0.010504898852426189, 2002: 0.009338654511194512, 2003: 0.007519105636349891}
def expect(self, searcher, timeout=3): """Wait for input matching *searcher* Waits for input matching *searcher* for up to *timeout* seconds. If a match is found, the match result is returned (the specific type of returned result depends on the :class:`Searcher` type). If no match is found within *timeout* seconds, raise an :class:`ExpectTimeout` exception. :param Searcher searcher: :class:`Searcher` to apply to underlying stream. :param float timeout: Timeout in seconds. """ timeout = float(timeout) end = time.time() + timeout match = searcher.search(self._history[self._start:]) while not match: # poll() will raise ExpectTimeout if time is exceeded incoming = self._stream_adapter.poll(end - time.time()) self.input_callback(incoming) self._history += incoming match = searcher.search(self._history[self._start:]) trimlength = len(self._history) - self._window if trimlength > 0: self._start -= trimlength self._history = self._history[trimlength:] self._start += match.end if (self._start < 0): self._start = 0 return match
Wait for input matching *searcher* Waits for input matching *searcher* for up to *timeout* seconds. If a match is found, the match result is returned (the specific type of returned result depends on the :class:`Searcher` type). If no match is found within *timeout* seconds, raise an :class:`ExpectTimeout` exception. :param Searcher searcher: :class:`Searcher` to apply to underlying stream. :param float timeout: Timeout in seconds.
def build_structure(self, check_name, groups, source_name, limit=1): ''' Compiles the checks, results and scores into an aggregate structure which looks like: { "scored_points": 396, "low_count": 0, "possible_points": 400, "testname": "gliderdac", "medium_count": 2, "source_name": ".//rutgers/ru01-20140120T1444/ru01-20140120T1649.nc", "high_count": 0, "all_priorities" : [...], "high_priorities": [...], "medium_priorities" : [...], "low_priorities" : [...] } @param check_name The test which was run @param groups List of results from compliance checker @param source_name Source of the dataset, used for title ''' aggregates = {} aggregates['scored_points'] = 0 aggregates['possible_points'] = 0 high_priorities = [] medium_priorities = [] low_priorities = [] all_priorities = [] aggregates['high_count'] = 0 aggregates['medium_count'] = 0 aggregates['low_count'] = 0 def named_function(result): for child in result.children: all_priorities.append(child) named_function(child) # For each result, bin them into the appropriate category, put them all # into the all_priorities category and add up the point values for res in groups: if res.weight < limit: continue # If the result has 0 possible points, then it was not valid for # this dataset and contains no meaningful information if res.value[1] == 0: continue aggregates['scored_points'] += res.value[0] aggregates['possible_points'] += res.value[1] if res.weight == 3: high_priorities.append(res) if res.value[0] < res.value[1]: aggregates['high_count'] += 1 elif res.weight == 2: medium_priorities.append(res) if res.value[0] < res.value[1]: aggregates['medium_count'] += 1 else: low_priorities.append(res) if res.value[0] < res.value[1]: aggregates['low_count'] += 1 all_priorities.append(res) # Some results have children # We don't render children inline with the top three tables, but we # do total the points and display the messages named_function(res) aggregates['high_priorities'] = high_priorities aggregates['medium_priorities'] = medium_priorities aggregates['low_priorities'] = low_priorities aggregates['all_priorities'] = all_priorities aggregates['testname'] = self._get_check_versioned_name(check_name) aggregates['source_name'] = source_name aggregates['scoreheader'] = self.checkers[check_name]._cc_display_headers aggregates['cc_spec_version'] = self.checkers[check_name]._cc_spec_version aggregates['cc_url'] = self._get_check_url(aggregates['testname']) return aggregates
Compiles the checks, results and scores into an aggregate structure which looks like: { "scored_points": 396, "low_count": 0, "possible_points": 400, "testname": "gliderdac", "medium_count": 2, "source_name": ".//rutgers/ru01-20140120T1444/ru01-20140120T1649.nc", "high_count": 0, "all_priorities" : [...], "high_priorities": [...], "medium_priorities" : [...], "low_priorities" : [...] } @param check_name The test which was run @param groups List of results from compliance checker @param source_name Source of the dataset, used for title
def delete_collection_namespaced_replica_set(self, namespace, **kwargs): # noqa: E501 """delete_collection_namespaced_replica_set # noqa: E501 delete collection of ReplicaSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_replica_set(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs) # noqa: E501 else: (data) = self.delete_collection_namespaced_replica_set_with_http_info(namespace, **kwargs) # noqa: E501 return data
delete_collection_namespaced_replica_set # noqa: E501 delete collection of ReplicaSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_replica_set(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread.
def make_bcbiornaseq_object(data): """ load the initial bcb.rda object using bcbioRNASeq """ if "bcbiornaseq" not in dd.get_tools_on(data): return data upload_dir = tz.get_in(("upload", "dir"), data) report_dir = os.path.join(upload_dir, "bcbioRNASeq") safe_makedir(report_dir) organism = dd.get_bcbiornaseq(data).get("organism", None) groups = dd.get_bcbiornaseq(data).get("interesting_groups", None) loadstring = create_load_string(upload_dir, groups, organism) r_file = os.path.join(report_dir, "load_bcbioRNAseq.R") with file_transaction(r_file) as tmp_file: memoize_write_file(loadstring, tmp_file) rcmd = Rscript_cmd() with chdir(report_dir): do.run([rcmd, "--no-environ", r_file], "Loading bcbioRNASeq object.") make_quality_report(data) return data
load the initial bcb.rda object using bcbioRNASeq
def rollback(self, label=None, plane='sdr'): """Rollback the configuration. This method rolls back the configuration on the device. Args: label (text): The configuration label ID plane: (text): sdr or admin Returns: A string with commit label or None """ begin = time.time() rb_label = self._chain.target_device.rollback(label=label, plane=plane) elapsed = time.time() - begin if label: self.emit_message("Configuration rollback last {:.0f}s. Label: {}".format(elapsed, rb_label), log_level=logging.INFO) else: self.emit_message("Configuration failed.", log_level=logging.WARNING) return rb_label
Rollback the configuration. This method rolls back the configuration on the device. Args: label (text): The configuration label ID plane: (text): sdr or admin Returns: A string with commit label or None
def resource_redirect(id): ''' Redirect to the latest version of a resource given its identifier. ''' resource = get_resource(id) return redirect(resource.url.strip()) if resource else abort(404)
Redirect to the latest version of a resource given its identifier.
def submission_storage_path(instance, filename):
    """
    Build the storage path for an uploaded submission file, namespaced by the
    submitting user's nick, the question level and level id, plus a timestamp.
    """
    string = '/'.join(['submissions',
                       instance.submission_user.user_nick,
                       str(instance.submission_question.question_level),
                       str(instance.submission_question.question_level_id)])
    string += '/'+datetime.datetime.now().strftime("%I:%M%p-%m-%d-%Y")
    string += filename
    return string
Build the storage path for an uploaded submission file, namespaced by the
submitting user's nick, the question level and level id, plus a timestamp.
def items(self): """ Generator that iterates through the items of the value mapping. The items are the array entries of the `Values` and `ValueMap` qualifiers, and they are iterated in the order specified in the arrays. If the `ValueMap` qualifier is not specified, the default of consecutive integers starting at 0 is used as a default, consistent with :term:`DSP0004`. Each iterated item is a tuple of integer value(s) representing the `ValueMap` array entry, and the corresponding `Values` string. Any integer value in the iterated items is represented as the CIM type of the element (e.g. :class:`~pywbem.Uint16`). If the `Values` string corresponds to a single element value, the first tuple item is that single integer value. If the `Values` string corresponds to a value range (e.g. "1.." or "..2" or "3..4"), that value range is returned as a tuple with two items that are the lowest and the highest value of the range. That is the case also when the value range is open on the left side or right side. If the `Values` string corresponds to the `unclaimed` indicator "..", the first tuple item is `None`. Returns: :term:`iterator` for tuple of integer value(s) and `Values` string. """ for values_str in self._v2b_dict: element_value = self._v2b_dict[values_str] yield element_value, values_str
Generator that iterates through the items of the value mapping. The items are the array entries of the `Values` and `ValueMap` qualifiers, and they are iterated in the order specified in the arrays. If the `ValueMap` qualifier is not specified, the default of consecutive integers starting at 0 is used as a default, consistent with :term:`DSP0004`. Each iterated item is a tuple of integer value(s) representing the `ValueMap` array entry, and the corresponding `Values` string. Any integer value in the iterated items is represented as the CIM type of the element (e.g. :class:`~pywbem.Uint16`). If the `Values` string corresponds to a single element value, the first tuple item is that single integer value. If the `Values` string corresponds to a value range (e.g. "1.." or "..2" or "3..4"), that value range is returned as a tuple with two items that are the lowest and the highest value of the range. That is the case also when the value range is open on the left side or right side. If the `Values` string corresponds to the `unclaimed` indicator "..", the first tuple item is `None`. Returns: :term:`iterator` for tuple of integer value(s) and `Values` string.
def save(self, logmessage=None): """Save datastream content and any changed datastream profile information to Fedora. :rtype: boolean for success """ if self.as_of_date is not None: raise RuntimeError('Saving is not implemented for datastream versions') save_opts = {} if self.info_modified: if self.label: save_opts['dsLabel'] = self.label if self.mimetype: save_opts['mimeType'] = self.mimetype if self.versionable is not None: save_opts['versionable'] = self.versionable if self.state: save_opts['dsState'] = self.state if self.format: save_opts['formatURI'] = self.format if self.checksum: if self.checksum_modified: save_opts['checksum'] = self.checksum if self.checksum_type: save_opts['checksumType'] = self.checksum_type # FIXME: should be able to handle checksums # NOTE: as of Fedora 3.2, updating content without specifying mimetype fails (Fedora bug?) if 'mimeType' not in save_opts.keys(): # if datastreamProfile has not been pulled from fedora, use configured default mimetype if self._info is not None: save_opts['mimeType'] = self.mimetype else: save_opts['mimeType'] = self.defaults['mimetype'] # if datastream location has been set, use that for content # otherwise, use local content (if any) if self.ds_location is not None: save_opts['dsLocation'] = self.ds_location else: save_opts['content'] = self._raw_content() if self.exists: # if not versionable, make a backup to back out changes if object save fails if not self.versionable: self._backup() # if this datastream already exists, use modifyDatastream API call r = self.obj.api.modifyDatastream(self.obj.pid, self.id, logMessage=logmessage, **save_opts) # expects 200 ok success = (r.status_code == requests.codes.ok) else: # if this datastream does not yet exist, add it r = self.obj.api.addDatastream(self.obj.pid, self.id, controlGroup=self.defaults['control_group'], logMessage=logmessage, **save_opts) # expects 201 created success = (r.status_code == requests.codes.created) # clean-up required for object info after adding a new datastream if success: # update exists flag - if add succeeded, the datastream exists now self.exists = True # if the datastream content is a file-like object, clear it out # (we don't want to attempt to save the current file contents again, # particularly since the file is not guaranteed to still be open) if 'content' in save_opts and hasattr(save_opts['content'], 'read'): self._content = None self._content_modified = False if success: # update modification indicators self.info_modified = False self.checksum_modified = False self.digest = self._content_digest() # clear out ds location self.ds_location = None return success
Save datastream content and any changed datastream profile information to Fedora. :rtype: boolean for success
def member_add(self, cluster_id, params):
    """Add a new member to the cluster configuration."""
    cluster = self._storage[cluster_id]
    result = cluster.member_add(params.get('id', None),
                                params.get('shardParams', {}))
    self._storage[cluster_id] = cluster
    return result
Add a new member to the cluster configuration.
def find_field(self, field=None, alias=None): """ Finds a field by name or alias. :param field: string of the field name or alias, dict of {'alias': field}, or a Field instance :type field: str or dict or Field :returns: The field if it is found, otherwise None :rtype: :class:`Field <querybuilder.fields.Field>` or None """ if alias: field = alias field = FieldFactory(field, table=self, alias=alias) identifier = field.get_identifier() for field in self.fields: if field.get_identifier() == identifier: return field return None
Finds a field by name or alias. :param field: string of the field name or alias, dict of {'alias': field}, or a Field instance :type field: str or dict or Field :returns: The field if it is found, otherwise None :rtype: :class:`Field <querybuilder.fields.Field>` or None
def _array_slice(array, index): """Slice or index `array` at `index`. Parameters ---------- index : int or ibis.expr.types.IntegerValue or slice Returns ------- sliced_array : ibis.expr.types.ValueExpr If `index` is an ``int`` or :class:`~ibis.expr.types.IntegerValue` then the return type is the element type of `array`. If `index` is a ``slice`` then the return type is the same type as the input. """ if isinstance(index, slice): start = index.start stop = index.stop if (start is not None and start < 0) or ( stop is not None and stop < 0 ): raise ValueError('negative slicing not yet supported') step = index.step if step is not None and step != 1: raise NotImplementedError('step can only be 1') op = ops.ArraySlice(array, start if start is not None else 0, stop) else: op = ops.ArrayIndex(array, index) return op.to_expr()
Slice or index `array` at `index`. Parameters ---------- index : int or ibis.expr.types.IntegerValue or slice Returns ------- sliced_array : ibis.expr.types.ValueExpr If `index` is an ``int`` or :class:`~ibis.expr.types.IntegerValue` then the return type is the element type of `array`. If `index` is a ``slice`` then the return type is the same type as the input.
def cxxRecordDecl(*args): """Matches C++ class declarations. >>> from glud import * >>> config = ''' ... class W; ... template<typename T> class X {}; ... struct Y {}; ... union Z {}; ... ''' >>> m = cxxRecordDecl() >>> for c in walk(m, parse_string(config).cursor): ... print(c.spelling) W X """ kinds = [ CursorKind.CLASS_DECL, CursorKind.CLASS_TEMPLATE, ] inner = [ PredMatcher(is_kind(k)) for k in kinds ] return allOf(anyOf(*inner), *args)
Matches C++ class declarations. >>> from glud import * >>> config = ''' ... class W; ... template<typename T> class X {}; ... struct Y {}; ... union Z {}; ... ''' >>> m = cxxRecordDecl() >>> for c in walk(m, parse_string(config).cursor): ... print(c.spelling) W X
def get(package_name, pypi_server="https://pypi.python.org/pypi/"): """ Constructs a request to the PyPI server and returns a :class:`yarg.package.Package`. :param package_name: case sensitive name of the package on the PyPI server. :param pypi_server: (option) URL to the PyPI server. >>> import yarg >>> package = yarg.get('yarg') <Package yarg> """ if not pypi_server.endswith("/"): pypi_server = pypi_server + "/" response = requests.get("{0}{1}/json".format(pypi_server, package_name)) if response.status_code >= 300: raise HTTPError(status_code=response.status_code, reason=response.reason) if hasattr(response.content, 'decode'): return json2package(response.content.decode()) else: return json2package(response.content)
Constructs a request to the PyPI server and returns a :class:`yarg.package.Package`. :param package_name: case sensitive name of the package on the PyPI server. :param pypi_server: (option) URL to the PyPI server. >>> import yarg >>> package = yarg.get('yarg') <Package yarg>
def handle_sap(q): question_votes = votes = Answer.objects.filter(question=q) users = q.get_users_voted() num_users_votes = {u.id: votes.filter(user=u).count() for u in users} user_scale = {u.id: (1 / num_users_votes[u.id]) for u in users} choices = [] for c in q.choice_set.all().order_by("num"): votes = question_votes.filter(choice=c) vote_users = set([v.user for v in votes]) choice = { "choice": c, "votes": { "total": { "all": len(vote_users), "all_percent": perc(len(vote_users), users.count()), "male": fmt(sum([v.user.is_male * user_scale[v.user.id] for v in votes])), "female": fmt(sum([v.user.is_female * user_scale[v.user.id] for v in votes])) } }, "users": [v.user for v in votes] } for yr in range(9, 14): yr_votes = [v.user if v.user.grade and v.user.grade.number == yr else None for v in votes] yr_votes = list(filter(None, yr_votes)) choice["votes"][yr] = { "all": len(set(yr_votes)), "male": fmt(sum([u.is_male * user_scale[u.id] for u in yr_votes])), "female": fmt(sum([u.is_female * user_scale[u.id] for u in yr_votes])), } choices.append(choice) """ Clear vote """ votes = question_votes.filter(clear_vote=True) clr_users = set([v.user for v in votes]) choice = { "choice": "Clear vote", "votes": { "total": { "all": len(clr_users), "all_percent": perc(len(clr_users), users.count()), "male": fmt(sum([v.user.is_male * user_scale[v.user.id] for v in votes])), "female": fmt(sum([v.user.is_female * user_scale[v.user.id] for v in votes])) } }, "users": clr_users } for yr in range(9, 14): yr_votes = [v.user if v.user.grade and v.user.grade.number == yr else None for v in votes] yr_votes = list(filter(None, yr_votes)) choice["votes"][yr] = { "all": len(yr_votes), "male": fmt(sum([u.is_male * user_scale[u.id] for u in yr_votes])), "female": fmt(sum([u.is_female * user_scale[u.id] for u in yr_votes])) } choices.append(choice) choice = { "choice": "Total", "votes": { "total": { "all": users.count(), "votes_all": question_votes.count(), "all_percent": perc(users.count(), users.count()), "male": users.filter(gender=True).count(), "female": users.filter(gender__isnull=False, gender=False).count() } } } for yr in range(9, 14): yr_votes = [u if u.grade and u.grade.number == yr else None for u in users] yr_votes = list(filter(None, yr_votes)) choice["votes"][yr] = { "all": len(set(yr_votes)), "male": fmt(sum([u.is_male * user_scale[u.id] for u in yr_votes])), "female": fmt(sum([u.is_female * user_scale[u.id] for u in yr_votes])) } choices.append(choice) return {"question": q, "choices": choices, "user_scale": user_scale}
Clear vote
def get_available_ip6(self, id_network6):
    """
    Get an available IP in the IPv6 network.

    :param id_network6: Network ipv6 identifier. Integer value and greater than zero.

    :return: Dictionary with the following structure:

    ::

        {'ip6': {'ip6': < available_ip6 >}}

    :raise IpNotAvailableError: Network does not have an available IP.
    :raise NetworkIPv4NotFoundError: Network was not found.
    :raise UserNotAuthorizedError: User does not have permission to get an available IP.
    :raise InvalidParameterError: Network ipv6 identifier is none or invalid.
    :raise XMLError: Networkapi failed to generate the XML response.
    :raise DataBaseError: Networkapi failed to access the database.
    """

    if not is_valid_int_param(id_network6):
        raise InvalidParameterError(
            u'Network ipv6 identifier is invalid or was not informed.')

    url = 'ip/availableip6/' + str(id_network6) + "/"

    code, xml = self.submit(None, 'GET', url)

    return self.response(code, xml)
Get an available IP in the IPv6 network.

:param id_network6: Network ipv6 identifier. Integer value and greater than zero.

:return: Dictionary with the following structure:

::

    {'ip6': {'ip6': < available_ip6 >}}

:raise IpNotAvailableError: Network does not have an available IP.
:raise NetworkIPv4NotFoundError: Network was not found.
:raise UserNotAuthorizedError: User does not have permission to get an available IP.
:raise InvalidParameterError: Network ipv6 identifier is none or invalid.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database.
def hasx(self, name, *args): """ Returns true if named parameter(s) was specified on command line """ return lib.zargs_hasx(self._as_parameter_, name, *args)
Returns true if named parameter(s) was specified on command line
def Put(self, key, obj): """Add the object to the cache.""" # Remove the old entry if it is there. node = self._hash.pop(key, None) if node: self._age.Unlink(node) # Make a new node and insert it. node = Node(key=key, data=obj) self._hash[key] = node self._age.AppendNode(node) self.Expire() return key
Add the object to the cache.
def create_socketpair(size=None):
    """
    Create a :func:`socket.socketpair` for use as a child process's UNIX
    stdio channels. As socket pairs are bidirectional, they are economical on
    file descriptor usage as the same descriptor can be used for ``stdin`` and
    ``stdout``. As they are sockets their buffers are tunable, allowing large
    buffers to be configured in order to improve throughput for file transfers
    and reduce :class:`mitogen.core.Broker` IO loop iterations.
    """
    parentfp, childfp = socket.socketpair()
    parentfp.setsockopt(socket.SOL_SOCKET,
                        socket.SO_SNDBUF,
                        size or mitogen.core.CHUNK_SIZE)
    childfp.setsockopt(socket.SOL_SOCKET,
                       socket.SO_RCVBUF,
                       size or mitogen.core.CHUNK_SIZE)
    return parentfp, childfp
Create a :func:`socket.socketpair` for use as a child process's UNIX stdio
channels. As socket pairs are bidirectional, they are economical on file
descriptor usage as the same descriptor can be used for ``stdin`` and
``stdout``. As they are sockets their buffers are tunable, allowing large
buffers to be configured in order to improve throughput for file transfers
and reduce :class:`mitogen.core.Broker` IO loop iterations.
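A quick demonstration of the bidirectional pair using the standard library directly; the wrapper above only adds the send/receive buffer tuning on top of this:

import socket

parentfp, childfp = socket.socketpair()
parentfp.sendall(b"ping")
print(childfp.recv(4))      # b'ping' -- the same descriptor also carries the reply
childfp.sendall(b"pong")
print(parentfp.recv(4))     # b'pong'
parentfp.close()
childfp.close()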
def by_name(cls, session, name, **kwargs): """ Get a classifier from a given name. :param session: SQLAlchemy session :type session: :class:`sqlalchemy.Session` :param name: name of the classifier :type name: `unicode :return: classifier instance :rtype: :class:`pyshop.models.Classifier` """ classifier = cls.first(session, where=(cls.name == name,)) if not kwargs.get('create_if_not_exists', False): return classifier if not classifier: splitted_names = [n.strip() for n in name.split(u'::')] classifiers = [u' :: '.join(splitted_names[:i + 1]) for i in range(len(splitted_names))] parent_id = None category = splitted_names[0] for c in classifiers: classifier = cls.first(session, where=(cls.name == c,)) if not classifier: classifier = Classifier(name=c, parent_id=parent_id, category=category) session.add(classifier) session.flush() parent_id = classifier.id return classifier
Get a classifier from a given name.

:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`

:param name: name of the classifier
:type name: `unicode`

:return: classifier instance
:rtype: :class:`pyshop.models.Classifier`
def get_point( self, x: float = 0, y: float = 0, z: float = 0, w: float = 0 ) -> float: """Return the noise value at the (x, y, z, w) point. Args: x (float): The position on the 1st axis. y (float): The position on the 2nd axis. z (float): The position on the 3rd axis. w (float): The position on the 4th axis. """ return float(lib.NoiseGetSample(self._tdl_noise_c, (x, y, z, w)))
Return the noise value at the (x, y, z, w) point. Args: x (float): The position on the 1st axis. y (float): The position on the 2nd axis. z (float): The position on the 3rd axis. w (float): The position on the 4th axis.
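A sketch of sampling along one axis; `noise` is assumed to be an already-constructed instance of the class that defines `get_point` (constructor details are not shown in the source):

# Hypothetical: 'noise' is an instance of the noise class defining get_point().
for i in range(5):
    print(noise.get_point(i * 0.1))   # unspecified axes default to 0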
def _ReadIntegerDataTypeDefinition( self, definitions_registry, definition_values, definition_name, is_member=False): """Reads an integer data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: IntegerDataTypeDefinition: integer data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """ definition_object = self._ReadFixedSizeDataTypeDefinition( definitions_registry, definition_values, data_types.IntegerDefinition, definition_name, self._SUPPORTED_ATTRIBUTES_INTEGER, is_member=is_member, supported_size_values=(1, 2, 4, 8)) attributes = definition_values.get('attributes', None) if attributes: format_attribute = attributes.get('format', definitions.FORMAT_SIGNED) if format_attribute not in self._INTEGER_FORMAT_ATTRIBUTES: error_message = 'unsupported format attribute: {0!s}'.format( format_attribute) raise errors.DefinitionReaderError(definition_name, error_message) definition_object.format = format_attribute return definition_object
Reads an integer data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: IntegerDataTypeDefinition: integer data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
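For orientation, the attribute checks above correspond to definition values shaped roughly like the mapping below; the exact field names are an illustrative assumption, not the library's authoritative schema:

# Illustrative definition values for a 4-byte unsigned integer.
definition_values = {
    'attributes': {
        'format': 'unsigned',   # must be one of the supported format attributes
        'size': 4,              # supported sizes are 1, 2, 4 and 8
    },
}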
def transformer_tall_pretrain_lm(): """Hparams for transformer on LM pretraining (with 64k vocab).""" hparams = transformer_tall() hparams.learning_rate_constant = 2e-4 hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") hparams.optimizer = "adam_w" hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.999 hparams.optimizer_adam_epsilon = 1e-8 # Set max examples to something big when pretraining only the LM, definitely # something an order of magnitude bigger than number of train steps. hparams.multiproblem_schedule_max_examples = 5e8 # Set train steps to learning_rate_decay_steps or less hparams.learning_rate_decay_steps = 5000000 return hparams
Hparams for transformer on LM pretraining (with 64k vocab).
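A hedged sketch of how such an hparams factory is typically consumed; the override shown is an arbitrary example, not a recommended setting:

# Build the hparams object, optionally overriding individual fields.
hparams = transformer_tall_pretrain_lm()
hparams.learning_rate_constant = 1e-4    # example override for a smaller run
print(hparams.optimizer, hparams.learning_rate_schedule)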
def set_topic_attributes(TopicArn, AttributeName, AttributeValue, region=None, key=None, keyid=None, profile=None): ''' Set an attribute of a topic to a new value. CLI example:: salt myminion boto3_sns.set_topic_attributes someTopic DisplayName myDisplayNameValue ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.set_topic_attributes(TopicArn=TopicArn, AttributeName=AttributeName, AttributeValue=AttributeValue) log.debug('Set attribute %s=%s on SNS topic %s', AttributeName, AttributeValue, TopicArn) return True except botocore.exceptions.ClientError as e: log.error('Failed to set attribute %s=%s for SNS topic %s: %s', AttributeName, AttributeValue, TopicArn, e) return False
Set an attribute of a topic to a new value. CLI example:: salt myminion boto3_sns.set_topic_attributes someTopic DisplayName myDisplayNameValue
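From inside another Salt execution module or a custom runner, the same function would normally be reached through the loader dictionary; a hedged sketch with an example topic ARN:

# Hypothetical in-Salt usage; the ARN is an example placeholder.
ok = __salt__['boto3_sns.set_topic_attributes'](
    'arn:aws:sns:us-east-1:123456789012:someTopic',
    'DisplayName',
    'myDisplayNameValue',
    region='us-east-1')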
def get_title(self): """ Return the string literal that is used in the template. The title is used in the admin screens. """ try: return extract_literal(self.meta_kwargs['title']) except KeyError: slot = self.get_slot() if slot is not None: return slot.replace('_', ' ').title() return None
Return the string literal that is used in the template. The title is used in the admin screens.
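When the template tag carries no literal title, the slot-name fallback produces a readable title; a quick pure-Python illustration of that transformation:

# The fallback branch of get_title() applied to an example slot name.
print('main_sidebar'.replace('_', ' ').title())   # -> 'Main Sidebar'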
def counter(self, ch, part=None): """Return a counter on the channel ch. ch: string or integer. The channel index number or channel name. part: int or None The 0-based enumeration of a True part to return. This has an effect whether or not the mask or filter is turned on. Raise IndexError if the part does not exist. See `Counter <https://docs.python.org/2.7/library/collections.html#counter-objects>`_ for the counter object returned. """ return Counter(self(self._key(ch), part=part))
Return a counter on the channel ch. ch: string or integer. The channel index number or channel name. part: int or None The 0-based enumeration of a True part to return. This has an effect whether or not the mask or filter is turned on. Raise IndexError if the part does not exist. See `Counter <https://docs.python.org/2.7/library/collections.html#counter-objects>`_ for the counter object returned.
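The return value is a plain collections.Counter, so the usual Counter API applies; `rec` below stands in for an instance of the containing class (an assumption):

# Hypothetical: 'rec' is an instance of the class defining counter().
counts = rec.counter('stim_channel')
print(counts.most_common(3))    # three most frequent values on that channel
print(sum(counts.values()))     # total number of samples counted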
def set_ssl_logging(self, enable=False, func=_ssl_logging_cb): u''' Enable or disable SSL logging :param True | False enable: Enable or disable SSL logging :param func: Callback function for logging ''' if enable: SSL_CTX_set_info_callback(self._ctx, func) else: SSL_CTX_set_info_callback(self._ctx, 0)
Enable or disable SSL logging

:param True | False enable: Enable or disable SSL logging
:param func: Callback function for logging
def create_sslcert(self, name, common_name, pri, ca):
    """
    Create an SSL certificate, docs: https://developer.qiniu.com/fusion/api/4246/the-domain-name#11

    Args:
        name:        certificate name
        common_name: domain(s) the certificate is issued for
        pri:         certificate private key
        ca:          certificate content

    Returns:
        a tuple (<result>, <ResponseInfo>)
        - result is the dict {certID: <CertID>} on success, or {"error": "<errMsg string>"} on failure
        - ResponseInfo is the response information of the request
    """
    req = {}
    req.update({"name": name})
    req.update({"common_name": common_name})
    req.update({"pri": pri})
    req.update({"ca": ca})

    body = json.dumps(req)
    url = '{0}/sslcert'.format(self.server)

    return self.__post(url, body)
Create an SSL certificate, docs: https://developer.qiniu.com/fusion/api/4246/the-domain-name#11

Args:
    name:        certificate name
    common_name: domain(s) the certificate is issued for
    pri:         certificate private key
    ca:          certificate content

Returns:
    a tuple (<result>, <ResponseInfo>)
    - result is the dict {certID: <CertID>} on success, or {"error": "<errMsg string>"} on failure
    - ResponseInfo is the response information of the request
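A usage sketch; `mgr` stands in for an authenticated instance of the manager class that defines `create_sslcert` (its construction and the file names are assumptions):

# Hypothetical: 'mgr' is an authenticated instance of the class defining create_sslcert.
with open('example.com.key') as f:
    pri = f.read()
with open('example.com.crt') as f:
    ca = f.read()

ret, info = mgr.create_sslcert('example-cert', 'example.com', pri, ca)
if ret is not None and 'certID' in ret:
    print('created certificate:', ret['certID'])
else:
    print('request failed:', info)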