def acquire_connection(settings, tag=None, logger_name=None, auto_commit=False):
    """
    Return a connection to a Relational DataBase Management System (RDBMS)
    the most appropriate for the service requesting this connection.

    @param settings: a dictionary of connection properties::

        {
            None: {
                'rdbms_hostname': "...",
                'rdbms_port': ...,
                'rdbms_database_name': "...",
                'rdbms_account_username': '...',
                'rdbms_account_password': '...'
            },
            'tag': {
                'rdbms_hostname': "...",
                'rdbms_port': ...,
                'rdbms_database_name': "...",
                'rdbms_account_username': '...',
                'rdbms_account_password': '...'
            },
            ...
        }

        The key ``None`` is the default tag.

    @param tag: a tag that specifies which particular connection properties
        have to be used.

    @param logger_name: name of the logger for debug information.

    @param auto_commit: indicate whether the transaction needs to be committed
        at the end of the session.

    @return: a ``RdbmsConnection`` instance to be used supporting the Python
        clause ``with ...:``.

    @raise DefaultConnectionPropertiesSettingException: if the specified tag
        is not defined in the dictionary of connection properties, and when no
        default connection properties are defined either (tag ``None``).
    """
    try:
        connection_properties = settings.get(tag, settings[None])
    except KeyError:
        raise RdbmsConnection.DefaultConnectionPropertiesSettingException()

    return RdbmsConnection(
        connection_properties['rdbms_hostname'],
        connection_properties['rdbms_port'],
        connection_properties['rdbms_database_name'],
        connection_properties['rdbms_account_username'],
        connection_properties['rdbms_account_password'],
        logger_name=logger_name,
        auto_commit=auto_commit)
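A minimal usage sketch for the function above; the settings values and the use of the returned ``RdbmsConnection`` as a context manager are illustrative assumptions, not taken from this source.

# Hypothetical usage; hostnames, credentials, and module location are placeholders.
settings = {
    None: {
        'rdbms_hostname': 'localhost',
        'rdbms_port': 5432,
        'rdbms_database_name': 'app',
        'rdbms_account_username': 'app_user',
        'rdbms_account_password': 'secret',
    },
}

# No tag given, so the default (None) entry is used.
with acquire_connection(settings, logger_name='app.db', auto_commit=True) as connection:
    pass  # run queries through the returned RdbmsConnection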
def plot_xtf(fignum, XTF, Fs, e, b):
    """
    function to plot series of chi measurements as a function of temperature,
    holding field constant and varying frequency
    """
    plt.figure(num=fignum)
    plt.xlabel('Temperature (K)')
    plt.ylabel('Susceptibility (m^3/kg)')
    k = 0
    Flab = []
    for freq in XTF:
        T, X = [], []
        for xt in freq:
            X.append(xt[0])
            T.append(xt[1])
        plt.plot(T, X)
        plt.text(T[-1], X[-1], str(int(Fs[k])) + ' Hz')
        # Flab.append(str(int(Fs[k]))+' Hz')
        k += 1
    plt.title(e + ': B = ' + '%8.1e' % (b) + ' T')
def distance_to_edge(labels):
    '''Compute the distance of a pixel to the edge of its object

    labels - a labels matrix

    returns a matrix of distances
    '''
    colors = color_labels(labels)
    max_color = np.max(colors)
    result = np.zeros(labels.shape)
    if max_color == 0:
        return result
    for i in range(1, max_color + 1):
        mask = (colors == i)
        result[mask] = scind.distance_transform_edt(mask)[mask]
    return result
def _parse_mirteFile(path, logger=None):
    """ Opens and parses the mirteFile at <path>. """
    l = logging.getLogger('_parse_mirteFile') if logger is None else logger
    cache_path = os.path.join(os.path.dirname(path),
                              CACHE_FILENAME_TEMPLATE % os.path.basename(path))
    if (os.path.exists(cache_path) and
            os.path.getmtime(cache_path) >= os.path.getmtime(path)):
        with open(cache_path) as f:
            return msgpack.unpack(f)
    with open(path) as f:
        ret = yaml.load(f)
    try:
        with open(cache_path, 'w') as f:
            msgpack.pack(ret, f)
    except IOError as e:
        if e.errno == errno.EACCES:
            l.warn('Not allowed to write %s', path)
        else:
            raise
    return ret
def transform_obs(self, obs):
    """Render some SC2 observations into something an agent can handle."""
    empty = np.array([], dtype=np.int32).reshape((0, 7))
    out = named_array.NamedDict({  # Fill out some that are sometimes empty.
        "single_select": empty,
        "multi_select": empty,
        "build_queue": empty,
        "cargo": empty,
        "cargo_slots_available": np.array([0], dtype=np.int32),
    })

    def or_zeros(layer, size):
        if layer is not None:
            return layer.astype(np.int32, copy=False)
        else:
            return np.zeros((size.y, size.x), dtype=np.int32)

    aif = self._agent_interface_format

    if aif.feature_dimensions:
        out["feature_screen"] = named_array.NamedNumpyArray(
            np.stack(or_zeros(f.unpack(obs.observation),
                              aif.feature_dimensions.screen)
                     for f in SCREEN_FEATURES),
            names=[ScreenFeatures, None, None])
        out["feature_minimap"] = named_array.NamedNumpyArray(
            np.stack(or_zeros(f.unpack(obs.observation),
                              aif.feature_dimensions.minimap)
                     for f in MINIMAP_FEATURES),
            names=[MinimapFeatures, None, None])

    if aif.rgb_dimensions:
        out["rgb_screen"] = Feature.unpack_rgb_image(
            obs.observation.render_data.map).astype(np.int32)
        out["rgb_minimap"] = Feature.unpack_rgb_image(
            obs.observation.render_data.minimap).astype(np.int32)

    out["last_actions"] = np.array(
        [self.reverse_action(a).function for a in obs.actions],
        dtype=np.int32)

    out["action_result"] = np.array([o.result for o in obs.action_errors],
                                    dtype=np.int32)

    out["alerts"] = np.array(obs.observation.alerts, dtype=np.int32)

    out["game_loop"] = np.array([obs.observation.game_loop], dtype=np.int32)

    score_details = obs.observation.score.score_details
    out["score_cumulative"] = named_array.NamedNumpyArray([
        obs.observation.score.score,
        score_details.idle_production_time,
        score_details.idle_worker_time,
        score_details.total_value_units,
        score_details.total_value_structures,
        score_details.killed_value_units,
        score_details.killed_value_structures,
        score_details.collected_minerals,
        score_details.collected_vespene,
        score_details.collection_rate_minerals,
        score_details.collection_rate_vespene,
        score_details.spent_minerals,
        score_details.spent_vespene,
    ], names=ScoreCumulative, dtype=np.int32)

    def get_score_details(key, details, categories):
        row = getattr(details, key.name)
        return [getattr(row, category.name) for category in categories]

    out["score_by_category"] = named_array.NamedNumpyArray([
        get_score_details(key, score_details, ScoreCategories)
        for key in ScoreByCategory
    ], names=[ScoreByCategory, ScoreCategories], dtype=np.int32)

    out["score_by_vital"] = named_array.NamedNumpyArray([
        get_score_details(key, score_details, ScoreVitals)
        for key in ScoreByVital
    ], names=[ScoreByVital, ScoreVitals], dtype=np.int32)

    player = obs.observation.player_common
    out["player"] = named_array.NamedNumpyArray([
        player.player_id,
        player.minerals,
        player.vespene,
        player.food_used,
        player.food_cap,
        player.food_army,
        player.food_workers,
        player.idle_worker_count,
        player.army_count,
        player.warp_gate_count,
        player.larva_count,
    ], names=Player, dtype=np.int32)

    def unit_vec(u):
        return np.array((
            u.unit_type,
            u.player_relative,
            u.health,
            u.shields,
            u.energy,
            u.transport_slots_taken,
            int(u.build_progress * 100),  # discretize
        ), dtype=np.int32)

    ui = obs.observation.ui_data

    with sw("ui"):
        groups = np.zeros((10, 2), dtype=np.int32)
        for g in ui.groups:
            groups[g.control_group_index, :] = (g.leader_unit_type, g.count)
        out["control_groups"] = groups

        if ui.single:
            out["single_select"] = named_array.NamedNumpyArray(
                [unit_vec(ui.single.unit)], [None, UnitLayer])

        if ui.multi and ui.multi.units:
            out["multi_select"] = named_array.NamedNumpyArray(
                [unit_vec(u) for u in ui.multi.units], [None, UnitLayer])

        if ui.cargo and ui.cargo.passengers:
            out["single_select"] = named_array.NamedNumpyArray(
                [unit_vec(ui.single.unit)], [None, UnitLayer])
            out["cargo"] = named_array.NamedNumpyArray(
                [unit_vec(u) for u in ui.cargo.passengers], [None, UnitLayer])
            out["cargo_slots_available"] = np.array([ui.cargo.slots_available],
                                                    dtype=np.int32)

        if ui.production and ui.production.build_queue:
            out["single_select"] = named_array.NamedNumpyArray(
                [unit_vec(ui.production.unit)], [None, UnitLayer])
            out["build_queue"] = named_array.NamedNumpyArray(
                [unit_vec(u) for u in ui.production.build_queue],
                [None, UnitLayer])

    def full_unit_vec(u, pos_transform, is_raw=False):
        screen_pos = pos_transform.fwd_pt(point.Point.build(u.pos))
        screen_radius = pos_transform.fwd_dist(u.radius)
        return np.array((
            # Match unit_vec order
            u.unit_type,
            u.alliance,  # Self = 1, Ally = 2, Neutral = 3, Enemy = 4
            u.health,
            u.shield,
            u.energy,
            u.cargo_space_taken,
            int(u.build_progress * 100),  # discretize
            # Resume API order
            int(u.health / u.health_max * 255) if u.health_max > 0 else 0,
            int(u.shield / u.shield_max * 255) if u.shield_max > 0 else 0,
            int(u.energy / u.energy_max * 255) if u.energy_max > 0 else 0,
            u.display_type,  # Visible = 1, Snapshot = 2, Hidden = 3
            u.owner,  # 1-15, 16 = neutral
            screen_pos.x,
            screen_pos.y,
            u.facing,
            screen_radius,
            u.cloak,  # Cloaked = 1, CloakedDetected = 2, NotCloaked = 3
            u.is_selected,
            u.is_blip,
            u.is_powered,
            u.mineral_contents,
            u.vespene_contents,
            # Not populated for enemies or neutral
            u.cargo_space_max,
            u.assigned_harvesters,
            u.ideal_harvesters,
            u.weapon_cooldown,
            len(u.orders),
            u.tag if is_raw else 0
        ), dtype=np.int32)

    raw = obs.observation.raw_data

    if aif.use_feature_units:
        with sw("feature_units"):
            # Update the camera location so we can calculate world to screen pos
            self._update_camera(point.Point.build(raw.player.camera))
            feature_units = []
            for u in raw.units:
                if u.is_on_screen and u.display_type != sc_raw.Hidden:
                    feature_units.append(
                        full_unit_vec(u, self._world_to_feature_screen_px))
            out["feature_units"] = named_array.NamedNumpyArray(
                feature_units, [None, FeatureUnit], dtype=np.int32)

    if aif.use_raw_units:
        with sw("raw_units"):
            raw_units = [full_unit_vec(u, self._world_to_world_tl, is_raw=True)
                         for u in raw.units]
            out["raw_units"] = named_array.NamedNumpyArray(
                raw_units, [None, FeatureUnit], dtype=np.int32)

    if aif.use_unit_counts:
        with sw("unit_counts"):
            unit_counts = collections.defaultdict(int)
            for u in raw.units:
                if u.alliance == sc_raw.Self:
                    unit_counts[u.unit_type] += 1
            out["unit_counts"] = named_array.NamedNumpyArray(
                sorted(unit_counts.items()), [None, UnitCounts], dtype=np.int32)

    if aif.use_camera_position:
        camera_position = self._world_to_world_tl.fwd_pt(
            point.Point.build(raw.player.camera))
        out["camera_position"] = np.array((camera_position.x, camera_position.y),
                                          dtype=np.int32)

    out["available_actions"] = np.array(
        self.available_actions(obs.observation), dtype=np.int32)

    return out
def _simplify_non_context_field_binary_composition(expression):
    """Return a simplified BinaryComposition if either operand is a TrueLiteral.

    Args:
        expression: BinaryComposition without any ContextField operand(s)

    Returns:
        simplified expression if the given expression is a disjunction/conjunction
        and one of its operands is a TrueLiteral, and the original expression otherwise
    """
    if any((isinstance(expression.left, ContextField),
            isinstance(expression.right, ContextField))):
        raise AssertionError(u'Received a BinaryComposition {} with a ContextField '
                             u'operand. This should never happen.'.format(expression))

    if expression.operator == u'||':
        if expression.left == TrueLiteral or expression.right == TrueLiteral:
            return TrueLiteral
        else:
            return expression
    elif expression.operator == u'&&':
        if expression.left == TrueLiteral:
            return expression.right
        if expression.right == TrueLiteral:
            return expression.left
        else:
            return expression
    else:
        return expression
def __is_valid_type(self, typ, typlist):
    """
    Check if type is valid based on input type list.
    "string" is special because it can be used for stringlist

    :param typ: the type to check
    :param typlist: the list of types to check against
    :return: True on success, False otherwise
    """
    typ_is_str = typ == "string"
    str_list_in_typlist = "stringlist" in typlist
    return typ in typlist or (typ_is_str and str_list_in_typlist)
def dump(deposition, from_date, with_json=True, latest_only=False, **kwargs):
    """Dump the deposition object as dictionary."""
    # Serialize the __getstate__ and fall back to default serializer
    dep_json = json.dumps(deposition.__getstate__(), default=default_serializer)
    dep_dict = json.loads(dep_json)
    dep_dict['_p'] = {}
    dep_dict['_p']['id'] = deposition.id
    dep_dict['_p']['created'] = dt2utc_timestamp(deposition.created)
    dep_dict['_p']['modified'] = dt2utc_timestamp(deposition.modified)
    dep_dict['_p']['user_id'] = deposition.user_id
    dep_dict['_p']['state'] = deposition.state
    dep_dict['_p']['has_sip'] = deposition.has_sip()
    dep_dict['_p']['submitted'] = deposition.submitted
    return dep_dict
def is_valid_short_number_for_region(short_numobj, region_dialing_from):
    """Tests whether a short number matches a valid pattern in a region.

    Note that this doesn't verify the number is actually in use, which is
    impossible to tell by just looking at the number itself.

    Arguments:
    short_numobj -- the short number to check as a PhoneNumber object.
    region_dialing_from -- the region from which the number is dialed

    Return whether the short number matches a valid pattern
    """
    if not _region_dialing_from_matches_number(short_numobj, region_dialing_from):
        return False
    metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
    if metadata is None:  # pragma no cover
        return False
    short_number = national_significant_number(short_numobj)
    general_desc = metadata.general_desc
    if not _matches_possible_number_and_national_number(short_number, general_desc):
        return False
    short_number_desc = metadata.short_code
    if short_number_desc.national_number_pattern is None:  # pragma no cover
        return False
    return _matches_possible_number_and_national_number(short_number, short_number_desc)
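A short usage sketch, assuming this function is the one exposed by the python-phonenumbers ``shortnumberinfo`` module (an assumption based on the names used above):

import phonenumbers
from phonenumbers import shortnumberinfo

number = phonenumbers.parse("911", "US")
# Checks the pattern only; it does not verify the number is in service.
print(shortnumberinfo.is_valid_short_number_for_region(number, "US"))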
def create_project(self, name=None, project_id=None, path=None):
    """
    Create a project and keep a reference to it in the project manager.

    See documentation of Project for arguments
    """
    if project_id is not None and project_id in self._projects:
        return self._projects[project_id]
    project = Project(name=name, project_id=project_id, path=path)
    self._check_available_disk_space(project)
    self._projects[project.id] = project
    return project
def get_vcs_details_output_vcs_details_principal_switch_wwn(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_vcs_details = ET.Element("get_vcs_details")
    config = get_vcs_details
    output = ET.SubElement(get_vcs_details, "output")
    vcs_details = ET.SubElement(output, "vcs-details")
    principal_switch_wwn = ET.SubElement(vcs_details, "principal-switch-wwn")
    principal_switch_wwn.text = kwargs.pop('principal_switch_wwn')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def getPort(self):
    """
    Helper method for testing; returns the TCP port used for this
    registration, even if it was specified as 0 and thus allocated by the OS.
    """
    disp = self.pbmanager.dispatchers[self.portstr]
    return disp.port.getHost().port
def _stdin_raw_block(self):
    """Use a blocking stdin read"""
    # The big problem with the blocking read is that it doesn't
    # exit when it's supposed to in all contexts. An extra
    # key-press may be required to trigger the exit.
    try:
        data = sys.stdin.read(1)
        data = data.replace('\r', '\n')
        return data
    except WindowsError as we:
        if we.winerror == ERROR_NO_DATA:
            # This error occurs when the pipe is closed
            return None
        else:
            # Otherwise let the error propagate
            raise we
def bovy_text(*args, **kwargs):
    """
    NAME:

       bovy_text

    PURPOSE:

       thin wrapper around matplotlib's text and annotate

       use keywords:
          'bottom_left=True'
          'bottom_right=True'
          'top_left=True'
          'top_right=True'
          'title=True'
       to place the text in one of the corners or use it as the title

    INPUT:

       see matplotlib's text
       (http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.text)

    OUTPUT:

       prints text on the current figure

    HISTORY:

       2010-01-26 - Written - Bovy (NYU)
    """
    if kwargs.pop('title', False):
        pyplot.annotate(args[0], (0.5, 1.05), xycoords='axes fraction',
                        horizontalalignment='center',
                        verticalalignment='top', **kwargs)
    elif kwargs.pop('bottom_left', False):
        pyplot.annotate(args[0], (0.05, 0.05), xycoords='axes fraction', **kwargs)
    elif kwargs.pop('bottom_right', False):
        pyplot.annotate(args[0], (0.95, 0.05), xycoords='axes fraction',
                        horizontalalignment='right', **kwargs)
    elif kwargs.pop('top_right', False):
        pyplot.annotate(args[0], (0.95, 0.95), xycoords='axes fraction',
                        horizontalalignment='right',
                        verticalalignment='top', **kwargs)
    elif kwargs.pop('top_left', False):
        pyplot.annotate(args[0], (0.05, 0.95), xycoords='axes fraction',
                        verticalalignment='top', **kwargs)
    else:
        pyplot.text(*args, **kwargs)
def generate_messages(outf, msgs):
    """Generate Swift structs to represent all MAVLink messages"""
    print("Generating Messages")
    t.write(outf, """

// MARK: MAVLink messages

/**
Message protocol describes common for all MAVLink messages properties and methods requirements
*/
public protocol Message: MAVLinkEntity {

    /**
    Initialize Message from received data

    - Warning: Throws `ParserError` or `ParserEnumError` if any parsing error occurred
    */
    init(data: NSData) throws

    /// Array of tuples with fields name, offset, type and description information
    static var fieldsInfo: [(String, Int, String, String)] { get }

    /// All fields names and values of current Message
    var allFields: [(String, Any)] { get }
}

/**
Message protocol default implementations
*/
extension Message {
    public static var typeDebugDescription: String {
        // It seems Xcode 7 beta does not support default protocol implementations for type methods.
        // Calling this method without specific implementations inside messages will cause following error in Xcode 7.0 beta (7A120f):
        // "Command failed due to signal: Illegal instruction: 4"
        let fields = "\\n\\t".join(fieldsInfo.map { "\($0.0): \($0.2): \($0.3)" })
        return "Struct \(typeName): \(typeDescription)\\nFields:\\n\\t\(fields)"
    }

    public var description: String {
        let describeField: ((String, Any)) -> String = { (let name, var value) in
            value = value is String ? "\\"\(value)\\"" : value
            return "\(name): \(value)"
        }
        let fieldsDescription = ", ".join(allFields.map(describeField))
        return "\(self.dynamicType)(\(fieldsDescription))"
    }

    public var debugDescription: String {
        let describeFieldVerbose: ((String, Any)) -> String = { (let name, var value) in
            value = value is String ? "\\"\(value)\\"" : value
            let (_, _, _, description) = Self.fieldsInfo.filter { $0.0 == name }.first!
            return "\(name) = \(value) : \(description)"
        }
        let fieldsDescription = "\\n\\t".join(allFields.map(describeFieldVerbose))
        return "\(Self.typeName): \(Self.typeDescription)\\nFields:\\n\\t\(fieldsDescription)"
    }

    public var allFields: [(String, Any)] {
        var result: [(String, Any)] = []
        let mirror = reflect(self)
        for i in 0..<mirror.count {
            result.append((mirror[i].0, mirror[i].1.value))
        }
        return result
    }
}
""")

    for msg in msgs:
        t.write(outf, """

${formatted_description}public struct ${swift_name}: Message {
${{fields:${formatted_description}\tpublic let ${swift_name}: ${return_type}\n}}
    public init(data: NSData) throws {
${{ordered_fields:\t\tself.${swift_name} = ${initial_value}\n}}
    }
}

extension ${swift_name} {
    public static var typeName = "${name}"
    public static var typeDescription = "${message_description}"
    public static var typeDebugDescription: String {
        let fields = "\\n\\t".join(fieldsInfo.map { "\($0.0): \($0.2): \($0.3)" })
        return "Struct \(typeName): \(typeDescription)\\nFields:\\n\\t\(fields)"
    }
    public static var fieldsInfo = [${fields_info}]
}
""", msg)
def _fix_component_id(self, component):
    'Fix name of component and all of its children'
    theID = getattr(component, "id", None)
    if theID is not None:
        setattr(component, "id", self._fix_id(theID))
    try:
        for c in component.children:
            self._fix_component_id(c)
    except:  # pylint: disable=bare-except
        pass
def get_interval_timedelta(self):
    """Spits out the timedelta in days."""
    now_datetime = timezone.now()
    current_month_days = monthrange(now_datetime.year, now_datetime.month)[1]

    # Two weeks
    if self.interval == reminders_choices.INTERVAL_2_WEEKS:
        interval_timedelta = datetime.timedelta(days=14)
    # One month
    elif self.interval == reminders_choices.INTERVAL_ONE_MONTH:
        interval_timedelta = datetime.timedelta(days=current_month_days)
    # Three months
    elif self.interval == reminders_choices.INTERVAL_THREE_MONTHS:
        three_months = now_datetime + relativedelta(months=+3)
        interval_timedelta = three_months - now_datetime
    # Six months
    elif self.interval == reminders_choices.INTERVAL_SIX_MONTHS:
        six_months = now_datetime + relativedelta(months=+6)
        interval_timedelta = six_months - now_datetime
    # One year
    elif self.interval == reminders_choices.INTERVAL_ONE_YEAR:
        one_year = now_datetime + relativedelta(years=+1)
        interval_timedelta = one_year - now_datetime

    return interval_timedelta
def basic_qos(self, prefetch_size, prefetch_count, a_global):
    """Specify quality of service

    This method requests a specific quality of service.  The QoS
    can be specified for the current channel or for all channels
    on the connection.  The particular properties and semantics of
    a qos method always depend on the content class semantics.
    Though the qos method could in principle apply to both peers,
    it is currently meaningful only for the server.

    PARAMETERS:
        prefetch_size: long

            prefetch window in octets

            The client can request that messages be sent in
            advance so that when the client finishes processing a
            message, the following message is already held
            locally, rather than needing to be sent down the
            channel.  Prefetching gives a performance improvement.
            This field specifies the prefetch window size in
            octets.  The server will send a message in advance if
            it is equal to or smaller in size than the available
            prefetch size (and also falls into other prefetch
            limits).  May be set to zero, meaning "no specific
            limit", although other prefetch limits may still
            apply.  The prefetch-size is ignored if the no-ack
            option is set.

            RULE:

                The server MUST ignore this setting when the
                client is not processing any messages - i.e. the
                prefetch size does not limit the transfer of
                single messages to a client, only the sending in
                advance of more messages while the client still
                has one or more unacknowledged messages.

        prefetch_count: short

            prefetch window in messages

            Specifies a prefetch window in terms of whole
            messages.  This field may be used in combination with
            the prefetch-size field; a message will only be sent
            in advance if both prefetch windows (and those at the
            channel and connection level) allow it.  The prefetch-
            count is ignored if the no-ack option is set.

            RULE:

                The server MAY send less data in advance than
                allowed by the client's specified prefetch windows
                but it MUST NOT send more.

        a_global: boolean

            apply to entire connection

            By default the QoS settings apply to the current
            channel only.  If this field is set, they are applied
            to the entire connection.
    """
    args = AMQPWriter()
    args.write_long(prefetch_size)
    args.write_short(prefetch_count)
    args.write_bit(a_global)
    self._send_method((60, 10), args)
    return self.wait(allowed_methods=[
        (60, 11),  # Channel.basic_qos_ok
    ])
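A brief usage sketch, assuming ``channel`` is an already-open AMQP channel object exposing the method above:

# Ask the broker to keep at most 10 unacknowledged messages in flight for
# this channel, with no per-byte prefetch limit; channel-scoped, not
# connection-wide.
channel.basic_qos(prefetch_size=0, prefetch_count=10, a_global=False)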
def from_json(cls, data):
    """Create a Sky Condition from a dictionary.

    Args:
        data = {
            "solar_model": string,
            "month": int,
            "day_of_month": int,
            "clearness": float,
            "daylight_savings_indicator": string  // "Yes" or "No"}
    """
    # Check required and optional keys
    required_keys = ('solar_model', 'month', 'day_of_month', 'clearness')
    for key in required_keys:
        assert key in data, 'Required key "{}" is missing!'.format(key)
    if 'daylight_savings_indicator' not in data:
        data['daylight_savings_indicator'] = 'No'

    return cls(data['month'], data['day_of_month'], data['clearness'],
               data['daylight_savings_indicator'])
def romanize(text: str, engine: str = "royin") -> str:
    """
    Rendering Thai words in the Latin alphabet or "romanization",
    using the Royal Thai General System of Transcription (RTGS),
    which is the official system published by the Royal Institute of Thailand.
    ถอดเสียงภาษาไทยเป็นอักษรละติน

    :param str text: Thai text to be romanized
    :param str engine: 'royin' (default) or 'thai2rom'. 'royin' uses the
        Royal Thai General System of Transcription issued by Royal Institute
        of Thailand. 'thai2rom' is deep learning Thai romanization
        (require keras).
    :return: A string of Thai words rendered in the Latin alphabet.
    """
    if not text or not isinstance(text, str):
        return ""

    if engine == "thai2rom":
        from .thai2rom import romanize
    else:  # use default engine "royin"
        from .royin import romanize

    return romanize(text)
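A usage sketch; the import path is an assumption and has moved between PyThaiNLP releases (e.g. ``pythainlp.transliterate`` in newer versions):

from pythainlp.transliterate import romanize  # path may differ by version

print(romanize("แมว"))                   # e.g. "maeo" with the default 'royin' engine
print(romanize("แมว", engine="royin"))   # explicit engine selection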
def deploy_api_gateway(self, api_id, stage_name, stage_description="", description="",
                       cache_cluster_enabled=False, cache_cluster_size='0.5', variables=None,
                       cloudwatch_log_level='OFF', cloudwatch_data_trace=False,
                       cloudwatch_metrics_enabled=False, cache_cluster_ttl=300,
                       cache_cluster_encrypted=False):
    """
    Deploy the API Gateway!

    Return the deployed API URL.
    """
    print("Deploying API Gateway..")

    self.apigateway_client.create_deployment(
        restApiId=api_id,
        stageName=stage_name,
        stageDescription=stage_description,
        description=description,
        cacheClusterEnabled=cache_cluster_enabled,
        cacheClusterSize=cache_cluster_size,
        variables=variables or {}
    )

    if cloudwatch_log_level not in self.cloudwatch_log_levels:
        cloudwatch_log_level = 'OFF'

    self.apigateway_client.update_stage(
        restApiId=api_id,
        stageName=stage_name,
        patchOperations=[
            self.get_patch_op('logging/loglevel', cloudwatch_log_level),
            self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
            self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
            self.get_patch_op('caching/ttlInSeconds', str(cache_cluster_ttl)),
            self.get_patch_op('caching/dataEncrypted', cache_cluster_encrypted)
        ]
    )

    return "https://{}.execute-api.{}.amazonaws.com/{}".format(
        api_id, self.boto_session.region_name, stage_name)
def guess_format(filename, ext, formats, io_table):
    """
    Guess the format of filename, candidates are in formats.
    """
    ok = False
    for format in formats:
        output('guessing %s' % format)
        try:
            ok = io_table[format].guess(filename)
        except AttributeError:
            pass
        if ok:
            break
    else:
        raise NotImplementedError('cannot guess format of a *%s file!' % ext)

    return format
def get_field_lookups(field_type, nullable):
    """
    Return lookup table value and append isnull if this is a nullable field
    """
    return LOOKUP_TABLE.get(field_type) + ['isnull'] if nullable else LOOKUP_TABLE.get(field_type)
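A self-contained sketch of the lookup behaviour; the ``LOOKUP_TABLE`` below is a made-up stand-in for the real module-level table, chosen only to show how the nullable flag appends ``isnull``:

# Hypothetical lookup table for illustration only.
LOOKUP_TABLE = {
    'CharField': ['exact', 'icontains'],
    'IntegerField': ['exact', 'gt', 'lt'],
}

def get_field_lookups(field_type, nullable):
    return LOOKUP_TABLE.get(field_type) + ['isnull'] if nullable else LOOKUP_TABLE.get(field_type)

print(get_field_lookups('CharField', nullable=True))      # ['exact', 'icontains', 'isnull']
print(get_field_lookups('IntegerField', nullable=False))  # ['exact', 'gt', 'lt']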
def total(self):
    """Total cost of the order
    """
    total = 0
    for item in self.items.all():
        total += item.total
    return total
def element(self, inp=None):
    """Return an element from ``inp`` or from scratch."""
    if inp is not None:
        s = str(inp)[:self.length]
        s += ' ' * (self.length - len(s))
        return s
    else:
        return ' ' * self.length
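A runnable sketch of the truncate-or-pad behaviour; ``FixedLengthStrings`` is a hypothetical stand-in for the class this method belongs to (only a ``length`` attribute is assumed):

class FixedLengthStrings:
    def __init__(self, length):
        self.length = length

    def element(self, inp=None):
        if inp is not None:
            s = str(inp)[:self.length]
            s += ' ' * (self.length - len(s))
            return s
        else:
            return ' ' * self.length

space = FixedLengthStrings(5)
print(repr(space.element("hello world")))  # 'hello' -- truncated to length
print(repr(space.element("hi")))           # 'hi   ' -- right-padded with spaces
print(repr(space.element()))               # '     ' -- blank element from scratch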
def comment(self, text, comment_prefix='#'):
    """Creates a comment block

    Args:
        text (str): content of comment without #
        comment_prefix (str): character indicating start of comment

    Returns:
        self for chaining
    """
    comment = Comment(self._container)
    if not text.startswith(comment_prefix):
        text = "{} {}".format(comment_prefix, text)
    if not text.endswith('\n'):
        text = "{}{}".format(text, '\n')
    comment.add_line(text)
    self._container.structure.insert(self._idx, comment)
    self._idx += 1
    return self
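A hedged sketch of how such a chaining builder is typically driven; ``builder`` stands in for whatever object exposes this method and is not defined in this source:

# Each call inserts a Comment block and returns self, so calls chain.
(builder
 .comment("Database settings")                   # stored as "# Database settings\n"
 .comment("! legacy note", comment_prefix="!"))  # already prefixed, only newline added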
def _add_default_exposure_class(layer):
    """The layer doesn't have an exposure class, we need to add it.

    :param layer: The vector layer.
    :type layer: QgsVectorLayer
    """
    layer.startEditing()

    field = create_field_from_definition(exposure_class_field)
    layer.keywords['inasafe_fields'][exposure_class_field['key']] = (
        exposure_class_field['field_name'])
    layer.addAttribute(field)
    index = layer.fields().lookupField(exposure_class_field['field_name'])

    exposure = layer.keywords['exposure']
    request = QgsFeatureRequest()
    request.setFlags(QgsFeatureRequest.NoGeometry)
    for feature in layer.getFeatures(request):
        layer.changeAttributeValue(feature.id(), index, exposure)

    layer.commitChanges()
    return
def len_cdc_tube(FlowPlant, ConcDoseMax, ConcStock, DiamTubeAvail, HeadlossCDC,
                 LenCDCTubeMax, temp, en_chem, KMinor):
    """The length of tubing may be longer than the max specified if the stock
    concentration is too high to give a viable solution with the specified
    length of tubing."""
    index = i_cdc(FlowPlant, ConcDoseMax, ConcStock, DiamTubeAvail,
                  HeadlossCDC, LenCDCTubeMax, temp, en_chem, KMinor)
    len_cdc_tube = (_length_cdc_tube_array(FlowPlant, ConcDoseMax, ConcStock,
                                           DiamTubeAvail, HeadlossCDC, temp,
                                           en_chem, KMinor))[index].magnitude

    return len_cdc_tube
def current_version():
    """ Get the current version number from setup.py """
    # Monkeypatch setuptools.setup so we get the version number
    import setuptools
    version = [None]

    def monkey_setup(**settings):
        version[0] = settings['version']

    old_setup = setuptools.setup
    setuptools.setup = monkey_setup

    import setup  # setup.py
    reload(setup)
    setuptools.setup = old_setup
    return version[0]
def from_json(json_data):
    """
    Returns a pyalveo.OAuth2 given a json string built from the
    oauth.to_json() method.
    """
    # If we have a string, then decode it, otherwise assume it's already decoded
    if isinstance(json_data, str):
        data = json.loads(json_data)
    else:
        data = json_data

    oauth_dict = {
        'client_id': data.get('client_id', None),
        'client_secret': data.get('client_secret', None),
        'redirect_url': data.get('redirect_url', None),
    }

    oauth = OAuth2(api_url=data.get('api_url', None),
                   api_key=data.get('api_key', None),
                   oauth=oauth_dict,
                   verifySSL=data.get('verifySSL', True))

    oauth.token = data.get('token', None)
    oauth.state = data.get('state', None)
    oauth.auth_url = data.get('auth_url', None)

    return oauth
def headers(self):
    ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
        view on the response headers. '''
    self.__dict__['headers'] = hdict = HeaderDict()
    hdict.dict = self._headers
    return hdict
def extract_first_jpeg_in_pdf(fstream):
    """
    Reads a given PDF file and scans for the first valid embedded JPEG image.
    Returns either None (if none found) or a string of data for the image.
    There is no 100% guarantee for this code, yet it seems to work fine with
    most scanner-produced images around.
    More testing might be needed though.

    Note that in principle there is no serious problem extracting PNGs or other
    image types from PDFs, however at the moment I do not have enough test data
    to try this, and the one I have seems to be unsuitable for PDFMiner.

    :param fstream: Readable binary stream of the PDF
    :return: binary stream, containing the whole contents of the JPEG image or
        None if extraction failed.
    """
    parser = PDFParser(fstream)
    if PY2:
        document = PDFDocument(parser)
    else:
        document = PDFDocument()
        parser.set_document(document)
        document.set_parser(parser)
        document.initialize('')
    rsrcmgr = PDFResourceManager()
    device = PDFPageAggregator(rsrcmgr)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    pages = PDFPage.create_pages(document) if PY2 else document.get_pages()
    for page in pages:
        interpreter.process_page(page)
        layout = device.result
        for el in layout:
            if isinstance(el, LTFigure):
                for im in el:
                    if isinstance(im, LTImage):
                        # Found one!
                        st = None
                        try:
                            imdata = im.stream.get_data()
                        except:
                            # Failed to decode (seems to happen nearly always - there's probably a bug in PDFMiner), oh well...
                            imdata = im.stream.get_rawdata()
                        if imdata is not None and imdata.startswith(b'\xff\xd8\xff\xe0'):
                            return imdata
    return None
def load(self, filename=None):
    """
    load runtime configuration from given filename.
    If filename is None try to read from default file from default location.
    """
    if not filename:
        filename = self.default_config_file

    files = self._cfgs_to_read()
    # insert last, so it will override all values,
    # which have already been set in previous files.
    files.insert(-1, filename)

    try:
        config = self.__read_cfg(files)
    except ReadConfigException as e:
        print(Config._format_msg('config.load("{file}") failed with {error}'.format(
            file=filename, error=e)))
    else:
        self._conf_values = config

    # notice user?
    if self.show_config_notification and not self.cfg_dir:
        print(Config._format_msg("no configuration directory set or usable."
                                 " Falling back to defaults."))
def _connect(self, config):
    """Establish a connection with a MySQL database."""
    if 'connection_timeout' not in self._config:
        self._config['connection_timeout'] = 480
    try:
        self._cnx = connect(**config)
        self._cursor = self._cnx.cursor()
        self._printer('\tMySQL DB connection established with db', config['database'])
    except Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        raise err
def copy_file(host, file_path, remote_path='.', username=None, key_path=None, action='put'):
    """ Copy a file via SCP, proxied through the mesos master

    :param host: host or IP of the machine to execute the command on
    :type host: str
    :param file_path: the local path to the file to be copied
    :type file_path: str
    :param remote_path: the remote path to copy the file to
    :type remote_path: str
    :param username: SSH username
    :type username: str
    :param key_path: path to the SSH private key to use for SSH authentication
    :type key_path: str

    :return: True if successful, False otherwise
    :rtype: bool
    """
    if not username:
        username = shakedown.cli.ssh_user

    if not key_path:
        key_path = shakedown.cli.ssh_key_file

    key = validate_key(key_path)
    transport = get_transport(host, username, key)
    transport = start_transport(transport, username, key)

    if transport.is_authenticated():
        start = time.time()
        channel = scp.SCPClient(transport)

        if action == 'get':
            print("\n{}scp {}:{} {}\n".format(
                shakedown.cli.helpers.fchr('>>'), host, remote_path, file_path))
            channel.get(remote_path, file_path)
        else:
            print("\n{}scp {} {}:{}\n".format(
                shakedown.cli.helpers.fchr('>>'), file_path, host, remote_path))
            channel.put(file_path, remote_path)

        print("{} bytes copied in {} seconds.".format(
            str(os.path.getsize(file_path)), str(round(time.time() - start, 2))))

        try_close(channel)
        try_close(transport)

        return True
    else:
        print("error: unable to authenticate {}@{} with key {}".format(username, host, key_path))
        return False
def picard_index_ref(picard, ref_file):
    """Provide a Picard style dict index file for a reference genome.
    """
    dict_file = "%s.dict" % os.path.splitext(ref_file)[0]
    if not file_exists(dict_file):
        with file_transaction(picard._config, dict_file) as tx_dict_file:
            opts = [("REFERENCE", ref_file),
                    ("OUTPUT", tx_dict_file)]
            picard.run("CreateSequenceDictionary", opts)
    return dict_file
def add_file_dep(self, doc, value):
    """Raises OrderError if no package or file defined.
    """
    if self.has_package(doc) and self.has_file(doc):
        self.file(doc).add_depend(value)
    else:
        raise OrderError('File::Dependency')
def _get_kvc(kv_arg):
    '''Returns a tuple keys, values, count for kv_arg (which can be a dict or
    a tuple containing keys, values and optionally count).'''
    if isinstance(kv_arg, Mapping):
        return six.iterkeys(kv_arg), six.itervalues(kv_arg), len(kv_arg)
    assert 2 <= len(kv_arg) <= 3, \
        'Argument must be a mapping or a sequence (keys, values, [len])'
    return (
        kv_arg[0],
        kv_arg[1],
        kv_arg[2] if len(kv_arg) == 3 else len(kv_arg[0]))
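Illustrative call shapes for the helper above, assuming it is in scope; each form yields keys, values, and a count:

keys, values, count = _get_kvc({'a': 1, 'b': 2})          # mapping: count = len(dict)
keys, values, count = _get_kvc((['a', 'b'], [1, 2]))      # pair: count inferred from keys
keys, values, count = _get_kvc((['a', 'b'], [1, 2], 2))   # triple: explicit count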
def check_entry_points(dist, attr, value):
    """Verify that entry_points map is parseable"""
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError as e:
        raise DistutilsSetupError(e)
def get_repository_state(self, relaPath=None):
    """
    Get a list representation of repository state along with useful
    information. List state is ordered relatively to directories level

    :Parameters:
        #. relaPath (None, str): relative directory path from where to start.
           If None all repository representation is returned.

    :Returns:
        #. state (list): List representation of the repository. List items are
           all dictionaries. Every dictionary has a single key which is the
           file or the directory name and the value is a dictionary of
           information including:

           * 'type': the type of the tracked whether it's file, dir, or objectdir
           * 'exists': whether file or directory actually exists on disk
           * 'pyrepfileinfo': In case of a file or an objectdir whether
             .%s_pyrepfileinfo exists
           * 'pyrepdirinfo': In case of a directory whether .pyrepdirinfo exists
    """
    state = []

    def _walk_dir(relaPath, dirList):
        dirDict = {'type': 'dir',
                   'exists': os.path.isdir(os.path.join(self.__path, relaPath)),
                   'pyrepdirinfo': os.path.isfile(os.path.join(self.__path, relaPath, self.__dirInfo)),
                   }
        state.append({relaPath: dirDict})
        # loop files and dirobjects
        for fname in sorted([f for f in dirList if isinstance(f, basestring)]):
            relaFilePath = os.path.join(relaPath, fname)
            realFilePath = os.path.join(self.__path, relaFilePath)
            #if os.path.isdir(realFilePath) and df.startswith('.') and df.endswith(self.__objectDir[3:]):
            #    fileDict = {'type':'objectdir',
            #                'exists':True,
            #                'pyrepfileinfo':os.path.isfile(os.path.join(self.__path,relaPath,self.__fileInfo%fname)),
            #               }
            #else:
            #    fileDict = {'type':'file',
            #                'exists':os.path.isfile(realFilePath),
            #                'pyrepfileinfo':os.path.isfile(os.path.join(self.__path,relaPath,self.__fileInfo%fname)),
            #               }
            fileDict = {'type': 'file',
                        'exists': os.path.isfile(realFilePath),
                        'pyrepfileinfo': os.path.isfile(os.path.join(self.__path, relaPath, self.__fileInfo % fname)),
                        }
            state.append({relaFilePath: fileDict})
        # loop directories
        #for ddict in sorted([d for d in dirList if isinstance(d, dict) and len(d)], key=lambda k: list(k)[0]):
        for ddict in sorted([d for d in dirList if isinstance(d, dict)], key=lambda k: list(k)[0]):
            dirname = list(ddict)[0]
            _walk_dir(relaPath=os.path.join(relaPath, dirname), dirList=ddict[dirname])

    # call recursive _walk_dir
    if relaPath is None:
        _walk_dir(relaPath='', dirList=self.__repo['walk_repo'])
    else:
        assert isinstance(relaPath, basestring), "relaPath must be None or a str"
        relaPath = self.to_repo_relative_path(path=relaPath, split=False)
        spath = relaPath.split(os.sep)
        dirList = self.__repo['walk_repo']
        while len(spath):
            dirname = spath.pop(0)
            dList = [d for d in dirList if isinstance(d, dict)]
            if not len(dList):
                dirList = None
                break
            cDict = [d for d in dList if dirname in d]
            if not len(cDict):
                dirList = None
                break
            dirList = cDict[0][dirname]
        if dirList is not None:
            _walk_dir(relaPath=relaPath, dirList=dirList)

    # return state list
    return state
def toDict(self):
    """
    Get information about the HSP as a dictionary.

    @return: A C{dict} representation of the HSP.
    """
    result = _Base.toDict(self)
    result['score'] = self.score.score
    return result
def get_template(self, project, template_id):
    """GetTemplate.
    Gets a specific build definition template.
    :param str project: Project ID or project name
    :param str template_id: The ID of the requested template.
    :rtype: :class:`<BuildDefinitionTemplate> <azure.devops.v5_0.build.models.BuildDefinitionTemplate>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if template_id is not None:
        route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
    response = self._send(http_method='GET',
                          location_id='e884571e-7f92-4d6a-9274-3f5649900835',
                          version='5.0',
                          route_values=route_values)
    return self._deserialize('BuildDefinitionTemplate', response)
def add_source(zone, source, permanent=True):
    '''
    Bind a source to a zone

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' firewalld.add_source zone 192.168.1.0/24
    '''
    if source in get_sources(zone, permanent):
        log.info('Source is already bound to zone.')

    cmd = '--zone={0} --add-source={1}'.format(zone, source)

    if permanent:
        cmd += ' --permanent'

    return __firewall_cmd(cmd)
def delete(self, block_type, block_num):
    """
    Deletes a block

    :param block_type: Type of block
    :param block_num: Block number
    """
    logger.info("deleting block")
    blocktype = snap7.snap7types.block_types[block_type]
    result = self.library.Cli_Delete(self.pointer, blocktype, block_num)
    return result
def listen_error_messages_raylet(worker, task_error_queue, threads_stopped):
    """Listen to error messages in the background on the driver.

    This runs in a separate thread on the driver and pushes (error, time)
    tuples to the output queue.

    Args:
        worker: The worker class that this thread belongs to.
        task_error_queue (queue.Queue): A queue used to communicate with the
            thread that prints the errors found by this thread.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
    """
    worker.error_message_pubsub_client = worker.redis_client.pubsub(
        ignore_subscribe_messages=True)
    # Exports that are published after the call to
    # error_message_pubsub_client.subscribe and before the call to
    # error_message_pubsub_client.listen will still be processed in the loop.

    # Really we should just subscribe to the errors for this specific job.
    # However, currently all errors seem to be published on the same channel.
    error_pubsub_channel = str(
        ray.gcs_utils.TablePubsub.ERROR_INFO).encode("ascii")
    worker.error_message_pubsub_client.subscribe(error_pubsub_channel)
    # worker.error_message_pubsub_client.psubscribe("*")

    try:
        # Get the exports that occurred before the call to subscribe.
        error_messages = global_state.error_messages(worker.task_driver_id)
        for error_message in error_messages:
            logger.error(error_message)

        while True:
            # Exit if we received a signal that we should stop.
            if threads_stopped.is_set():
                return

            msg = worker.error_message_pubsub_client.get_message()
            if msg is None:
                threads_stopped.wait(timeout=0.01)
                continue
            gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
                msg["data"], 0)
            assert gcs_entry.EntriesLength() == 1
            error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData(
                gcs_entry.Entries(0), 0)
            driver_id = error_data.DriverId()
            if driver_id not in [
                    worker.task_driver_id.binary(),
                    DriverID.nil().binary()
            ]:
                continue

            error_message = ray.utils.decode(error_data.ErrorMessage())
            if (ray.utils.decode(
                    error_data.Type()) == ray_constants.TASK_PUSH_ERROR):
                # Delay it a bit to see if we can suppress it
                task_error_queue.put((error_message, time.time()))
            else:
                logger.error(error_message)
    finally:
        # Close the pubsub client to avoid leaking file descriptors.
        worker.error_message_pubsub_client.close()
def instance(cls):
    """
    Singleton to return only one instance of BaseManager.

    :returns: instance of BaseManager
    """
    if not hasattr(cls, "_instance") or cls._instance is None:
        cls._instance = cls()
    return cls._instance
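A runnable sketch of the pattern, with ``BaseManager`` reduced to a stub so the classmethod can be exercised in isolation:

class BaseManager:
    _instance = None

    @classmethod
    def instance(cls):
        if not hasattr(cls, "_instance") or cls._instance is None:
            cls._instance = cls()
        return cls._instance

a = BaseManager.instance()
b = BaseManager.instance()
assert a is b  # every call returns the same object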
def stats(self):
    """Return dictionary with all stats at this level."""
    load_count = self.last_level_load.MISS_count
    load_byte = self.last_level_load.MISS_byte
    if self.last_level_load.victims_to is not None:
        # If there is a victim cache between last_level and memory,
        # subtract all victim hits
        load_count -= self.last_level_load.victims_to.HIT_count
        load_byte -= self.last_level_load.victims_to.HIT_byte

    return {'name': self.name,
            'LOAD_count': load_count,
            'LOAD_byte': load_byte,
            'HIT_count': load_count,
            'HIT_byte': load_byte,
            'STORE_count': self.last_level_store.EVICT_count,
            'STORE_byte': self.last_level_store.EVICT_byte,
            'EVICT_count': 0,
            'EVICT_byte': 0,
            'MISS_count': 0,
            'MISS_byte': 0}
def inspect(self):
    """
    Inspect access attempt, used for captcha flow
    :return:
    """
    last_attempt = self.get_last_failed_access_attempt(
        ip_address=self.ip,
        captcha_enabled=True,
        captcha_passed=False,
        is_expired=False
    )

    if last_attempt is None and not self.request.user.is_authenticated():
        # create a new entry
        user_access = self._FailedAccessAttemptModel(
            ip_address=self.ip,
            username=self.username,
            captcha_enabled=True,
            captcha_passed=False,
            is_expired=False
        )
    elif last_attempt:
        user_access = last_attempt

    if self.request.method == 'POST':
        if not self.request.user.is_authenticated():
            user_access.user_agent = self.request.META.get(
                'HTTP_USER_AGENT', '<unknown user agent>')[:255]
            user_access.username = self.username
            user_access.failed_attempts += 1
            user_access.params_get = self.request.GET
            user_access.params_post = self.request.POST

            if user_access.failed_attempts >= self.max_failed_attempts:
                user_access.is_locked = True
            user_access.save()
        elif self.request.user.is_authenticated() and last_attempt:
            last_attempt.is_expired = True
            last_attempt.save()
def structure(self, obj, cl):
    # type: (Any, Type[T]) -> T
    """Convert unstructured Python data structures to structured data."""
    return self._structure_func.dispatch(cl)(obj, cl)
def _ProcessGrepSource(self, source):
    """Find files fulfilling regex conditions."""
    attributes = source.base_source.attributes
    paths = artifact_utils.InterpolateListKbAttributes(
        attributes["paths"], self.knowledge_base,
        self.ignore_interpolation_errors)

    regex = utils.RegexListDisjunction(attributes["content_regex_list"])
    condition = rdf_file_finder.FileFinderCondition.ContentsRegexMatch(
        regex=regex, mode="ALL_HITS")

    file_finder_action = rdf_file_finder.FileFinderAction.Stat()
    request = rdf_file_finder.FileFinderArgs(
        paths=paths,
        action=file_finder_action,
        conditions=[condition],
        follow_links=True)
    action = file_finder.FileFinderOSFromClient

    yield action, request
def bll_version(self):
    """Get the BLL version this session is connected to.

    Return:
        Version string if session started.
        None if session not started.
    """
    if not self.started():
        return None

    status, data = self._rest.get_request('objects', 'system1',
                                          ['version', 'name'])
    return data['version']
def get_serializer(name):
    '''
    Return the serialize function.
    '''
    try:
        log.debug('Using %s as serializer', name)
        return SERIALIZER_LOOKUP[name]
    except KeyError:
        msg = 'Serializer {} is not available'.format(name)
        log.error(msg, exc_info=True)
        raise InvalidSerializerException(msg)
def generate_changelog(from_version: str, to_version: str = None) -> dict:
    """
    Generates a changelog for the given version.

    :param from_version: The last version not in the changelog. The changelog
                         will be generated from the commit after this one.
    :param to_version: The last version in the changelog.

    :return: a dict with different changelog sections
    """
    debug('generate_changelog("{}", "{}")'.format(from_version, to_version))
    changes: dict = {'feature': [], 'fix': [], 'documentation': [], 'refactor': [], 'breaking': []}

    found_the_release = to_version is None
    rev = None
    if from_version:
        rev = 'v{0}'.format(from_version)

    for _hash, commit_message in get_commit_log(rev):
        if not found_the_release:
            if to_version and to_version not in commit_message:
                continue
            else:
                found_the_release = True

        if from_version is not None and from_version in commit_message:
            break

        try:
            message = current_commit_parser()(commit_message)
            if message[1] not in changes:
                continue

            changes[message[1]].append((_hash, message[3][0]))

            if message[3][1] and 'BREAKING CHANGE' in message[3][1]:
                parts = re_breaking.match(message[3][1])
                if parts:
                    changes['breaking'].append(parts.group(1))

            if message[3][2] and 'BREAKING CHANGE' in message[3][2]:
                parts = re_breaking.match(message[3][2])
                if parts:
                    changes['breaking'].append(parts.group(1))

        except UnknownCommitMessageStyleError as err:
            debug('Ignoring', err)
            pass

    return changes
def reverse_taskname(name: str) -> str: """ Reverses components in the name of task. Reversed convention is used for filenames since it groups log/scratch files of related tasks together 0.somejob.somerun -> somerun.somejob.0 0.somejob -> somejob.0 somename -> somename Args: name: name of task """ components = name.split('.') assert len(components) <= 3 return '.'.join(components[::-1])
Reverses components in the name of task. Reversed convention is used for filenames since it groups log/scratch files of related tasks together 0.somejob.somerun -> somerun.somejob.0 0.somejob -> somejob.0 somename -> somename Args: name: name of task
def _validate_columns(self): """Validate the options in the styles""" geom_cols = {'the_geom', 'the_geom_webmercator', } col_overlap = set(self.style_cols) & geom_cols if col_overlap: raise ValueError('Style columns cannot be geometry ' 'columns. `{col}` was chosen.'.format( col=','.join(col_overlap)))
Validate the options in the styles
def nested_assign(self, key_list, value): """ Set the value of nested LIVVDicts given a list """ if len(key_list) == 1: self[key_list[0]] = value elif len(key_list) > 1: if key_list[0] not in self: self[key_list[0]] = LIVVDict() self[key_list[0]].nested_assign(key_list[1:], value)
Set the value of nested LIVVDicts given a list
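A small, self-contained sketch of the same recursive idea, using a plain dict subclass in place of LIVVDict purely to show what nested_assign produces.

class NestedDict(dict):
    """Hypothetical stand-in for LIVVDict: a dict that assigns along a key path."""

    def nested_assign(self, key_list, value):
        if len(key_list) == 1:
            self[key_list[0]] = value
        elif len(key_list) > 1:
            if key_list[0] not in self:
                self[key_list[0]] = NestedDict()
            self[key_list[0]].nested_assign(key_list[1:], value)

d = NestedDict()
d.nested_assign(['run', 'metrics', 'rmse'], 0.42)
print(d)  # {'run': {'metrics': {'rmse': 0.42}}}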
def _prepare_text(self, text):
    """Returns `text` with each constituent token wrapped in HTML
    markup for later match annotation.

    :param text: text to be marked up
    :type text: `str`
    :rtype: `str`

    """
    # Remove characters that should be escaped for XML input (but
    # which cause problems when escaped, since they become
    # tokens).
    text = re.sub(r'[<>&]', '', text)
    pattern = r'({})'.format(self._tokenizer.pattern)
    return re.sub(pattern, self._base_token_markup, text)
Returns `text` with each constituent token wrapped in HTML markup for later match annotation. :param text: text to be marked up :type text: `str` :rtype: `str`
def get_diff(self, rev1, rev2, path='', ignore_whitespace=False, context=3): """ Returns (git like) *diff*, as plain text. Shows changes introduced by ``rev2`` since ``rev1``. :param rev1: Entry point from which diff is shown. Can be ``self.EMPTY_CHANGESET`` - in this case, patch showing all the changes since empty state of the repository until ``rev2`` :param rev2: Until which revision changes should be shown. :param ignore_whitespace: If set to ``True``, would not show whitespace changes. Defaults to ``False``. :param context: How many lines before/after changed lines should be shown. Defaults to ``3``. """ if hasattr(rev1, 'raw_id'): rev1 = getattr(rev1, 'raw_id') if hasattr(rev2, 'raw_id'): rev2 = getattr(rev2, 'raw_id') # Check if given revisions are present at repository (may raise # ChangesetDoesNotExistError) if rev1 != self.EMPTY_CHANGESET: self.get_changeset(rev1) self.get_changeset(rev2) if path: file_filter = match(self.path, '', [path]) else: file_filter = None return ''.join(patch.diff(self._repo, rev1, rev2, match=file_filter, opts=diffopts(git=True, ignorews=ignore_whitespace, context=context)))
Returns (git like) *diff*, as plain text. Shows changes introduced by ``rev2`` since ``rev1``. :param rev1: Entry point from which diff is shown. Can be ``self.EMPTY_CHANGESET`` - in this case, patch showing all the changes since empty state of the repository until ``rev2`` :param rev2: Until which revision changes should be shown. :param ignore_whitespace: If set to ``True``, would not show whitespace changes. Defaults to ``False``. :param context: How many lines before/after changed lines should be shown. Defaults to ``3``.
async def deserialize(data: dict): """ Create the object from a previously serialized object. :param data: The output of the "serialize" call Example: data = await connection1.serialize() connection2 = await Connection.deserialize(data) :return: A re-instantiated object """ return await Connection._deserialize("vcx_connection_deserialize", json.dumps(data), data.get('source_id'))
Create the object from a previously serialized object. :param data: The output of the "serialize" call Example: data = await connection1.serialize() connection2 = await Connection.deserialize(data) :return: A re-instantiated object
def issuer_cert_urls(self): """ :return: A list of unicode strings that are URLs that should contain either an individual DER-encoded X.509 certificate, or a DER-encoded CMS message containing multiple certificates """ if self._issuer_cert_urls is None: self._issuer_cert_urls = [] if self.authority_information_access_value: for entry in self.authority_information_access_value: if entry['access_method'].native == 'ca_issuers': location = entry['access_location'] if location.name != 'uniform_resource_identifier': continue url = location.native if url.lower()[0:7] == 'http://': self._issuer_cert_urls.append(url) return self._issuer_cert_urls
:return: A list of unicode strings that are URLs that should contain either an individual DER-encoded X.509 certificate, or a DER-encoded CMS message containing multiple certificates
def create(self, **kwargs): """ Creates a new object with the given kwargs, saving it to the database and returning the created object. """ obj = self.model(**kwargs) meta = obj.get_meta() meta.connection = get_es_connection(self.es_url, self.es_kwargs) meta.index=self.index meta.type=self.type obj.save(force=True) return obj
Creates a new object with the given kwargs, saving it to the database and returning the created object.
def validate(self, value): """validate""" # obj can be None or a DataFrame if value is None: return True else: try: with value.open() as hdulist: self.validate_hdulist(hdulist) except Exception: _type, exc, tb = sys.exc_info() six.reraise(ValidationError, exc, tb)
validate
def custom(command, user=None, conf_file=None, bin_env=None): ''' Run any custom supervisord command user user to run supervisorctl as conf_file path to supervisord config file bin_env path to supervisorctl bin or path to virtualenv with supervisor installed CLI Example: .. code-block:: bash salt '*' supervisord.custom "mstop '*gunicorn*'" ''' ret = __salt__['cmd.run_all']( _ctl_cmd(command, None, conf_file, bin_env), runas=user, python_shell=False, ) return _get_return(ret)
Run any custom supervisord command user user to run supervisorctl as conf_file path to supervisord config file bin_env path to supervisorctl bin or path to virtualenv with supervisor installed CLI Example: .. code-block:: bash salt '*' supervisord.custom "mstop '*gunicorn*'"
def send_message(message, params, site, logger): """Send a message to the Sentry server""" client.capture( 'Message', message=message, params=tuple(params), data={ 'site': site, 'logger': logger, }, )
Send a message to the Sentry server
async def _start(self): """ Start coroutine. runs on_start coroutine and then runs the _step coroutine where the body of the behaviour is called. """ self.agent._alive.wait() try: await self.on_start() except Exception as e: logger.error("Exception running on_start in behaviour {}: {}".format(self, e)) self.kill(exit_code=e) await self._step() self._is_done.clear()
Start coroutine. runs on_start coroutine and then runs the _step coroutine where the body of the behaviour is called.
def p_out(p): """ statement : OUT expr COMMA expr """ p[0] = make_sentence('OUT', make_typecast(TYPE.uinteger, p[2], p.lineno(3)), make_typecast(TYPE.ubyte, p[4], p.lineno(4)))
statement : OUT expr COMMA expr
def update_rho(self, k, r, s): """Automatic rho adjustment.""" if self.opt['AutoRho', 'Enabled']: tau = self.rho_tau mu = self.rho_mu xi = self.rho_xi if k != 0 and np.mod(k + 1, self.opt['AutoRho', 'Period']) == 0: if self.opt['AutoRho', 'AutoScaling']: if s == 0.0 or r == 0.0: rhomlt = tau else: rhomlt = np.sqrt(r / (s * xi) if r > s * xi else (s * xi) / r) if rhomlt > tau: rhomlt = tau else: rhomlt = tau rsf = 1.0 if r > xi * mu * s: rsf = rhomlt elif s > (mu / xi) * r: rsf = 1.0 / rhomlt self.rho *= self.dtype.type(rsf) self.U /= rsf if rsf != 1.0: self.rhochange()
Automatic rho adjustment.
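A stripped-down sketch of the residual-balancing rule the method implements: grow rho when the primal residual dominates, shrink it when the dual residual dominates. The constants below are illustrative defaults, not the class's configured options, and the auto-scaling variant is omitted.

import numpy as np

def balance_rho(rho, r, s, mu=10.0, tau=2.0):
    # r: primal residual norm, s: dual residual norm.
    if r > mu * s:
        return rho * tau      # primal residual too large -> increase rho
    elif s > mu * r:
        return rho / tau      # dual residual too large -> decrease rho
    return rho                # residuals balanced -> leave rho unchanged

rho = 1.0
for r, s in [(5.0, 0.1), (0.2, 0.19), (0.01, 3.0)]:
    rho = balance_rho(rho, r, s)
    print(rho)

When rho changes, the scaled dual variable has to be rescaled by the same factor, which is what the self.U /= rsf line in the method above does.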
def database_renderer(self, name=None, site=None, role=None): """ Renders local settings for a specific database. """ name = name or self.env.default_db_name site = site or self.genv.SITE role = role or self.genv.ROLE key = (name, site, role) self.vprint('checking key:', key) if key not in self._database_renderers: self.vprint('No cached db renderer, generating...') if self.verbose: print('db.name:', name) print('db.databases:', self.env.databases) print('db.databases[%s]:' % name, self.env.databases.get(name)) d = type(self.genv)(self.lenv) d.update(self.get_database_defaults()) d.update(self.env.databases.get(name, {})) d['db_name'] = name if self.verbose: print('db.d:') pprint(d, indent=4) print('db.connection_handler:', d.connection_handler) if d.connection_handler == CONNECTION_HANDLER_DJANGO: self.vprint('Using django handler...') dj = self.get_satchel('dj') if self.verbose: print('Loading Django DB settings for site {} and role {}.'.format(site, role), file=sys.stderr) dj.set_db(name=name, site=site, role=role) _d = dj.local_renderer.collect_genv(include_local=True, include_global=False) # Copy "dj_db_*" into "db_*". for k, v in _d.items(): if k.startswith('dj_db_'): _d[k[3:]] = v del _d[k] if self.verbose: print('Loaded:') pprint(_d) d.update(_d) elif d.connection_handler and d.connection_handler.startswith(CONNECTION_HANDLER_CUSTOM+':'): _callable_str = d.connection_handler[len(CONNECTION_HANDLER_CUSTOM+':'):] self.vprint('Using custom handler %s...' % _callable_str) _d = str_to_callable(_callable_str)(role=self.genv.ROLE) if self.verbose: print('Loaded:') pprint(_d) d.update(_d) r = LocalRenderer(self, lenv=d) # Optionally set any root logins needed for administrative commands. self.set_root_login(r) self._database_renderers[key] = r else: self.vprint('Cached db renderer found.') return self._database_renderers[key]
Renders local settings for a specific database.
def create_event(component, tz=UTC): """ Create an event from its iCal representation. :param component: iCal component :param tz: timezone for start and end times :return: event """ event = Event() event.start = normalize(component.get('dtstart').dt, tz=tz) if component.get('dtend'): event.end = normalize(component.get('dtend').dt, tz=tz) elif component.get('duration'): # compute implicit end as start + duration event.end = event.start + component.get('duration').dt else: # compute implicit end as start + 0 event.end = event.start try: event.summary = str(component.get('summary')) except UnicodeEncodeError as e: event.summary = str(component.get('summary').encode('utf-8')) try: event.description = str(component.get('description')) except UnicodeEncodeError as e: event.description = str(component.get('description').encode('utf-8')) event.all_day = type(component.get('dtstart').dt) is date if component.get('rrule'): event.recurring = True try: event.location = str(component.get('location')) except UnicodeEncodeError as e: event.location = str(component.get('location').encode('utf-8')) if component.get('attendee'): event.attendee = component.get('attendee') if type(event.attendee) is list: temp = [] for a in event.attendee: temp.append(a.encode('utf-8').decode('ascii')) event.attendee = temp else: event.attendee = event.attendee.encode('utf-8').decode('ascii') if component.get('uid'): event.uid = component.get('uid').encode('utf-8').decode('ascii') if component.get('organizer'): event.organizer = component.get('organizer').encode('utf-8').decode('ascii') return event
Create an event from its iCal representation. :param component: iCal component :param tz: timezone for start and end times :return: event
def N_to_Ntriangles(N): """ @N: WD style gridsize Converts WD style grid size @N to the number of triangles on the surface. Returns: number of triangles. """ theta = np.array([np.pi/2*(k-0.5)/N for k in range(1, N+1)]) phi = np.array([[np.pi*(l-0.5)/Mk for l in range(1, Mk+1)] for Mk in np.array(1 + 1.3*N*np.sin(theta), dtype=int)]) Ntri = 2*np.array([len(p) for p in phi]).sum() return Ntri
@N: WD style gridsize Converts WD style grid size @N to the number of triangles on the surface. Returns: number of triangles.
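An illustrative, self-contained check of the count, mirroring the row construction above: each latitude row contributes int(1 + 1.3*N*sin(theta)) grid points, and the triangle count is twice the total. The N values below are arbitrary.

import numpy as np

def n_to_ntriangles(N):
    theta = np.array([np.pi / 2 * (k - 0.5) / N for k in range(1, N + 1)])
    rows = np.array(1 + 1.3 * N * np.sin(theta), dtype=int)  # points per latitude row
    return 2 * rows.sum()

for N in (10, 30, 60):
    print(N, n_to_ntriangles(N))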
def list_storages(self): '''Returns a list of existing stores. The returned names can then be used to call get_storage(). ''' # Filter out any storages used by xbmcswift2 so caller doesn't corrupt # them. return [name for name in os.listdir(self.storage_path) if not name.startswith('.')]
Returns a list of existing stores. The returned names can then be used to call get_storage().
def write_matrix_to_csv(self, headers, data): """Saves .csv file with data :param headers: column names :param data: Data """ with open(self.path, "w") as out_file: # write to file data_writer = csv.writer(out_file, delimiter=",") data_writer.writerow(headers) # write headers data_writer.writerows(data)
Saves .csv file with data :param headers: column names :param data: Data
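A standalone usage sketch of the same pattern (one header row, then the data rows); the file name and data are made up, and newline='' is added as the usual csv-module precaution.

import csv

headers = ["sample", "value"]
data = [["a", 1], ["b", 2]]

with open("results.csv", "w", newline="") as out_file:
    writer = csv.writer(out_file, delimiter=",")
    writer.writerow(headers)   # write headers
    writer.writerows(data)     # write data rows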
def pybel_to_json(molecule, name=None): """Converts a pybel molecule to json. Args: molecule: An instance of `pybel.Molecule` name: (Optional) If specified, will save a "name" property Returns: A Python dictionary containing atom and bond data """ # Save atom element type and 3D location. atoms = [{'element': table.GetSymbol(atom.atomicnum), 'location': list(atom.coords)} for atom in molecule.atoms] # Recover auxiliary data, if exists for json_atom, pybel_atom in zip(atoms, molecule.atoms): if pybel_atom.partialcharge != 0: json_atom['charge'] = pybel_atom.partialcharge if pybel_atom.OBAtom.HasData('_atom_site_label'): obatom = pybel_atom.OBAtom json_atom['label'] = obatom.GetData('_atom_site_label').GetValue() if pybel_atom.OBAtom.HasData('color'): obatom = pybel_atom.OBAtom json_atom['color'] = obatom.GetData('color').GetValue() # Save number of bonds and indices of endpoint atoms bonds = [{'atoms': [b.GetBeginAtom().GetIndex(), b.GetEndAtom().GetIndex()], 'order': b.GetBondOrder()} for b in ob.OBMolBondIter(molecule.OBMol)] output = {'atoms': atoms, 'bonds': bonds, 'units': {}} # If there's unit cell data, save it to the json output if hasattr(molecule, 'unitcell'): uc = molecule.unitcell output['unitcell'] = [[v.GetX(), v.GetY(), v.GetZ()] for v in uc.GetCellVectors()] density = (sum(atom.atomicmass for atom in molecule.atoms) / (uc.GetCellVolume() * 0.6022)) output['density'] = density output['units']['density'] = 'kg / L' # Save the formula to json. Use Hill notation, just to have a standard. element_count = Counter(table.GetSymbol(a.atomicnum) for a in molecule) hill_count = [] for element in ['C', 'H']: if element in element_count: hill_count += [(element, element_count[element])] del element_count[element] hill_count += sorted(element_count.items()) # If it's a crystal, then reduce the Hill formula div = (reduce(gcd, (c[1] for c in hill_count)) if hasattr(molecule, 'unitcell') else 1) output['formula'] = ''.join(n if c / div == 1 else '%s%d' % (n, c / div) for n, c in hill_count) output['molecular_weight'] = molecule.molwt / div output['units']['molecular_weight'] = 'g / mol' # If the input has been given a name, add that if name: output['name'] = name return output
Converts a pybel molecule to json. Args: molecule: An instance of `pybel.Molecule` name: (Optional) If specified, will save a "name" property Returns: A Python dictionary containing atom and bond data
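The Hill-notation step is self-contained enough to illustrate on its own: carbon first, then hydrogen, then the remaining elements alphabetically. This sketch takes a plain list of element symbols rather than a pybel molecule, and skips the unit-cell reduction.

from collections import Counter

def hill_formula(elements):
    # elements: iterable of element symbols, e.g. one entry per atom
    counts = Counter(elements)
    ordered = []
    for element in ['C', 'H']:
        if element in counts:
            ordered.append((element, counts.pop(element)))
    ordered += sorted(counts.items())
    return ''.join(sym if n == 1 else '%s%d' % (sym, n) for sym, n in ordered)

print(hill_formula(['C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'O']))  # C2H6O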
def start(self): ''' doesn't work''' thread = threading.Thread(target=reactor.run) thread.start()
doesn't work
def solve(self): ''' Solves a one period consumption saving problem with risky income and shocks to medical need. Parameters ---------- None Returns ------- solution : ConsumerSolution The solution to the one period problem, including a consumption function, medical spending function ( both defined over market re- sources, permanent income, and medical shock), a marginal value func- tion (defined over market resources and permanent income), and human wealth as a function of permanent income. ''' aLvl,trash = self.prepareToCalcEndOfPrdvP() EndOfPrdvP = self.calcEndOfPrdvP() if self.vFuncBool: self.makeEndOfPrdvFunc(EndOfPrdvP) if self.CubicBool: interpolator = self.makeCubicxFunc else: interpolator = self.makeLinearxFunc solution = self.makeBasicSolution(EndOfPrdvP,aLvl,interpolator) solution = self.addMPCandHumanWealth(solution) if self.CubicBool: solution = self.addvPPfunc(solution) return solution
Solves a one period consumption saving problem with risky income and shocks to medical need. Parameters ---------- None Returns ------- solution : ConsumerSolution The solution to the one period problem, including a consumption function, medical spending function ( both defined over market re- sources, permanent income, and medical shock), a marginal value func- tion (defined over market resources and permanent income), and human wealth as a function of permanent income.
def removeStages(self, personID): """remove(string) Removes all stages of the person. If no new phases are appended, the person will be removed from the simulation in the next simulationStep(). """ # remove all stages after the current and then abort the current stage while self.getRemainingStages(personID) > 1: self.removeStage(personID, 1) self.removeStage(personID, 0)
remove(string) Removes all stages of the person. If no new phases are appended, the person will be removed from the simulation in the next simulationStep().
def ReportConfiguration(self, f): """Report the boundary configuration details :param f: File (or standard out/err) :return: None """ if BoundaryCheck.chrom != -1: print >> f, BuildReportLine("CHROM", BoundaryCheck.chrom) if len(self.start_bounds) > 0: bounds = ",".join(["%s-%s" % (a[0], a[1]) for a in zip(self.start_bounds, self.end_bounds)]) print >> f, BuildReportLine("SNP BOUNDARY", bounds) if len(self.ignored_rs) > 0: print >> f, BuildReportLine("IGNORED RS", ",".join(self.ignored_rs)) if len(self.target_rs) > 0: print >> f, BuildReportLine("TARGET RS", ",".join(self.target_rs))
Report the boundary configuration details :param f: File (or standard out/err) :return: None
def getCSD (lfps,sampr,minf=0.05,maxf=300,norm=True,vaknin=False,spacing=1.0): """ get current source density approximation using set of local field potentials with equidistant spacing first performs a lowpass filter lfps is a list or numpy array of LFPs arranged spatially by column spacing is in microns """ datband = getbandpass(lfps,sampr,minf,maxf) if datband.shape[0] > datband.shape[1]: # take CSD along smaller dimension ax = 1 else: ax = 0 # can change default to run Vaknin on bandpass filtered LFPs before calculating CSD, that # way would have same number of channels in CSD and LFP (but not critical, and would take more RAM); if vaknin: datband = Vaknin(datband) if norm: removemean(datband,ax=ax) # NB: when drawing CSD make sure that negative values (depolarizing intracellular current) drawn in red, # and positive values (hyperpolarizing intracellular current) drawn in blue CSD = -numpy.diff(datband,n=2,axis=ax) / spacing**2 # now each column (or row) is an electrode -- CSD along electrodes return CSD
get current source density approximation using set of local field potentials with equidistant spacing first performs a lowpass filter lfps is a list or numpy array of LFPs arranged spatially by column spacing is in microns
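The core of the estimate is just a negated second spatial difference across channels; a minimal sketch on synthetic data, skipping the bandpass and Vaknin steps handled by helpers above. The channel count, sample count and spacing are arbitrary.

import numpy as np

# Synthetic LFP: 16 channels (rows) x 1000 samples (columns), spacing in microns.
rng = np.random.default_rng(0)
lfp = rng.standard_normal((16, 1000))
spacing = 100.0

lfp = lfp - lfp.mean(axis=1, keepdims=True)        # remove per-channel mean
csd = -np.diff(lfp, n=2, axis=0) / spacing**2      # second difference along the depth axis
print(csd.shape)                                   # (14, 1000): two channels lost at the edges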
def get_ngroups(self, field=None): ''' Returns ngroups count if it was specified in the query, otherwise ValueError. If grouping on more than one field, provide the field argument to specify which count you are looking for. ''' field = field if field else self._determine_group_field(field) if 'ngroups' in self.data['grouped'][field]: return self.data['grouped'][field]['ngroups'] raise ValueError("ngroups not found in response. specify group.ngroups in the query.")
Returns ngroups count if it was specified in the query, otherwise ValueError. If grouping on more than one field, provide the field argument to specify which count you are looking for.
def _GetComplexConjugateArray(Array): """ Calculates the complex conjugate of each element in an array and returns the resulting array. Parameters ---------- Array : ndarray Input array Returns ------- ConjArray : ndarray The complex conjugate of the input array. """ ConjArray = _np.array([num.conj() for num in Array]) return ConjArray
Calculates the complex conjugate of each element in an array and returns the resulting array. Parameters ---------- Array : ndarray Input array Returns ------- ConjArray : ndarray The complex conjugate of the input array.
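A quick illustration; note that numpy's vectorised np.conj gives the same result without the Python-level loop.

import numpy as np

arr = np.array([1 + 2j, 3 - 4j, 5j])
conj_loop = np.array([num.conj() for num in arr])  # element-wise, as in the function above
conj_vec = np.conj(arr)                            # vectorised equivalent
print(np.array_equal(conj_loop, conj_vec))         # True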
def generate_random_schema(valid): """ Generate a random plain schema, and a sample generation function. :param valid: Generate valid samples? :type valid: bool :returns: schema, sample-generator :rtype: *, generator """ schema_type = choice(['literal', 'type']) if schema_type == 'literal': type, gen = generate_random_type(valid) value = next(gen) return value, (value if valid else None for i in itertools.count()) elif schema_type == 'type': return generate_random_type(valid) else: raise AssertionError('!')
Generate a random plain schema, and a sample generation function. :param valid: Generate valid samples? :type valid: bool :returns: schema, sample-generator :rtype: *, generator
def dt_day(x): """Extracts the day from a datetime sample. :returns: an expression containing the day extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.day Expression = dt_day(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 12 1 11 2 12 """ import pandas as pd return pd.Series(x).dt.day.values
Extracts the day from a datetime sample. :returns: an expression containing the day extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.day Expression = dt_day(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 12 1 11 2 12
def save_file(self, path=None, filters='*.dat', force_extension=None, force_overwrite=False, header_only=False, delimiter='use current', binary=None): """ This will save all the header info and columns to an ascii file with the specified path. Parameters ---------- path=None Path for saving the data. If None, this will bring up a save file dialog. filters='*.dat' File filter for the file dialog (for path=None) force_extension=None If set to a string, e.g., 'txt', it will enforce that the chosen filename will have this extension. force_overwrite=False Normally, if the file * exists, this will copy that to *.backup. If the backup already exists, this function will abort. Setting this to True will force overwriting the backup file. header_only=False Only output the header? delimiter='use current' This will set the delimiter of the output file 'use current' means use self.delimiter binary=None Set to one of the allowed numpy dtypes, e.g., float32, float64, complex64, int32, etc. Setting binary=True defaults to float64. Note if the header contains the key SPINMOB_BINARY and binary=None, it will save as binary using the header specification. """ # Make sure there isn't a problem later with no-column databoxes if len(self)==0: header_only=True # This is the final path. We now write to a temporary file in the user # directory, then move it to the destination. This (hopefully) fixes # problems with sync programs. if path in [None]: path = _s.dialogs.save(filters, default_directory=self.directory) if path in ["", None]: print("Aborted.") return False # Force the extension (we do this here redundantly, because the user may have also # specified a path explicitly) if not force_extension == None: # In case the user put "*.txt" instead of just "txt" force_extension = force_extension.replace('*','').replace('.','') # If the file doesn't end with the extension, add it if not _os.path.splitext(path)[-1][1:] == force_extension: path = path + '.' + force_extension # Save the path for future reference self.path=path # if the path exists, make a backup if _os.path.exists(path) and not force_overwrite: _os.rename(path,path+".backup") # get the delimiter if delimiter == "use current": if self.delimiter is None: delimiter = "\t" else: delimiter = self.delimiter # figure out the temporary path temporary_path = _os.path.join(_s.settings.path_home, "temp-"+str(int(1e3*_time.time()))+'-'+str(int(1e9*_n.random.rand(1)))) # open the temporary file f = open(temporary_path, 'w') # Override any existing binary if we're supposed to if binary in [False, 'text', 'Text', 'ASCII', 'csv', 'CSV']: self.pop_header('SPINMOB_BINARY', True) binary = None # If the binary flag is any kind of binary format, add the key if not binary in [None, False, 'text', 'Text', 'ASCII', 'csv', 'CSV']: self.h(SPINMOB_BINARY=binary) # Now use the header element to determine the binary mode if 'SPINMOB_BINARY' in self.hkeys: # Get the binary mode (we'll use this later) binary = self.pop_header('SPINMOB_BINARY') # If it's "True", default to float32 if binary in ['True', True, 1]: binary = 'float32' # Write the special first key. f.write('SPINMOB_BINARY' + delimiter + binary + '\n') # Write the usual header for k in self.hkeys: f.write(k + delimiter + repr(self.headers[k]) + "\n") f.write('\n') # if we're not just supposed to write the header if not header_only: # Normal ascii saving mode. 
if binary in [None, 'None', False, 'False']: # write the ckeys elements = [] for ckey in self.ckeys: elements.append(str(ckey).replace(delimiter,'_')) f.write(delimiter.join(elements) + "\n") # now loop over the data for n in range(0, len(self[0])): # loop over each column elements = [] for m in range(0, len(self.ckeys)): # write the data if there is any, otherwise, placeholder if n < len(self[m]): elements.append(str(self[m][n])) else: elements.append('_') f.write(delimiter.join(elements) + "\n") # Binary mode else: # Announce that we're done with the header. It's binary time f.write('SPINMOB_BINARY\n') # Loop over the ckeys for n in range(len(self.ckeys)): # Get the binary data string data_string = _n.array(self[n]).astype(binary).tostring() # Write the column # ckey + delimiter + count + \n + datastring + \n f.write(str(self.ckeys[n]).replace(delimiter,'_') + delimiter + str(len(self[n])) + '\n') f.close() f = open(temporary_path, 'ab') f.write(data_string) f.close() f = open(temporary_path, 'a') f.write('\n') f.close() # now move it _shutil.move(temporary_path, path) return self
This will save all the header info and columns to an ascii file with the specified path. Parameters ---------- path=None Path for saving the data. If None, this will bring up a save file dialog. filters='*.dat' File filter for the file dialog (for path=None) force_extension=None If set to a string, e.g., 'txt', it will enforce that the chosen filename will have this extension. force_overwrite=False Normally, if the file * exists, this will copy that to *.backup. If the backup already exists, this function will abort. Setting this to True will force overwriting the backup file. header_only=False Only output the header? delimiter='use current' This will set the delimiter of the output file 'use current' means use self.delimiter binary=None Set to one of the allowed numpy dtypes, e.g., float32, float64, complex64, int32, etc. Setting binary=True defaults to float64. Note if the header contains the key SPINMOB_BINARY and binary=None, it will save as binary using the header specification.
def parse_expr(e): """Parse a single constraint expression. Legal expressions are defined by the regular expression `relation_re`. :param e: Expression :type e: str :return: Tuple of field, operator, and value :rtype: tuple """ m = relation_re.match(e) if m is None: raise ValueError("error parsing expression '{}'".format(e)) field, op, val = m.groups() # Try different types try: # Integer val_int = int(val) val = val_int except ValueError: try: # Float val_float = float(val) val = val_float except ValueError: try: # Boolean val = {'true': True, 'false': False}[val.lower()] except KeyError: # String if re.match(r'".*"|\'.*\'', val): # strip quotes from strings val = val[1:-1] return field, op, val
Parse a single constraint expression. Legal expressions are defined by the regular expression `relation_re`. :param e: Expression :type e: str :return: Tuple of field, operator, and value :rtype: tuple
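A self-contained sketch of the same parse-then-coerce flow. The regex below is an assumption standing in for relation_re, which is defined elsewhere in the module; the coercion order (int, float, bool, quoted string) follows the function above.

import re

# Hypothetical stand-in for relation_re: <field> <op> <value>
relation_re = re.compile(r'^\s*(\w+)\s*(<=|>=|!=|=|<|>)\s*(.+?)\s*$')

def parse_expr(e):
    m = relation_re.match(e)
    if m is None:
        raise ValueError("error parsing expression '{}'".format(e))
    field, op, val = m.groups()
    try:
        val = int(val)
    except ValueError:
        try:
            val = float(val)
        except ValueError:
            if val.lower() in ('true', 'false'):
                val = val.lower() == 'true'
            elif re.match(r'".*"|\'.*\'', val):
                val = val[1:-1]   # strip quotes from strings
    return field, op, val

for expr in ['count > 10', 'score <= 0.5', 'active = true', 'name = "foo"']:
    print(parse_expr(expr))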
def pool(args): """ %prog pool fastafiles > pool.fasta Pool a bunch of FASTA files, and add prefix to each record based on filenames. File names are simplified to longest unique prefix to avoid collisions after getting shortened. """ from jcvi.formats.base import longest_unique_prefix p = OptionParser(pool.__doc__) p.add_option("--sep", default=".", help="Separator between prefix and name") p.add_option("--sequential", default=False, action="store_true", help="Add sequential IDs") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) for fastafile in args: pf = longest_unique_prefix(fastafile, args) print(fastafile, "=>", pf, file=sys.stderr) prefixopt = "--prefix={0}{1}".format(pf, opts.sep) format_args = [fastafile, "stdout", prefixopt] if opts.sequential: format_args += ["--sequential=replace"] format(format_args)
%prog pool fastafiles > pool.fasta Pool a bunch of FASTA files, and add prefix to each record based on filenames. File names are simplified to longest unique prefix to avoid collisions after getting shortened.
def _set_client_pw(self, v, load=False): """ Setter method for client_pw, mapped from YANG variable /cluster/client_pw (container) If this variable is read-only (config: false) in the source YANG file, then _set_client_pw is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_client_pw() directly. YANG Description: Client Pseudo Wire """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=client_pw.client_pw, is_container='container', presence=True, yang_name="client-pw", rest_name="client-pw", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Client Pseudo Wire', u'cli-add-mode': None, u'sort-priority': u'RUNNCFG_MCT_PW_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """client_pw must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=client_pw.client_pw, is_container='container', presence=True, yang_name="client-pw", rest_name="client-pw", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Client Pseudo Wire', u'cli-add-mode': None, u'sort-priority': u'RUNNCFG_MCT_PW_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='container', is_config=True)""", }) self.__client_pw = t if hasattr(self, '_set'): self._set()
Setter method for client_pw, mapped from YANG variable /cluster/client_pw (container) If this variable is read-only (config: false) in the source YANG file, then _set_client_pw is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_client_pw() directly. YANG Description: Client Pseudo Wire
def load_requires_from_file(filepath): """Read a package list from a given file path. Args: filepath: file path of the package list. Returns: a list of package names. """ with open(filepath) as fp: return [pkg_name.strip() for pkg_name in fp.readlines()]
Read a package list from a given file path. Args: filepath: file path of the package list. Returns: a list of package names.
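A usage sketch with a throwaway requirements file; the file name and its contents are illustrative.

def load_requires_from_file(filepath):
    with open(filepath) as fp:
        return [pkg_name.strip() for pkg_name in fp.readlines()]

# Write a small requirements file and read it back.
with open('requirements-example.txt', 'w') as fp:
    fp.write('requests>=2.0\nnumpy\n')

print(load_requires_from_file('requirements-example.txt'))  # ['requests>=2.0', 'numpy']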
def insert_completions(self, e): # (M-*) u"""Insert all completions of the text before point that would have been generated by possible-completions.""" completions = self._get_completions() b = self.begidx e = self.endidx for comp in completions: rep = [ c for c in comp ] rep.append(' ') self.l_buffer[b:e] = rep b += len(rep) e = b self.line_cursor = b self.finalize()
u"""Insert all completions of the text before point that would have been generated by possible-completions.
def addStream(self, stream, interpolator="closest", t1=None, t2=None, dt=None, limit=None, i1=None, i2=None, transform=None, colname=None):
    """Adds the given stream to the query construction. Additionally, you can choose the interpolator
    to use for this stream, as well as a special name for the column in the returned dataset.
    If no column name is given, the full stream path will be used.

    addStream also supports Merge queries. You can insert a merge query instead of a stream, but
    be sure to name the column::

        d = Dataset(cdb, t1=time.time()-1000,t2=time.time(),dt=10.)
        d.addStream("temperature","average")
        d.addStream("steps","sum")

        m = Merge(cdb)
        m.addStream("mystream")
        m.addStream("mystream2")
        d.addStream(m,colname="mycolumn")

        result = d.run()
    """
    streamquery = query_maker(t1, t2, limit, i1, i2, transform)
    param_stream(self.cdb, streamquery, stream)
    streamquery["interpolator"] = interpolator

    if colname is None:
        # What do we call this column?
        if isinstance(stream, six.string_types):
            colname = stream
        elif isinstance(stream, Stream):
            colname = stream.path
        else:
            raise Exception(
                "Could not find a name for the column! use the 'colname' parameter.")

    if colname in self.query["dataset"] or colname == "x":
        raise Exception(
            "The column name either exists, or is labeled 'x'. Use the colname parameter to change the column name.")

    self.query["dataset"][colname] = streamquery
Adds the given stream to the query construction. Additionally, you can choose the interpolator to use for this stream, as well as a special name for the column in the returned dataset. If no column name is given, the full stream path will be used. addStream also supports Merge queries. You can insert a merge query instead of a stream, but be sure to name the column:: d = Dataset(cdb, t1=time.time()-1000,t2=time.time(),dt=10.) d.addStream("temperature","average") d.addStream("steps","sum") m = Merge(cdb) m.addStream("mystream") m.addStream("mystream2") d.addStream(m,colname="mycolumn") result = d.run()
def _check_file(parameters): """Return list of errors.""" (filename, args) = parameters if filename == '-': contents = sys.stdin.read() else: with contextlib.closing( docutils.io.FileInput(source_path=filename) ) as input_file: contents = input_file.read() args = load_configuration_from_file( os.path.dirname(os.path.realpath(filename)), args) ignore_directives_and_roles(args.ignore_directives, args.ignore_roles) for substitution in args.ignore_substitutions: contents = contents.replace('|{}|'.format(substitution), 'None') ignore = { 'languages': args.ignore_language, 'messages': args.ignore_messages, } all_errors = [] for error in check(contents, filename=filename, report_level=args.report, ignore=ignore, debug=args.debug): all_errors.append(error) return (filename, all_errors)
Return list of errors.
def parse_complex_fault_node(node, mfd_spacing=0.1, mesh_spacing=4.0): """ Parses a "complexFaultSource" node and returns an instance of the :class: openquake.hmtk.sources.complex_fault.mtkComplexFaultSource """ assert "complexFaultSource" in node.tag sf_taglist = get_taglist(node) # Get metadata sf_id, name, trt = (node.attrib["id"], node.attrib["name"], node.attrib["tectonicRegion"]) # Process geometry edges = node_to_complex_fault_geometry( node.nodes[sf_taglist.index("complexFaultGeometry")]) # Process scaling relation msr = node_to_scalerel(node.nodes[sf_taglist.index("magScaleRel")]) # Process aspect ratio aspect = float_(node.nodes[sf_taglist.index("ruptAspectRatio")].text) # Process MFD mfd = node_to_mfd(node, sf_taglist) # Process rake rake = float_(node.nodes[sf_taglist.index("rake")].text) complex_fault = mtkComplexFaultSource(sf_id, name, trt, geometry=None, mag_scale_rel=msr, rupt_aspect_ratio=aspect, mfd=mfd, rake=rake) complex_fault.create_geometry(edges, mesh_spacing) return complex_fault
Parses a "complexFaultSource" node and returns an instance of the :class: openquake.hmtk.sources.complex_fault.mtkComplexFaultSource
def from_dict(self, d): """ Create a Stage from a dictionary. The change is in inplace. :argument: python dictionary :return: None """ if 'uid' in d: if d['uid']: self._uid = d['uid'] if 'name' in d: if d['name']: self._name = d['name'] if 'state' in d: if isinstance(d['state'], str) or isinstance(d['state'], unicode): if d['state'] in states._stage_state_values.keys(): self._state = d['state'] else: raise ValueError(obj=self._uid, attribute='state', expected_value=states._stage_state_values.keys(), actual_value=value) else: raise TypeError(entity='state', expected_type=str, actual_type=type(d['state'])) else: self._state = states.INITIAL if 'state_history' in d: if isinstance(d['state_history'], list): self._state_history = d['state_history'] else: raise TypeError(entity='state_history', expected_type=list, actual_type=type(d['state_history'])) if 'parent_pipeline' in d: if isinstance(d['parent_pipeline'], dict): self._p_pipeline = d['parent_pipeline'] else: raise TypeError(entity='parent_pipeline', expected_type=dict, actual_type=type(d['parent_pipeline']))
Create a Stage from a dictionary. The change is in inplace. :argument: python dictionary :return: None
def gaussian_kernel(data_shape, sigma, norm='max'):
    r"""Gaussian kernel

    This method produces a Gaussian kernel of a specified size and dispersion

    Parameters
    ----------
    data_shape : tuple
        Desired shape of the kernel
    sigma : float
        Standard deviation of the kernel
    norm : str {'max', 'sum', 'none'}, optional
        Normalisation of the kernel (options are 'max', 'sum' or 'none')

    Returns
    -------
    np.ndarray kernel

    Examples
    --------
    >>> from modopt.math.stats import gaussian_kernel
    >>> gaussian_kernel((3, 3), 1)
    array([[ 0.36787944,  0.60653066,  0.36787944],
           [ 0.60653066,  1.        ,  0.60653066],
           [ 0.36787944,  0.60653066,  0.36787944]])

    >>> gaussian_kernel((3, 3), 1, norm='sum')
    array([[ 0.07511361,  0.1238414 ,  0.07511361],
           [ 0.1238414 ,  0.20417996,  0.1238414 ],
           [ 0.07511361,  0.1238414 ,  0.07511361]])

    """

    if not import_astropy:  # pragma: no cover
        raise ImportError('Astropy package not found.')

    if norm not in ('max', 'sum', 'none'):
        raise ValueError('Invalid norm, options are "max", "sum" or "none".')

    kernel = np.array(Gaussian2DKernel(sigma, x_size=data_shape[1],
                                       y_size=data_shape[0]))

    if norm == 'max':
        return kernel / np.max(kernel)

    elif norm == 'sum':
        return kernel / np.sum(kernel)

    elif norm == 'none':
        return kernel
r"""Gaussian kernel This method produces a Gaussian kerenal of a specified size and dispersion Parameters ---------- data_shape : tuple Desiered shape of the kernel sigma : float Standard deviation of the kernel norm : str {'max', 'sum', 'none'}, optional Normalisation of the kerenl (options are 'max', 'sum' or 'none') Returns ------- np.ndarray kernel Examples -------- >>> from modopt.math.stats import gaussian_kernel >>> gaussian_kernel((3, 3), 1) array([[ 0.36787944, 0.60653066, 0.36787944], [ 0.60653066, 1. , 0.60653066], [ 0.36787944, 0.60653066, 0.36787944]]) >>> gaussian_kernel((3, 3), 1, norm='sum') array([[ 0.07511361, 0.1238414 , 0.07511361], [ 0.1238414 , 0.20417996, 0.1238414 ], [ 0.07511361, 0.1238414 , 0.07511361]])
def _get_switchports(profile): """Return list of (switch_ip, interface) tuples from local_link_info""" switchports = [] if profile.get('local_link_information'): for link in profile['local_link_information']: if 'switch_info' in link and 'port_id' in link: switch = link['switch_info'] interface = link['port_id'] switchports.append((switch, interface)) else: LOG.warning("Incomplete link information: %s", link) return switchports
Return list of (switch_ip, interface) tuples from local_link_info
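An illustrative input/output pair; the profile shape mirrors what the helper above expects, with made-up switch addresses and port names.

# Hypothetical binding profile, matching the structure the helper above reads.
profile = {
    'local_link_information': [
        {'switch_info': '10.0.0.1', 'port_id': 'Ethernet1/1'},
        {'switch_info': '10.0.0.2', 'port_id': 'Ethernet1/2'},
        {'port_id': 'Ethernet1/3'},  # incomplete entry: skipped with a warning
    ]
}

switchports = [
    (link['switch_info'], link['port_id'])
    for link in profile.get('local_link_information', [])
    if 'switch_info' in link and 'port_id' in link
]
print(switchports)  # [('10.0.0.1', 'Ethernet1/1'), ('10.0.0.2', 'Ethernet1/2')]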
def write_results(filename,config,srcfile,samples): """ Package everything nicely """ results = createResults(config,srcfile,samples=samples) results.write(filename)
Package everything nicely
def ustep(self):
    """The parent class ustep method is overridden to allow also
    performing the ustep for the additional variables introduced in
    the modification to the baseline algorithm.
    """

    super(ConvCnstrMODMaskDcpl_Consensus, self).ustep()
    self.U1 += self.AX1 - self.Y1 - self.S
The parent class ustep method is overridden to allow also performing the ustep for the additional variables introduced in the modification to the baseline algorithm.
def enqueue_task(self, source, *args): """ Enqueue a task execution. It will run in the background as soon as the coordinator clears it to do so. """ yield from self.cell.coord.enqueue(self) route = Route(source, self.cell, self.spec, self.emit) self.cell.loop.create_task(self.coord_wrap(route, *args)) # To guarantee that the event loop works fluidly, we manually yield # once. The coordinator enqueue coroutine is not required to yield so # this ensures we avoid various forms of event starvation regardless. yield
Enqueue a task execution. It will run in the background as soon as the coordinator clears it to do so.
def _call(self, x, out=None): """Return the constant vector or assign it to ``out``.""" if out is None: return self.range.element(copy(self.constant)) else: out.assign(self.constant)
Return the constant vector or assign it to ``out``.
def trigger_audited(self, id, rev, **kwargs): """ Triggers a build of a specific Build Configuration in a specific revision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.trigger_audited(id, rev, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build Configuration id (required) :param int rev: Revision of a Build Configuration (required) :param str callback_url: Optional Callback URL :param bool temporary_build: Is it a temporary build or a standard build? :param bool force_rebuild: DEPRECATED: Use RebuildMode. :param bool build_dependencies: Should we build also dependencies of this BuildConfiguration? :param bool keep_pod_on_failure: Should we keep the build container running, if the build fails? :param bool timestamp_alignment: Should we add a timestamp during the alignment? Valid only for temporary builds. :param str rebuild_mode: Rebuild Modes: FORCE: always rebuild the configuration; EXPLICIT_DEPENDENCY_CHECK: check if any of user defined dependencies has been update; IMPLICIT_DEPENDENCY_CHECK: check if any captured dependency has been updated; :return: BuildRecordSingleton If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.trigger_audited_with_http_info(id, rev, **kwargs) else: (data) = self.trigger_audited_with_http_info(id, rev, **kwargs) return data
Triggers a build of a specific Build Configuration in a specific revision This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.trigger_audited(id, rev, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build Configuration id (required) :param int rev: Revision of a Build Configuration (required) :param str callback_url: Optional Callback URL :param bool temporary_build: Is it a temporary build or a standard build? :param bool force_rebuild: DEPRECATED: Use RebuildMode. :param bool build_dependencies: Should we build also dependencies of this BuildConfiguration? :param bool keep_pod_on_failure: Should we keep the build container running, if the build fails? :param bool timestamp_alignment: Should we add a timestamp during the alignment? Valid only for temporary builds. :param str rebuild_mode: Rebuild Modes: FORCE: always rebuild the configuration; EXPLICIT_DEPENDENCY_CHECK: check if any of user defined dependencies has been update; IMPLICIT_DEPENDENCY_CHECK: check if any captured dependency has been updated; :return: BuildRecordSingleton If the method is called asynchronously, returns the request thread.
def print_head(self, parent_plate_value, plate_values, interval, n=10, print_func=logging.info): """ Print the first n values from the streams in the given time interval. The parent plate value is the value of the parent plate, and then the plate values are the values for the plate that are to be printed. e.g. print_head(None, ("house", "1")) :param parent_plate_value: The (fixed) parent plate value :param plate_values: The plate values over which to loop :param interval: The time interval :param n: The maximum number of elements to print :param print_func: The function used for printing (e.g. logging.info() or print()) :return: None """ if isinstance(plate_values, Plate): self.print_head(parent_plate_value, plate_values.values, interval, n, print_func) return if len(plate_values) == 1 and len(plate_values[0]) == 2 and isinstance(plate_values[0][0], str): self.print_head(parent_plate_value, (plate_values,), interval, n, print_func) return found = False for plate_value in plate_values: combined_plate_value = Plate.combine_values(parent_plate_value, plate_value) if combined_plate_value not in self._streams: # This can happen if we have created a compound plate and only certain plate values are valid continue found = True print_func("Plate value: {}".format(combined_plate_value)) data = False for k, v in self._streams[combined_plate_value].window(interval).head(n): data = True print_func("{}, {}".format(k, v)) if not data: print_func("No data") print_func("") if not found: print_func("No streams found for the given plate values")
Print the first n values from the streams in the given time interval. The parent plate value is the value of the parent plate, and then the plate values are the values for the plate that are to be printed. e.g. print_head(None, ("house", "1")) :param parent_plate_value: The (fixed) parent plate value :param plate_values: The plate values over which to loop :param interval: The time interval :param n: The maximum number of elements to print :param print_func: The function used for printing (e.g. logging.info() or print()) :return: None