[Dataset preview header] 389k rows. Columns: "Unnamed: 0" (int64 row index), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k).
2,800
def acquire_connection(settings, tag=None, logger_name=None, auto_commit=False):
    try:
        connection_properties = settings.get(tag, settings[None])
    except KeyError:
        raise RdbmsConnection.DefaultConnectionPropertiesSettingException()

    # Property keys restored from the docstring below.
    return RdbmsConnection(
        connection_properties['rdbms_hostname'],
        connection_properties['rdbms_port'],
        connection_properties['rdbms_database_name'],
        connection_properties['rdbms_account_username'],
        connection_properties['rdbms_account_password'],
        logger_name=logger_name,
        auto_commit=auto_commit)
Return a connection to a Relational DataBase Management System (RDBMS) the most appropriate for the service requesting this connection.

@param settings: a dictionary of connection properties::

    { None: { 'rdbms_hostname': "...", 'rdbms_port': ..., 'rdbms_database_name': "...", 'rdbms_account_username': '...', 'rdbms_account_password': '...' },
      'tag': { 'rdbms_hostname': "...", 'rdbms_port': ..., 'rdbms_database_name': "...", 'rdbms_account_username': '...', 'rdbms_account_password': '...' },
      ... }

    The key ``None`` is the default tag.

@param tag: a tag that specifies which particular connection properties are to be used.
@param logger_name: name of the logger for debug information.
@param auto_commit: indicate whether the transaction needs to be committed at the end of the session.
@return: a ``RdbmsConnection`` instance supporting the Python clause ``with ...:``.
@raise DefaultConnectionPropertiesSettingException: if the specified tag is not defined in the dictionary of connection properties, and no default connection properties (tag ``None``) are defined either.
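A minimal usage sketch for the function above; the settings shape follows the docstring, while the host and credential values are placeholders:

settings = {
    None: {  # default tag
        'rdbms_hostname': 'localhost',
        'rdbms_port': 5432,
        'rdbms_database_name': 'mydb',
        'rdbms_account_username': 'app',
        'rdbms_account_password': 'secret',
    },
}

# The returned RdbmsConnection supports the ``with`` clause.
with acquire_connection(settings, tag=None, auto_commit=True) as connection:
    pass  # issue queries here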
2,801
def plot_xtf(fignum, XTF, Fs, e, b):
    plt.figure(num=fignum)
    # Axis, unit, and title strings were lost in extraction; the ones below
    # are reconstructed from the docstring (chi vs. temperature at fixed field).
    plt.xlabel('Temperature (K)')
    plt.ylabel('Susceptibility')
    k = 0
    Flab = []
    for freq in XTF:
        T, X = [], []
        for xt in freq:
            X.append(xt[0])
            T.append(xt[1])
        plt.plot(T, X)
        plt.text(T[-1], X[-1], str(int(Fs[k])) + ' Hz')
        k += 1
    plt.title(e + ': B = ' + '%8.1e' % (b) + ' T')
Plot a series of chi measurements as a function of temperature, holding the field constant and varying the frequency.
2,802
def distance_to_edge(labels):
    colors = color_labels(labels)
    max_color = np.max(colors)
    result = np.zeros(labels.shape)
    if max_color == 0:
        return result
    for i in range(1, max_color + 1):
        mask = (colors == i)
        result[mask] = scind.distance_transform_edt(mask)[mask]
    return result
Compute the distance of a pixel to the edge of its object. labels - a labels matrix. Returns a matrix of distances.
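The heavy lifting is scipy's Euclidean distance transform (the color_labels pass in the original separates touching objects first); a self-contained sketch of the same idea on a single-object mask:

import numpy as np
import scipy.ndimage as scind

labels = np.zeros((5, 5), dtype=int)
labels[1:4, 1:4] = 1  # one 3x3 object

mask = labels == 1
result = np.zeros(labels.shape)
# distance_transform_edt measures the distance to the nearest zero
# (background) pixel, i.e. to the object's edge, for pixels in the mask.
result[mask] = scind.distance_transform_edt(mask)[mask]
print(result)  # border pixels of the object get 1.0, its center 2.0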
2,803
def _parse_mirteFile(path, logger=None):
    l = logging.getLogger() if logger is None else logger
    cache_path = os.path.join(os.path.dirname(path),
                              CACHE_FILENAME_TEMPLATE % os.path.basename(path))
    if (os.path.exists(cache_path) and
            os.path.getmtime(cache_path) >= os.path.getmtime(path)):
        with open(cache_path) as f:
            return msgpack.unpack(f)
    with open(path) as f:
        ret = yaml.load(f)
    try:
        # File mode and warning text reconstructed; originals lost in extraction.
        with open(cache_path, 'w') as f:
            msgpack.pack(ret, f)
    except IOError as e:
        if e.errno == errno.EACCES:
            l.warn('not allowed to write cache file for %s', path)
        else:
            raise
    return ret
Open and parse the mirteFile at <path>.
2,804
def transform_obs(self, obs):
    empty = np.array([], dtype=np.int32).reshape((0, 7))
    out = named_array.NamedDict({
        "single_select": empty,
        "multi_select": empty,
        "build_queue": empty,
        "cargo": empty,
        "cargo_slots_available": np.array([0], dtype=np.int32),
    })

    def or_zeros(layer, size):
        if layer is not None:
            return layer.astype(np.int32, copy=False)
        else:
            return np.zeros((size.y, size.x), dtype=np.int32)

    aif = self._agent_interface_format
    if aif.feature_dimensions:
        out["feature_screen"] = named_array.NamedNumpyArray(
            np.stack(or_zeros(f.unpack(obs.observation),
                              aif.feature_dimensions.screen)
                     for f in SCREEN_FEATURES),
            names=[ScreenFeatures, None, None])
        out["feature_minimap"] = named_array.NamedNumpyArray(
            np.stack(or_zeros(f.unpack(obs.observation),
                              aif.feature_dimensions.minimap)
                     for f in MINIMAP_FEATURES),
            names=[MinimapFeatures, None, None])

    if aif.rgb_dimensions:
        out["rgb_screen"] = Feature.unpack_rgb_image(
            obs.observation.render_data.map).astype(np.int32)
        out["rgb_minimap"] = Feature.unpack_rgb_image(
            obs.observation.render_data.minimap).astype(np.int32)

    out["last_actions"] = np.array(
        [self.reverse_action(a).function for a in obs.actions],
        dtype=np.int32)

    out["action_result"] = np.array([o.result for o in obs.action_errors],
                                    dtype=np.int32)

    out["alerts"] = np.array(obs.observation.alerts, dtype=np.int32)

    out["game_loop"] = np.array([obs.observation.game_loop], dtype=np.int32)

    score_details = obs.observation.score.score_details
    out["score_cumulative"] = named_array.NamedNumpyArray([
        obs.observation.score.score,
        score_details.idle_production_time,
        score_details.idle_worker_time,
        score_details.total_value_units,
        score_details.total_value_structures,
        score_details.killed_value_units,
        score_details.killed_value_structures,
        score_details.collected_minerals,
        score_details.collected_vespene,
        score_details.collection_rate_minerals,
        score_details.collection_rate_vespene,
        score_details.spent_minerals,
        score_details.spent_vespene,
    ], names=ScoreCumulative, dtype=np.int32)

    def get_score_details(key, details, categories):
        row = getattr(details, key.name)
        return [getattr(row, category.name) for category in categories]

    out["score_by_category"] = named_array.NamedNumpyArray([
        get_score_details(key, score_details, ScoreCategories)
        for key in ScoreByCategory
    ], names=[ScoreByCategory, ScoreCategories], dtype=np.int32)

    out["score_by_vital"] = named_array.NamedNumpyArray([
        get_score_details(key, score_details, ScoreVitals)
        for key in ScoreByVital
    ], names=[ScoreByVital, ScoreVitals], dtype=np.int32)

    player = obs.observation.player_common
    out["player"] = named_array.NamedNumpyArray([
        player.player_id,
        player.minerals,
        player.vespene,
        player.food_used,
        player.food_cap,
        player.food_army,
        player.food_workers,
        player.idle_worker_count,
        player.army_count,
        player.warp_gate_count,
        player.larva_count,
    ], names=Player, dtype=np.int32)

    def unit_vec(u):
        return np.array((
            u.unit_type,
            u.player_relative,
            u.health,
            u.shields,
            u.energy,
            u.transport_slots_taken,
            int(u.build_progress * 100),
        ), dtype=np.int32)

    ui = obs.observation.ui_data

    with sw("ui"):
        groups = np.zeros((10, 2), dtype=np.int32)
        for g in ui.groups:
            groups[g.control_group_index, :] = (g.leader_unit_type, g.count)
        out["control_groups"] = groups

        if ui.single:
            out["single_select"] = named_array.NamedNumpyArray(
                [unit_vec(ui.single.unit)], [None, UnitLayer])

        if ui.multi and ui.multi.units:
            out["multi_select"] = named_array.NamedNumpyArray(
                [unit_vec(u) for u in ui.multi.units], [None, UnitLayer])

        if ui.cargo and ui.cargo.passengers:
            out["single_select"] = named_array.NamedNumpyArray(
                [unit_vec(ui.single.unit)], [None, UnitLayer])
            out["cargo"] = named_array.NamedNumpyArray(
                [unit_vec(u) for u in ui.cargo.passengers], [None, UnitLayer])
            out["cargo_slots_available"] = np.array([ui.cargo.slots_available],
                                                    dtype=np.int32)

        if ui.production and ui.production.build_queue:
            out["single_select"] = named_array.NamedNumpyArray(
                [unit_vec(ui.production.unit)], [None, UnitLayer])
            out["build_queue"] = named_array.NamedNumpyArray(
                [unit_vec(u) for u in ui.production.build_queue],
                [None, UnitLayer])

    def full_unit_vec(u, pos_transform, is_raw=False):
        screen_pos = pos_transform.fwd_pt(point.Point.build(u.pos))
        screen_radius = pos_transform.fwd_dist(u.radius)
        return np.array((
            u.unit_type,
            u.alliance,
            u.health,
            u.shield,
            u.energy,
            u.cargo_space_taken,
            int(u.build_progress * 100),
            int(u.health / u.health_max * 255) if u.health_max > 0 else 0,
            int(u.shield / u.shield_max * 255) if u.shield_max > 0 else 0,
            int(u.energy / u.energy_max * 255) if u.energy_max > 0 else 0,
            u.display_type,
            u.owner,
            screen_pos.x,
            screen_pos.y,
            u.facing,
            screen_radius,
            u.cloak,
            u.is_selected,
            u.is_blip,
            u.is_powered,
            u.mineral_contents,
            u.vespene_contents,
            u.cargo_space_max,
            u.assigned_harvesters,
            u.ideal_harvesters,
            u.weapon_cooldown,
            len(u.orders),
            u.tag if is_raw else 0
        ), dtype=np.int32)

    raw = obs.observation.raw_data

    if aif.use_feature_units:
        with sw("feature_units"):
            self._update_camera(point.Point.build(raw.player.camera))
            feature_units = []
            for u in raw.units:
                if u.is_on_screen and u.display_type != sc_raw.Hidden:
                    feature_units.append(
                        full_unit_vec(u, self._world_to_feature_screen_px))
            out["feature_units"] = named_array.NamedNumpyArray(
                feature_units, [None, FeatureUnit], dtype=np.int32)

    if aif.use_raw_units:
        with sw("raw_units"):
            raw_units = [full_unit_vec(u, self._world_to_world_tl, is_raw=True)
                         for u in raw.units]
            out["raw_units"] = named_array.NamedNumpyArray(
                raw_units, [None, FeatureUnit], dtype=np.int32)

    if aif.use_unit_counts:
        with sw("unit_counts"):
            unit_counts = collections.defaultdict(int)
            for u in raw.units:
                if u.alliance == sc_raw.Self:
                    unit_counts[u.unit_type] += 1
            out["unit_counts"] = named_array.NamedNumpyArray(
                sorted(unit_counts.items()), [None, UnitCounts], dtype=np.int32)

    if aif.use_camera_position:
        camera_position = self._world_to_world_tl.fwd_pt(
            point.Point.build(raw.player.camera))
        out["camera_position"] = np.array((camera_position.x, camera_position.y),
                                          dtype=np.int32)

    out["available_actions"] = np.array(self.available_actions(obs.observation),
                                        dtype=np.int32)
    return out
Render some SC2 observations into something an agent can handle.
2,805
def _simplify_non_context_field_binary_composition(expression):
    if any((isinstance(expression.left, ContextField),
            isinstance(expression.right, ContextField))):
        raise AssertionError(u'Received a BinaryComposition expression with a '
                             u'ContextField operand: {}'.format(expression))

    # Operator literals reconstructed from the docstring: '||' is the
    # disjunction, '&&' the conjunction.
    if expression.operator == u'||':
        if expression.left == TrueLiteral or expression.right == TrueLiteral:
            return TrueLiteral
        else:
            return expression
    elif expression.operator == u'&&':
        if expression.left == TrueLiteral:
            return expression.right
        if expression.right == TrueLiteral:
            return expression.left
        else:
            return expression
    else:
        return expression
Return a simplified BinaryComposition if either operand is a TrueLiteral. Args: expression: BinaryComposition without any ContextField operand(s) Returns: the simplified expression if the given expression is a disjunction/conjunction and one of its operands is a TrueLiteral, and the original expression otherwise
2,806
def __is_valid_type(self, typ, typlist):
    typ_is_str = typ == "string"
    str_list_in_typlist = "stringlist" in typlist
    return typ in typlist or (typ_is_str and str_list_in_typlist)
Check if a type is valid based on the input type list. "string" is special because it can be used for stringlist. :param typ: the type to check :param typlist: the list of types to check :return: True on success, False otherwise
2,807
def dump(deposition, from_date, with_json=True, latest_only=False, **kwargs):
    dep_json = json.dumps(deposition.__getstate__(), default=default_serializer)
    dep_dict = json.loads(dep_json)
    # The sub-dict key and field names below are reconstructed from the
    # attribute names; the original string literals were lost in extraction.
    dep_dict['_p'] = {}
    dep_dict['_p']['id'] = deposition.id
    dep_dict['_p']['created'] = dt2utc_timestamp(deposition.created)
    dep_dict['_p']['modified'] = dt2utc_timestamp(deposition.modified)
    dep_dict['_p']['user_id'] = deposition.user_id
    dep_dict['_p']['state'] = deposition.state
    dep_dict['_p']['has_sip'] = deposition.has_sip()
    dep_dict['_p']['submitted'] = deposition.submitted
    return dep_dict
Dump the deposition object as a dictionary.
2,808
def is_valid_short_number_for_region(short_numobj, region_dialing_from):
    if not _region_dialing_from_matches_number(short_numobj, region_dialing_from):
        return False
    metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
    if metadata is None:
        return False
    short_number = national_significant_number(short_numobj)
    general_desc = metadata.general_desc
    if not _matches_possible_number_and_national_number(short_number, general_desc):
        return False
    short_number_desc = metadata.short_code
    if short_number_desc.national_number_pattern is None:
        return False
    return _matches_possible_number_and_national_number(short_number, short_number_desc)
Tests whether a short number matches a valid pattern in a region. Note that this doesn't verify the number is actually in use, which is impossible to tell by just looking at the number itself. Arguments: short_numobj -- the short number to check as a PhoneNumber object. region_dialing_from -- the region from which the number is dialed Return whether the short number matches a valid pattern
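The Python phonenumbers port exposes this check publicly; a usage sketch, assuming the top-level re-export of the same name:

import phonenumbers

numobj = phonenumbers.parse("911", "US")
print(phonenumbers.is_valid_short_number_for_region(numobj, "US"))  # True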
2,809
def create_project(self, name=None, project_id=None, path=None):
    if project_id is not None and project_id in self._projects:
        return self._projects[project_id]
    project = Project(name=name, project_id=project_id, path=path)
    self._check_available_disk_space(project)
    self._projects[project.id] = project
    return project
Create a project and keep a reference to it in the project manager. See the documentation of Project for the arguments.
2,810
def get_vcs_details_output_vcs_details_principal_switch_wwn(self, **kwargs):
    config = ET.Element("config")
    get_vcs_details = ET.Element("get_vcs_details")
    config = get_vcs_details
    output = ET.SubElement(get_vcs_details, "output")
    vcs_details = ET.SubElement(output, "vcs-details")
    principal_switch_wwn = ET.SubElement(vcs_details, "principal-switch-wwn")
    principal_switch_wwn.text = kwargs.pop('principal_switch_wwn')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
2,811
def getPort(self):
    disp = self.pbmanager.dispatchers[self.portstr]
    return disp.port.getHost().port
Helper method for testing; returns the TCP port used for this registration, even if it was specified as 0 and thus allocated by the OS.
2,812
def _stdin_raw_block(self):
    try:
        data = sys.stdin.read(1)
        # Replacement characters reconstructed: normalize CR to LF.
        data = data.replace('\r', '\n')
        return data
    except WindowsError as we:
        if we.winerror == ERROR_NO_DATA:
            return None
        else:
            raise we
Use a blocking stdin read
2,813
def bovy_text(*args, **kwargs):
    # Keyword names and annotate() options reconstructed from the docstring.
    if kwargs.pop('title', False):
        pyplot.annotate(args[0], (0.5, 1.05), xycoords='axes fraction',
                        horizontalalignment='center',
                        verticalalignment='bottom', **kwargs)
    elif kwargs.pop('bottom_left', False):
        pyplot.annotate(args[0], (0.05, 0.05), xycoords='axes fraction',
                        **kwargs)
    elif kwargs.pop('bottom_right', False):
        pyplot.annotate(args[0], (0.95, 0.05), xycoords='axes fraction',
                        horizontalalignment='right', **kwargs)
    elif kwargs.pop('top_right', False):
        pyplot.annotate(args[0], (0.95, 0.95), xycoords='axes fraction',
                        horizontalalignment='right',
                        verticalalignment='top', **kwargs)
    elif kwargs.pop('top_left', False):
        pyplot.annotate(args[0], (0.05, 0.95), xycoords='axes fraction',
                        verticalalignment='top', **kwargs)
    else:
        pyplot.text(*args, **kwargs)
NAME: bovy_text
PURPOSE: thin wrapper around matplotlib's text and annotate; use the keywords 'bottom_left=True', 'bottom_right=True', 'top_left=True', 'top_right=True', or 'title=True' to place the text in one of the corners or use it as the title
INPUT: see matplotlib's text (http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.text)
OUTPUT: prints text on the current figure
HISTORY: 2010-01-26 - Written - Bovy (NYU)
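Usage per the keyword flags in the docstring (the label strings are hypothetical):

bovy_text(r'$v_c = 220\,\mathrm{km\,s}^{-1}$', top_right=True)
bovy_text('Rotation curve', title=True)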
2,814
def generate_messages(outf, msgs):
    print("Generating Messages")
    # The Swift template strings passed to t.write() were lost in extraction;
    # '...' placeholders mark the gaps.
    t.write(outf, '...')
    for msg in msgs:
        t.write(outf, '...', msg)
Generate Swift structs to represent all MAVLink messages
2,815
def _fix_component_id(self, component):
    theID = getattr(component, "id", None)
    if theID is not None:
        setattr(component, "id", self._fix_id(theID))
    try:
        for c in component.children:
            self._fix_component_id(c)
    except:
        pass
Fix the id of a component and all of its children.
2,816
def get_interval_timedelta(self):
    now_datetime = timezone.now()
    current_month_days = monthrange(now_datetime.year, now_datetime.month)[1]

    if self.interval == reminders_choices.INTERVAL_2_WEEKS:
        interval_timedelta = datetime.timedelta(days=14)
    elif self.interval == reminders_choices.INTERVAL_ONE_MONTH:
        interval_timedelta = datetime.timedelta(days=current_month_days)
    elif self.interval == reminders_choices.INTERVAL_THREE_MONTHS:
        three_months = now_datetime + relativedelta(months=+3)
        interval_timedelta = three_months - now_datetime
    elif self.interval == reminders_choices.INTERVAL_SIX_MONTHS:
        six_months = now_datetime + relativedelta(months=+6)
        interval_timedelta = six_months - now_datetime
    elif self.interval == reminders_choices.INTERVAL_ONE_YEAR:
        one_year = now_datetime + relativedelta(years=+1)
        interval_timedelta = one_year - now_datetime

    return interval_timedelta
Spits out the timedelta in days.
2,817
def basic_qos(self, prefetch_size, prefetch_count, a_global):
    args = AMQPWriter()
    args.write_long(prefetch_size)
    args.write_short(prefetch_count)
    args.write_bit(a_global)
    self._send_method((60, 10), args)
    return self.wait(allowed_methods=[
        (60, 11),  # Channel.basic_qos_ok
    ])
Specify quality of service This method requests a specific quality of service. The QoS can be specified for the current channel or for all channels on the connection. The particular properties and semantics of a qos method always depend on the content class semantics. Though the qos method could in principle apply to both peers, it is currently meaningful only for the server. PARAMETERS: prefetch_size: long prefetch window in octets The client can request that messages be sent in advance so that when the client finishes processing a message, the following message is already held locally, rather than needing to be sent down the channel. Prefetching gives a performance improvement. This field specifies the prefetch window size in octets. The server will send a message in advance if it is equal to or smaller in size than the available prefetch size (and also falls into other prefetch limits). May be set to zero, meaning "no specific limit", although other prefetch limits may still apply. The prefetch-size is ignored if the no-ack option is set. RULE: The server MUST ignore this setting when the client is not processing any messages - i.e. the prefetch size does not limit the transfer of single messages to a client, only the sending in advance of more messages while the client still has one or more unacknowledged messages. prefetch_count: short prefetch window in messages Specifies a prefetch window in terms of whole messages. This field may be used in combination with the prefetch-size field; a message will only be sent in advance if both prefetch windows (and those at the channel and connection level) allow it. The prefetch- count is ignored if the no-ack option is set. RULE: The server MAY send less data in advance than allowed by the client's specified prefetch windows but it MUST NOT send more. a_global: boolean apply to entire connection By default the QoS settings apply to the current channel only. If this field is set, they are applied to the entire connection.
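A usage sketch for the method above, assuming `channel` is an open AMQP channel object: deliver at most one unacknowledged message at a time to this consumer.

channel.basic_qos(prefetch_size=0, prefetch_count=1, a_global=False)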
2,818
def from_json(cls, data):
    # Key names reconstructed from the docstring's data schema.
    required_keys = ('solar_model', 'month', 'day_of_month', 'clearness')
    for key in required_keys:
        assert key in data, 'Required key "{}" is missing!'.format(key)
    if 'daylight_savings_indicator' not in data:
        data['daylight_savings_indicator'] = 'No'
    return cls(data['month'], data['day_of_month'], data['clearness'],
               data['daylight_savings_indicator'])
Create a Sky Condition from a dictionary. Args: data = { "solar_model": string, "month": int, "day_of_month": int, "clearness": float, "daylight_savings_indicator": string // "Yes" or "No"}
2,819
def romanize(text: str, engine: str = "royin") -> str:
    if not text or not isinstance(text, str):
        return ""

    if engine == "thai2rom":
        from .thai2rom import romanize
    else:  # the default "royin" engine
        from .royin import romanize

    return romanize(text)
Rendering Thai words in the Latin alphabet or "romanization", using the Royal Thai General System of Transcription (RTGS), which is the official system published by the Royal Institute of Thailand. ถอดเสียงภาษาไทยเป็นอักษรละติน :param str text: Thai text to be romanized :param str engine: 'royin' (default) or 'thai2rom'. 'royin' uses the Royal Thai General System of Transcription issued by Royal Institute of Thailand. 'thai2rom' is deep learning Thai romanization (require keras). :return: A string of Thai words rendered in the Latin alphabet.
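A usage sketch with the default engine; the expected output follows the RTGS tables ('แมว' means "cat"):

print(romanize("แมว"))  # 'maeo'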
2,820
def deploy_api_gateway(self, api_id, stage_name, stage_description="",
                       description="", cache_cluster_enabled=False,
                       cache_cluster_size='0.5', variables=None,
                       cloudwatch_log_level='OFF', cloudwatch_data_trace=False,
                       cloudwatch_metrics_enabled=False, cache_cluster_ttl=300,
                       cache_cluster_encrypted=False):
    print("Deploying API Gateway..")

    self.apigateway_client.create_deployment(
        restApiId=api_id,
        stageName=stage_name,
        stageDescription=stage_description,
        description=description,
        cacheClusterEnabled=cache_cluster_enabled,
        cacheClusterSize=cache_cluster_size,
        variables=variables or {}
    )

    if cloudwatch_log_level not in self.cloudwatch_log_levels:
        cloudwatch_log_level = 'OFF'

    # Patch-op paths reconstructed from the API Gateway stage settings.
    self.apigateway_client.update_stage(
        restApiId=api_id,
        stageName=stage_name,
        patchOperations=[
            self.get_patch_op('logging/loglevel', cloudwatch_log_level),
            self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
            self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
            self.get_patch_op('caching/ttlInSeconds', str(cache_cluster_ttl)),
            self.get_patch_op('caching/dataEncrypted', cache_cluster_encrypted)
        ]
    )

    return "https://{}.execute-api.{}.amazonaws.com/{}".format(
        api_id, self.boto_session.region_name, stage_name)
Deploy the API Gateway! Return the deployed API URL.
2,821
def guess_format(filename, ext, formats, io_table):
    ok = False
    for format in formats:
        # Message strings reconstructed; the originals were lost in extraction.
        output('guessing %s' % format)
        try:
            ok = io_table[format].guess(filename)
        except AttributeError:
            pass
        if ok:
            break
    else:
        raise NotImplementedError('unknown format for extension %s' % ext)

    return format
Guess the format of filename, candidates are in formats.
2,822
def get_field_lookups(field_type, nullable):
    # The 'isnull' literal is restored from the docstring.
    return LOOKUP_TABLE.get(field_type) + ['isnull'] if nullable \
        else LOOKUP_TABLE.get(field_type)
Return lookup table value and append isnull if this is a nullable field
2,823
def total(self):
    total = 0
    for item in self.items.all():
        total += item.total
    return total
Total cost of the order
2,824
def element(self, inp=None):
    if inp is not None:
        s = str(inp)[:self.length]
        s += ' ' * (self.length - len(s))  # pad with spaces (reconstructed)
        return s
    else:
        return ' ' * self.length
Return an element from ``inp`` or from scratch.
2,825
def comment(self, text, comment_prefix='#'):
    comment = Comment(self._container)
    if not text.startswith(comment_prefix):
        text = "{} {}".format(comment_prefix, text)
    if not text.endswith('\n'):
        text = "{}{}".format(text, '\n')
    comment.add_line(text)
    self._container.structure.insert(self._idx, comment)
    self._idx += 1
    return self
Creates a comment block Args: text (str): content of comment without # comment_prefix (str): character indicating start of comment Returns: self for chaining
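A usage sketch, assuming `section` is an object exposing the comment() builder above; each call inserts one comment block and returns self, so calls chain:

section.comment("Managed by the deploy script") \
       .comment("Do not edit by hand")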
2,826
def _add_default_exposure_class(layer):
    layer.startEditing()

    field = create_field_from_definition(exposure_class_field)
    # Keyword/field keys reconstructed from InaSAFE naming conventions.
    layer.keywords['inasafe_fields'][exposure_class_field['key']] = (
        exposure_class_field['field_name'])
    layer.addAttribute(field)

    index = layer.fields().lookupField(exposure_class_field['field_name'])
    exposure = layer.keywords['exposure']

    request = QgsFeatureRequest()
    request.setFlags(QgsFeatureRequest.NoGeometry)
    for feature in layer.getFeatures(request):
        layer.changeAttributeValue(feature.id(), index, exposure)

    layer.commitChanges()
    return
The layer doesn't have an exposure class, we need to add it. :param layer: The vector layer. :type layer: QgsVectorLayer
2,827
def len_cdc_tube(FlowPlant, ConcDoseMax, ConcStock, DiamTubeAvail,
                 HeadlossCDC, LenCDCTubeMax, temp, en_chem, KMinor):
    index = i_cdc(FlowPlant, ConcDoseMax, ConcStock, DiamTubeAvail,
                  HeadlossCDC, LenCDCTubeMax, temp, en_chem, KMinor)
    len_cdc_tube = (_length_cdc_tube_array(FlowPlant, ConcDoseMax, ConcStock,
                                           DiamTubeAvail, HeadlossCDC, temp,
                                           en_chem, KMinor))[index].magnitude
    return len_cdc_tube
The length of tubing may be longer than the max specified if the stock concentration is too high to give a viable solution with the specified length of tubing.
2,828
def current_version():
    import setuptools

    version = [None]

    # Monkey-patch setuptools.setup to capture the version keyword.
    def monkey_setup(**settings):
        version[0] = settings['version']

    old_setup = setuptools.setup
    setuptools.setup = monkey_setup

    import setup
    reload(setup)

    setuptools.setup = old_setup
    return version[0]
Get the current version number from setup.py
2,829
def from_json(json_data): return oauth
Returns a pyalveo.OAuth2 given a json string built from the oauth.to_json() method.
2,830
def headers(self):
    self.__dict__['headers'] = hdict = HeaderDict()
    hdict.dict = self._headers
    return hdict
An instance of :class:`HeaderDict`, a case-insensitive dict-like view on the response headers.
2,831
def extract_first_jpeg_in_pdf(fstream):
    parser = PDFParser(fstream)
    if PY2:
        document = PDFDocument(parser)
    else:
        document = PDFDocument()
        parser.set_document(document)
        document.set_parser(parser)
        document.initialize()
    rsrcmgr = PDFResourceManager()
    device = PDFPageAggregator(rsrcmgr)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    pages = PDFPage.create_pages(document) if PY2 else document.get_pages()
    for page in pages:
        interpreter.process_page(page)
        layout = device.result
        for el in layout:
            if isinstance(el, LTFigure):
                for im in el:
                    if isinstance(im, LTImage):
                        st = None
                        try:
                            imdata = im.stream.get_data()
                        except Exception:
                            # Decoding failed; the original error handling was
                            # partially lost in extraction of this sample.
                            imdata = None
                        return imdata
    return None
Reads a given PDF file and scans for the first valid embedded JPEG image. Returns either None (if none found) or a string of data for the image. There is no 100% guarantee for this code, yet it seems to work fine with most scanner-produced images around. More testing might be needed though. Note that in principle there is no serious problem extracting PNGs or other image types from PDFs, however at the moment I do not have enough test data to try this, and the one I have seems to be unsuitable for PDFMiner. :param fstream: Readable binary stream of the PDF :return: binary stream, containing the whole contents of the JPEG image or None if extraction failed.
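A usage sketch; 'scan.pdf' and 'first_image.jpg' are placeholder paths:

with open('scan.pdf', 'rb') as fstream:
    jpeg_data = extract_first_jpeg_in_pdf(fstream)
if jpeg_data is not None:
    with open('first_image.jpg', 'wb') as out:
        out.write(jpeg_data)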
2,832
def load(self, filename=None):
    if not filename:
        filename = self.default_config_file
    files = self._cfgs_to_read()
    # Insert the given filename as the last file read before the fallback.
    files.insert(-1, filename)
    try:
        config = self.__read_cfg(files)
    except ReadConfigException as e:
        # Message text reconstructed; the original literal was lost.
        print(Config._format_msg('Failed to read {file}: {error}'.format(
            file=filename, error=e)))
    else:
        self._conf_values = config

    if self.show_config_notification and not self.cfg_dir:
        print(Config._format_msg("no configuration directory set or usable."
                                 " Falling back to defaults."))
Load the runtime configuration from the given filename. If filename is None, try to read the default file from the default location.
2,833
def _connect(self, config):
    # The timeout key name and printer message are reconstructed; the
    # original literals were lost in extraction.
    if 'connection_timeout' not in self._config:
        self._config['connection_timeout'] = 480
    try:
        self._cnx = connect(**config)
        self._cursor = self._cnx.cursor()
        self._printer('\tMySQL DB connection established:', config['database'])
    except Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        raise err
Establish a connection with a MySQL database.
2,834
def copy_file(host, file_path, remote_path='.', username=None, key_path=None,
              action='put'):
    # The default action and the fchr() argument are reconstructed; the
    # original literals were lost in extraction.
    if not username:
        username = shakedown.cli.ssh_user
    if not key_path:
        key_path = shakedown.cli.ssh_key_file

    key = validate_key(key_path)
    transport = get_transport(host, username, key)
    transport = start_transport(transport, username, key)

    if transport.is_authenticated():
        start = time.time()
        channel = scp.SCPClient(transport)

        if action == 'get':
            print("\n{}scp {}:{} {}\n".format(
                shakedown.cli.helpers.fchr('>>'), host, remote_path, file_path))
            channel.get(remote_path, file_path)
        else:
            print("\n{}scp {} {}:{}\n".format(
                shakedown.cli.helpers.fchr('>>'), file_path, host, remote_path))
            channel.put(file_path, remote_path)

        print("{} bytes copied in {} seconds.".format(
            str(os.path.getsize(file_path)),
            str(round(time.time() - start, 2))))

        try_close(channel)
        try_close(transport)
        return True
    else:
        print("error: unable to authenticate {}@{} with key {}".format(
            username, host, key_path))
        return False
Copy a file via SCP, proxied through the mesos master :param host: host or IP of the machine to execute the command on :type host: str :param file_path: the local path to the file to be copied :type file_path: str :param remote_path: the remote path to copy the file to :type remote_path: str :param username: SSH username :type username: str :param key_path: path to the SSH private key to use for SSH authentication :type key_path: str :return: True if successful, False otherwise :rtype: bool
2,835
def picard_index_ref(picard, ref_file):
    dict_file = "%s.dict" % os.path.splitext(ref_file)[0]
    if not file_exists(dict_file):
        with file_transaction(picard._config, dict_file) as tx_dict_file:
            opts = [("REFERENCE", ref_file), ("OUTPUT", tx_dict_file)]
            picard.run("CreateSequenceDictionary", opts)
    return dict_file
Provide a Picard style dict index file for a reference genome.
2,836
def add_file_dep(self, doc, value):
    if self.has_package(doc) and self.has_file(doc):
        self.file(doc).add_depend(value)
    else:
        raise OrderError()
Raises OrderError if no package or file defined.
2,837
def _get_kvc(kv_arg):
    if isinstance(kv_arg, Mapping):
        return six.iterkeys(kv_arg), six.itervalues(kv_arg), len(kv_arg)
    # Assertion message reconstructed; the original literal was lost.
    assert 2 <= len(kv_arg) <= 3, 'kv_arg must be a (keys, values[, count]) tuple'
    return (
        kv_arg[0],
        kv_arg[1],
        kv_arg[2] if len(kv_arg) == 3 else len(kv_arg[0]))
Returns a tuple (keys, values, count) for kv_arg, which can be a dict or a tuple containing keys, values, and optionally a count.
2,838
def check_entry_points(dist, attr, value):
    try:
        pkg_resources.EntryPoint.parse_map(value)
    except ValueError, e:  # Python 2 syntax, as in the original
        raise DistutilsSetupError(e)
Verify that entry_points map is parseable
2,839
def get_repository_state(self, relaPath=None):
    state = []

    def _walk_dir(relaPath, dirList):
        # Dict keys reconstructed from the docstring ('type', 'exists',
        # 'pyrepdirinfo'/'pyrepfileinfo'); the repo-walk key is assumed.
        dirDict = {'type': 'dir',
                   'exists': os.path.isdir(os.path.join(self.__path, relaPath)),
                   'pyrepdirinfo': os.path.isfile(os.path.join(self.__path, relaPath, self.__dirInfo)),
                   }
        state.append({relaPath: dirDict})
        # Walk files first...
        for fname in sorted([f for f in dirList if isinstance(f, basestring)]):
            relaFilePath = os.path.join(relaPath, fname)
            realFilePath = os.path.join(self.__path, relaFilePath)
            fileDict = {'type': 'file',
                        'exists': os.path.isfile(realFilePath),
                        'pyrepfileinfo': os.path.isfile(os.path.join(self.__path, relaPath, self.__fileInfo % fname)),
                        }
            state.append({relaFilePath: fileDict})
        # ...then recurse into sub-directories.
        for ddict in sorted([d for d in dirList if isinstance(d, dict)],
                            key=lambda k: list(k)[0]):
            dirname = list(ddict)[0]
            _walk_dir(relaPath=os.path.join(relaPath, dirname),
                      dirList=ddict[dirname])

    if relaPath is None:
        _walk_dir(relaPath='', dirList=self.__repo['walk_repo'])
    else:
        assert isinstance(relaPath, basestring), "relaPath must be None or a str"
        relaPath = self.to_repo_relative_path(path=relaPath, split=False)
        spath = relaPath.split(os.sep)
        dirList = self.__repo['walk_repo']
        while len(spath):
            dirname = spath.pop(0)
            dList = [d for d in dirList if isinstance(d, dict)]
            if not len(dList):
                dirList = None
                break
            cDict = [d for d in dList if dirname in d]
            if not len(cDict):
                dirList = None
                break
            dirList = cDict[0][dirname]
        if dirList is not None:
            _walk_dir(relaPath=relaPath, dirList=dirList)

    return state
Get a list representation of the repository state along with useful information. The list state is ordered relatively to directory level. :Parameters: #. relaPath (None, str): relative directory path from where to start. If None, the whole repository representation is returned. :Returns: #. state (list): List representation of the repository. List items are all dictionaries. Every dictionary has a single key, which is the file or directory name, and the value is a dictionary of information including: * 'type': the type of the tracked item, whether it is a file, dir, or objectdir * 'exists': whether the file or directory actually exists on disk * 'pyrepfileinfo': in the case of a file or an objectdir, whether .%s_pyrepfileinfo exists * 'pyrepdirinfo': in the case of a directory, whether .pyrepdirinfo exists
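A sketch of the shape of the returned list, for a hypothetical `repo` instance tracking one directory with one file:

state = repo.get_repository_state()
# [ {'': {'type': 'dir', 'exists': True, 'pyrepdirinfo': True}},
#   {'data': {'type': 'dir', 'exists': True, 'pyrepdirinfo': True}},
#   {'data/results.txt': {'type': 'file', 'exists': True, 'pyrepfileinfo': True}} ]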
2,840
def toDict(self):
    result = _Base.toDict(self)
    result['score'] = self.score.score
    return result
Get information about the HSP as a dictionary. @return: A C{dict} representation of the HSP.
2,841
def get_template(self, project, template_id):
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if template_id is not None:
        route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
    # The REST location GUID was lost in extraction; '...' marks the gap.
    response = self._send(http_method='GET',
                          location_id='...',
                          version='5.0',
                          route_values=route_values)
    return self._deserialize('BuildDefinitionTemplate', response)
GetTemplate. Gets a specific build definition template. :param str project: Project ID or project name :param str template_id: The ID of the requested template. :rtype: :class:`<BuildDefinitionTemplate> <azure.devops.v5_0.build.models.BuildDefinitionTemplate>`
2,842
def add_source(zone, source, permanent=True):
    # Log message reconstructed; the original literal was lost in extraction.
    if source in get_sources(zone, permanent):
        log.info('Source is already bound to zone.')

    cmd = '--zone={0} --add-source={1}'.format(zone, source)

    if permanent:
        cmd += ' --permanent'

    return __firewall_cmd(cmd)
Bind a source to a zone .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' firewalld.add_source zone 192.168.1.0/24
2,843
def delete(self, block_type, block_num):
    logger.info("deleting block")
    blocktype = snap7.snap7types.block_types[block_type]
    result = self.library.Cli_Delete(self.pointer, blocktype, block_num)
    return result
Deletes a block :param block_type: Type of block :param block_num: Block number
2,844
def listen_error_messages_raylet(worker, task_error_queue, threads_stopped):
    worker.error_message_pubsub_client = worker.redis_client.pubsub(
        ignore_subscribe_messages=True)
    error_pubsub_channel = str(
        ray.gcs_utils.TablePubsub.ERROR_INFO).encode("ascii")
    worker.error_message_pubsub_client.subscribe(error_pubsub_channel)

    try:
        error_messages = global_state.error_messages(worker.task_driver_id)
        for error_message in error_messages:
            logger.error(error_message)

        while True:
            if threads_stopped.is_set():
                return
            msg = worker.error_message_pubsub_client.get_message()
            if msg is None:
                threads_stopped.wait(timeout=0.01)
                continue
            gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
                msg["data"], 0)
            assert gcs_entry.EntriesLength() == 1
            error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData(
                gcs_entry.Entries(0), 0)
            driver_id = error_data.DriverId()
            if driver_id not in [
                    worker.task_driver_id.binary(),
                    DriverID.nil().binary()
            ]:
                continue

            error_message = ray.utils.decode(error_data.ErrorMessage())
            if (ray.utils.decode(
                    error_data.Type()) == ray_constants.TASK_PUSH_ERROR):
                task_error_queue.put((error_message, time.time()))
            else:
                logger.error(error_message)
    finally:
        worker.error_message_pubsub_client.close()
Listen to error messages in the background on the driver. This runs in a separate thread on the driver and pushes (error, time) tuples to the output queue. Args: worker: The worker class that this thread belongs to. task_error_queue (queue.Queue): A queue used to communicate with the thread that prints the errors found by this thread. threads_stopped (threading.Event): A threading event used to signal to the thread that it should exit.
2,845
def instance(cls):
    if not hasattr(cls, "_instance") or cls._instance is None:
        cls._instance = cls()
    return cls._instance
Singleton to return only one instance of BaseManager. :returns: instance of BaseManager
2,846
def stats(self):
    load_count = self.last_level_load.MISS_count
    load_byte = self.last_level_load.MISS_byte
    if self.last_level_load.victims_to is not None:
        load_count -= self.last_level_load.victims_to.HIT_count
        load_byte -= self.last_level_load.victims_to.HIT_byte
    # Stat keys reconstructed from the attribute names used in the body.
    return {'name': self.name,
            'LOAD_count': load_count,
            'LOAD_byte': load_byte,
            'HIT_count': load_count,
            'HIT_byte': load_byte,
            'STORE_count': self.last_level_store.EVICT_count,
            'STORE_byte': self.last_level_store.EVICT_byte,
            'EVICT_count': 0,
            'EVICT_byte': 0,
            'MISS_count': 0,
            'MISS_byte': 0}
Return a dictionary with all stats at this level.
2,847
def inspect(self):
    last_attempt = self.get_last_failed_access_attempt(
        ip_address=self.ip,
        captcha_enabled=True,
        captcha_passed=False,
        is_expired=False
    )

    if last_attempt is None and not self.request.user.is_authenticated():
        user_access = self._FailedAccessAttemptModel(
            ip_address=self.ip,
            username=self.username,
            captcha_enabled=True,
            captcha_passed=False,
            is_expired=False
        )
    elif last_attempt:
        user_access = last_attempt

    # Method and META key literals reconstructed ('POST', 'HTTP_USER_AGENT').
    if self.request.method == 'POST':
        if not self.request.user.is_authenticated():
            user_access.user_agent = self.request.META.get('HTTP_USER_AGENT', '')[:255]
            user_access.username = self.username
            user_access.failed_attempts += 1
            user_access.params_get = self.request.GET
            user_access.params_post = self.request.POST

            if user_access.failed_attempts >= self.max_failed_attempts:
                user_access.is_locked = True
            user_access.save()
        elif self.request.user.is_authenticated() and last_attempt:
            last_attempt.is_expired = True
            last_attempt.save()
Inspect an access attempt; used for the captcha flow. :return:
2,848
def structure(self, obj, cl):
    return self._structure_func.dispatch(cl)(obj, cl)
Convert unstructured Python data structures to structured data.
2,849
def _ProcessGrepSource(self, source):
    attributes = source.base_source.attributes
    paths = artifact_utils.InterpolateListKbAttributes(
        attributes["paths"], self.knowledge_base,
        self.ignore_interpolation_errors)
    regex = utils.RegexListDisjunction(attributes["content_regex_list"])
    condition = rdf_file_finder.FileFinderCondition.ContentsRegexMatch(
        regex=regex, mode="ALL_HITS")
    file_finder_action = rdf_file_finder.FileFinderAction.Stat()
    request = rdf_file_finder.FileFinderArgs(
        paths=paths,
        action=file_finder_action,
        conditions=[condition],
        follow_links=True)
    action = file_finder.FileFinderOSFromClient
    yield action, request
Find files fulfilling regex conditions.
2,850
def bll_version(self):
    if not self.started():
        return None
    # REST resource and attribute names reconstructed; the original string
    # literals were lost in extraction.
    status, data = self._rest.get_request('objects', 'system1', ['version'])
    return data['version']
Get the BLL version this session is connected to. Return: Version string if session started. None if session not started.
2,851
def get_serializer(name):
    try:
        # Log/error message strings reconstructed.
        log.debug('getting serializer %s', name)
        return SERIALIZER_LOOKUP[name]
    except KeyError:
        msg = 'serializer {} is not available'.format(name)
        log.error(msg, exc_info=True)
        raise InvalidSerializerException(msg)
Return the serialize function.
2,852
def generate_changelog(from_version: str, to_version: str = None) -> dict:
    # Section names and string literals reconstructed from this project's
    # commit-message conventions; the originals were lost in extraction.
    debug('generate_changelog("{}", "{}")'.format(from_version, to_version))
    changes: dict = {'feature': [], 'fix': [], 'documentation': [],
                     'refactor': [], 'breaking': []}

    found_the_release = to_version is None
    rev = None
    if from_version:
        rev = 'v{0}'.format(from_version)

    for _hash, commit_message in get_commit_log(rev):
        if not found_the_release:
            if to_version and to_version not in commit_message:
                continue
            else:
                found_the_release = True

        if from_version is not None and from_version in commit_message:
            break

        try:
            message = current_commit_parser()(commit_message)
            if message[1] not in changes:
                continue

            changes[message[1]].append((_hash, message[3][0]))

            if message[3][1] and 'BREAKING CHANGE' in message[3][1]:
                parts = re_breaking.match(message[3][1])
                if parts:
                    changes['breaking'].append(parts.group(1))

            if message[3][2] and 'BREAKING CHANGE' in message[3][2]:
                parts = re_breaking.match(message[3][2])
                if parts:
                    changes['breaking'].append(parts.group(1))

        except UnknownCommitMessageStyleError as err:
            debug('Ignoring unknown commit message style:', err)
            pass

    return changes
Generates a changelog for the given version. :param from_version: The last version not in the changelog. The changelog will be generated from the commit after this one. :param to_version: The last version in the changelog. :return: a dict with different changelog sections
2,853
def reverse_taskname(name: str) -> str:
    # The '.' separator is restored from the docstring's examples.
    components = name.split('.')
    assert len(components) <= 3
    return '.'.join(components[::-1])
Reverses components in the name of task. Reversed convention is used for filenames since it groups log/scratch files of related tasks together 0.somejob.somerun -> somerun.somejob.0 0.somejob -> somejob.0 somename -> somename Args: name: name of task
2,854
def _validate_columns(self):
    # Geometry column names and error text reconstructed from CARTO's
    # conventions; the original literals were lost in extraction.
    geom_cols = {'the_geom', 'the_geom_webmercator'}
    col_overlap = set(self.style_cols) & geom_cols
    if col_overlap:
        raise ValueError('Style columns cannot be geometry columns. '
                         '`{col}` was specified.'.format(
                             col=', '.join(col_overlap)))
Validate the options in the styles
2,855
def nested_assign(self, key_list, value):
    if len(key_list) == 1:
        self[key_list[0]] = value
    elif len(key_list) > 1:
        if key_list[0] not in self:
            self[key_list[0]] = LIVVDict()
        self[key_list[0]].nested_assign(key_list[1:], value)
Set the value of nested LIVVDicts given a list of keys.
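A worked example of the recursion above (LIVVDict as defined in this project; the keys are arbitrary):

d = LIVVDict()
d.nested_assign(['numerics', 'bit-for-bit', 'status'], 'PASS')
# d == {'numerics': {'bit-for-bit': {'status': 'PASS'}}}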
2,856
def _prepare_text(self, text):
    # Regex literals reconstructed; the originals were lost in extraction.
    text = re.sub(r'\s+', ' ', text)
    pattern = r'({})'.format(self._tokenizer.pattern)
    return re.sub(pattern, self._base_token_markup, text)
Returns `text` with each constituent token wrapped in HTML markup for later match annotation. :param text: text to be marked up :type text: `str` :rtype: `str`
2,857
def get_diff(self, rev1, rev2, path='', ignore_whitespace=False, context=3):
    # Attribute name 'raw_id' reconstructed from this codebase's changeset API.
    if hasattr(rev1, 'raw_id'):
        rev1 = getattr(rev1, 'raw_id')
    if hasattr(rev2, 'raw_id'):
        rev2 = getattr(rev2, 'raw_id')

    # Validate that the given revisions exist in the repository.
    if rev1 != self.EMPTY_CHANGESET:
        self.get_changeset(rev1)
    self.get_changeset(rev2)
    if path:
        file_filter = match(self.path, '', [path])
    else:
        file_filter = None

    return ''.join(patch.diff(self._repo, rev1, rev2, match=file_filter,
                              opts=diffopts(git=True,
                                            ignorews=ignore_whitespace,
                                            context=context)))
Returns (git like) *diff*, as plain text. Shows changes introduced by ``rev2`` since ``rev1``. :param rev1: Entry point from which diff is shown. Can be ``self.EMPTY_CHANGESET`` - in this case, patch showing all the changes since empty state of the repository until ``rev2`` :param rev2: Until which revision changes should be shown. :param ignore_whitespace: If set to ``True``, would not show whitespace changes. Defaults to ``False``. :param context: How many lines before/after changed lines should be shown. Defaults to ``3``.
2,858
async def deserialize(data: dict):
    # The key name passed to data.get() was lost in extraction; 'source_id'
    # is a reconstruction.
    return await Connection._deserialize("vcx_connection_deserialize",
                                         json.dumps(data),
                                         data.get('source_id'))
Create the object from a previously serialized object. :param data: The output of the "serialize" call Example: data = await connection1.serialize() connection2 = await Connection.deserialize(data) :return: A re-instantiated object
2,859
def issuer_cert_urls(self):
    if self._issuer_cert_urls is None:
        self._issuer_cert_urls = []

        if self.authority_information_access_value:
            for entry in self.authority_information_access_value:
                if entry['access_method'].native == 'ca_issuers':
                    location = entry['access_location']
                    if location.name != 'uniform_resource_identifier':
                        continue
                    url = location.native
                    if url.lower()[0:7] == 'http://':
                        self._issuer_cert_urls.append(url)

    return self._issuer_cert_urls
:return: A list of unicode strings that are URLs that should contain either an individual DER-encoded X.509 certificate, or a DER-encoded CMS message containing multiple certificates
2,860
def create(self, **kwargs):
    obj = self.model(**kwargs)
    meta = obj.get_meta()
    meta.connection = get_es_connection(self.es_url, self.es_kwargs)
    meta.index = self.index
    meta.type = self.type
    obj.save(force=True)
    return obj
Creates a new object with the given kwargs, saving it to the database and returning the created object.
2,861
def validate(self, value):
    if value is None:
        return True
    else:
        try:
            with value.open() as hdulist:
                self.validate_hdulist(hdulist)
        except Exception:
            _type, exc, tb = sys.exc_info()
            six.reraise(ValidationError, exc, tb)
validate
2,862
def custom(command, user=None, conf_file=None, bin_env=None):
    ret = __salt__['cmd.run_all'](
        _ctl_cmd(command, None, conf_file, bin_env),
        runas=user,
        python_shell=False,
    )
    return _get_return(ret)
Run any custom supervisord command user user to run supervisorctl as conf_file path to supervisord config file bin_env path to supervisorctl bin or path to virtualenv with supervisor installed CLI Example: .. code-block:: bash salt '*' supervisord.custom "mstop '*gunicorn*'"
2,863
def send_message(message, params, site, logger):
    # Event name and data keys reconstructed from raven's message API.
    client.capture(
        'raven.events.Message',
        message=message,
        params=tuple(params),
        data={
            'site': site,
            'logger': logger,
        },
    )
Send a message to the Sentry server
2,864
async def _start(self):
    self.agent._alive.wait()
    try:
        await self.on_start()
    except Exception as e:
        logger.error("Exception running on_start in behaviour {}: {}".format(self, e))
        self.kill(exit_code=e)

    await self._step()
    self._is_done.clear()
Start coroutine. Runs the on_start coroutine and then the _step coroutine, where the body of the behaviour is called.
2,865
def p_out(p):
    # Sentence name 'OUT' restored from the grammar rule in the docstring.
    p[0] = make_sentence('OUT',
                         make_typecast(TYPE.uinteger, p[2], p.lineno(3)),
                         make_typecast(TYPE.ubyte, p[4], p.lineno(4)))
statement : OUT expr COMMA expr
2,866
def update_rho(self, k, r, s):
    # Option keys reconstructed from the solver's AutoRho option group.
    if self.opt['AutoRho', 'Enabled']:
        tau = self.rho_tau
        mu = self.rho_mu
        xi = self.rho_xi
        if k != 0 and np.mod(k + 1, self.opt['AutoRho', 'Period']) == 0:
            if self.opt['AutoRho', 'AutoScaling']:
                if s == 0.0 or r == 0.0:
                    rhomlt = tau
                else:
                    rhomlt = np.sqrt(r / (s * xi) if r > s * xi
                                     else (s * xi) / r)
                    if rhomlt > tau:
                        rhomlt = tau
            else:
                rhomlt = tau
            rsf = 1.0
            if r > xi * mu * s:
                rsf = rhomlt
            elif s > (mu / xi) * r:
                rsf = 1.0 / rhomlt
            self.rho *= self.dtype.type(rsf)
            self.U /= rsf
            if rsf != 1.0:
                self.rhochange()
Automatic rho adjustment.
2,867
def database_renderer(self, name=None, site=None, role=None):
    name = name or self.env.default_db_name
    site = site or self.genv.SITE
    role = role or self.genv.ROLE
    key = (name, site, role)
    # The vprint/print message strings, the 'db_name'/'db_' literals and the
    # satchel name were lost in extraction; the values below are
    # reconstructions.
    self.vprint('database renderer key:', key)
    if key not in self._database_renderers:
        self.vprint('Creating new database renderer.')
        if self.verbose:
            print('name:', name)
            print('databases:', self.env.databases)
            print('database %s:' % name, self.env.databases.get(name))
        d = type(self.genv)(self.lenv)
        d.update(self.get_database_defaults())
        d.update(self.env.databases.get(name, {}))
        d['db_name'] = name
        if self.verbose:
            print('database settings:')
            pprint(d, indent=4)
            print('connection_handler:', d.connection_handler)

        if d.connection_handler == CONNECTION_HANDLER_DJANGO:
            self.vprint('Using the Django connection handler.')
            dj = self.get_satchel('dj')
            if self.verbose:
                print('Loading Django DB settings for site {} and role {}.'
                      .format(site, role), file=sys.stderr)
            dj.set_db(name=name, site=site, role=role)
            _d = dj.local_renderer.collect_genv(include_local=True,
                                                include_global=False)
            # Copy "db_*"-prefixed settings over to unprefixed keys.
            for k, v in _d.items():
                if k.startswith('db_'):
                    _d[k[3:]] = v
                    del _d[k]
            if self.verbose:
                print('Django DB settings:')
                pprint(_d)
            d.update(_d)
        elif d.connection_handler and d.connection_handler.startswith(
                CONNECTION_HANDLER_CUSTOM + ':'):
            _callable_str = d.connection_handler[
                len(CONNECTION_HANDLER_CUSTOM + ':'):]
            self.vprint('Using custom connection handler %s.' % _callable_str)
            _d = str_to_callable(_callable_str)(role=self.genv.ROLE)
            if self.verbose:
                print('Custom DB settings:')
                pprint(_d)
            d.update(_d)

        r = LocalRenderer(self, lenv=d)
        self.set_root_login(r)
        self._database_renderers[key] = r
    else:
        self.vprint('Using cached database renderer.')
    return self._database_renderers[key]
Renders local settings for a specific database.
2,868
def create_event(component, tz=UTC):
    # iCal property names ('dtstart', 'dtend', 'duration', 'summary', ...)
    # reconstructed from the iCalendar spec; encodings are assumed.
    event = Event()
    event.start = normalize(component.get('dtstart').dt, tz=tz)
    if component.get('dtend'):
        event.end = normalize(component.get('dtend').dt, tz=tz)
    elif component.get('duration'):
        event.end = event.start + component.get('duration').dt
    else:
        event.end = event.start

    try:
        event.summary = str(component.get('summary'))
    except UnicodeEncodeError as e:
        event.summary = str(component.get('summary').encode('utf-8'))
    try:
        event.description = str(component.get('description'))
    except UnicodeEncodeError as e:
        event.description = str(component.get('description').encode('utf-8'))
    event.all_day = type(component.get('dtstart').dt) is date
    if component.get('rrule'):
        event.recurring = True
    try:
        event.location = str(component.get('location'))
    except UnicodeEncodeError as e:
        event.location = str(component.get('location').encode('utf-8'))

    if component.get('attendee'):
        event.attendee = component.get('attendee')
        if type(event.attendee) is list:
            temp = []
            for a in event.attendee:
                temp.append(a.encode('utf-8').decode('ascii'))
            event.attendee = temp
        else:
            event.attendee = event.attendee.encode('utf-8').decode('ascii')
    if component.get('uid'):
        event.uid = component.get('uid').encode('utf-8').decode('ascii')
    if component.get('organizer'):
        event.organizer = component.get('organizer').encode('utf-8').decode('ascii')

    return event
Create an event from its iCal representation. :param component: iCal component :param tz: timezone for start and end times :return: event
2,869
def N_to_Ntriangles(N):
    theta = np.array([np.pi / 2 * (k - 0.5) / N for k in range(1, N + 1)])
    phi = np.array([[np.pi * (l - 0.5) / Mk for l in range(1, Mk + 1)]
                    for Mk in np.array(1 + 1.3 * N * np.sin(theta), dtype=int)])
    Ntri = 2 * np.array([len(p) for p in phi]).sum()
    return Ntri
@N: WD style gridsize Converts WD style grid size @N to the number of triangles on the surface. Returns: number of triangles.
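A worked instance of the formula, computed from the code above:

# For N = 2: theta = [pi/8, 3*pi/8], sin(theta) ~ [0.383, 0.924],
# Mk = int(1 + 1.3*2*sin(theta)) = [1, 3], so phi has 1 + 3 = 4 rows
# and Ntri = 2 * 4 = 8.
print(N_to_Ntriangles(2))  # 8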
2,870
def list_storages(self):
    # Interface method; the body was empty in this sample. Implementations
    # return the names of existing stores (see the docstring below).
    pass
Returns a list of existing stores. The returned names can then be used to call get_storage().
2,871
def write_matrix_to_csv(self, headers, data):
    with open(self.path, "w") as out_file:
        data_writer = csv.writer(out_file, delimiter=",")
        data_writer.writerow(headers)
        data_writer.writerows(data)
Saves .csv file with data :param headers: column names :param data: Data
2,872
def pybel_to_json(molecule, name=None):
    # Dict keys and data-field names below are reconstructed from common
    # chemical-JSON conventions; the original literals were lost in
    # extraction, as was the computation of `hill_count`.
    atoms = [{'element': table.GetSymbol(atom.atomicnum),
              'location': list(atom.coords)}
             for atom in molecule.atoms]

    for json_atom, pybel_atom in zip(atoms, molecule.atoms):
        if pybel_atom.partialcharge != 0:
            json_atom['charge'] = pybel_atom.partialcharge
        if pybel_atom.OBAtom.HasData('_atom_site_label'):
            obatom = pybel_atom.OBAtom
            json_atom['label'] = obatom.GetData('_atom_site_label').GetValue()
        if pybel_atom.OBAtom.HasData('color'):
            obatom = pybel_atom.OBAtom
            json_atom['color'] = obatom.GetData('color').GetValue()

    bonds = [{'atoms': [b.GetBeginAtom().GetIndex(),
                        b.GetEndAtom().GetIndex()],
              'order': b.GetBondOrder()}
             for b in ob.OBMolBondIter(molecule.OBMol)]

    output = {'atoms': atoms, 'bonds': bonds, 'units': {}}

    div = (reduce(gcd, (c[1] for c in hill_count))
           if hasattr(molecule, 'unitcell') else 1)
    output['formula'] = ''.join(n if c / div == 1 else '%s%d' % (n, c / div)
                                for n, c in hill_count)
    output['molecular_weight'] = molecule.molwt / div
    output['units']['molecular_weight'] = 'g / mol'

    if name:
        output['name'] = name
    return output
Converts a pybel molecule to json. Args: molecule: An instance of `pybel.Molecule` name: (Optional) If specified, will save a "name" property Returns: A Python dictionary containing atom and bond data
2,873
def start(self):
    thread = threading.Thread(target=reactor.run)
    thread.start()
doesn't work
2,874
def solve(self):
    aLvl, trash = self.prepareToCalcEndOfPrdvP()
    EndOfPrdvP = self.calcEndOfPrdvP()
    if self.vFuncBool:
        self.makeEndOfPrdvFunc(EndOfPrdvP)
    if self.CubicBool:
        interpolator = self.makeCubicxFunc
    else:
        interpolator = self.makeLinearxFunc
    solution = self.makeBasicSolution(EndOfPrdvP, aLvl, interpolator)
    solution = self.addMPCandHumanWealth(solution)
    if self.CubicBool:
        solution = self.addvPPfunc(solution)
    return solution
Solves a one period consumption saving problem with risky income and shocks to medical need. Parameters ---------- None Returns ------- solution : ConsumerSolution The solution to the one period problem, including a consumption function, medical spending function ( both defined over market re- sources, permanent income, and medical shock), a marginal value func- tion (defined over market resources and permanent income), and human wealth as a function of permanent income.
2,875
def removeStages(self, personID):
    while self.getRemainingStages(personID) > 1:
        self.removeStage(personID, 1)
    self.removeStage(personID, 0)
remove(string) Removes all stages of the person. If no new phases are appended, the person will be removed from the simulation in the next simulationStep().
2,876
def ReportConfiguration(self, f):
    # Python 2 print-to-file syntax, as in the original.
    if BoundaryCheck.chrom != -1:
        print >> f, BuildReportLine("CHROM", BoundaryCheck.chrom)
    if len(self.start_bounds) > 0:
        bounds = ",".join(["%s-%s" % (a[0], a[1])
                           for a in zip(self.start_bounds, self.end_bounds)])
        print >> f, BuildReportLine("SNP BOUNDARY", bounds)
    if len(self.ignored_rs) > 0:
        print >> f, BuildReportLine("IGNORED RS", ",".join(self.ignored_rs))
    if len(self.target_rs) > 0:
        print >> f, BuildReportLine("TARGET RS", ",".join(self.target_rs))
Report the boundary configuration details :param f: File (or standard out/err) :return: None
2,877
def getCSD(lfps, sampr, minf=0.05, maxf=300, norm=True, vaknin=False, spacing=1.0):
    datband = getbandpass(lfps, sampr, minf, maxf)
    # Pick the spatial (channel) axis of the filtered data.
    if datband.shape[0] > datband.shape[1]:
        ax = 1
    else:
        ax = 0
    if vaknin:
        datband = Vaknin(datband)
    if norm:
        removemean(datband, ax=ax)
    # Second spatial difference of the potentials, scaled by the spacing.
    CSD = -numpy.diff(datband, n=2, axis=ax) / spacing**2
    return CSD
get current source density approximation using set of local field potentials with equidistant spacing first performs a lowpass filter lfps is a list or numpy array of LFPs arranged spatially by column spacing is in microns
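The `-numpy.diff(datband, n=2, axis=ax) / spacing**2` step is the standard second-spatial-difference CSD estimate: for potentials phi at electrode spacing h,

\mathrm{CSD}(z_k) \approx -\frac{\phi(z_{k+1}) - 2\phi(z_k) + \phi(z_{k-1})}{h^{2}}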
2,878
def get_ngroups(self, field=None):
    field = field if field else self._determine_group_field(field)
    if 'ngroups' in self.data['grouped'][field]:
        return self.data['grouped'][field]['ngroups']
    raise ValueError("ngroups not found in response. specify group.ngroups in the query.")
Returns ngroups count if it was specified in the query, otherwise ValueError. If grouping on more than one field, provide the field argument to specify which count you are looking for.
2,879
def _GetComplexConjugateArray(Array):
    ConjArray = _np.array([num.conj() for num in Array])
    return ConjArray
Calculates the complex conjugate of each element in an array and returns the resulting array. Parameters ---------- Array : ndarray Input array Returns ------- ConjArray : ndarray The complex conjugate of the input array.
2,880
def generate_random_schema(valid):
    # Schema-kind names reconstructed; the original literals were lost.
    schema_type = choice(['literal', 'type'])
    if schema_type == 'literal':
        type, gen = generate_random_type(valid)
        value = next(gen)
        return value, (value if valid else None for i in itertools.count())
    elif schema_type == 'type':
        return generate_random_type(valid)
    else:
        raise AssertionError('Unknown schema type')
Generate a random plain schema, and a sample generation function. :param valid: Generate valid samples? :type valid: bool :returns: schema, sample-generator :rtype: *, generator
2,881
def dt_day(x):
    import pandas as pd
    return pd.Series(x).dt.day.values
Extracts the day from a datetime sample. :returns: an expression containing the day extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.day Expression = dt_day(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 12 1 11 2 12
2,882
def save_file(self, path=None, filters='*.dat', force_extension=None,
              force_overwrite=False, header_only=False,
              delimiter='use current', binary=None):
    # Several string literals in this sample were lost in extraction; the
    # defaults above come from the docstring, and the file modes, separators
    # and header key below are reconstructions.
    if not _os.path.splitext(path)[-1][1:] == force_extension:
        path = path + '.' + force_extension

    self.path = path

    # Back up an existing file rather than overwriting it.
    if _os.path.exists(path) and not force_overwrite:
        _os.rename(path, path + ".backup")

    if delimiter == "use current":
        if self.delimiter is None:
            delimiter = "\t"
        else:
            delimiter = self.delimiter

    temporary_path = _os.path.join(
        _s.settings.path_home,
        "temp-" + str(int(1e3 * _time.time())) + '-'
        + str(int(1e9 * _n.random.rand(1))))
    f = open(temporary_path, 'w')

    binary = self.pop_header('SPINMOB_BINARY', True)

    if not header_only:
        if binary in [None, 'None', False, 'False']:
            # ASCII mode: write the ckeys as a header row...
            elements = []
            for ckey in self.ckeys:
                elements.append(str(ckey).replace(delimiter, ' '))
            f.write(delimiter.join(elements) + "\n")

            # ...then the data columns row by row.
            for n in range(0, len(self[0])):
                elements = []
                for m in range(0, len(self.ckeys)):
                    if n < len(self[m]):
                        elements.append(str(self[m][n]))
                    else:
                        elements.append('')
                f.write(delimiter.join(elements) + "\n")
        else:
            # Binary mode: for each column write a text descriptor line,
            # then the raw bytes, then a newline.
            f.write('SPINMOB_BINARY\n')
            for n in range(len(self.ckeys)):
                data_string = _n.array(self[n]).astype(binary).tostring()
                f.write(str(self.ckeys[n]).replace(delimiter, ' ')
                        + delimiter + str(len(self[n])) + '\n')
                f.close()
                f = open(temporary_path, 'ab')
                f.write(data_string)
                f.close()
                f = open(temporary_path, 'a')
                f.write('\n')

    f.close()
    _shutil.move(temporary_path, path)
    return self
This will save all the header info and columns to an ascii file with the specified path. Parameters ---------- path=None Path for saving the data. If None, this will bring up a save file dialog. filters='*.dat' File filter for the file dialog (for path=None) force_extension=None If set to a string, e.g., 'txt', it will enforce that the chosen filename will have this extension. force_overwrite=False Normally, if the file * exists, this will copy that to *.backup. If the backup already exists, this function will abort. Setting this to True will force overwriting the backup file. header_only=False Only output the header? delimiter='use current' This will set the delimiter of the output file 'use current' means use self.delimiter binary=None Set to one of the allowed numpy dtypes, e.g., float32, float64, complex64, int32, etc. Setting binary=True defaults to float64. Note if the header contains the key SPINMOB_BINARY and binary=None, it will save as binary using the header specification.
2,883
def parse_expr(e):
    m = relation_re.match(e)
    if m is None:
        raise ValueError('error parsing expression "{}"'.format(e))
    field, op, val = m.groups()
    # Coerce the value: int, then float, then boolean, then quoted string.
    try:
        val_int = int(val)
        val = val_int
    except ValueError:
        try:
            val_float = float(val)
            val = val_float
        except ValueError:
            try:
                val = {'true': True, 'false': False}[val.lower()]
            except KeyError:
                # Strip surrounding quotes from quoted strings
                # (quote-matching regex reconstructed).
                if re.match(r'".*"$', val):
                    val = val[1:-1]
    return field, op, val
Parse a single constraint expression. Legal expressions are defined by the regular expression `relation_re`. :param e: Expression :type e: str :return: Tuple of field, operator, and value :rtype: tuple
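Worked examples of the value-coercion chain (the exact accepted syntax depends on `relation_re`, which is not shown, so these inputs are assumptions):

parse_expr('age > 5')        # ('age', '>', 5)       -- int coercion
parse_expr('rate <= 0.25')   # ('rate', '<=', 0.25)  -- float coercion
parse_expr('active = true')  # ('active', '=', True) -- boolean coercion
parse_expr('name = "bob"')   # ('name', '=', 'bob')  -- quotes stripped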
2,884
def pool(args):
    from jcvi.formats.base import longest_unique_prefix

    p = OptionParser(pool.__doc__)
    p.add_option("--sep", default=".", help="Separator between prefix and name")
    p.add_option("--sequential", default=False, action="store_true",
                 help="Add sequential IDs")
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    for fastafile in args:
        pf = longest_unique_prefix(fastafile, args)
        print(fastafile, "=>", pf, file=sys.stderr)
        prefixopt = "--prefix={0}{1}".format(pf, opts.sep)
        format_args = [fastafile, "stdout", prefixopt]
        if opts.sequential:
            format_args += ["--sequential=replace"]
        format(format_args)
%prog pool fastafiles > pool.fasta Pool a bunch of FASTA files, and add prefix to each record based on filenames. File names are simplified to longest unique prefix to avoid collisions after getting shortened.
2,885
def _set_client_pw(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # String literals below follow the standard pybind setter pattern;
        # the exact namespace, module, and extension values were lost in
        # extraction and are marked with '...'.
        t = YANGDynClass(v, base=client_pw.client_pw, is_container='container',
                         presence=True, yang_name="client-pw",
                         rest_name="client-pw", parent=self,
                         path_helper=self._path_helper,
                         extmethods=self._extmethods, register_paths=True,
                         extensions={u'tailf-common': {
                             u'info': u'Client Pseudo Wire',
                             u'...': None,
                             u'...': u'...'}},
                         namespace='...', defining_module='...',
                         yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'client_pw must be of a type compatible with container',
            'defined-type': "container",
            'generated-type': '...',
        })

    self.__client_pw = t
    if hasattr(self, '_set'):
        self._set()
Setter method for client_pw, mapped from YANG variable /cluster/client_pw (container) If this variable is read-only (config: false) in the source YANG file, then _set_client_pw is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_client_pw() directly. YANG Description: Client Pseudo Wire
2,886
def load_requires_from_file(filepath):
    with open(filepath) as fp:
        return [pkg_name.strip() for pkg_name in fp.readlines()]
Read a package list from a given file path. Args: filepath: file path of the package list. Returns: a list of package names.
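Typical use is feeding install_requires in a setup script; the project metadata below is hypothetical.

from setuptools import setup

setup(
    name='example-package',       # hypothetical
    version='0.1.0',
    install_requires=load_requires_from_file('requirements.txt'),
)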
2,887
def insert_completions(self, e):
    completions = self._get_completions()
    b = self.begidx
    e = self.endidx
    # Splice each completion (followed by a space) into the line buffer,
    # advancing past what was just inserted.
    for comp in completions:
        rep = [c for c in comp]
        rep.append(' ')
        self.l_buffer[b:e] = rep
        b += len(rep)
        e = b
    self.line_cursor = b
    self.finalize()
u"""Insert all completions of the text before point that would have been generated by possible-completions.
2,888
def addStream(self, stream, interpolator="closest", t1=None, t2=None, dt=None,
              limit=None, i1=None, i2=None, transform=None, colname=None):
    streamquery = query_maker(t1, t2, limit, i1, i2, transform)
    param_stream(self.cdb, streamquery, stream)
    streamquery["interpolator"] = interpolator

    # Work out the column name for this stream if none was given.
    if colname is None:
        if isinstance(stream, six.string_types):
            colname = stream
        elif isinstance(stream, Stream):
            colname = stream.path
        else:
            raise Exception(
                "Could not find a name for the column! Use the colname parameter.")

    if colname in self.query["dataset"] or colname == "x":
        raise Exception(
            "The column name either exists, or is labeled 'x'. "
            "Use the colname parameter to change the column name.")

    self.query["dataset"][colname] = streamquery
Adds the given stream to the query construction. Additionally, you can choose the interpolator to use for this stream, as well as a special name for the column in the returned dataset. If no column name is given, the full stream path will be used. addStream also supports Merge queries. You can insert a merge query instead of a stream, but be sure to name the column:: d = Dataset(cdb, t1=time.time()-1000,t2=time.time(),dt=10.) d.addStream("temperature","average") d.addStream("steps","sum") m = Merge(cdb) m.addStream("mystream") m.addStream("mystream2") d.addStream(m,colname="mycolumn") result = d.run()
2,889
def _check_file(parameters):
    (filename, args) = parameters

    # '-' means read from standard input.
    if filename == '-':
        contents = sys.stdin.read()
    else:
        with contextlib.closing(
                docutils.io.FileInput(source_path=filename)) as input_file:
            contents = input_file.read()

    args = load_configuration_from_file(
        os.path.dirname(os.path.realpath(filename)), args)

    ignore_directives_and_roles(args.ignore_directives, args.ignore_roles)

    # Blank out ignored substitutions so they don't trigger errors
    # (replacement string reconstructed).
    for substitution in args.ignore_substitutions:
        contents = contents.replace('|{}|'.format(substitution), 'None')

    ignore = {
        'languages': args.ignore_language,
        'messages': args.ignore_messages,
    }
    all_errors = []
    for error in check(contents,
                       filename=filename,
                       report_level=args.report,
                       ignore=ignore,
                       debug=args.debug):
        all_errors.append(error)
    return (filename, all_errors)
Return list of errors.
2,890
def parse_complex_fault_node(node, mfd_spacing=0.1, mesh_spacing=4.0): assert "complexFaultSource" in node.tag sf_taglist = get_taglist(node) sf_id, name, trt = (node.attrib["id"], node.attrib["name"], node.attrib["tectonicRegion"]) edges = node_to_complex_fault_geometry( node.nodes[sf_taglist.index("complexFaultGeometry")]) msr = node_to_scalerel(node.nodes[sf_taglist.index("magScaleRel")]) aspect = float_(node.nodes[sf_taglist.index("ruptAspectRatio")].text) mfd = node_to_mfd(node, sf_taglist) rake = float_(node.nodes[sf_taglist.index("rake")].text) complex_fault = mtkComplexFaultSource(sf_id, name, trt, geometry=None, mag_scale_rel=msr, rupt_aspect_ratio=aspect, mfd=mfd, rake=rake) complex_fault.create_geometry(edges, mesh_spacing) return complex_fault
Parses a "complexFaultSource" node and returns an instance of the
:class:`openquake.hmtk.sources.complex_fault.mtkComplexFaultSource`
2,891
def from_dict(self, d):
    # NOTE: the dictionary keys below were lost in extraction and are
    # reconstructed from the attribute names and the package's conventions.
    if 'uid' in d:
        if d['uid']:
            self._uid = d['uid']

    if 'name' in d:
        if d['name']:
            self._name = d['name']

    if 'state' in d:
        if isinstance(d['state'], str) or isinstance(d['state'], unicode):
            if d['state'] in states._stage_state_values.keys():
                self._state = d['state']
            else:
                # ValueError/TypeError here are the package's custom
                # exception classes, which accept keyword arguments.
                raise ValueError(obj=self._uid,
                                 attribute='state',
                                 expected_value=states._stage_state_values.keys(),
                                 actual_value=d['state'])
        else:
            raise TypeError(entity='state', expected_type=str,
                            actual_type=type(d['state']))
    else:
        self._state = states.INITIAL

    if 'state_history' in d:
        if isinstance(d['state_history'], list):
            self._state_history = d['state_history']
        else:
            raise TypeError(entity='state_history', expected_type=list,
                            actual_type=type(d['state_history']))

    if 'parent_pipeline' in d:
        if isinstance(d['parent_pipeline'], dict):
            self._p_pipeline = d['parent_pipeline']
        else:
            raise TypeError(entity='parent_pipeline', expected_type=dict,
                            actual_type=type(d['parent_pipeline']))
Create a Stage from a dictionary. The change is applied in place.

:argument: python dictionary
:return: None
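A sketch of a dictionary this method accepts. Field values are illustrative; the state string must be one of the keys of states._stage_state_values, and the constructor name is assumed.

d = {
    'uid': 'stage.0000',
    'name': 'preprocessing',
    'state': 'SCHEDULED',          # must be a valid stage state
    'state_history': ['DESCRIBED', 'SCHEDULING'],
    'parent_pipeline': {'uid': 'pipeline.0000'},
}

s = Stage()        # assumed default constructor
s.from_dict(d)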
2,892
def gaussian_kernel(data_shape, sigma, norm='max'):
    if not import_astropy:
        raise ImportError('Astropy package not found.')

    if norm not in ('max', 'sum', 'none'):
        raise ValueError('Invalid norm, options are "max", "sum" or "none".')

    kernel = np.array(Gaussian2DKernel(sigma, x_size=data_shape[1],
                                       y_size=data_shape[0]))

    if norm == 'max':
        return kernel / np.max(kernel)
    elif norm == 'sum':
        return kernel / np.sum(kernel)
    elif norm == 'none':
        return kernel
r"""Gaussian kernel This method produces a Gaussian kerenal of a specified size and dispersion Parameters ---------- data_shape : tuple Desiered shape of the kernel sigma : float Standard deviation of the kernel norm : str {'max', 'sum', 'none'}, optional Normalisation of the kerenl (options are 'max', 'sum' or 'none') Returns ------- np.ndarray kernel Examples -------- >>> from modopt.math.stats import gaussian_kernel >>> gaussian_kernel((3, 3), 1) array([[ 0.36787944, 0.60653066, 0.36787944], [ 0.60653066, 1. , 0.60653066], [ 0.36787944, 0.60653066, 0.36787944]]) >>> gaussian_kernel((3, 3), 1, norm='sum') array([[ 0.07511361, 0.1238414 , 0.07511361], [ 0.1238414 , 0.20417996, 0.1238414 ], [ 0.07511361, 0.1238414 , 0.07511361]])
2,893
def _get_switchports(profile):
    switchports = []
    # NOTE: key names reconstructed from the docstring; the binding profile
    # is expected to carry a 'local_link_information' list whose entries
    # hold the switch address and port.
    if profile.get('local_link_information'):
        for link in profile['local_link_information']:
            if 'switch_info' in link and 'port_id' in link:
                switch = link['switch_info']
                interface = link['port_id']
                switchports.append((switch, interface))
            else:
                LOG.warning("Incomplete link information: %s", link)
    return switchports
Return list of (switch_ip, interface) tuples from local_link_info
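A self-contained illustration with a made-up binding profile shaped like the local_link_information this helper expects (key names as reconstructed above):

profile = {
    'local_link_information': [
        {'switch_info': '10.0.0.1', 'port_id': 'Ethernet1/1'},
        {'port_id': 'Ethernet1/2'},   # incomplete: logged and skipped
    ],
}

print(_get_switchports(profile))
# -> [('10.0.0.1', 'Ethernet1/1')]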
2,894
def write_results(filename,config,srcfile,samples): results = createResults(config,srcfile,samples=samples) results.write(filename)
Package everything nicely
2,895
def ustep(self): super(ConvCnstrMODMaskDcpl_Consensus, self).ustep() self.U1 += self.AX1 - self.Y1 - self.S
The parent class ustep method is overridden to also perform the ustep for the additional variables introduced in the modification to the baseline algorithm.
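In ADMM terms, the extra line is the scaled dual-variable update for the additional block; a sketch of the update it implements, with AX1 caching the product A X1:

U_1^{(k+1)} = U_1^{(k)} + A X_1^{(k+1)} - Y_1^{(k+1)} - S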
2,896
def enqueue_task(self, source, *args): yield from self.cell.coord.enqueue(self) route = Route(source, self.cell, self.spec, self.emit) self.cell.loop.create_task(self.coord_wrap(route, *args)) yield
Enqueue a task execution. It will run in the background as soon as the coordinator clears it to do so.
2,897
def _call(self, x, out=None): if out is None: return self.range.element(copy(self.constant)) else: out.assign(self.constant)
Return the constant vector or assign it to ``out``.
2,898
def trigger_audited(self, id, rev, **kwargs):
    # Standard swagger-codegen dispatch: run synchronously unless a
    # callback is supplied.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.trigger_audited_with_http_info(id, rev, **kwargs)
    else:
        (data) = self.trigger_audited_with_http_info(id, rev, **kwargs)
        return data
Triggers a build of a specific Build Configuration in a specific revision.

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>>     pprint(response)
>>>
>>> thread = api.trigger_audited(id, rev, callback=callback_function)

:param callback function: The callback function for asynchronous request. (optional)
:param int id: Build Configuration id (required)
:param int rev: Revision of a Build Configuration (required)
:param str callback_url: Optional Callback URL
:param bool temporary_build: Is it a temporary build or a standard build?
:param bool force_rebuild: DEPRECATED: Use RebuildMode.
:param bool build_dependencies: Should we build also dependencies of this BuildConfiguration?
:param bool keep_pod_on_failure: Should we keep the build container running, if the build fails?
:param bool timestamp_alignment: Should we add a timestamp during the alignment? Valid only for temporary builds.
:param str rebuild_mode: Rebuild Modes: FORCE: always rebuild the configuration; EXPLICIT_DEPENDENCY_CHECK: check if any of user defined dependencies has been updated; IMPLICIT_DEPENDENCY_CHECK: check if any captured dependency has been updated;
:return: BuildRecordSingleton
         If the method is called asynchronously, returns the request thread.
2,899
def print_head(self, parent_plate_value, plate_values, interval, n=10, print_func=logging.info): if isinstance(plate_values, Plate): self.print_head(parent_plate_value, plate_values.values, interval, n, print_func) return if len(plate_values) == 1 and len(plate_values[0]) == 2 and isinstance(plate_values[0][0], str): self.print_head(parent_plate_value, (plate_values,), interval, n, print_func) return found = False for plate_value in plate_values: combined_plate_value = Plate.combine_values(parent_plate_value, plate_value) if combined_plate_value not in self._streams: continue found = True print_func("Plate value: {}".format(combined_plate_value)) data = False for k, v in self._streams[combined_plate_value].window(interval).head(n): data = True print_func("{}, {}".format(k, v)) if not data: print_func("No data") print_func("") if not found: print_func("No streams found for the given plate values")
Print the first n values from the streams in the given time interval. The parent plate value is the value of the parent plate, and then the plate values are the values for the plate that are to be printed. e.g. print_head(None, ("house", "1")) :param parent_plate_value: The (fixed) parent plate value :param plate_values: The plate values over which to loop :param interval: The time interval :param n: The maximum number of elements to print :param print_func: The function used for printing (e.g. logging.info() or print()) :return: None