code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
if (phone is None) and (email is None): raise ParamsError() if password is None: raise ParamsError() r = NCloudBot() # r.username = phone or email md5 = hashlib.md5() md5.update(password) password = md5.hexdigest() print password r.data = {'password': password, 'rememberLogin': rememberLogin} if phone is not None: r.data['phone'] = phone r.method = 'LOGIN' else: r.data['username'] = email r.method = 'EMAIL_LOGIN' r.send() return r.response
def login(password, phone=None, email=None, rememberLogin=True)
Login interface; returns a :class:`Response` object :param password: NetEase Cloud Music password :param phone: (optional) log in with a phone number :param email: (optional) log in with an email address :param rememberLogin: (optional) whether to remember the password, default True
3.393271
3.379711
1.004012
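A minimal usage sketch for the login record above. Hedged: the module name `api` is an assumption made for illustration (the record does not say how the function is exported), and the phone number is a placeholder.

    # Hypothetical import path -- adjust to wherever login() actually lives.
    from api import login

    resp = login('secret', phone='13800000000')
    print(resp.json())  # json() decodes the response body; see the json() record below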
if uid is None: raise ParamsError() r = NCloudBot() r.method = 'USER_PLAY_LIST' r.data = {'offset': offset, 'uid': uid, 'limit': limit, 'csrf_token': ''} r.send() return r.response
def user_play_list(uid, offset=0, limit=1000)
Get a user's playlists, including favorited ones :param uid: user ID, obtainable via login or other interfaces :param offset: (optional) pagination start position, default 0 :param limit: (optional) maximum number of rows returned, default 1000
5.784021
6.747431
0.857218
if uid is None: raise ParamsError() r = NCloudBot() r.method = 'USER_DJ' r.data = {'offset': offset, 'limit': limit, "csrf_token": ""} r.params = {'uid': uid} r.send() return r.response
def user_dj(uid, offset=0, limit=30)
Get a user's radio (DJ) data :param uid: user ID, obtainable via login or other interfaces :param offset: (optional) pagination start position, default 0 :param limit: (optional) maximum number of rows returned, default 30
6.452674
7.678125
0.840397
if keyword is None: raise ParamsError() r = NCloudBot() r.method = 'SEARCH' r.data = { 's': keyword, 'limit': str(limit), 'type': str(type), 'offset': str(offset) } r.send() return r.response
def search(keyword, type=1, offset=0, limit=30)
Search for songs, artists, albums, etc. :param keyword: search keyword :param type: (optional) search type, 1: song, 100: artist, 1000: playlist, 1002: user :param offset: (optional) pagination start position, default 0 :param limit: (optional) maximum number of rows returned, default 30
4.862638
5.656178
0.859704
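Continuing the hedged sketch above, a search call using the type codes from the docstring (100 selects artists); the keyword is illustrative:

    resp = search('Jay Chou', type=100, limit=10)
    artists = resp.json()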
if uid is None: raise ParamsError() r = NCloudBot() r.method = 'USER_FOLLOWS' r.params = {'uid': uid} r.data = {'offset': offset, 'limit': limit, 'order': True} r.send() return r.response
def user_follows(uid, offset='0', limit=30)
Get the list of users that a user follows :param uid: user ID, obtainable via login or other interfaces :param offset: (optional) pagination start position, default 0 :param limit: (optional) maximum number of rows returned, default 30
5.808166
6.995786
0.830238
if uid is None: raise ParamsError() r = NCloudBot() r.method = 'USER_EVENT' r.params = {'uid': uid} r.data = {'time': -1, 'getcounts': True, "csrf_token": ""} r.send() return r.response
def user_event(uid)
Get a user's activity feed :param uid: user ID, obtainable via login or other interfaces
10.012704
11.090661
0.902805
if uid is None: raise ParamsError() r = NCloudBot() r.method = 'USER_RECORD' r.data = {'type': type, 'uid': uid, "csrf_token": ""} r.send() return r.response
def user_record(uid, type=0)
Get a user's play records; requires login :param uid: user ID, obtainable via login or other interfaces :param type: (optional) data type, 0: all records, 1: weekData
8.505954
9.941768
0.855578
r = NCloudBot() r.method = 'EVENT' r.data = {"csrf_token": ""} r.send() return r.response
def event()
Get friends' activity, including shared videos, music, status updates, etc.
15.788038
15.358591
1.027961
r = NCloudBot() r.method = 'TOP_PLAYLIST_HIGHQUALITY' r.data = {'cat': cat, 'offset': offset, 'limit': limit} r.send() return r.response
def top_playlist_highquality(cat='全部', offset=0, limit=20)
Get NetEase Cloud Music's curated (high-quality) playlists :param cat: (optional) playlist category, default '全部' (all), e.g. 华语 (Mandarin), 欧美 (Western), etc. :param offset: (optional) pagination start position, default 0 :param limit: (optional) maximum number of rows returned, default 20
5.045775
6.227322
0.810264
if id is None: raise ParamsError() r = NCloudBot() r.method = 'PLAY_LIST_DETAIL' r.data = {'id': id, 'limit': limit, "csrf_token": ""} r.send() return r.response
def play_list_detail(id, limit=20)
Get all songs in a playlist. The curated-playlists endpoint only returns playlist names and IDs, not the songs themselves, so this interface takes a playlist ID and returns all of its songs. :param id: playlist ID :param limit: (optional) maximum number of rows returned, default 20
7.085087
7.985322
0.887264
if not isinstance(ids, list): raise ParamsError() r = NCloudBot() r.method = 'MUSIC_URL' r.data = {'ids': ids, 'br': 999000, "csrf_token": ""} r.send() return r.response
def music_url(ids=[])
Get song download URLs by song ID :param ids: list of song IDs
6.262943
6.687058
0.936577
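A usage sketch for music_url; the song IDs are placeholders. Note the type check in the code above: passing anything other than a list raises ParamsError.

    resp = music_url(ids=[186016, 186015])  # hypothetical song IDs
    urls = resp.json()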
if id is None: raise ParamsError() r = NCloudBot() r.method = 'LYRIC' r.params = {'id': id} r.send() return r.response
def lyric(id)
Get a song's lyrics by song ID :param id: song ID
7.959171
9.683273
0.82195
if id is None: raise ParamsError() r = NCloudBot() r.method = 'MUSIC_COMMENT' r.params = {'id': id} r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""} r.send() return r.response
def music_comment(id, offset=0, limit=20)
Get a song's comment list :param id: song ID :param offset: (optional) pagination start position, default 0 :param limit: (optional) maximum number of rows returned, default 20
5.906661
6.879433
0.858597
if not isinstance(ids, list): raise ParamsError() c = [] for id in ids: c.append({'id': id}) r = NCloudBot() r.method = 'SONG_DETAIL' r.data = {'c': json.dumps(c), 'ids': c, "csrf_token": ""} r.send() return r.response
def song_detail(ids)
Get detailed song information by song ID :param ids: list of song IDs
6.382484
6.685088
0.954735
r = NCloudBot() r.method = 'PERSONAL_FM' r.data = {"csrf_token": ""} r.send() return r.response
def personal_fm()
Personal FM; must be called after logging in, i.e. after calling login
12.472488
11.590472
1.076098
headers = { 'Accept': '*/*', 'Accept-Language': 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4', 'Connection': 'keep-alive', 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': 'http://music.163.com', 'Host': 'music.163.com', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36' } NCloudBot.req.headers.update(headers) return NCloudBot.req
def _get_webapi_requests(self)
Update the webapi headers on the shared Requests session.
1.929775
1.792944
1.076316
# rememberLogin # if self.method is 'LOGIN' and resp.json().get('code') == 200: # cookiesJar.save_cookies(resp, NCloudBot.username) self.response.content = resp.content self.response.status_code = resp.status_code self.response.headers = resp.headers
def _build_response(self, resp)
Build internal Response object from given response.
7.686454
7.637975
1.006347
success = False if self.method is None: raise ParamsError() try: if self.method == 'SEARCH': req = self._get_requests() _url = self.__NETEAST_HOST + self._METHODS[self.method] resp = req.post(_url, data=self.data) self._build_response(resp) self.response.ok = True else: if isinstance(self.data, dict): data = encrypted_request(self.data) req = self._get_webapi_requests() _url = self.__NETEAST_HOST + self._METHODS[self.method] if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'): _url = _url % self.params['uid'] if self.method in ('LYRIC', 'MUSIC_COMMENT'): _url = _url % self.params['id'] # GET if self.method in ('LYRIC',): resp = req.get(_url) else: resp = req.post(_url, data=data) self._build_response(resp) self.response.ok = True except Exception as why: traceback.print_exc() print 'Requests Exception', why # self._build_response(why) self.response.error = why
def send(self)
Sends the request.
3.888766
3.86138
1.007092
if self.headers and len(self.content) > 3: encoding = get_encoding_from_headers(self.headers) if encoding is not None: return json.loads(self.content.decode(encoding)) return json.loads(self.content)
def json(self)
Returns the json-encoded content of a response, if any.
3.653063
3.030901
1.205273
old = get_option(name) globals()[name] = value return old
def set_option(name, value)
Set plydata option Parameters ---------- name : str Name of the option value : object New value of the option Returns ------- old : object Old value of the option See also -------- :class:`options`
5.131089
10.648097
0.481878
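A hedged usage sketch for set_option. The import path and the 'modify_input_data' option name are assumptions based on typical plydata usage, not confirmed by this record.

    from plydata.options import get_option, set_option  # assumed import path

    old = set_option('modify_input_data', True)  # returns the previous value
    assert get_option('modify_input_data') is True
    set_option('modify_input_data', old)         # restore the old value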
data = pd.merge(verb.x, verb.y, **verb.kwargs) # Preserve x groups if isinstance(verb.x, GroupedDataFrame): data.plydata_groups = list(verb.x.plydata_groups) return data
def _join(verb)
Join helper
10.115255
9.568386
1.057154
if by is None: by = self.plydata_groups # Turn off sorting; sorting by groups messes with some verbs if 'sort' not in kwargs: kwargs['sort'] = False return super().groupby(by, **kwargs)
def groupby(self, by=None, **kwargs)
Group by and do not sort (unless specified) For plydata use cases, there is no need to specify group columns.
9.153531
7.242239
1.263909
# No groups if not self.plydata_groups: return np.ones(len(self), dtype=int) grouper = self.groupby() indices = np.empty(len(self), dtype=int) for i, (_, idx) in enumerate(sorted(grouper.indices.items())): indices[idx] = i return indices
def group_indices(self)
Return group indices
4.330039
4.100807
1.055899
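A sketch of group_indices in action; the plydata.types import path for GroupedDataFrame is a guess:

    import pandas as pd
    from plydata.types import GroupedDataFrame  # assumed import path

    gdf = GroupedDataFrame({'g': list('abab'), 'x': range(4)}, groups=['g'])
    print(gdf.group_indices())  # [0 1 0 1] -- one integer group id per row, group keys sorted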
@wraps(verb_func) def _verb_func(verb): verb.expressions, new_columns = build_expressions(verb) if add_groups: verb.groups = new_columns return verb_func(verb) return _verb_func
def _make_verb_helper(verb_func, add_groups=False)
Create function that prepares verb for the verb function The functions created add expressions to be evaluated to the verb, then call the core verb function Parameters ---------- verb_func : function Core verb function. This is the function called after expressions created and added to the verb. The core function should be one of those that implement verbs that evaluate expressions. add_groups : bool If True, a groups attribute is added to the verb. The groups are the columns created after evaluating the expressions. Returns ------- out : function A function that implements a helper verb.
4.09805
3.572335
1.147163
if isinstance(df, GroupedDataFrame): base_df = GroupedDataFrame( df.loc[:, df.plydata_groups], df.plydata_groups, copy=True) else: base_df = pd.DataFrame(index=df.index) return base_df
def _get_base_dataframe(df)
Remove all columns other than those grouped on
5.131686
4.669199
1.09905
n = len(data) if isinstance(gdf, GroupedDataFrame): for i, col in enumerate(gdf.plydata_groups): if col not in data: group_values = [gdf[col].iloc[0]] * n # Need to be careful and maintain the dtypes # of the group columns if pdtypes.is_categorical_dtype(gdf[col]): col_values = pd.Categorical( group_values, categories=gdf[col].cat.categories, ordered=gdf[col].cat.ordered ) else: col_values = pd.Series( group_values, index=data.index, dtype=gdf[col].dtype ) # Group columns come first data.insert(i, col, col_values) return data
def _add_group_columns(data, gdf)
Add group columns to data with a value from the grouped dataframe It is assumed that the grouped dataframe contains a single group >>> data = pd.DataFrame({ ... 'x': [5, 6, 7]}) >>> gdf = GroupedDataFrame({ ... 'g': list('aaa'), ... 'x': range(3)}, groups=['g']) >>> _add_group_columns(data, gdf) g x 0 a 5 1 a 6 2 a 7
3.214729
3.555518
0.904152
with suppress(AttributeError): # If the index of a series and the dataframe # in which the series will be assigned to a # column do not match, missing values/NaNs # are created. We do not want that. if not value.index.equals(data.index): if len(value) == len(data): value.index = data.index else: value.reset_index(drop=True, inplace=True) # You cannot assign a scalar value to a dataframe # without an index. You need an iterable value. if data.index.empty: try: len(value) except TypeError: scalar = True else: scalar = isinstance(value, str) if scalar: value = [value] data[col] = value return data
def _create_column(data, col, value)
Create column in dataframe Helper method meant to deal with problematic column values. e.g When the series index does not match that of the data. Parameters ---------- data : pandas.DataFrame dataframe in which to insert value col : column label Column name value : object Value to assign to column Returns ------- data : pandas.DataFrame Modified original dataframe >>> df = pd.DataFrame({'x': [1, 2, 3]}) >>> y = pd.Series([11, 12, 13], index=[21, 22, 23]) Data index and value index do not match >>> _create_column(df, 'y', y) x y 0 1 11 1 2 12 2 3 13 Non-empty dataframe, scalar value >>> _create_column(df, 'z', 3) x y z 0 1 11 3 1 2 12 3 2 3 13 3 Empty dataframe, scalar value >>> df = pd.DataFrame() >>> _create_column(df, 'w', 3) w 0 3 >>> _create_column(df, 'z', 'abc') w z 0 3 abc
4.837102
5.252002
0.921002
def partial(func, col, *args, **kwargs): def new_func(gdf): return func(gdf[col], *args, **kwargs) return new_func def make_statement(func, col): if isinstance(func, str): expr = '{}({})'.format(func, col) elif callable(func): expr = partial(func, col, *verb.args, **verb.kwargs) else: raise TypeError("{} is not a function".format(func)) return expr def func_name(func): if isinstance(func, str): return func try: return func.__name__ except AttributeError: return '' # Generate function names. They act as identifiers (postfixed # to the original columns) in the new_column names. if isinstance(verb.functions, (tuple, list)): names = (func_name(func) for func in verb.functions) names_and_functions = zip(names, verb.functions) else: names_and_functions = verb.functions.items() # Create statements for the expressions # and postfix identifiers columns = Selector.get(verb) # columns to act on postfixes = [] stmts = [] for name, func in names_and_functions: postfixes.append(name) for col in columns: stmts.append(make_statement(func, col)) if not stmts: stmts = columns # Names of the new columns # e.g col1_mean, col2_mean, col1_std, col2_std add_postfix = (isinstance(verb.functions, dict) or len(verb.functions) > 1) if add_postfix: fmt = '{}_{}'.format new_columns = [fmt(c, p) for p in postfixes for c in columns] else: new_columns = columns expressions = [Expression(stmt, col) for stmt, col in zip(stmts, new_columns)] return expressions, new_columns
def build_expressions(verb)
Build expressions for helper verbs Parameters ---------- verb : verb A verb with a *functions* attribute. Returns ------- out : tuple (List of Expressions, New columns). The expressions and the new columns in which the results of those expressions will be stored. Even when a result will be stored in a column with an existing label, that column is still considered new, i.e. an expression ``x='x+1'`` will create a new column `x` to replace an old column `x`.
3.490683
3.391462
1.029256
# Short cut if self._all_expressions_evaluated(): if self.drop: # Drop extra columns. They do not correspond to # any expressions. columns = [expr.column for expr in self.expressions] self.data = self.data.loc[:, columns] return self.data # group_by # evaluate expressions # combine columns # concat evaluated group data and clean up index and group gdfs = self._get_group_dataframes() egdfs = self._evaluate_expressions(gdfs) edata = self._concat(egdfs) return edata
def process(self)
Run the expressions Returns ------- out : pandas.DataFrame Resulting data
9.026324
8.600218
1.049546
def present(expr): return expr.stmt == expr.column and expr.column in self.data return all(present(expr) for expr in self.expressions)
def _all_expressions_evaluated(self)
Return True if all expressions match the columns Saves some processor cycles
9.616929
9.335732
1.03012
if isinstance(self.data, GroupedDataFrame): grouper = self.data.groupby() # groupby on categorical columns uses the categories # even if they are not present in the data. This # leads to empty groups. We exclude them. return (gdf for _, gdf in grouper if not gdf.empty) else: return (self.data, )
def _get_group_dataframes(self)
Get group dataframes Returns ------- out : tuple or generator Group dataframes
6.054814
6.041039
1.00228
gdf._is_copy = None result_index = gdf.index if self.keep_index else [] data = pd.DataFrame(index=result_index) for expr in self.expressions: value = expr.evaluate(gdf, self.env) if isinstance(value, pd.DataFrame): data = value break else: _create_column(data, expr.column, value) data = _add_group_columns(data, gdf) return data
def _evaluate_group_dataframe(self, gdf)
Evaluate a single group dataframe Parameters ---------- gdf : pandas.DataFrame Input group dataframe Returns ------- out : pandas.DataFrame Result data
4.171311
4.832239
0.863225
egdfs = list(egdfs) edata = pd.concat(egdfs, axis=0, ignore_index=False, copy=False) # groupby can mixup the rows. We try to maintain the original # order, but we can only do that if the result has a one to # one relationship with the original one2one = ( self.keep_index and not any(edata.index.duplicated()) and len(edata.index) == len(self.data.index)) if one2one: edata = edata.sort_index() else: edata.reset_index(drop=True, inplace=True) # Maybe this should happen in the verb functions if self.keep_groups and self.groups: edata = GroupedDataFrame(edata, groups=self.groups) return edata
def _concat(self, egdfs)
Concatenate evaluated group dataframes Parameters ---------- egdfs : iterable Evaluated dataframes Returns ------- edata : pandas.DataFrame Evaluated data
4.730981
4.86245
0.972963
def _get_slice_cols(sc): # Just like pandas.DataFrame.loc the stop # column is included idx_start = data_columns.get_loc(sc.start) idx_stop = data_columns.get_loc(sc.stop) + 1 return data_columns[idx_start:idx_stop:sc.step] result = [] for col in names: if isinstance(col, slice): result.extend(_get_slice_cols(col)) else: result.append(col) return tuple(result)
def _resolve_slices(data_columns, names)
Convert any slices into column names Parameters ---------- data_columns : pandas.Index Dataframe columns names : tuple Names (including slices) of columns in the dataframe. Returns ------- out : tuple Names of columns in the dataframe. Has no slices.
3.222194
3.016267
1.068272
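An illustrative call of the helper above (it is module-internal, so this is a sketch rather than public API):

    import pandas as pd

    columns = pd.Index(['a', 'b', 'c', 'd'])
    _resolve_slices(columns, ('a', slice('b', 'd')))
    # -> ('a', 'b', 'c', 'd'); like DataFrame.loc, the stop column 'd' is included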
columns = verb.data.columns contains = verb.contains matches = verb.matches groups = _get_groups(verb) names = cls._resolve_slices(columns, verb.names) names_set = set(names) groups_set = set(groups) lst = [[]] if names or groups: # group variable missing from the selection are prepended missing = [g for g in groups if g not in names_set] missing_set = set(missing) c1 = missing + [x for x in names if x not in missing_set] lst.append(c1) if verb.startswith: c2 = [x for x in columns if isinstance(x, str) and x.startswith(verb.startswith)] lst.append(c2) if verb.endswith: c3 = [x for x in columns if isinstance(x, str) and x.endswith(verb.endswith)] lst.append(c3) if contains: c4 = [] for col in columns: if (isinstance(col, str) and any(s in col for s in contains)): c4.append(col) lst.append(c4) if matches: c5 = [] patterns = [x if hasattr(x, 'match') else re.compile(x) for x in matches] for col in columns: if isinstance(col, str): if any(bool(p.match(col)) for p in patterns): c5.append(col) lst.append(c5) selected = unique(list(itertools.chain(*lst))) if verb.drop: to_drop = [col for col in selected if col not in groups_set] selected = [col for col in columns if col not in to_drop] return selected
def select(cls, verb)
Return selected columns for the select verb Parameters ---------- verb : object verb with the column selection attributes: - names - startswith - endswith - contains - matches
2.716984
2.597303
1.046079
groups = set(_get_groups(verb)) return [col for col in verb.data if col not in groups]
def _all(cls, verb)
Return all the columns of a verb, excluding the group columns
11.7691
11.830774
0.994787
# Named (listed) columns are always included columns = cls.select(verb) final_columns_set = set(cls.select(verb)) groups_set = set(_get_groups(verb)) final_columns_set -= groups_set - set(verb.names) def pred(col): if col not in verb.data: raise KeyError( "Unknown column name, {!r}".format(col)) return col in final_columns_set return [col for col in columns if pred(col)]
def _at(cls, verb)
A verb with a select text match
6.512822
6.07697
1.071722
pred = verb.predicate data = verb.data groups = set(_get_groups(verb)) # force predicate if isinstance(pred, str): if not pred.endswith('_dtype'): pred = '{}_dtype'.format(pred) pred = getattr(pdtypes, pred) elif pdtypes.is_bool_dtype(np.array(pred)): # Turn boolean array into a predicate function it = iter(pred) def pred(col): return next(it) return [col for col in data if pred(data[col]) and col not in groups]
def _if(cls, verb)
A verb with a predicate function
6.137596
5.739845
1.069297
try: module = type_lookup[type(data)] except KeyError: # Some guess work for subclasses for type_, mod in type_lookup.items(): if isinstance(data, type_): module = mod break try: return getattr(module, verb) except (NameError, AttributeError): msg = "Data source of type '{}' is not supported." raise TypeError(msg.format(type(data)))
def get_verb_function(data, verb)
Return function that implements the verb for given data type
4.492496
4.275864
1.050664
# dispatch if not hasattr(args[0], '_Expression'): return BaseExpression(*args, **kwargs) else: return args[0]._Expression(*args, **kwargs)
def Expression(*args, **kwargs)
Return an appropriate Expression given the arguments Parameters ---------- args : tuple Positional arguments passed to the Expression class kwargs : dict Keyword arguments passed to the Expression class
7.044723
6.09853
1.155151
def n(): return len(data) if isinstance(self.stmt, str): # Add function n() that computes the # size of the group data to the inner namespace. if self._has_n_func: namespace = dict(data, n=n) else: namespace = data # Avoid obvious keywords e.g if a column # is named class if self.stmt not in KEYWORDS: value = env.eval( self.stmt, source_name='Expression.evaluate', inner_namespace=namespace) else: value = namespace[self.stmt] elif callable(self.stmt): value = self.stmt(data) else: value = self.stmt return value
def evaluate(self, data, env)
Evaluate statement Parameters ---------- data : pandas.DataFrame Data in whose namespace the statement will be evaluated. Typically, this is a group dataframe. Returns ------- out : object Result of the evaluation.
8.204559
7.580857
1.082273
# For each predicate-value, we keep track of the positions # that have been copied to the result, so that the later # more general values do not overwrite the previous ones. result = np.repeat(None, len(data)) copied = np.repeat(False, len(data)) for pred_expr, value_expr in self.pv_expressions: bool_idx = pred_expr.evaluate(data, env) if not pdtypes.is_bool_dtype(np.asarray(bool_idx)): raise TypeError( "The predicate keys must return a boolean array, " "or a boolean value.") value = value_expr.evaluate(data, env) mask = (copied ^ bool_idx) & bool_idx copied |= bool_idx idx = np.where(mask)[0] result[idx] = self.nice_value(value, idx) return np.array(list(result))
def evaluate(self, data, env)
Evaluate the predicates and values
5.348423
5.221165
1.024374
bool_idx = self.predicate_expr.evaluate(data, env) true_value = self.true_value_expr.evaluate(data, env) false_value = self.false_value_expr.evaluate(data, env) true_idx = np.where(bool_idx)[0] false_idx = np.where(~bool_idx)[0] result = np.repeat(None, len(data)) result[true_idx] = self.nice_value(true_value, true_idx) result[false_idx] = self.nice_value(false_value, false_idx) return np.array(list(result))
def evaluate(self, data, env)
Evaluate the predicates and values
2.509588
2.482395
1.010954
return self.__class__(self._namespaces + [outer_namespace], self.flags)
def with_outer_namespace(self, outer_namespace)
Return a new EvalEnvironment with an extra namespace added. This namespace will be used only for variables that are not found in any existing namespace, i.e., it is "outside" them all.
10.739295
8.754438
1.226726
code = compile(expr, source_name, "eval", self.flags, False) return eval(code, {}, VarLookupDict([inner_namespace] + self._namespaces))
def eval(self, expr, source_name="<string>", inner_namespace={})
Evaluate some Python code in the encapsulated environment. :arg expr: A string containing a Python expression. :arg source_name: A name for this string, for use in tracebacks. :arg inner_namespace: A dict-like object that will be checked first when `expr` attempts to access any variables. :returns: The value of `expr`.
8.80812
12.233155
0.72002
if isinstance(eval_env, cls): return eval_env elif isinstance(eval_env, numbers.Integral): depth = eval_env + reference else: raise TypeError("Parameter 'eval_env' must be either an integer " "or an instance of patsy.EvalEnvironment.") frame = inspect.currentframe() try: for i in range(depth + 1): if frame is None: raise ValueError("call-stack is not that deep!") frame = frame.f_back return cls([frame.f_locals, frame.f_globals], frame.f_code.co_flags & _ALL_FUTURE_FLAGS) # The try/finally is important to avoid a potential reference cycle -- # any exception traceback will carry a reference to *our* frame, which # contains a reference to our local variables, which would otherwise # carry a reference to some parent frame, where the exception was # caught...: finally: del frame
def capture(cls, eval_env=0, reference=0)
Capture an execution environment from the stack. If `eval_env` is already an :class:`EvalEnvironment`, it is returned unchanged. Otherwise, we walk up the stack by ``eval_env + reference`` steps and capture that function's evaluation environment. For ``eval_env=0`` and ``reference=0``, the default, this captures the stack frame of the function that calls :meth:`capture`. If ``eval_env + reference`` is 1, then we capture that function's caller, etc. This somewhat complicated calling convention is designed to be convenient for functions which want to capture their caller's environment by default, but also allow explicit environments to be specified. See the second example. Example:: x = 1 this_env = EvalEnvironment.capture() assert this_env.namespace["x"] == 1 def child_func(): return EvalEnvironment.capture(1) this_env_from_child = child_func() assert this_env_from_child.namespace["x"] == 1 Example:: # This function can be used like: # my_model(formula_like, data) # -> evaluates formula_like in caller's environment # my_model(formula_like, data, eval_env=1) # -> evaluates formula_like in caller's caller's environment # my_model(formula_like, data, eval_env=my_env) # -> evaluates formula_like in environment 'my_env' def my_model(formula_like, data, eval_env=0): eval_env = EvalEnvironment.capture(eval_env, reference=1) return model_setup_helper(formula_like, data, eval_env) This is how :func:`dmatrix` works. .. versionadded: 0.2.0 The ``reference`` argument.
5.104238
4.97391
1.026202
vld = VarLookupDict(self._namespaces) new_ns = dict((name, vld[name]) for name in names) return EvalEnvironment([new_ns], self.flags)
def subset(self, names)
Creates a new, flat EvalEnvironment that contains only the variables specified.
12.554305
7.501159
1.673649
d[key] = value try: yield d finally: del d[key]
def temporary_key(d, key, value)
Context manager that removes key from dictionary on closing The dictionary will hold the key for the duration of the context. Parameters ---------- d : dict-like Dictionary in which to insert a temporary key. key : hashable Location at which to insert ``value``. value : object Value to insert in ``d`` at location ``key``.
3.285757
5.072637
0.647741
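A short usage sketch, grounded directly in the code above:

    d = {'a': 1}
    with temporary_key(d, 'b', 2):
        assert d['b'] == 2   # key present inside the context
    assert 'b' not in d      # and removed on exit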
setattr(obj, name, value) try: yield obj finally: delattr(obj, name)
def temporary_attr(obj, name, value)
Context manager that removes an attribute from an object on closing The object will hold the attribute for the duration of the context. Parameters ---------- obj : object Object onto which to add a temporary attribute. name : str Name of attribute to add to ``obj``. value : object Value of the attribute.
2.818514
3.793048
0.743074
env = EvalEnvironment.capture(1) try: return env.namespace[name] except KeyError: raise NameError("No data named {!r} found".format(name))
def Q(name)
Quote a variable name A way to 'quote' variable names, especially ones that do not otherwise meet Python's variable name rules. Parameters ---------- name : str Name of variable Returns ------- value : object Value of variable Examples -------- >>> import pandas as pd >>> from plydata import define >>> df = pd.DataFrame({'class': [10, 20, 30]}) Since ``class`` is a reserved python keyword it cannot be a variable name, and therefore cannot be used in an expression without quoting it. >>> df >> define(y='class+1') Traceback (most recent call last): File "<string>", line 1 class+1 ^ SyntaxError: invalid syntax >>> df >> define(y='Q("class")+1') class y 0 10 11 1 20 21 2 30 31 Note that it is ``'Q("some name")'`` and not ``'Q(some name)'``. As in the above example, you do not need to ``import`` ``Q`` before you can use it.
8.118253
12.806493
0.633917
original_index = [df.index for df in dfs] have_bad_index = [not isinstance(df.index, pd.RangeIndex) for df in dfs] for df, bad in zip(dfs, have_bad_index): if bad: df.reset_index(drop=True, inplace=True) try: yield dfs finally: for df, bad, idx in zip(dfs, have_bad_index, original_index): if bad and len(df.index) == len(idx): df.index = idx
def regular_index(*dfs)
Change & restore the indices of dataframes Dataframes with duplicate index values can be hard to work with. When split and recombined, you cannot restore the row order. This can be the case even if the index is unique but irregular/unordered. This context manager resets the unordered indices of any dataframe passed to it; on exit it restores the original index. A regular index is of the form:: RangeIndex(start=0, stop=n, step=1) Parameters ---------- dfs : tuple Dataframes Yields ------ dfs : tuple Dataframes Examples -------- Create dataframes with different indices >>> df1 = pd.DataFrame([4, 3, 2, 1]) >>> df2 = pd.DataFrame([3, 2, 1], index=[3, 0, 0]) >>> df3 = pd.DataFrame([11, 12, 13], index=[11, 12, 13]) Within the context manager all frames have nice range indices >>> with regular_index(df1, df2, df3): ... print(df1.index) ... print(df2.index) ... print(df3.index) RangeIndex(start=0, stop=4, step=1) RangeIndex(start=0, stop=3, step=1) RangeIndex(start=0, stop=3, step=1) Indices restored >>> df1.index RangeIndex(start=0, stop=4, step=1) >>> df2.index Int64Index([3, 0, 0], dtype='int64') >>> df3.index Int64Index([11, 12, 13], dtype='int64')
2.488878
2.839272
0.87659
seen = set() def make_seen(x): seen.add(x) return x return [make_seen(x) for x in lst if x not in seen]
def unique(lst)
Return unique elements :class:`pandas.unique` and :class:`numpy.unique` cast mixed type lists to the same type. They are faster, but sometimes we want to maintain the type. Parameters ---------- lst : list-like List of items Returns ------- out : list Unique items in the order that they appear in the input. Examples -------- >>> import pandas as pd >>> import numpy as np >>> lst = ['one', 'two', 123, 'three'] >>> pd.unique(lst) array(['one', 'two', '123', 'three'], dtype=object) >>> np.unique(lst) array(['123', 'one', 'three', 'two'], dtype='<U5') >>> unique(lst) ['one', 'two', 123, 'three'] pandas and numpy cast 123 to a string, and numpy does not even maintain the order.
3.058544
4.655399
0.656989
try: return arr.iloc[n] except (KeyError, IndexError): return np.nan
def _nth(arr, n)
Return the nth value of array If it is missing return NaN
4.036141
3.467903
1.163856
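A sketch of _nth; the array needs a pandas-style .iloc accessor:

    import numpy as np
    import pandas as pd

    s = pd.Series([10, 20, 30])
    _nth(s, 1)    # 20
    _nth(s, 99)   # nan -- out-of-range positions return NaN instead of raising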
if frames is None and fps is None: return times_to_ms(h, m, s, ms) elif frames is not None and fps is not None: return frames_to_ms(frames, fps) else: raise ValueError("Both fps and frames must be specified")
def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None)
Convert time to milliseconds. See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are specified, :func:`pysubs2.time.frames_to_ms()` is called instead. Raises: ValueError: Invalid fps, or one of frames/fps is missing. Example: >>> make_time(s=1.5) 1500 >>> make_time(frames=50, fps=25) 2000
2.482312
2.663802
0.931868
h, m, s, frac = map(int, groups) ms = frac * 10**(3 - len(groups[-1])) ms += s * 1000 ms += m * 60000 ms += h * 3600000 return ms
def timestamp_to_ms(groups)
Convert groups from :data:`pysubs2.time.TIMESTAMP` match to milliseconds. Example: >>> timestamp_to_ms(TIMESTAMP.match("0:00:00.42").groups()) 420
2.483495
2.911049
0.853127
ms += s * 1000 ms += m * 60000 ms += h * 3600000 return int(round(ms))
def times_to_ms(h=0, m=0, s=0, ms=0)
Convert hours, minutes, seconds to milliseconds. Arguments may be positive or negative, int or float, need not be normalized (``s=120`` is okay). Returns: Number of milliseconds (rounded to int).
2.146832
2.737198
0.784317
if fps <= 0: raise ValueError("Framerate must be positive number (%f)." % fps) return int(round(frames * (1000 / fps)))
def frames_to_ms(frames, fps)
Convert frame-based duration to milliseconds. Arguments: frames: Number of frames (should be int). fps: Framerate (must be a positive number, eg. 23.976). Returns: Number of milliseconds (rounded to int). Raises: ValueError: fps was negative or zero.
4.528205
4.666367
0.970392
if fps <= 0: raise ValueError("Framerate must be positive number (%f)." % fps) return int(round((ms / 1000) * fps))
def ms_to_frames(ms, fps)
Convert milliseconds to number of frames. Arguments: ms: Number of milliseconds (may be int, float or other numeric class). fps: Framerate (must be a positive number, eg. 23.976). Returns: Number of frames (int). Raises: ValueError: fps was negative or zero.
4.12071
4.430006
0.930182
ms = int(round(ms)) h, ms = divmod(ms, 3600000) m, ms = divmod(ms, 60000) s, ms = divmod(ms, 1000) return Times(h, m, s, ms)
def ms_to_times(ms)
Convert milliseconds to normalized tuple (h, m, s, ms). Arguments: ms: Number of milliseconds (may be int, float or other numeric class). Should be non-negative. Returns: Named tuple (h, m, s, ms) of ints. Invariants: ``ms in range(1000) and s in range(60) and m in range(60)``
2.013486
2.057446
0.978634
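A worked example of the normalization above:

    h, m, s, ms = ms_to_times(61002)
    # (0, 1, 1, 2): 61002 ms = 1 minute + 1 second + 2 milliseconds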
sgn = "-" if ms < 0 else "" h, m, s, ms = ms_to_times(abs(ms)) if fractions: return sgn + "{:01d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms) else: return sgn + "{:01d}:{:02d}:{:02d}".format(h, m, s)
def ms_to_str(ms, fractions=False)
Prettyprint milliseconds to [-]H:MM:SS[.mmm] Handles huge and/or negative times. Non-negative times with ``fractions=True`` are matched by :data:`pysubs2.time.TIMESTAMP`. Arguments: ms: Number of milliseconds (int, float or other numeric class). fractions: Whether to print up to millisecond precision. Returns: str
1.984356
2.231961
0.889064
# XXX throw on overflow/underflow? if ms < 0: ms = 0 if ms > MAX_REPRESENTABLE_TIME: ms = MAX_REPRESENTABLE_TIME h, m, s, ms = ms_to_times(ms) return "%01d:%02d:%02d.%02d" % (h, m, s, ms//10)
def ms_to_timestamp(ms)
Convert ms to 'H:MM:SS.cc'
3.643121
3.636626
1.001786
fragments = SSAEvent.OVERRIDE_SEQUENCE.split(text) if len(fragments) == 1: return [(text, style)] def apply_overrides(all_overrides): s = style.copy() for tag in re.findall(r"\\[ibus][10]|\\r[a-zA-Z_0-9 ]*", all_overrides): if tag == r"\r": s = style.copy() # reset to original line style elif tag.startswith(r"\r"): name = tag[2:] if name in styles: s = styles[name].copy() # reset to named style else: if "i" in tag: s.italic = "1" in tag elif "b" in tag: s.bold = "1" in tag elif "u" in tag: s.underline = "1" in tag elif "s" in tag: s.strikeout = "1" in tag return s overrides = SSAEvent.OVERRIDE_SEQUENCE.findall(text) overrides_prefix_sum = ["".join(overrides[:i]) for i in range(len(overrides) + 1)] computed_styles = map(apply_overrides, overrides_prefix_sum) return list(zip(fragments, computed_styles))
def parse_tags(text, style=SSAStyle.DEFAULT_STYLE, styles={})
Split text into fragments with computed SSAStyles. Returns list of tuples (fragment, style), where fragment is a part of text between two brace-delimited override sequences, and style is the computed styling of the fragment, ie. the original style modified by all override sequences before the fragment. Newline and non-breakable space overrides are left as-is. Supported override tags: - i, b, u, s - r (with or without style name)
3.771954
3.289493
1.146667
text = self.text text = self.OVERRIDE_SEQUENCE.sub("", text) text = text.replace(r"\h", " ") text = text.replace(r"\n", "\n") text = text.replace(r"\N", "\n") return text
def plaintext(self)
Subtitle text as multi-line string with no tags (read/write property). Writing to this property replaces :attr:`SSAEvent.text` with given plain text. Newlines are converted to ``\\N`` tags.
4.522697
4.334288
1.043469
delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps) self.start += delta self.end += delta
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None)
Shift start and end times. See :meth:`SSAFile.shift()` for full description.
2.891431
3.192194
0.905782
if isinstance(other, SSAEvent): return self.as_dict() == other.as_dict() else: raise TypeError("Cannot compare to non-SSAEvent object")
def equals(self, other)
Field-based equality for SSAEvents.
4.458555
2.835731
1.572277
with open(path, encoding=encoding) as fp: return cls.from_file(fp, format_, fps=fps, **kwargs)
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs)
Load subtitle file from given path. Arguments: path (str): Path to subtitle file. encoding (str): Character encoding of input file. Defaults to UTF-8, you may need to change this. format_ (str): Optional, forces use of specific parser (eg. `"srt"`, `"ass"`). Otherwise, format is detected automatically from file contents. This argument should be rarely needed. fps (float): Framerate for frame-based formats (MicroDVD), for other formats this argument is ignored. Framerate might be detected from the file, in which case you don't need to specify it here (when given, this argument overrides autodetection). kwargs: Extra options for the parser. Returns: SSAFile Raises: IOError UnicodeDecodeError pysubs2.exceptions.UnknownFPSError pysubs2.exceptions.UnknownFormatIdentifierError pysubs2.exceptions.FormatAutodetectionError Note: pysubs2 may autodetect subtitle format and/or framerate. These values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps` attributes. Example: >>> subs1 = pysubs2.load("subrip-subtitles.srt") >>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
2.821504
6.601945
0.427375
fp = io.StringIO(string) return cls.from_file(fp, format_, fps=fps, **kwargs)
def from_string(cls, string, format_=None, fps=None, **kwargs)
Load subtitle file from string. See :meth:`SSAFile.load()` for full description. Arguments: string (str): Subtitle file in a string. Note that the string must be Unicode (in Python 2). Returns: SSAFile Example: >>> text = ''' ... 1 ... 00:00:00,000 --> 00:00:05,000 ... An example SubRip file. ... ''' >>> subs = SSAFile.from_string(text)
3.452757
5.655931
0.610467
if format_ is None: # Autodetect subtitle format, then read again using correct parser. # The file might be a pipe and we need to read it twice, # so just buffer everything. text = fp.read() fragment = text[:10000] format_ = autodetect_format(fragment) fp = io.StringIO(text) impl = get_format_class(format_) subs = cls() # an empty subtitle file subs.format = format_ subs.fps = fps impl.from_file(subs, fp, format_, fps=fps, **kwargs) return subs
def from_file(cls, fp, format_=None, fps=None, **kwargs)
Read subtitle file from file object. See :meth:`SSAFile.load()` for full description. Note: This is a low-level method. Usually, one of :meth:`SSAFile.load()` or :meth:`SSAFile.from_string()` is preferable. Arguments: fp (file object): A file object, ie. :class:`io.TextIOBase` instance. Note that the file must be opened in text mode (as opposed to binary). Returns: SSAFile
5.906202
6.110672
0.966539
if format_ is None: ext = os.path.splitext(path)[1].lower() format_ = get_format_identifier(ext) with open(path, "w", encoding=encoding) as fp: self.to_file(fp, format_, fps=fps, **kwargs)
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs)
Save subtitle file to given path. Arguments: path (str): Path to subtitle file. encoding (str): Character encoding of output file. Defaults to UTF-8, which should be fine for most purposes. format_ (str): Optional, specifies desired subtitle format (eg. `"srt"`, `"ass"`). Otherwise, format is detected automatically from file extension. Thus, this argument is rarely needed. fps (float): Framerate for frame-based formats (MicroDVD), for other formats this argument is ignored. When omitted, :attr:`SSAFile.fps` value is used (ie. the framerate used for loading the file, if any). When the :class:`SSAFile` wasn't loaded from MicroDVD, or if you wish to save it with a different framerate, use this argument. See also :meth:`SSAFile.transform_framerate()` for fixing bad frame-based to time-based conversions. kwargs: Extra options for the writer. Raises: IOError UnicodeEncodeError pysubs2.exceptions.UnknownFPSError pysubs2.exceptions.UnknownFormatIdentifierError pysubs2.exceptions.UnknownFileExtensionError
2.648534
2.713807
0.975948
fp = io.StringIO() self.to_file(fp, format_, fps=fps, **kwargs) return fp.getvalue()
def to_string(self, format_, fps=None, **kwargs)
Get subtitle file as a string. See :meth:`SSAFile.save()` for full description. Returns: str
2.903417
4.49524
0.645887
impl = get_format_class(format_) impl.to_file(self, fp, format_, fps=fps, **kwargs)
def to_file(self, fp, format_, fps=None, **kwargs)
Write subtitle file to file object. See :meth:`SSAFile.save()` for full description. Note: This is a low-level method. Usually, one of :meth:`SSAFile.save()` or :meth:`SSAFile.to_string()` is preferable. Arguments: fp (file object): A file object, ie. :class:`io.TextIOBase` instance. Note that the file must be opened in text mode (as opposed to binary).
4.253644
6.884232
0.617882
if in_fps <= 0 or out_fps <= 0: raise ValueError("Framerates must be positive, cannot transform %f -> %f" % (in_fps, out_fps)) ratio = in_fps / out_fps for line in self: line.start = int(round(line.start * ratio)) line.end = int(round(line.end * ratio))
def transform_framerate(self, in_fps, out_fps)
Rescale all timestamps by ratio of in_fps/out_fps. Can be used to fix files converted from frame-based to time-based with wrongly assumed framerate. Arguments: in_fps (float) out_fps (float) Raises: ValueError: Non-positive framerate given.
2.525565
2.763763
0.913814
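A hedged end-to-end sketch of fixing a bad frame-based conversion with transform_framerate; the file names are illustrative:

    import pysubs2

    subs = pysubs2.load('badly-converted.srt')
    subs.transform_framerate(25, 23.976)  # timestamps rescaled by 25/23.976
    subs.save('fixed.srt')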
if old_name not in self.styles: raise KeyError("Style %r not found" % old_name) if new_name in self.styles: raise ValueError("There is already a style called %r" % new_name) if not is_valid_field_content(new_name): raise ValueError("%r is not a valid name" % new_name) self.styles[new_name] = self.styles[old_name] del self.styles[old_name] for line in self: # XXX also handle \r override tag if line.style == old_name: line.style = new_name
def rename_style(self, old_name, new_name)
Rename a style, including references to it. Arguments: old_name (str): Style to be renamed. new_name (str): New name for the style (must be unused). Raises: KeyError: No style named old_name. ValueError: new_name is not a legal name (cannot use commas) or new_name is taken.
2.825773
2.962086
0.95398
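A usage sketch for rename_style; the file and style names are placeholders:

    import pysubs2

    subs = pysubs2.load('subtitles.ass')
    subs.rename_style('Default', 'Dialogue')  # also rewrites lines referencing the style
    subs.save('subtitles.ass')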
if not isinstance(subs, SSAFile): raise TypeError("Must supply an SSAFile.") for name, style in subs.styles.items(): if name not in self.styles or overwrite: self.styles[name] = style
def import_styles(self, subs, overwrite=True)
Merge in styles from other SSAFile. Arguments: subs (SSAFile): Subtitle file imported from. overwrite (bool): On name conflict, use style from the other file (default: True).
3.330021
2.928171
1.137236
if isinstance(other, SSAFile): for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}: sv, ov = self.info.get(key), other.info.get(key) if sv is None: logging.debug("%r missing in self.info", key) return False elif ov is None: logging.debug("%r missing in other.info", key) return False elif sv != ov: logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov) return False for key in set(chain(self.styles.keys(), other.styles.keys())): sv, ov = self.styles.get(key), other.styles.get(key) if sv is None: logging.debug("%r missing in self.styles", key) return False elif ov is None: logging.debug("%r missing in other.styles", key) return False elif sv != ov: for k in sv.FIELDS: if getattr(sv, k) != getattr(ov, k): logging.debug("difference in field %r", k) logging.debug("style %r differs (self=%r, other=%r)", key, sv.as_dict(), ov.as_dict()) return False if len(self) != len(other): logging.debug("different # of subtitles (self=%d, other=%d)", len(self), len(other)) return False for i, (se, oe) in enumerate(zip(self.events, other.events)): if not se.equals(oe): for k in se.FIELDS: if getattr(se, k) != getattr(oe, k): logging.debug("difference in field %r", k) logging.debug("event %d differs (self=%r, other=%r)", i, se.as_dict(), oe.as_dict()) return False return True else: raise TypeError("Cannot compare to non-SSAFile object")
def equals(self, other)
Equality of two SSAFiles. Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and :attr:`SSAFile.events`. Order of entries in OrderedDicts does not matter. "ScriptType" key in info is considered an implementation detail and thus ignored. Useful mostly in unit tests. Differences are logged at DEBUG level.
1.982447
1.730492
1.145597
if format_ not in FORMAT_IDENTIFIER_TO_FORMAT_CLASS: raise UnknownFormatIdentifierError(format_) for ext, f in FILE_EXTENSION_TO_FORMAT_IDENTIFIER.items(): if f == format_: return ext raise RuntimeError("No file extension for format %r" % format_)
def get_file_extension(format_)
Format identifier -> file extension
4.21907
4.00032
1.054683
formats = set() for impl in FORMAT_IDENTIFIER_TO_FORMAT_CLASS.values(): guess = impl.guess_format(content) if guess is not None: formats.add(guess) if len(formats) == 1: return formats.pop() elif not formats: raise FormatAutodetectionError("No suitable formats") else: raise FormatAutodetectionError("Multiple suitable formats (%r)" % formats)
def autodetect_format(content)
Return format identifier for given fragment or raise FormatAutodetectionError.
3.059107
2.591321
1.18052
app._debug = True dft_logger.debug('livereload enabled: %s', '✓' if config.livereload else '✖') def get_host(request): if config.infer_host: return request.headers.get('host', 'localhost').split(':', 1)[0] else: return config.host if config.livereload: async def on_prepare(request, response): if (not request.path.startswith('/_debugtoolbar') and 'text/html' in response.content_type and getattr(response, 'body', False)): lr_snippet = LIVE_RELOAD_HOST_SNIPPET.format(get_host(request), config.aux_port) dft_logger.debug('appending live reload snippet "%s" to body', lr_snippet) response.body += lr_snippet.encode() app.on_response_prepare.append(on_prepare) static_path = config.static_url.strip('/') if config.infer_host and config.static_path is not None: # we set the app key even in middleware to make the switch to production easier and for backwards compat. @web.middleware async def static_middleware(request, handler): static_url = 'http://{}:{}/{}'.format(get_host(request), config.aux_port, static_path) dft_logger.debug('settings app static_root_url to "%s"', static_url) request.app['static_root_url'].change(static_url) return await handler(request) app.middlewares.insert(0, static_middleware) if config.static_path is not None: static_url = 'http://{}:{}/{}'.format(config.host, config.aux_port, static_path) dft_logger.debug('settings app static_root_url to "%s"', static_url) app['static_root_url'] = MutableValue(static_url) if config.debug_toolbar and aiohttp_debugtoolbar: aiohttp_debugtoolbar.setup(app, intercept_redirects=False)
def modify_main_app(app, config: Config)
Modify the app we're serving to make development easier, eg. * modify responses to add the livereload snippet * set ``static_root_url`` on the app * setup the debug toolbar
3.527499
3.340269
1.056053
cli_count = len(app[WS]) if cli_count == 0: return 0 is_html = None if path: path = str(Path(app['static_url']) / Path(path).relative_to(app['static_path'])) is_html = mimetypes.guess_type(path)[0] == 'text/html' reloads = 0 aux_logger.debug('prompting source reload for %d clients', cli_count) for ws, url in app[WS]: if path and is_html and path not in {url, url + '.html', url.rstrip('/') + '/index.html'}: aux_logger.debug('skipping reload for client at %s', url) continue aux_logger.debug('reload client at %s', url) data = { 'command': 'reload', 'path': path or url, 'liveCSS': True, 'liveImg': True, } try: await ws.send_str(json.dumps(data)) except RuntimeError as e: # eg. "RuntimeError: websocket connection is closing" aux_logger.error('Error broadcasting change to %s, RuntimeError: %s', path or url, e) else: reloads += 1 if reloads: s = '' if reloads == 1 else 's' aux_logger.info('prompted reload of %s on %d client%s', path or 'page', reloads, s) return reloads
async def src_reload(app, path: str = None)
Prompt each connected browser to reload by sending a websocket message. :param path: if supplied this must be a path relative to app['static_path'], eg. reload of a single file is only supported for static resources. :return: number of sources reloaded
3.741135
3.410756
1.096864
filename = URL.build(path=request.match_info['filename'], encoded=True).path raw_path = self._directory.joinpath(filename) try: filepath = raw_path.resolve() if not filepath.exists(): # simulate strict=True for python 3.6 which is not permitted with 3.5 raise FileNotFoundError() except FileNotFoundError: try: html_file = raw_path.with_name(raw_path.name + '.html').resolve().relative_to(self._directory) except (FileNotFoundError, ValueError): pass else: request.match_info['filename'] = str(html_file) else: if filepath.is_dir(): index_file = filepath / 'index.html' if index_file.exists(): try: request.match_info['filename'] = str(index_file.relative_to(self._directory)) except ValueError: # path is not relative to self._directory pass
def modify_request(self, request)
Apply common path conventions eg. / > /index.html, /foobar > /foobar.html
3.584155
3.362305
1.065981
for attr_name in dir(self): if attr_name.startswith('_') or attr_name.upper() != attr_name: continue orig_value = getattr(self, attr_name) is_required = isinstance(orig_value, Required) orig_type = orig_value.v_type if is_required else type(orig_value) env_var_name = self._ENV_PREFIX + attr_name env_var = os.getenv(env_var_name, None) if env_var is not None: if issubclass(orig_type, bool): env_var = env_var.upper() in ('1', 'TRUE') elif issubclass(orig_type, int): env_var = int(env_var) elif issubclass(orig_type, Path): env_var = Path(env_var) elif issubclass(orig_type, bytes): env_var = env_var.encode() # could do floats here and lists etc via json setattr(self, attr_name, env_var) elif is_required and attr_name not in self._custom_settings: raise RuntimeError('The required environment variable "{0}" is currently not set, ' 'you\'ll need to run `source activate.settings.sh` ' 'or you can set that single environment variable with ' '`export {0}="<value>"`'.format(env_var_name))
def substitute_environ(self)
Substitute environment variables into settings.
3.046323
2.941597
1.035602
settings = Settings() conn = psycopg2.connect( password=settings.DB_PASSWORD, host=settings.DB_HOST, port=settings.DB_PORT, user=settings.DB_USER, ) conn.autocommit = True cur = conn.cursor() db_name = settings.DB_NAME cur.execute('SELECT EXISTS (SELECT datname FROM pg_catalog.pg_database WHERE datname=%s)', (db_name,)) already_exists = bool(cur.fetchone()[0]) if already_exists: if not delete_existing: print('database "{}" already exists, skipping'.format(db_name)) return False else: print('dropping database "{}" as it already exists...'.format(db_name)) cur.execute('DROP DATABASE {}'.format(db_name)) else: print('database "{}" does not yet exist'.format(db_name)) print('creating database "{}"...'.format(db_name)) cur.execute('CREATE DATABASE {}'.format(db_name)) cur.close() conn.close() # {% if database.is_pg_sqlalchemy %} engine = create_engine(pg_dsn(settings)) print('creating tables from model definition...') Base.metadata.create_all(engine) engine.dispose() # {% else %} # TODO # {% endif %} return True
def prepare_database(delete_existing: bool) -> bool
(Re)create a fresh database and run migrations. :param delete_existing: whether or not to drop an existing database if it exists :return: whether or not a database has been (re)created
2.302294
2.275527
1.011763
# {% if database.is_none and example.is_message_board %} # app.router allows us to generate urls based on their names, # see http://aiohttp.readthedocs.io/en/stable/web.html#reverse-url-constructing-using-named-resources message_url = request.app.router['messages'].url_for() ctx = dict( title=request.app['name'], styles_css_url=request.app['static_root_url'] + '/styles.css', content=.format(message_url=message_url) ) # {% else %} ctx = dict( title=request.app['name'], styles_css_url=request.app['static_root_url'] + '/styles.css', content="<p>Success! you've setup a basic aiohttp app.</p>", ) # {% endif %} # with the base web.Response type we have to manually set the content type, otherwise text/plain will be used. return web.Response(text=BASE_PAGE.format(**ctx), content_type='text/html')
async def index(request)
This is the view handler for the "/" url. **Note: returning html without a template engine like jinja2 is ugly, no way around that.** :param request: the request object see http://aiohttp.readthedocs.io/en/stable/web_reference.html#request :return: aiohttp.web.Response object
5.454116
5.43157
1.004151
messages = [] # {% if database.is_none %} if request.app['settings'].MESSAGE_FILE.exists(): # read the message file, process it and populate the "messages" list with request.app['settings'].MESSAGE_FILE.open() as msg_file: for line in msg_file: if not line: # ignore blank lines eg. end of file continue # split the line into its constituent parts, see process_form above username, ts, message = line.split('|', 2) # parse the datetime string and render it in a more readable format. ts = '{:%Y-%m-%d %H:%M:%S}'.format(datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%f')) messages.append({'username': username, 'timestamp': ts, 'message': message}) messages.reverse() # {% elif database.is_pg_sqlalchemy %} async with request.app['pg_engine'].acquire() as conn: async for row in conn.execute(sa_messages.select().order_by(sa_messages.c.timestamp.desc())): ts = '{:%Y-%m-%d %H:%M:%S}'.format(row.timestamp) messages.append({'username': row.username, 'timestamp': ts, 'message': row.message}) # {% endif %} return json_response(messages)
async def message_data(request)
As an example of aiohttp providing a non-html response, we load the actual messages for the "messages" view above via ajax using this endpoint to get data. see static/message_display.js for details of rendering.
3.277727
3.136764
1.044939
return str(URL( database=settings.DB_NAME, password=settings.DB_PASSWORD, host=settings.DB_HOST, port=settings.DB_PORT, username=settings.DB_USER, drivername='postgres', ))
def pg_dsn(settings: Settings) -> str
:param settings: settings including connection settings :return: DSN url suitable for sqlalchemy and aiopg.
2.994002
2.516082
1.189946
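A hedged sketch tying pg_dsn to the prepare_database record above; it assumes Settings picks up DB_* values from the environment as described in substitute_environ:

    from sqlalchemy import create_engine

    settings = Settings()
    engine = create_engine(pg_dsn(settings))  # e.g. 'postgres://user:pw@localhost:5432/dbname'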
setup_logging(verbose) run_app(*serve_static(static_path=path, livereload=livereload, port=port))
def serve(path, livereload, port, verbose)
Serve static files from a directory.
6.48475
6.474376
1.001602
active_config = {k: v for k, v in config.items() if v is not None} setup_logging(config['verbose']) try: run_app(*_runserver(**active_config)) except AiohttpDevException as e: if config['verbose']: tb = click.style(traceback.format_exc().strip('\n'), fg='white', dim=True) main_logger.warning('AiohttpDevException traceback:\n%s', tb) main_logger.error('Error: %s', e) sys.exit(2)
def runserver(**config)
Run a development server for an aiohttp app. Takes one argument "app-path" which should be a path to either a directory containing a recognized default file ("app.py" or "main.py") or to a specific file. Defaults to the environment variable "AIO_APP_PATH" or ".". The app path is run directly, see the "--app-factory" option for details on how an app is loaded from a python module.
4.193582
4.497646
0.932395
setup_logging(verbose) try: check_dir_clean(Path(path)) if name is None: name = Path(path).name for kwarg_name, choice_enum in DECISIONS: docs = dedent(choice_enum.__doc__).split('\n') title, *help_text = filter(bool, docs) click.secho('\n' + title, fg='green') if kwargs[kwarg_name] is None: click.secho('\n'.join(help_text), dim=True) choices = _display_enum_choices(choice_enum) kwargs[kwarg_name] = click.prompt( 'choose which {} to use {}'.format(kwarg_name, choices), type=EnumChoice(choice_enum), show_default=False, default=enum_default(choice_enum), ) click.echo('using: {}'.format(click.style(kwargs[kwarg_name], bold=True))) continue StartProject(path=path, name=name, **kwargs) except AiohttpDevException as e: main_logger.error('Error: %s', e) sys.exit(2)
def start(*, path, name, verbose, **kwargs)
Create a new aiohttp app.
4.366308
4.261542
1.024584
rel_py_file = self.py_file.relative_to(self.python_path) module_path = '.'.join(rel_py_file.with_suffix('').parts) sys.path.append(str(self.python_path)) try: module = import_module(module_path) except ImportError as e: raise AdevConfigError('error importing "{}" ' 'from "{}": {}'.format(module_path, self.python_path, e)) from e logger.debug('successfully loaded "%s" from "%s"', module_path, self.python_path) if self.app_factory_name is None: try: self.app_factory_name = next(an for an in APP_FACTORY_NAMES if hasattr(module, an)) except StopIteration as e: raise AdevConfigError('No name supplied and no default app factory ' 'found in {s.py_file.name}'.format(s=self)) from e else: logger.debug('found default attribute "%s" in module "%s"', self.app_factory_name, module) try: attr = getattr(module, self.app_factory_name) except AttributeError as e: raise AdevConfigError('Module "{s.py_file.name}" ' 'does not define a "{s.app_factory_name}" attribute/class'.format(s=self)) from e self.watch_path = self.watch_path or Path(module.__file__).parent return attr
def import_app_factory(self)
Import an attribute/class from a python module.

Raise AdevConfigError if the import failed.

:return: the imported app factory attribute/class
2.813577
2.618619
1.07445
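(The default-name lookup can be sketched standalone; APP_FACTORY_NAMES is assumed to be the usual list of factory names, and 'myapp.main' is a hypothetical module path.)

from importlib import import_module

APP_FACTORY_NAMES = ['app', 'app_factory', 'get_app', 'create_app']  # assumed

module = import_module('myapp.main')  # hypothetical module
factory = next(getattr(module, name)
               for name in APP_FACTORY_NAMES if hasattr(module, name))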
# force a full reload in sub processes so they load an updated version of code,
# this must be called only once
set_start_method('spawn')
config = Config(**config_kwargs)
config.import_app_factory()

loop = asyncio.get_event_loop()
loop.run_until_complete(check_port_open(config.main_port, loop))

aux_app = create_auxiliary_app(
    static_path=config.static_path_str,
    static_url=config.static_url,
    livereload=config.livereload,
)

main_manager = AppTask(config, loop)

aux_app.on_startup.append(main_manager.start)
aux_app.on_shutdown.append(main_manager.close)

if config.static_path:
    static_manager = LiveReloadTask(config.static_path, loop)
    logger.debug('starting livereload to watch %s', config.static_path_str)
    aux_app.on_startup.append(static_manager.start)
    aux_app.on_shutdown.append(static_manager.close)

url = 'http://{0.host}:{0.aux_port}'.format(config)
logger.info('Starting aux server at %s ◆', url)

if config.static_path:
    rel_path = config.static_path.relative_to(os.getcwd())
    logger.info('serving static files from ./%s/ at %s%s', rel_path, url, config.static_url)

return aux_app, config.aux_port, loop, AuxAccessLogger
def runserver(**config_kwargs)
Prepare app ready to run development server.

:param config_kwargs: see config.Config for more details
:return: tuple (auxiliary app, auxiliary app port, event loop, aux access logger class)
4.040883
3.900947
1.035872
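(A minimal sketch of consuming the return value, mirroring the CLI command earlier in this section; the import path is an assumption.)

from aiohttp_devtools.runserver import run_app, runserver  # assumed import path

# run_app unpacks the (aux_app, aux_port, loop, access_logger) tuple returned above
run_app(*runserver(app_path='app.py'))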
log_level = 'DEBUG' if verbose else 'INFO'
return {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'default': {
            'format': '[%(asctime)s] %(message)s',
            'datefmt': '%H:%M:%S',
            'class': 'aiohttp_devtools.logs.DefaultFormatter',
        },
        'no_ts': {
            'format': '%(message)s',
            'class': 'aiohttp_devtools.logs.DefaultFormatter',
        },
        'aiohttp': {
            'format': '%(message)s',
            'class': 'aiohttp_devtools.logs.AccessFormatter',
        },
    },
    'handlers': {
        'default': {
            'level': log_level,
            'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
            'formatter': 'default',
        },
        'no_ts': {
            'level': log_level,
            'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
            'formatter': 'no_ts',
        },
        'aiohttp_access': {
            'level': log_level,
            'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
            'formatter': 'aiohttp',
        },
        'aiohttp_server': {
            'class': 'aiohttp_devtools.logs.HighlightStreamHandler',
            'formatter': 'aiohttp',
        },
    },
    'loggers': {
        rs_dft_logger.name: {
            'handlers': ['default'],
            'level': log_level,
        },
        rs_aux_logger.name: {
            'handlers': ['default'],
            'level': log_level,
        },
        tools_logger.name: {
            'handlers': ['default'],
            'level': log_level,
        },
        main_logger.name: {
            'handlers': ['no_ts'],
            'level': log_level,
        },
        'aiohttp.access': {
            'handlers': ['aiohttp_access'],
            'level': log_level,
            'propagate': False,
        },
        'aiohttp.server': {
            'handlers': ['aiohttp_server'],
            'level': log_level,
        },
    },
}
def log_config(verbose: bool) -> dict
Set up the default config for dictConfig.

:param verbose: level: DEBUG if True, INFO if False
:return: dict suitable for ``logging.config.dictConfig``
1.612044
1.612017
1.000016
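(Applying the config is a one-liner.)

import logging.config

logging.config.dictConfig(log_config(verbose=True))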
def _scenario(func, *args, **kw):
    _check_coroutine(func)
    if weight > 0:
        sname = name or func.__name__
        data = {'name': sname, 'weight': weight, 'delay': delay,
                'func': func, 'args': args, 'kw': kw}
        _SCENARIO[sname] = data

    @functools.wraps(func)
    def __scenario(*args, **kw):
        return func(*args, **kw)

    return __scenario

return _scenario
def scenario(weight=1, delay=0.0, name=None)
Decorator to register a function as a Molotov test. Options:

- **weight** used by Molotov when the scenarios are randomly picked. The
  functions with the highest values are more likely to be picked.
  Integer, defaults to 1. This value is ignored when the
  *scenario_picker* decorator is used.
- **delay** once the scenario is done, the worker will sleep *delay*
  seconds. Float, defaults to 0. The general --delay argument you can
  pass to Molotov will be summed with this delay.
- **name** name of the scenario. If not provided, will use the
  function's __name__ attribute.

The decorated function receives an :class:`aiohttp.ClientSession` instance.
2.643283
3.377786
0.782549
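(A typical registration, following the usage molotov documents; the URL is a placeholder.)

import molotov

@molotov.scenario(weight=40)
async def scenario_one(session):
    async with session.get('http://localhost:8080') as resp:
        assert resp.status == 200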
req = functools.partial(_request, endpoint, verb, session_options, **options)
return _run_in_fresh_loop(req)
def request(endpoint, verb='GET', session_options=None, **options)
Performs a synchronous request.

Uses a dedicated event loop and aiohttp.ClientSession object.

Options:

- endpoint: the endpoint to call
- verb: the HTTP verb to use (defaults: GET)
- session_options: a dict containing options to initialize the session
  (defaults: None)
- options: extra options for the request (defaults: None)

Returns a dict object with the following keys:

- content: the content of the response
- status: the status
- headers: a dict with all the response headers
7.09925
11.811403
0.601051
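(Illustrative synchronous call; the endpoint is hypothetical.)

res = request('http://localhost:8080/api', verb='GET')
print(res['status'], res['headers'].get('Content-Type'))
print(res['content'])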
req = functools.partial(_request, endpoint, verb, session_options,
                        json=True, **options)
return _run_in_fresh_loop(req)
def json_request(endpoint, verb='GET', session_options=None, **options)
Like :func:`molotov.request` but extracts json from the response.
6.508356
5.265905
1.235942
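(Same shape with a parsed body; endpoint and payload are hypothetical.)

res = json_request('http://localhost:8080/api/health')
assert res['status'] == 200
data = res['content']  # already deserialized from JSON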
if name not in _VARS and factory is not None:
    _VARS[name] = factory()
return _VARS.get(name)
def get_var(name, factory=None)
Gets a global variable given its name. If the variable is not set and factory is not None, factory is called and its return value is stored as the variable. If the variable is not set and no factory is given, returns None.
3.204886
4.024689
0.796307
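(Minimal sketch: lazily create a shared Counter the first time it is requested.)

from collections import Counter

counter = get_var('hits', factory=Counter)
counter['home'] += 1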
if scenario is None:
    scenario = pick_scenario(self.wid, step_id)
try:
    await self.send_event('scenario_start', scenario=scenario)
    await scenario['func'](session, *scenario['args'], **scenario['kw'])
    await self.send_event('scenario_success', scenario=scenario)

    if scenario['delay'] > 0.:
        await cancellable_sleep(scenario['delay'])
    return 1
except Exception as exc:
    await self.send_event('scenario_failure', scenario=scenario, exception=exc)
    if self.args.verbose > 0:
        self.console.print_error(exc)
        await self.console.flush()
    return -1
async def step(self, step_id, session, scenario=None)
Performs a single scenario call. Returns 1 on success, -1 if the scenario failed, and 0 if the test is stopping or needs to stop.
4.013882
3.922611
1.023268
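(A hypothetical consumer of the return codes, tallying results the way a worker loop might; the names are illustrative, not molotov's internals.)

async def run_steps(worker, session, count):
    ok = failed = 0
    for step_id in range(count):
        res = await worker.step(step_id, session)
        if res == 1:
            ok += 1
        elif res == -1:
            failed += 1
        else:  # 0: the test is stopping
            break
    return ok, failed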
parser = argparse.ArgumentParser(description='Github-based load test')
parser.add_argument('--version', action='store_true', default=False,
                    help='Displays version and exits.')
parser.add_argument('--virtualenv', type=str, default='virtualenv',
                    help='Virtualenv executable.')
parser.add_argument('--python', type=str, default=sys.executable,
                    help='Python executable.')
parser.add_argument('--config', type=str, default='molotov.json',
                    help='Path of the configuration file.')
parser.add_argument('repo', help='Github repo', type=str, nargs="?")
parser.add_argument('run', help='Test to run', nargs="?")
args = parser.parse_args()

if args.version:
    print(__version__)
    sys.exit(0)

tempdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tempdir)
print('Working directory is %s' % tempdir)
try:
    clone_repo(args.repo)
    config_file = os.path.join(tempdir, args.config)

    with open(config_file) as f:
        config = json.loads(f.read())

    # creating the virtualenv
    create_virtualenv(args.virtualenv, args.python)

    # install deps
    if 'requirements' in config['molotov']:
        install_reqs(config['molotov']['requirements'])

    # load deps into sys.path
    pyver = '%d.%d' % (sys.version_info.major, sys.version_info.minor)
    site_pkg = os.path.join(tempdir, 'venv', 'lib', 'python' + pyver, 'site-packages')
    site.addsitedir(site_pkg)
    pkg_resources.working_set.add_entry(site_pkg)

    # environment
    if 'env' in config['molotov']:
        for key, value in config['molotov']['env'].items():
            os.environ[key] = value

    run_test(**config['molotov']['tests'][args.run])
except Exception:
    os.chdir(curdir)
    shutil.rmtree(tempdir, ignore_errors=True)
    raise
def main()
Moloslave clones a git repo and runs a molotov test
2.386853
2.243498
1.063898
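(The configuration file the code reads would look roughly like this, shown as the dict json.loads() would return; only 'requirements', 'env' and 'tests' are keys the code above actually references, the rest are guesses.)

config = {
    "molotov": {
        "requirements": "requirements.txt",              # passed to install_reqs()
        "env": {"SERVER_URL": "http://localhost:8080"},  # exported to os.environ
        "tests": {
            # kwargs forwarded to run_test(); the option names are hypothetical
            "big": {"duration": 30},
        },
    },
}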
errors = []

def onerror(function, path, excinfo):
    if show_warnings:
        print('Cannot delete %s: %s' % (os.path.relpath(path), excinfo[1]))
    errors.append((function, path, excinfo))

if os.path.exists(directory):
    if not os.path.isdir(directory):
        raise NotADirectoryError(directory)
    shutil.rmtree(directory, onerror=onerror)
return errors
def remove_directory(directory, show_warnings=True)
Deletes a directory and its contents. Returns a list of errors in the form (function, path, excinfo).
2.522339
2.152481
1.171828
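(Illustrative use, collecting failures instead of printing them; the directory name is a placeholder.)

errors = remove_directory('build', show_warnings=False)
for function, path, excinfo in errors:
    print('could not delete %s: %s' % (path, excinfo[1]))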
try:
    os.makedirs(target_directory)
except OSError:  # the target directory may already exist
    pass
for f in source_files:
    source = os.path.join(source_directory, f) if source_directory else f
    target = os.path.join(target_directory, f)
    shutil.copy2(source, target)
def copy_files(source_files, target_directory, source_directory=None)
Copies a list of files to the specified directory. If source_directory is provided, it will be prepended to each source file.
2.284193
2.317869
0.985471
while True:
    print(message, '(yes/no)', end=' ')
    line = input()
    if line is None:
        return None
    line = line.lower()
    if line in ('y', 'ye', 'yes'):
        return True
    if line in ('n', 'no'):
        return False
def yes_or_no(message)
Gets user input and returns True for yes and False for no.
2.455924
2.402636
1.022179