code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
try: func_str = aliasing[func] except KeyError: if callable(func): return func else: if func_str in implementations: return func_str if func_str.startswith('nan') and func_str[3:] in funcs_no_separate_nan: raise ValueError("{} does not have a nan-version".format(func_str[3:])) else: raise NotImplementedError("No such function available") raise ValueError("func {} is neither a valid function string nor a " "callable object".format(func))
def get_func(func, aliasing, implementations)
Return the key of a found implementation or the func itself
5.513145
5.437521
1.013908
def check_type(x, dtype): try: converted = dtype.type(x) except (ValueError, OverflowError): return False # False if some overflow has happened return converted == x or np.isnan(x) def type_loop(x, dtype, dtype_dict, default=None): while True: try: dtype = np.dtype(dtype_dict[dtype.name]) if check_type(x, dtype): return np.dtype(dtype) except KeyError: if default is not None: return np.dtype(default) raise ValueError("Can not determine dtype of %r" % x) dtype = np.dtype(dtype) if check_type(x, dtype): return dtype if np.issubdtype(dtype, np.inexact): return type_loop(x, dtype, _next_float_dtype) else: return type_loop(x, dtype, _next_int_dtype, default=np.float32)
def minimum_dtype(x, dtype=np.bool_)
Returns the "most basic" dtype which represents `x` properly and provides at least the same value range as the specified dtype.
3.043203
2.98662
1.018946
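A rough illustration of the idea behind minimum_dtype above, using only NumPy built-ins: np.min_scalar_type finds the smallest dtype that can hold a scalar, and np.promote_types expresses the "at least the range of the requested dtype" floor. This is not the module's own function (which walks its internal dtype chains), just a hedged sketch of the concept.

import numpy as np

# Smallest dtype that can hold a single scalar value:
print(np.min_scalar_type(10))      # uint8
print(np.min_scalar_type(-129))    # int16

# minimum_dtype additionally keeps at least the range of a requested dtype;
# promote_types shows that "no smaller than either" constraint:
print(np.promote_types(np.min_scalar_type(10), np.int8))   # int16 (uint8 widened to also cover int8)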
if n.ndim != 1: raise ValueError("n is supposed to be 1d array.") n_mask = n.astype(bool) n_cumsum = np.cumsum(n) ret = np.ones(n_cumsum[-1] + 1, dtype=int) ret[n_cumsum[n_mask]] -= n[n_mask] ret[0] -= 1 return np.cumsum(ret)[:-1]
def multi_arange(n)
By example: # 0 1 2 3 4 5 6 7 8 n = [0, 0, 3, 0, 0, 2, 0, 2, 1] res = [0, 1, 2, 0, 1, 0, 1, 0] That is, it is equivalent to something like this: hstack((arange(n_i) for n_i in n)) This version seems quite a bit faster, at least for some possible inputs, and at any rate it encapsulates a task in a function.
2.908028
2.959193
0.98271
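As a cross-check for multi_arange above, here is the naive hstack-of-aranges formulation mentioned in its docstring; the helper name is illustrative, not part of the module.

import numpy as np

def multi_arange_naive(n):
    # Concatenate arange(n_i) for every n_i; an empty input yields an empty int array.
    parts = [np.arange(n_i) for n_i in n]
    return np.concatenate(parts) if parts else np.array([], dtype=int)

n = np.array([0, 0, 3, 0, 0, 2, 0, 2, 1])
print(multi_arange_naive(n))  # [0 1 2 0 1 0 1 0]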
if X.ndim != 1: raise ValueError("this is for 1d masks only.") is_start = np.empty(len(X), dtype=bool) is_start[0] = X[0] # True if X[0] is True or non-zero if X.dtype.kind == 'b': is_start[1:] = ~X[:-1] & X[1:] M = X else: M = X.astype(bool) is_start[1:] = X[:-1] != X[1:] is_start[~M] = False L = np.cumsum(is_start) L[~M] = 0 return L
def label_contiguous_1d(X)
WARNING: API for this function is not liable to change!!! By example: X = [F T T F F T F F F T T T] result = [0 1 1 0 0 2 0 0 0 3 3 3] Or: X = [0 3 3 0 0 5 5 5 1 1 0 2] result = [0 1 1 0 0 2 2 2 3 3 0 4] The ``0`` or ``False`` elements of ``X`` are labeled as ``0`` in the output. If ``X`` is a boolean array, each contiguous block of ``True`` is given an integer label; if ``X`` is not boolean, then each contiguous block of identical values is given an integer label. Integer labels are 1, 2, 3,..... (i.e. they start at 1 and increase by 1 for each block, with no skipped numbers.)
3.621922
3.449126
1.050098
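For the boolean case, SciPy's ndimage.label produces the same contiguous-run labelling as label_contiguous_1d above, which makes a convenient sanity check (assuming SciPy is installed; the non-boolean "identical values" case is not covered by this).

import numpy as np
from scipy import ndimage

X = np.array([False, True, True, False, False, True,
              False, False, False, True, True, True])
labels, n_blocks = ndimage.label(X)
print(labels)    # [0 1 1 0 0 2 0 0 0 3 3 3]
print(n_blocks)  # 3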
keep_group = np.zeros(np.max(group_idx) + 1, dtype=bool) keep_group[0] = True keep_group[group_idx] = True return relabel_groups_masked(group_idx, keep_group)
def relabel_groups_unique(group_idx)
See also ``relabel_groups_masked``. group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5] ret: [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4] Description of above: unique groups in the input were ``1,2,3,5``, i.e. ``4`` was missing, so group 5 was relabeled to be ``4``. Relabeling maintains order, just "compressing" the higher numbers to fill gaps.
2.864803
2.25065
1.272878
keep_group = keep_group.astype(bool, copy=not keep_group[0]) if not keep_group[0]: # ensuring keep_group[0] is True makes life easier keep_group[0] = True relabel = np.zeros(keep_group.size, dtype=group_idx.dtype) relabel[keep_group] = np.arange(np.count_nonzero(keep_group)) return relabel[group_idx]
def relabel_groups_masked(group_idx, keep_group)
group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5] 0 1 2 3 4 5 keep_group: [0 1 0 1 1 1] ret: [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4] Description of above in words: remove group 2, and relabel groups 3, 4, and 5 to be 2, 3 and 4 respectively, in order to fill the gap. Note that group 4 was never used in the input group_idx, but the user-supplied mask said to keep group 4, so group 5 is only moved up by one place to fill the gap created by removing group 2. That is, the mask describes which groups to remove; the remaining groups are relabeled to remove the gaps created by the falsy elements in ``keep_group``. Note that ``keep_group[0]`` has no particular meaning because it refers to the zero group which cannot be "removed". ``keep_group`` should be bool and ``group_idx`` int. Values in ``group_idx`` can be in any order.
3.456149
3.516739
0.982771
if fill_value is not None and not (np.isscalar(fill_value) or len(fill_value) == 0): raise ValueError("fill_value must be None, a scalar or an empty " "sequence") order_group_idx = np.argsort(group_idx, kind='mergesort') counts = np.bincount(group_idx, minlength=size) ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1]) ret = np.asanyarray(ret) if fill_value is None or np.isscalar(fill_value): _fill_untouched(group_idx, ret, fill_value) return ret
def _array(group_idx, a, size, fill_value, dtype=None)
groups a into separate arrays, keeping the order intact.
2.817027
2.694956
1.045296
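The core trick in _array above is a stable argsort followed by np.split at the cumulative group counts; the same steps in isolation, on a tiny hand-made example (a standalone sketch, not a call to the private helper):

import numpy as np

group_idx = np.array([1, 0, 1, 2, 0])
a = np.array([10, 20, 30, 40, 50])
size = 3

order = np.argsort(group_idx, kind='mergesort')   # stable sort keeps within-group order
counts = np.bincount(group_idx, minlength=size)   # [2 2 1]
groups = np.split(a[order], np.cumsum(counts)[:-1])
print(groups)  # [array([20, 50]), array([10, 30]), array([40])]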
groups = _array(group_idx, a, size, ()) ret = np.full(size, fill_value, dtype=dtype or np.float64) for i, grp in enumerate(groups): if np.ndim(grp) == 1 and len(grp) > 0: ret[i] = func(grp) return ret
def _generic_callable(group_idx, a, size, fill_value, dtype=None, func=lambda g: g, **kwargs)
groups a by group_idx, and then applies func to each group in turn, placing the results in an array.
3.432287
3.700979
0.9274
sortidx = np.argsort(group_idx, kind='mergesort') invsortidx = np.argsort(sortidx, kind='mergesort') group_idx_srt = group_idx[sortidx] a_srt = a[sortidx] a_srt_cumsum = np.cumsum(a_srt, dtype=dtype) increasing = np.arange(len(a), dtype=int) group_starts = _min(group_idx_srt, increasing, size, fill_value=0)[group_idx_srt] a_srt_cumsum += -a_srt_cumsum[group_starts] + a_srt[group_starts] return a_srt_cumsum[invsortidx]
def _cumsum(group_idx, a, size, fill_value=None, dtype=None)
N to N aggregate operation of cumsum. Perform cumulative sum for each group. group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1]) a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8]) _cumsum(group_idx, a, np.max(group_idx) + 1) >>> array([ 3, 4, 5, 6, 15, 9, 15, 22, 7, 0, 15, 17, 6, 14, 31, 39])
2.638185
2.925824
0.90169
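If pandas is available, a per-group cumulative sum reproduces the docstring example of _cumsum above, giving an independent check (a verification sketch, not the module's own code path):

import numpy as np
import pandas as pd

group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
expected = np.array([3, 4, 5, 6, 15, 9, 15, 22, 7, 0, 15, 17, 6, 14, 31, 39])

# groupby accepts an array of labels; cumsum stays aligned with the original order
result = pd.Series(a).groupby(group_idx).cumsum().to_numpy()
assert np.array_equal(result, expected)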
untouched = np.ones_like(ret, dtype=bool) untouched[idx] = False ret[untouched] = fill_value
def _fill_untouched(idx, ret, fill_value)
any elements of ret not indexed by idx are set to fill_value.
2.419864
2.330592
1.038305
extrafuncs = {'allnan': allnan, 'anynan': anynan, 'first': itemgetter(0), 'last': itemgetter(-1), 'nanfirst': nanfirst, 'nanlast': nanlast} func = kwargs.pop('func') func = extrafuncs.get(func, func) if isinstance(func, str): raise NotImplementedError("Grouploop needs to be called with a function") return aggregate_numpy.aggregate(*args, func=lambda x: func(x), **kwargs)
def aggregate_grouploop(*args, **kwargs)
wraps func in lambda which prevents aggregate_numpy from recognising and optimising it. Instead it groups and loops.
4.318792
4.103914
1.052359
dtype = minimum_dtype_scalar(fill_value, dtype, a) ret = np.full(size, fill_value, dtype=dtype) if fill_value != 1: ret[group_idx] = 1 # product should start from 1 np.multiply.at(ret, group_idx, a) return ret
def _prod(group_idx, a, size, fill_value, dtype=None)
Same as aggregate_numpy.py
3.888837
3.82761
1.015996
varnames = ['group_idx', 'a', 'ret', 'counter'] codebase = c_base_reverse if reverse else c_base iteration = c_iter_scalar[funcname] if scalar else c_iter[funcname] if scalar: varnames.remove('a') return codebase % dict(init=c_init(varnames), iter=iteration, finish=c_finish.get(funcname, ''), ri_redir=(c_ri_redir if nans else c_ri))
def c_func(funcname, reverse=False, nans=False, scalar=False)
Fill c_funcs with constructed code from the templates
8.116943
7.933195
1.023162
ilen = step_count(group_idx) + 1 indices = np.empty(ilen, int) indices[0] = 0 indices[-1] = group_idx.size inline(c_step_indices, ['group_idx', 'indices'], define_macros=c_macros, extra_compile_args=c_args) return indices
def step_indices(group_idx)
Get the edges of areas within group_idx, which are filled with the same value
5.393188
6.065235
0.889197
# [1] # return np.random.choice([-np.sqrt(3), 0, np.sqrt(3)], size=size, p=[1 / 6, 2 / 3, 1 / 6]) # [2] s = 1 / self.density return np.random.choice([-np.sqrt(s / self.k), 0, np.sqrt(s / self.k)], size=size, p=[1 / (2 * s), 1 - 1 / s, 1 / (2 * s)])
def __create_proj_mat(self, size)
Create a random projection matrix [1] D. Achlioptas. Database-friendly random projections: Johnson-Lindenstrauss with binary coins. [2] P. Li, et al. Very sparse random projections. http://scikit-learn.org/stable/modules/random_projection.html#sparse-random-projection
2.954024
2.586419
1.142129
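The distribution drawn in __create_proj_mat above (the very sparse projection of Li et al., with s = 1/density) has zero mean and variance 1/k per entry; a quick empirical check with placeholder values for density and k (both hypothetical here):

import numpy as np

density, k = 1 / 3, 64                  # hypothetical hyper-parameters
s = 1 / density                         # entries are 0 with probability 1 - 1/s
vals = np.random.choice([-np.sqrt(s / k), 0, np.sqrt(s / k)],
                        size=1_000_000,
                        p=[1 / (2 * s), 1 - 1 / s, 1 / (2 * s)])
print(vals.mean())          # close to 0
print(vals.var(), 1 / k)    # both close to 0.015625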
all_genres = ['Action', 'Adventure', 'Animation', "Children's", 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western'] n_genre = len(all_genres) movies = {} if size == '100k': with open(os.path.join(data_home, 'u.item'), encoding='ISO-8859-1') as f: lines = list(map(lambda l: l.rstrip().split('|'), f.readlines())) for line in lines: movie_vec = np.zeros(n_genre) for i, flg_chr in enumerate(line[-n_genre:]): if flg_chr == '1': movie_vec[i] = 1. movie_id = int(line[0]) movies[movie_id] = movie_vec elif size == '1m': with open(os.path.join(data_home, 'movies.dat'), encoding='ISO-8859-1') as f: lines = list(map(lambda l: l.rstrip().split('::'), f.readlines())) for item_id_str, title, genres in lines: movie_vec = np.zeros(n_genre) for genre in genres.split('|'): i = all_genres.index(genre) movie_vec[i] = 1. item_id = int(item_id_str) movies[item_id] = movie_vec return movies
def load_movies(data_home, size)
Load movie genres as a context. Returns: dict of movie vectors: item_id -> numpy array (n_genre,)
1.711679
1.659708
1.031313
ages = [1, 18, 25, 35, 45, 50, 56, 999] users = {} if size == '100k': all_occupations = ['administrator', 'artist', 'doctor', 'educator', 'engineer', 'entertainment', 'executive', 'healthcare', 'homemaker', 'lawyer', 'librarian', 'marketing', 'none', 'other', 'programmer', 'retired', 'salesman', 'scientist', 'student', 'technician', 'writer'] with open(os.path.join(data_home, 'u.user'), encoding='ISO-8859-1') as f: lines = list(map(lambda l: l.rstrip().split('|'), f.readlines())) for user_id_str, age_str, sex_str, occupation_str, zip_code in lines: user_vec = np.zeros(1 + 1 + 21) # 1 categorical, 1 value, 21 categorical user_vec[0] = 0 if sex_str == 'M' else 1 # sex # age (ML1M is "age group", but 100k has actual "age") age = int(age_str) for i in range(7): if age >= ages[i] and age < ages[i + 1]: user_vec[1] = i break user_vec[2 + all_occupations.index(occupation_str)] = 1 # occupation (1-of-21) users[int(user_id_str)] = user_vec elif size == '1m': with open(os.path.join(data_home, 'users.dat'), encoding='ISO-8859-1') as f: lines = list(map(lambda l: l.rstrip().split('::'), f.readlines())) for user_id_str, sex_str, age_str, occupation_str, zip_code in lines: user_vec = np.zeros(1 + 1 + 21) # 1 categorical, 1 value, 21 categorical user_vec[0] = 0 if sex_str == 'M' else 1 # sex user_vec[1] = ages.index(int(age_str)) # age group (1, 18, ...) user_vec[2 + int(occupation_str)] = 1 # occupation (1-of-21) users[int(user_id_str)] = user_vec return users
def load_users(data_home, size)
Load user demographics as contexts. User ID -> {sex (M/F), age (7 groups), occupation (0-20; 21)} Returns: dict of user vectors: user_id -> numpy array (1+1+21,); (sex_flg + age_group + n_occupation, )
2.301309
2.170929
1.060057
if size == '100k': with open(os.path.join(data_home, 'u.data'), encoding='ISO-8859-1') as f: lines = list(map(lambda l: list(map(int, l.rstrip().split('\t'))), f.readlines())) elif size == '1m': with open(os.path.join(data_home, 'ratings.dat'), encoding='ISO-8859-1') as f: lines = list(map(lambda l: list(map(int, l.rstrip().split('::'))), f.readlines())) ratings = [] for l in lines: # Since we consider positive-only feedback setting, ratings < 5 will be excluded. if l[2] == 5: ratings.append(l) ratings = np.asarray(ratings) # sorted by timestamp return ratings[np.argsort(ratings[:, 3])]
def load_ratings(data_home, size)
Load all samples in the dataset.
2.459126
2.463173
0.998357
delta = 0 if opt == 'm': while True: mdays = monthrange(d1.year, d1.month)[1] d1 += timedelta(days=mdays) if d1 <= d2: delta += 1 else: break else: delta = (d2 - d1).days return delta
def delta(d1, d2, opt='d')
Compute the difference between the given two dates, in months or days.
2.579663
2.288958
1.127003
vec = np.zeros(sum(dims)) offset = 0 for seed, dim in zip(seeds, dims): vec[offset:(offset + dim)] = feature_hash(feature, dim, seed) offset += dim return vec
def n_feature_hash(feature, dims, seeds)
N-hot-encoded feature hashing. Args: feature (str): Target feature represented as string. dims (list of int): Number of dimensions for each hash value. seeds (list of float): Seed of each hash function (mmh3). Returns: numpy 1d array: n-hot-encoded feature vector for `feature`.
2.945976
3.367965
0.874705
vec = np.zeros(dim) i = mmh3.hash(feature, seed) % dim vec[i] = 1 return vec
def feature_hash(feature, dim, seed=123)
Feature hashing. Args: feature (str): Target feature represented as string. dim (int): Number of dimensions for a hash value. seed (float): Seed of a MurmurHash3 hash function. Returns: numpy 1d array: one-hot-encoded feature vector for `feature`.
4.142386
4.412919
0.938695
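A small usage sketch of the hashing above; feature_hash is mirrored here so the snippet is self-contained (assuming mmh3 is installed). Hashing the same string with two different seeds usually lands in different buckets, which is exactly what n_feature_hash stacks into one vector.

import numpy as np
import mmh3

def feature_hash(feature, dim, seed=123):
    # Mirror of the function shown above: one-hot vector with a single hashed index set.
    vec = np.zeros(dim)
    vec[mmh3.hash(feature, seed) % dim] = 1
    return vec

print(feature_hash('user:42', dim=8, seed=1))
print(feature_hash('user:42', dim=8, seed=2))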
tp = 0 for r in recommend: if r in truth: tp += 1 return tp
def count_true_positive(truth, recommend)
Count number of true positives from given sets of samples. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: int: Number of true positives.
2.369937
4.30554
0.550439
if len(truth) == 0: if len(recommend) == 0: return 1. return 0. if k is None: k = len(recommend) return count_true_positive(truth, recommend[:k]) / float(truth.size)
def recall(truth, recommend, k=None)
Recall@k. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: Recall@k.
2.966065
3.305001
0.897447
if len(recommend) == 0: if len(truth) == 0: return 1. return 0. if k is None: k = len(recommend) return count_true_positive(truth, recommend[:k]) / float(k)
def precision(truth, recommend, k=None)
Precision@k. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: Precision@k.
2.607529
2.978189
0.875542
if len(truth) == 0: if len(recommend) == 0: return 1. return 0. tp = accum = 0. for n in range(recommend.size): if recommend[n] in truth: tp += 1. accum += (tp / (n + 1.)) return accum / truth.size
def average_precision(truth, recommend)
Average Precision (AP). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: AP.
2.922138
3.164897
0.923297
tp = correct = 0. for r in recommend: if r in truth: # keep track number of true positives placed before tp += 1. else: correct += tp # number of all possible tp-fp pairs pairs = tp * (recommend.size - tp) # if there is no TP (or no FP), it's meaningless for this metric (i.e., AUC=0.5) if pairs == 0: return 0.5 return correct / pairs
def auc(truth, recommend)
Area under the ROC curve (AUC). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: AUC.
7.308402
8.729234
0.837233
for n in range(recommend.size): if recommend[n] in truth: return 1. / (n + 1) return 0.
def reciprocal_rank(truth, recommend)
Reciprocal Rank (RR). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: RR.
3.423032
4.429127
0.772846
if len(recommend) == 0 and len(truth) == 0: return 0. # best elif len(recommend) == 0 or len(truth) == 0: return 100. # worst accum = 0. n_recommend = recommend.size for t in truth: r = np.where(recommend == t)[0][0] / float(n_recommend) accum += r return accum * 100. / truth.size
def mpr(truth, recommend)
Mean Percentile Rank (MPR). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: MPR.
3.295807
3.475816
0.948211
if k is None: k = len(recommend) def idcg(n_possible_truth): res = 0. for n in range(n_possible_truth): res += 1. / np.log2(n + 2) return res dcg = 0. for n, r in enumerate(recommend[:k]): if r not in truth: continue dcg += 1. / np.log2(n + 2) res_idcg = idcg(np.min([truth.size, k])) if res_idcg == 0.: return 0. return dcg / res_idcg
def ndcg(truth, recommend, k=None)
Normalized Discounted Cumulative Gain (NDCG). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: NDCG.
2.432622
2.702034
0.900293
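Assuming the metric functions above are importable (the module path below is hypothetical), a tiny end-to-end example ties them together; the commented numbers follow directly from the definitions and can be checked by hand on this 5-item ranking.

import numpy as np
# Hypothetical import path; adjust to wherever these metrics live.
from metrics import recall, precision, average_precision, ndcg

truth = np.array([1, 3, 4])
recommend = np.array([3, 2, 1, 5, 4])   # ranked list, best first

print(recall(truth, recommend, k=3))        # 2 of the 3 relevant items are in the top-3 -> 0.666...
print(precision(truth, recommend, k=3))     # 2 of the top-3 are relevant -> 0.666...
print(average_precision(truth, recommend))  # (1/1 + 2/3 + 3/5) / 3 -> 0.755...
print(ndcg(truth, recommend))               # DCG of hits at ranks 1, 3, 5 divided by the ideal DCG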
# number of observed users self.n_user = 0 # store user data self.users = {} # number of observed items self.n_item = 0 # store item data self.items = {}
def initialize(self, *args)
Initialize a recommender by resetting stored users and items.
3.97389
3.176641
1.250972
self.users[user.index] = {'known_items': set()} self.n_user += 1
def register_user(self, user)
For new users, append their information into the dictionaries. Args: user (User): User.
12.633882
14.049193
0.89926
sorted_indices = np.argsort(scores) if rev: sorted_indices = sorted_indices[::-1] return candidates[sorted_indices], scores[sorted_indices]
def scores2recos(self, scores, candidates, rev=False)
Get a recommendation list for a user u_index based on scores. Args: scores (numpy array; (n_target_items,)): Scores for the target items. A smaller score indicates a more promising item. candidates (numpy array; (# target items, )): Target items' indices. Only these items are considered as the recommendation candidates. rev (bool): If true, return items in descending order. Ascending order (i.e., smaller scores are more promising) is the default. Returns: (numpy array, numpy array): (Sorted list of items, Sorted scores).
2.564451
3.125538
0.820483
# make initial status for batch training for e in train_events: self.__validate(e) self.rec.users[e.user.index]['known_items'].add(e.item.index) self.item_buffer.append(e.item.index) # for batch evaluation, temporarily save new users info for e in test_events: self.__validate(e) self.item_buffer.append(e.item.index) self.__batch_update(train_events, test_events, n_epoch) # batch test events are considered as a new observations; # the model is incrementally updated based on them before the incremental evaluation step for e in test_events: self.rec.users[e.user.index]['known_items'].add(e.item.index) self.rec.update(e)
def fit(self, train_events, test_events, n_epoch=1)
Train a model using the first 30% positive events to avoid cold-start. Evaluation of this batch training is done by using the next 20% positive events. After the batch SGD training, the models are incrementally updated by using the 20% test events. Args: train_events (list of Event): Positive training events (0-30%). test_events (list of Event): Test events (30-50%). n_epoch (int): Number of epochs for the batch training.
5.539002
5.112032
1.083523
for i, e in enumerate(test_events): self.__validate(e) # target items (all or unobserved depending on a detaset) unobserved = set(self.item_buffer) if not self.repeat: unobserved -= self.rec.users[e.user.index]['known_items'] # item i interacted by user u must be in the recommendation candidate # even if it is a new item unobserved.add(e.item.index) candidates = np.asarray(list(unobserved)) # make top-{at} recommendation for the 1001 items start = time.clock() recos, scores = self.__recommend(e, candidates) recommend_time = (time.clock() - start) rank = np.where(recos == e.item.index)[0][0] # Step 2: update the model with the observed event self.rec.users[e.user.index]['known_items'].add(e.item.index) start = time.clock() self.rec.update(e) update_time = (time.clock() - start) self.item_buffer.append(e.item.index) # (top-1 score, where the correct item is ranked, rec time, update time) yield scores[0], rank, recommend_time, update_time
def evaluate(self, test_events)
Iterate the recommend/update procedure and compute incremental recall. Args: test_events (list of Event): Positive test events. Returns: list of tuples: (top-1 score, rank, recommend time, update time)
6.064999
5.606741
1.081733
for epoch in range(n_epoch): # SGD requires us to shuffle events in each iteration # * if n_epoch == 1 # => shuffle is not required because it is a deterministic training (i.e. matrix sketching) if n_epoch != 1: np.random.shuffle(train_events) # train for e in train_events: self.rec.update(e, batch_train=True) # test MPR = self.__batch_evaluate(test_events) if self.debug: logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR))
def __batch_update(self, train_events, test_events, n_epoch)
Batch update called by the fitting method. Args: train_events (list of Event): Positive training events. test_events (list of Event): Test events. n_epoch (int): Number of epochs for the batch training.
5.472105
5.961437
0.917917
percentiles = np.zeros(len(test_events)) all_items = set(self.item_buffer) for i, e in enumerate(test_events): # check if the data allows users to interact with the same items repeatedly unobserved = set(all_items) # copy so the in-place updates below do not modify all_items if not self.repeat: # make recommendation for all unobserved items unobserved -= self.rec.users[e.user.index]['known_items'] # true item itself must be in the recommendation candidates unobserved.add(e.item.index) candidates = np.asarray(list(unobserved)) recos, scores = self.__recommend(e, candidates) pos = np.where(recos == e.item.index)[0][0] percentiles[i] = pos / (len(recos) - 1) * 100 return np.mean(percentiles)
def __batch_evaluate(self, test_events)
Evaluate the current model by using the given test events. Args: test_events (list of Event): Current model is evaluated by these events. Returns: float: Mean Percentile Rank for the test set.
5.91242
5.910385
1.000344
'''Scale X values to new width''' if type(values) == dict: values = self._scale_x_values_timestamps(values=values, max_width=max_width) adjusted_values = list(values) if len(adjusted_values) > max_width: def get_position(current_pos): return len(adjusted_values) * current_pos // max_width adjusted_values = [statistics.mean(adjusted_values[get_position(i):get_position(i + 1)]) for i in range(max_width)] return adjusted_values
def _scale_x_values(self, values, max_width)
Scale X values to new width
3.516097
3.262423
1.077756
'''Scale X values to new width based on timestamps''' first_timestamp = float(values[0][0]) last_timestamp = float(values[-1][0]) step_size = (last_timestamp - first_timestamp) / max_width values_by_column = [[] for i in range(max_width)] for timestamp, value in values: if value is None: continue timestamp = float(timestamp) column = (timestamp - first_timestamp) // step_size column = int(min(column, max_width - 1)) # Don't go beyond the last column values_by_column[column].append(value) adjusted_values = [statistics.mean(values) if values else 0 for values in values_by_column] # Average each column, 0 if no values return adjusted_values
def _scale_x_values_timestamps(self, values, max_width)
Scale X values to new width based on timestamps
2.944258
2.745307
1.072469
''' Take values and transmute them into a new range ''' # Scale Y values - Create a scaled list of values to use for the visual graph scaled_values = [] y_min_value = min(values) if scale_old_from_zero: y_min_value = 0 y_max_value = max(values) new_min = 0 OldRange = (y_max_value - y_min_value) or 1 # Prevents division by zero if all values are the same NewRange = (new_max - new_min) # max_height is new_max for old_value in values: new_value = (((old_value - y_min_value) * NewRange) / OldRange) + new_min scaled_values.append(new_value) return scaled_values
def _scale_y_values(self, values, new_min, new_max, scale_old_from_zero=True)
Take values and transmute them into a new range
3.573125
3.119069
1.145574
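The rescaling in _scale_y_values above is plain min-max mapping into [new_min, new_max]; a standalone worked example of the same formula (not the method itself, which can also anchor the old range at zero via scale_old_from_zero):

values = [2, 4, 6, 10]
old_min, old_max = min(values), max(values)            # 2, 10
new_min, new_max = 0, 3
old_range = (old_max - old_min) or 1                   # guard against a flat series
scaled = [(v - old_min) * (new_max - new_min) / old_range + new_min for v in values]
print(scaled)  # [0.0, 0.75, 1.5, 3.0]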
'''Create a representation of an ascii graph using two lists in this format: field[x][y] = "char"''' empty_space = ' ' # This formats as field[x][y] field = [[empty_space for y in range(max(values) + 1)] for x in range(len(values))] # Draw graph into field for x in range(len(values)): y = values[x] y_prev = values[x - 1] if x - 1 in range(len(values)) else y y_next = values[x + 1] if x + 1 in range(len(values)) else y # Fill empty space if abs(y_prev - y) > 1: # Fill space between y and y_prev step = 1 if y_prev - y > 0 else -1 # We don't want the first item to be inclusive, so we use step instead of y+1 since step can be negative for h in range(y + step, y_prev, step): if field[x][h] is empty_space: field[x][h] = '|' # Assign the character to be placed into the graph char = self._assign_ascii_character(y_prev, y, y_next) field[x][y] = char return field
def _get_ascii_field(self, values)
Create a representation of an ascii graph using two lists in this format: field[x][y] = "char"
4.396677
3.433986
1.280342
'''Assign the character to be placed into the graph''' char = '?' if y_next > y and y_prev > y: char = '-' elif y_next < y and y_prev < y: char = '-' elif y_prev < y and y == y_next: char = '-' elif y_prev == y and y_next < y: char = '-' elif y_next > y: char = '/' elif y_next < y: char = '\\' elif y_prev < y: char = '/' elif y_prev > y: char = '\\' elif y_next == y: char = '-' elif y == y_prev: char = '-' return char
def _assign_ascii_character(self, y_prev, y, y_next)
Assign the character to be placed into the graph
2.23483
1.925216
1.160821
'''Draw graph from field double nested list, format field[x][y] = char''' row_strings = [] for y in range(len(field[0])): row = '' for x in range(len(field)): row += field[x][y] row_strings.insert(0, row) graph_string = '\n'.join(row_strings) return graph_string
def _draw_ascii_graph(self, field)
Draw graph from field double nested list, format field[x][y] = char
4.007404
2.408173
1.664085
''' Accepts a list of y values and returns an ascii graph Optionally values can also be a dictionary with a key of timestamp, and a value of value. InGraphs returns data in this format for example. ''' result = '' border_fill_char = '*' start_ctime = None end_ctime = None if not max_width: max_width = 180 # If this is a dict of timestamp -> value, sort the data, store the start/end time, and convert values to a list of values if isinstance(values, dict): time_series_sorted = sorted(list(values.items()), key=lambda x: x[0]) # Sort timestamp/value dict by the timestamps start_timestamp = time_series_sorted[0][0] end_timestamp = time_series_sorted[-1][0] start_ctime = datetime.fromtimestamp(float(start_timestamp)).ctime() end_ctime = datetime.fromtimestamp(float(end_timestamp)).ctime() values = self._scale_x_values_timestamps(values=time_series_sorted, max_width=max_width) values = [value for value in values if value is not None] if not max_height: max_height = min(20, max(values)) stdev = statistics.stdev(values) mean = statistics.mean(values) # Do value adjustments adjusted_values = list(values) adjusted_values = self._scale_x_values(values=values, max_width=max_width) upper_value = max(adjusted_values) # Getting upper/lower after scaling x values so we don't label a spike we can't see lower_value = min(adjusted_values) adjusted_values = self._scale_y_values(values=adjusted_values, new_min=0, new_max=max_height, scale_old_from_zero=False) adjusted_values = self._round_floats_to_ints(values=adjusted_values) # Obtain Ascii Graph String field = self._get_ascii_field(adjusted_values) graph_string = self._draw_ascii_graph(field=field) # Label the graph if label: top_label = 'Upper value: {upper_value:.2f} '.format(upper_value=upper_value).ljust(max_width, border_fill_char) result += top_label + '\n' result += '{graph_string}\n'.format(graph_string=graph_string) if label: lower = f'Lower value: {lower_value:.2f} ' stats = f' Mean: {mean:.2f} *** Std Dev: {stdev:.2f} ******' fill_length = max_width - len(lower) - len(stats) stat_label = f'{lower}{"*" * fill_length}{stats}\n' result += stat_label if start_ctime and end_ctime: fill_length = max_width - len(start_ctime) - len(end_ctime) result += f'{start_ctime}{" " * fill_length}{end_ctime}\n' return result
def asciigraph(self, values=None, max_height=None, max_width=None, label=False)
Accepts a list of y values and returns an ascii graph Optionally values can also be a dictionary with a key of timestamp, and a value of value. InGraphs returns data in this format for example.
3.570115
3.008437
1.186701
if isinstance(expression, Pattern): expression = expression.expression return _substitute(expression, substitution)[0]
def substitute(expression: Union[Expression, Pattern], substitution: Substitution) -> Replacement
Replaces variables in the given *expression* using the given *substitution*. >>> print(substitute(f(x_), {'x': a})) f(a) If nothing was substituted, the original expression is returned: >>> expression = f(x_) >>> result = substitute(expression, {'y': a}) >>> print(result) f(x_) >>> expression is result True Note that this function returns a list of expressions iff the expression is a variable and its substitution is a list of expressions. In other cases were a substitution is a list of expressions, the expressions will be integrated as operands in the surrounding operation: >>> print(substitute(f(x_, c), {'x': [a, b]})) f(a, b, c) If you substitute with a `Multiset` of values, they will be sorted: >>> replacement = Multiset([b, a, b]) >>> print(substitute(f(x_, c), {'x': replacement})) f(a, b, b, c) Parameters: expression: An expression in which variables are substituted. substitution: A substitution dictionary. The key is the name of the variable, the value either an expression or a list of expression to use as a replacement for the variable. Returns: The expression resulting from applying the substitution.
5.443305
10.321874
0.527356
if len(position) == 0: return replacement if not isinstance(expression, Operation): raise IndexError("Invalid position {!r} for expression {!s}".format(position, expression)) if position[0] >= op_len(expression): raise IndexError("Position {!r} out of range for expression {!s}".format(position, expression)) pos = position[0] operands = list(op_iter(expression)) subexpr = replace(operands[pos], position[1:], replacement) if isinstance(subexpr, Sequence): new_operands = tuple(operands[:pos]) + tuple(subexpr) + tuple(operands[pos + 1:]) return create_operation_expression(expression, new_operands) operands[pos] = subexpr return create_operation_expression(expression, operands)
def replace(expression: Expression, position: Sequence[int], replacement: Replacement) -> Replacement
r"""Replaces the subexpression of `expression` at the given `position` with the given `replacement`. The original `expression` itself is not modified, but a modified copy is returned. If the replacement is a list of expressions, it will be expanded into the list of operands of the respective operation: >>> print(replace(f(a), (0, ), [b, c])) f(b, c) Parameters: expression: An :class:`Expression` where a (sub)expression is to be replaced. position: A tuple of indices, e.g. the empty tuple refers to the `expression` itself, `(0, )` refers to the first child (operand) of the `expression`, `(0, 0)` to the first child of the first child etc. replacement: Either an :class:`Expression` or a list of :class:`Expression`\s to be inserted into the `expression` instead of the original expression at that `position`. Returns: The resulting expression from the replacement. Raises: IndexError: If the position is invalid or out of range.
2.950181
2.799921
1.053666
if len(replacements) == 0: return expression replacements = sorted(replacements) if len(replacements[0][0]) == 0: if len(replacements) > 1: raise IndexError( "Cannot replace child positions for expression {}, got {!r}".format(expression, replacements[1:]) ) return replacements[0][1] if len(replacements) == 1: return replace(expression, replacements[0][0], replacements[0][1]) if not isinstance(expression, Operation): raise IndexError("Invalid replacements {!r} for expression {!s}".format(replacements, expression)) operands = list(op_iter(expression)) new_operands = [] last_index = 0 for index, group in itertools.groupby(replacements, lambda r: r[0][0]): new_operands.extend(operands[last_index:index]) replacements = [(pos[1:], r) for pos, r in group] if len(replacements) == 1: replacement = replace(operands[index], replacements[0][0], replacements[0][1]) else: replacement = replace_many(operands[index], replacements) if isinstance(replacement, (list, tuple, Multiset)): new_operands.extend(replacement) else: new_operands.append(replacement) last_index = index + 1 new_operands.extend(operands[last_index:len(operands)]) return create_operation_expression(expression, new_operands)
def replace_many(expression: Expression, replacements: Sequence[Tuple[Sequence[int], Replacement]]) -> Replacement
r"""Replaces the subexpressions of *expression* at the given positions with the given replacements. The original *expression* itself is not modified, but a modified copy is returned. If the replacement is a sequence of expressions, it will be expanded into the list of operands of the respective operation. This function works the same as `replace`, but allows multiple positions to be replaced at the same time. However, compared to just replacing each position individually with `replace`, this does work when positions are modified due to replacing a position with a sequence: >>> expr = f(a, b) >>> expected_result = replace_many(expr, [((0, ), [c, c]), ((1, ), a)]) >>> print(expected_result) f(c, c, a) However, using `replace` for one position at a time gives the wrong result: >>> step1 = replace(expr, (0, ), [c, c]) >>> print(step1) f(c, c, b) >>> step2 = replace(step1, (1, ), a) >>> print(step2) f(c, a, b) Parameters: expression: An :class:`Expression` where a (sub)expression is to be replaced. replacements: A collection of tuples consisting of a position in the expression and a replacement for that position. With just a single replacement pair, this is equivalent to using `replace`: >>> replace(a, (), b) == replace_many(a, [((), b)]) True Returns: The resulting expression from the replacements. Raises: IndexError: If a position is invalid or out of range or if you try to replace a subterm of a term you are already replacing.
2.378458
2.393391
0.993761
rules = [ReplacementRule(pattern, replacement) for pattern, replacement in rules] expression = expression replaced = True replace_count = 0 while replaced and replace_count < max_count: replaced = False for subexpr, pos in preorder_iter_with_position(expression): for pattern, replacement in rules: try: subst = next(match(subexpr, pattern)) result = replacement(**subst) expression = replace(expression, pos, result) replaced = True break except StopIteration: pass if replaced: break replace_count += 1 return expression
def replace_all(expression: Expression, rules: Iterable[ReplacementRule], max_count: int=math.inf) -> Union[Expression, Sequence[Expression]]
Replace all occurrences of the patterns according to the replacement rules. A replacement rule consists of a *pattern*, that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever the callback returns is used as a replacement for the matched subexpression. This can either be a single expression or a sequence of expressions, which is then integrated into the surrounding operation in place of the subexpression. Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions will be matched. Args: expression: The expression to which the replacement rules are applied. rules: A collection of replacement rules that are applied to the expression. max_count: If given, at most *max_count* applications of the rules are performed. Otherwise, the rules are applied until there is no more match. If the set of replacement rules is not confluent, the replacement might not terminate without a *max_count* set. Returns: The resulting expression after the application of the replacement rules. This can also be a sequence of expressions, if the root expression is replaced with a sequence of expressions by a rule.
3.019305
3.572991
0.845036
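These helpers follow MatchPy's pattern-matching API; if that is indeed the surrounding library, a minimal replace_all usage would look roughly like this (Operation.new, Arity, Symbol, Pattern and ReplacementRule are assumed to come from matchpy; treat this as a sketch, not documented behaviour of this exact module):

from matchpy import Operation, Arity, Symbol, Pattern, ReplacementRule, replace_all

f = Operation.new('f', Arity.variadic)
a, b = Symbol('a'), Symbol('b')

# Rewrite every subterm f(a) to b; rules are applied until nothing matches any more.
rule = ReplacementRule(Pattern(f(a)), lambda: b)
print(replace_all(f(f(a), a), [rule]))  # f(b, a)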
return _replace_all_post_order(expression, rules)[0]
def replace_all_post_order(expression: Expression, rules: Iterable[ReplacementRule]) -> Union[Expression, Sequence[Expression]]
Replace all occurrences of the patterns according to the replacement rules. A replacement rule consists of a *pattern*, that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever the callback returns is used as a replacement for the matched subexpression. This can either be a single expression or a sequence of expressions, which is then integrated into the surrounding operation in place of the subexpression. Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions will be matched. Args: expression: The expression to which the replacement rules are applied. rules: A collection of replacement rules that are applied to the expression. max_count: If given, at most *max_count* applications of the rules are performed. Otherwise, the rules are applied until there is no more match. If the set of replacement rules is not confluent, the replacement might not terminate without a *max_count* set. Returns: The resulting expression after the application of the replacement rules. This can also be a sequence of expressions, if the root expression is replaced with a sequence of expressions by a rule.
4.550606
13.261083
0.343155
return any(True for _ in match(subject, pattern))
def is_match(subject: Expression, pattern: Expression) -> bool
Check whether the given *subject* matches given *pattern*. Args: subject: The subject. pattern: The pattern. Returns: True iff the subject matches the pattern.
15.064775
21.282749
0.70784
if Graph is None: raise ImportError('The graphviz package is required to draw the graph.') graph = Graph() nodes_left = {} # type: Dict[TLeft, str] nodes_right = {} # type: Dict[TRight, str] node_id = 0 for (left, right), value in self._edges.items(): if left not in nodes_left: name = 'node{:d}'.format(node_id) nodes_left[left] = name graph.node(name, label=str(left)) node_id += 1 if right not in nodes_right: name = 'node{:d}'.format(node_id) nodes_right[right] = name graph.node(name, label=str(right)) node_id += 1 edge_label = value is not True and str(value) or '' graph.edge(nodes_left[left], nodes_right[right], edge_label) return graph
def as_graph(self) -> Graph
Returns a :class:`graphviz.Graph` representation of this bipartite graph.
2.129218
1.989867
1.07003
# The directed graph is represented as a dictionary of edges # The key is the tail of all edges which are represented by the value # The value is a set of heads for all the edges originating from the tail (key) # In addition, the graph stores which part of the bipartite graph a node originated from # to avoid problems when a value exists in both halves. # Only one direction of the undirected edge is needed for the HopcroftKarp class directed_graph = {} # type: Dict[Tuple[int, TLeft], Set[Tuple[int, TRight]]] for (left, right) in self._edges: tail = (LEFT, left) head = (RIGHT, right) if tail not in directed_graph: directed_graph[tail] = {head} else: directed_graph[tail].add(head) matching = HopcroftKarp(directed_graph).maximum_matching() # Filter out the partitions (LEFT and RIGHT) and only return the matching edges # that go from LEFT to RIGHT return dict((tail[1], head[1]) for tail, head in matching.items() if tail[0] == LEFT)
def find_matching(self) -> Dict[TLeft, TRight]
Finds a matching in the bipartite graph. This is done using the Hopcroft-Karp algorithm with an implementation from the `hopcroftkarp` package. Returns: A dictionary where each edge of the matching is represented by a key-value pair with the key being from the left part of the graph and the value from the right part.
5.04389
4.945477
1.0199
return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 != edge[0] and n2 != edge[1])
def without_nodes(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]'
Returns a copy of this bipartite graph with the given edge and its adjacent nodes removed.
2.4347
2.419386
1.006329
return BipartiteGraph((e2, v) for e2, v in self._edges.items() if edge != e2)
def without_edge(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]'
Returns a copy of this bipartite graph with the given edge removed.
4.55515
4.609358
0.98824
return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 in left and n2 in right)
def limited_to(self, left: Set[TLeft], right: Set[TRight]) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]'
Returns the induced subgraph where only the nodes from the given sets are included.
2.733119
2.744363
0.995903
if Digraph is None: raise ImportError('The graphviz package is required to draw the graph.') graph = Digraph() subgraphs = [Digraph(graph_attr={'rank': 'same'}), Digraph(graph_attr={'rank': 'same'})] nodes = [{}, {}] # type: List[Dict[Union[TLeft, TRight], str]] edges = [] # type: List[Tuple[str, str]] node_id = 0 for (tail_part, tail), head_set in self.items(): if tail not in nodes[tail_part]: name = 'node{:d}'.format(node_id) nodes[tail_part][tail] = name subgraphs[tail_part].node(name, label=str(tail)) node_id += 1 for head_part, head in head_set: if head not in nodes[head_part]: name = 'node{:d}'.format(node_id) nodes[head_part][head] = name subgraphs[head_part].node(name, label=str(head)) node_id += 1 edges.append((nodes[tail_part][tail], nodes[head_part][head])) graph.subgraph(subgraphs[0]) graph.subgraph(subgraphs[1]) for tail_node, head_node in edges: graph.edge(tail_node, head_node) return graph
def as_graph(self) -> Digraph
Returns a :class:`graphviz.Digraph` representation of this directed match graph.
2.251864
2.206907
1.020371
if isinstance(expression, Wildcard): return False if isinstance(expression, Expression): return expression.is_constant if isinstance(expression, Operation): return all(is_constant(o) for o in op_iter(expression)) return True
def is_constant(expression)
Check if the given expression is constant, i.e. it does not contain Wildcards.
3.489348
3.078031
1.13363
if isinstance(expression, Wildcard): return expression.fixed_size if isinstance(expression, Expression): return expression.is_syntactic if isinstance(expression, (AssociativeOperation, CommutativeOperation)): return False if isinstance(expression, Operation): return all(is_syntactic(o) for o in op_iter(expression)) return True
def is_syntactic(expression)
Check if the given expression is syntactic, i.e. it does not contain sequence wildcards or associative/commutative operations.
3.65999
3.160872
1.157905
if isinstance(expression, Wildcard): if isinstance(expression, SymbolWildcard): return expression.symbol_type return None return type(expression)
def get_head(expression)
Returns the given expression's head.
5.411252
4.986569
1.085165
if isinstance(pattern, Pattern): pattern = pattern.expression pattern_head = get_head(pattern) if pattern_head is None: return True if issubclass(pattern_head, OneIdentityOperation): return True subject_head = get_head(subject) assert subject_head is not None return issubclass(subject_head, pattern_head)
def match_head(subject, pattern)
Checks if the head of subject matches the pattern's head.
3.389587
3.384139
1.00161
yield expression if isinstance(expression, Operation): for operand in op_iter(expression): yield from preorder_iter(operand)
def preorder_iter(expression)
Iterate over the expression in preorder.
4.088847
3.870757
1.056343
yield expression, () if isinstance(expression, Operation): for i, operand in enumerate(op_iter(expression)): for child, pos in preorder_iter_with_position(operand): yield child, (i, ) + pos
def preorder_iter_with_position(expression)
Iterate over the expression in preorder. Also yields the position of each subexpression.
4.052983
4.653429
0.870967
if hasattr(expression, 'variable_name') and expression.variable_name: return False if isinstance(expression, Operation): return all(is_anonymous(o) for o in op_iter(expression)) return True
def is_anonymous(expression)
Returns True iff the expression does not contain any variables.
3.926522
3.66506
1.071339
if hasattr(expression, 'variable_name') and expression.variable_name in variables: return True if isinstance(expression, Operation): return any(contains_variables_from_set(o, variables) for o in op_iter(expression)) return False
def contains_variables_from_set(expression, variables)
Returns True iff the expression contains any of the variables from the given set.
3.055142
3.018142
1.012259
if variables is None: variables = set() if hasattr(expression, 'variable_name') and expression.variable_name is not None: variables.add(expression.variable_name) if isinstance(expression, Operation): for operand in op_iter(expression): get_variables(operand, variables) return variables
def get_variables(expression, variables=None)
Returns the set of variable names in the given expression.
2.463719
2.306363
1.068227
if isinstance(expression, Operation): if hasattr(expression, 'variable_name'): variable_name = renaming.get(expression.variable_name, expression.variable_name) return create_operation_expression( expression, [rename_variables(o, renaming) for o in op_iter(expression)], variable_name=variable_name ) operands = [rename_variables(o, renaming) for o in op_iter(expression)] return create_operation_expression(expression, operands) elif isinstance(expression, Expression): expression = expression.__copy__() expression.variable_name = renaming.get(expression.variable_name, expression.variable_name) return expression
def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression
Rename the variables in the expression according to the given dictionary. Args: expression: The expression in which the variables are renamed. renaming: The renaming dictionary. Maps old variable names to new ones. Variable names not occurring in the dictionary are left unchanged. Returns: The expression with renamed variables.
2.552687
2.661186
0.959229
if vector_sum < 0: raise ValueError("Vector sum must not be negative") if len(max_vector) == 0: if vector_sum == 0: yield tuple() return total = sum(max_vector) if vector_sum <= total: start = max(max_vector[0] + vector_sum - total, 0) end = min(max_vector[0], vector_sum) for j in range(start, end + 1): for vec in fixed_integer_vector_iter(max_vector[1:], vector_sum - j): yield (j, ) + vec
def fixed_integer_vector_iter(max_vector: Tuple[int, ...], vector_sum: int) -> Iterator[Tuple[int, ...]]
Return an iterator over the integer vectors which - are componentwise less than or equal to *max_vector*, and - are non-negative, and where - the sum of their components is exactly *vector_sum*. The iterator yields the vectors in lexicographical order. Examples: List all vectors that are between ``(0, 0)`` and ``(2, 2)`` componentwise, where the sum of components is 2: >>> vectors = list(fixed_integer_vector_iter([2, 2], 2)) >>> vectors [(0, 2), (1, 1), (2, 0)] >>> list(map(sum, vectors)) [2, 2, 2] Args: max_vector: Maximum vector for the iteration. Every yielded result will be less than or equal to this componentwise. vector_sum: Every iterated vector will have a component sum equal to this value. Yields: All non-negative vectors that have the given sum and are not larger than the given maximum. Raises: ValueError: If *vector_sum* is negative.
2.011697
2.449595
0.821236
if n < 0: raise ValueError("Total must not be negative") if num_parts < 0: raise ValueError("Number of num_parts must not be negative") if num_parts == 0: if n == 0: yield tuple() return m = n + num_parts - 1 last = (m, ) first = (-1, ) for t in itertools.combinations(range(m), num_parts - 1): yield tuple(v - u - 1 for u, v in zip(first + t, t + last))
def weak_composition_iter(n: int, num_parts: int) -> Iterator[Tuple[int, ...]]
Yield all weak compositions of integer *n* into *num_parts* parts. Each composition is yielded as a tuple. The generated partitions are order-dependant and not unique when ignoring the order of the components. The partitions are yielded in lexicographical order. Example: >>> compositions = list(weak_composition_iter(5, 2)) >>> compositions [(0, 5), (1, 4), (2, 3), (3, 2), (4, 1), (5, 0)] We can easily verify that all compositions are indeed valid: >>> list(map(sum, compositions)) [5, 5, 5, 5, 5, 5] The algorithm was adapted from an answer to this `Stackoverflow question`_. Args: n: The integer to partition. num_parts: The number of parts for the combination. Yields: All non-negative tuples that have the given sum and size. Raises: ValueError: If *n* or *num_parts* are negative. .. _Stackoverflow question: http://stackoverflow.com/questions/40538923/40540014#40540014
3.334639
3.683947
0.905181
if len(variables) == 1: yield from _commutative_single_variable_partiton_iter(values, variables[0]) return generators = [] for value, count in values.items(): generators.append(_make_variable_generator_factory(value, count, variables)) initial = dict((var.name, Multiset()) for var in variables) # type: Dict[str, 'Multiset[T]'] for subst in generator_chain(initial, *generators): valid = True for var in variables: if var.default is not None and len(subst[var.name]) == 0: subst[var.name] = var.default elif len(subst[var.name]) < var.minimum: valid = False break if valid: if None in subst: del subst[None] yield subst
def commutative_sequence_variable_partition_iter(values: Multiset, variables: List[VariableWithCount] ) -> Iterator[Dict[str, Multiset]]
Yield all possible variable substitutions for given values and variables. .. note:: The results are not yielded in any particular order because the algorithm uses dictionaries. Dictionaries until Python 3.6 do not keep track of the insertion order. Example: For a subject like ``fc(a, a, a, b, b, c)`` and a pattern like ``f(x__, y___, y___)`` one can define the following input parameters for the partitioning: >>> x = VariableWithCount(name='x', count=1, minimum=1, default=None) >>> y = VariableWithCount(name='y', count=2, minimum=0, default=None) >>> values = Multiset('aaabbc') Then the solutions are found (and sorted to get a unique output): >>> substitutions = commutative_sequence_variable_partition_iter(values, [x, y]) >>> as_strings = list(str(Substitution(substitution)) for substitution in substitutions) >>> for substitution in sorted(as_strings): ... print(substitution) {x ↦ {a, a, a, b, b, c}, y ↦ {}} {x ↦ {a, a, a, c}, y ↦ {b}} {x ↦ {a, b, b, c}, y ↦ {a}} {x ↦ {a, c}, y ↦ {a, b}} Args: values: The multiset of values which are partitioned and distributed among the variables. variables: A list of the variables to distribute the values among. Each variable has a name, a count of how many times it occurs and a minimum number of values it needs. Yields: Each possible substitutions that is a valid partitioning of the values among the variables.
3.099422
3.145462
0.985363
try: all_source_lines, lnum = inspect.findsource(lambda_func) source_lines, _ = inspect.getsourcelines(lambda_func) except (IOError, TypeError): return None all_source_lines = [l.rstrip('\r\n') for l in all_source_lines] block_end = lnum + len(source_lines) source_ast = None for i in range(lnum, -1, -1): try: block = all_source_lines[i:block_end] if block[0].startswith(' ') or block[0].startswith('\t'): block.insert(0, 'with 0:') source_ast = ast.parse(os.linesep.join(block)) except (SyntaxError, tokenize.TokenError): pass else: break nv = LambdaNodeVisitor(block) nv.visit(source_ast) lambda_code = lambda_func.__code__ for candidate_code, lambda_text in nv.lambdas: candidate_code = candidate_code.co_consts[0] # We don't check for direct equivalence since the flags can be different if (candidate_code.co_code == lambda_code.co_code and candidate_code.co_consts == lambda_code.co_consts and candidate_code.co_names == lambda_code.co_names and candidate_code.co_varnames == lambda_code.co_varnames and candidate_code.co_cellvars == lambda_code.co_cellvars and candidate_code.co_freevars == lambda_code.co_freevars): return lambda_text[lambda_text.index(':')+1:].strip() return None
def get_short_lambda_source(lambda_func: LambdaType) -> Optional[str]
Return the source of a (short) lambda function. If it's impossible to obtain, return ``None``. The source is returned without the ``lambda`` and signature parts: >>> get_short_lambda_source(lambda x, y: x < y) 'x < y' This should work well for most lambda definitions, however for multi-line or highly nested lambdas, the source extraction might not succeed. Args: lambda_func: The lambda function. Returns: The source of the lambda function without its signature.
2.602059
2.62796
0.990144
if b == 0: return (1, 0, a) x0, y0, d = extended_euclid(b, a % b) x, y = y0, x0 - (a // b) * y0 return (x, y, d)
def extended_euclid(a: int, b: int) -> Tuple[int, int, int]
Extended Euclidean algorithm that computes the Bézout coefficients as well as :math:`gcd(a, b)` Returns ``x, y, d`` where *x* and *y* are a solution to :math:`ax + by = d` and :math:`d = gcd(a, b)`. *x* and *y* are a minimal pair of Bézout's coefficients. See `Extended Euclidean algorithm <https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm>`_ or `Bézout's identity <https://en.wikipedia.org/wiki/B%C3%A9zout%27s_identity>`_ for more information. Example: Compute the Bézout coefficients and GCD of 42 and 12: >>> a, b = 42, 12 >>> x, y, d = extended_euclid(a, b) >>> x, y, d (1, -3, 6) Verify the results: >>> import math >>> d == math.gcd(a, b) True >>> a * x + b * y == d True Args: a: The first integer. b: The second integer. Returns: A tuple with the Bézout coefficients and the greatest common divider of the arguments.
1.87788
2.493742
0.753037
if a <= 0 or b <= 0: raise ValueError('Coefficients a and b must be positive integers.') if c < 0: raise ValueError('Constant c must not be negative.') d = math.gcd(a, math.gcd(b, c)) a = a // d b = b // d c = c // d if c == 0: yield (0, 0) else: x0, y0, d = extended_euclid(a, b) # If c is not divisible by gcd(a, b), then there is no solution if c % d != 0: return x, y = c * x0, c * y0 if x <= 0: while y >= 0: if x >= 0: yield (x, y) x += b y -= a else: while x >= 0: if y >= 0: yield (x, y) x -= b y += a
def base_solution_linear(a: int, b: int, c: int) -> Iterator[Tuple[int, int]]
r"""Yield solutions for a basic linear Diophantine equation of the form :math:`ax + by = c`. First, the equation is normalized by dividing :math:`a, b, c` by their gcd. Then, the extended Euclidean algorithm (:func:`extended_euclid`) is used to find a base solution :math:`(x_0, y_0)`. All non-negative solutions are generated by using that the general solution is :math:`(x_0 + b t, y_0 - a t)`. Because the base solution is one of the minimal pairs of Bézout's coefficients, for all non-negative solutions either :math:`t \geq 0` or :math:`t \leq 0` must hold. Also, all the non-negative solutions are consecutive with respect to :math:`t`. Hence, by adding or subtracting :math:`a` resp. :math:`b` from the base solution, all non-negative solutions can be efficiently generated. Args: a: The first coefficient of the equation. b: The second coefficient of the equation. c: The constant of the equation. Yields: Each non-negative integer solution of the equation as a tuple ``(x, y)``. Raises: ValueError: If any of the coefficients is not a positive integer.
2.460596
2.345567
1.049041
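A brute-force cross-check of base_solution_linear above for 3x + 4y = 12; the generator should yield exactly these two non-negative pairs (possibly in a different order).

# Non-negative solutions of 3x + 4y = 12, found exhaustively.
a, b, c = 3, 4, 12
solutions = [(x, y) for x in range(c // a + 1) for y in range(c // b + 1) if a * x + b * y == c]
print(solutions)  # [(0, 3), (4, 0)]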
if len(coeffs) == 0: if total == 0: yield tuple() return if len(coeffs) == 1: if total % coeffs[0] == 0: yield (total // coeffs[0], ) return if len(coeffs) == 2: yield from base_solution_linear(coeffs[0], coeffs[1], total) return # calculate gcd(coeffs[1:]) remainder_gcd = math.gcd(coeffs[1], coeffs[2]) for coeff in coeffs[3:]: remainder_gcd = math.gcd(remainder_gcd, coeff) # solve coeffs[0] * x + remainder_gcd * y = total for coeff0_solution, remainder_gcd_solution in base_solution_linear(coeffs[0], remainder_gcd, total): new_coeffs = [c // remainder_gcd for c in coeffs[1:]] # use the solutions for y to solve the remaining variables recursively for remainder_solution in solve_linear_diop(remainder_gcd_solution, *new_coeffs): yield (coeff0_solution, ) + remainder_solution
def solve_linear_diop(total: int, *coeffs: int) -> Iterator[Tuple[int, ...]]
Yield non-negative integer solutions of a linear Diophantine equation of the form :math:`c_1 x_1 + \dots + c_n x_n = total`.

If there are at most two coefficients, :func:`base_solution_linear()` is used to find the solutions. Otherwise, the solutions are found recursively, by reducing the number of variables in each recursion:

1. Compute :math:`d := gcd(c_2, \dots , c_n)`
2. Solve :math:`c_1 x + d y = total`
3. Recursively solve :math:`c_2 x_2 + \dots + c_n x_n = y` for each solution for :math:`y`
4. Combine these solutions to form a solution for the whole equation

Args:
    total: The constant of the equation.
    *coeffs: The coefficients :math:`c_i` of the equation.

Yields:
    The non-negative integer solutions of the equation as a tuple :math:`(x_1, \dots, x_n)`.
2.777324
2.580874
1.076118
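A hedged sketch of the recursive case described above (three or more coefficients), again assuming the function is importable as in matchpy.utils; the concrete equation is chosen only for illustration.

# Hypothetical sketch: non-negative solutions of 2a + 3b + 5c = 7.
solutions = set(solve_linear_diop(7, 2, 3, 5))
assert solutions == {(2, 1, 0), (1, 0, 1)}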
generator_count = len(factories)
if generator_count == 0:
    yield initial_data
    return
generators = [None] * generator_count  # type: List[Optional[Iterator[T]]]
next_data = initial_data
generator_index = 0
while True:
    try:
        while generator_index < generator_count:
            if generators[generator_index] is None:
                generators[generator_index] = factories[generator_index](next_data)
            next_data = next(generators[generator_index])
            generator_index += 1
        yield next_data
        generator_index -= 1
    except StopIteration:
        generators[generator_index] = None
        generator_index -= 1
        if generator_index < 0:
            break
def generator_chain(initial_data: T, *factories: Callable[[T], Iterator[T]]) -> Iterator[T]
Chain multiple generators together by passing results from one to the next.

This helper function allows you to create a chain of generators where each generator is constructed by a factory that receives the data yielded by the previous generator. So each generator can generate new data dependent on the data yielded by the previous one. For each data item yielded by a generator, a new generator is constructed by the next factory.

Example:

    Let's say for every number from 0 to 4, we want to count up to that number. Then we can do something like this using list comprehensions:

    >>> [i for n in range(1, 5) for i in range(1, n + 1)]
    [1, 1, 2, 1, 2, 3, 1, 2, 3, 4]

    You can use this function to achieve the same thing:

    >>> list(generator_chain(5, lambda n: iter(range(1, n)), lambda i: iter(range(1, i + 1))))
    [1, 1, 2, 1, 2, 3, 1, 2, 3, 4]

    The advantage is that this is independent of the number of dependent generators you have. Also, this function does not use recursion, so it is safe to use even with large generator counts.

Args:
    initial_data: The initial data that is passed to the first generator factory.
    *factories: The generator factories. Each of them gets passed its predecessor's data and has to return an iterable. The data from this iterable is passed to the next factory.

Yields:
    Every data item yielded by the generators of the final factory.
1.841482
2.09144
0.880485
if variable_name not in self:
    self[variable_name] = replacement.copy() if isinstance(replacement, Multiset) else replacement
else:
    existing_value = self[variable_name]
    if isinstance(existing_value, tuple):
        if isinstance(replacement, Multiset):
            if Multiset(existing_value) != replacement:
                raise ValueError
        elif replacement != existing_value:
            raise ValueError
    elif isinstance(existing_value, Multiset):
        if not isinstance(replacement, (tuple, list, Multiset)):
            raise ValueError
        compare_value = Multiset(replacement)
        if existing_value == compare_value:
            if not isinstance(replacement, Multiset):
                self[variable_name] = replacement
        else:
            raise ValueError
    elif replacement != existing_value:
        raise ValueError
def try_add_variable(self, variable_name: str, replacement: VariableReplacement) -> None
Try to add the variable with its replacement to the substitution.

This considers an existing replacement and will only succeed if the new replacement can be merged with the old replacement. Merging is possible if the two replacements are equivalent. Replacements can also be merged if the old replacement for the variable was unordered (i.e. a :class:`~.Multiset`) and the new one is an equivalent ordered version of it:

>>> subst = Substitution({'x': Multiset(['a', 'b'])})
>>> subst.try_add_variable('x', ('a', 'b'))
>>> print(subst)
{x ↦ (a, b)}

Args:
    variable_name: The name of the variable to add.
    replacement: The replacement for the variable.

Raises:
    ValueError: if the variable cannot be merged because it conflicts with the existing substitution for the variable name.
2.565126
2.3594
1.087194
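A short sketch of the merge behaviour described above, not from the source; it assumes ``Substitution`` is importable from matchpy and ``Multiset`` from the multiset package, and uses plain strings as stand-in replacement values.

from matchpy import Substitution
from multiset import Multiset

subst = Substitution({'x': Multiset(['a', 'b'])})
# An equivalent ordered replacement merges with (and replaces) the unordered one.
subst.try_add_variable('x', ('b', 'a'))
# A conflicting replacement raises ValueError and leaves the entry unchanged.
try:
    subst.try_add_variable('x', ('a', 'c'))
except ValueError:
    pass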
new_subst = Substitution(self)
new_subst.try_add_variable(variable, replacement)
return new_subst
def union_with_variable(self, variable: str, replacement: VariableReplacement) -> 'Substitution'
Try to create a new substitution with the given variable added.

See :meth:`try_add_variable` for a version of this method that modifies the substitution in place.

Args:
    variable: The name of the variable to add.
    replacement: The substitution for the variable.

Returns:
    The new substitution with the variable added or merged.

Raises:
    ValueError: if the variable cannot be merged because it conflicts with the existing substitution for the variable.
3.891778
4.692647
0.829335
if getattr(pattern, 'variable_name', False):
    try:
        self.try_add_variable(pattern.variable_name, subject)
    except ValueError:
        return False
    return True
elif isinstance(pattern, expressions.Operation):
    assert isinstance(subject, type(pattern))
    assert op_len(subject) == op_len(pattern)
    op_expression = cast(expressions.Operation, subject)
    for subj, patt in zip(op_iter(op_expression), op_iter(pattern)):
        if not self.extract_substitution(subj, patt):
            return False
return True
def extract_substitution(self, subject: 'expressions.Expression', pattern: 'expressions.Expression') -> bool
Extract the variable substitution for the given pattern and subject.

This assumes that subject and pattern already match when being considered as linear. Also, they both must be :term:`syntactic`, as sequence variables cannot be handled here. All this method does is check whether all the substitutions for the variables can be unified. So, in case it returns ``False``, the substitution is invalid for the match.

.. warning::

    This method mutates the substitution and will even do so in case the extraction fails. Create a copy before using this method if you need to preserve the original substitution.

Example:

    With an empty initial substitution and a linear pattern, the extraction will always succeed:

    >>> subst = Substitution()
    >>> subst.extract_substitution(f(a, b), f(x_, y_))
    True
    >>> print(subst)
    {x ↦ a, y ↦ b}

    Clashing values for existing variables will fail:

    >>> subst.extract_substitution(b, x_)
    False

    For non-linear patterns, the extraction can also fail with an empty substitution:

    >>> subst = Substitution()
    >>> subst.extract_substitution(f(a, b), f(x_, x_))
    False
    >>> print(subst)
    {x ↦ a}

    Note that the initial substitution got mutated even though the extraction failed!

Args:
    subject: A :term:`syntactic` subject that matches the pattern.
    pattern: A :term:`syntactic` pattern that matches the subject.

Returns:
    ``True`` iff the substitution could be extracted successfully.
3.563608
3.901681
0.913352
new_subst = Substitution(self)
for other in others:
    for variable_name, replacement in other.items():
        new_subst.try_add_variable(variable_name, replacement)
return new_subst
def union(self, *others: 'Substitution') -> 'Substitution'
Try to merge the substitutions.

If a variable occurs in multiple substitutions, try to merge the replacements. See :meth:`union_with_variable` to see how replacements are merged.

Does not modify any of the original substitutions.

Example:

    >>> subst1 = Substitution({'x': Multiset(['a', 'b']), 'z': a})
    >>> subst2 = Substitution({'x': ('a', 'b'), 'y': ('c', )})
    >>> print(subst1.union(subst2))
    {x ↦ (a, b), y ↦ (c), z ↦ a}

Args:
    others: The other substitutions to merge with this one.

Returns:
    The new substitution with the other substitutions merged.

Raises:
    ValueError: if a variable occurs in multiple substitutions but cannot be merged because the substitutions conflict.
3.277776
4.089457
0.801519
return Substitution((renaming.get(name, name), value) for name, value in self.items())
def rename(self, renaming: Dict[str, str]) -> 'Substitution'
Return a copy of the substitution with renamed variables.

Example:

    Rename the variable *x* to *y*:

    >>> subst = Substitution({'x': a})
    >>> subst.rename({'x': 'y'})
    {'y': Symbol('a')}

Args:
    renaming: A dictionary mapping old variable names to new ones.

Returns:
    A copy of the substitution where variable names have been replaced according to the given renaming dictionary. Names that are not contained in the dictionary are left unchanged.
4.30852
7.639821
0.563956
return isinstance(term, type) and issubclass(term, Operation)
def is_operation(term: Any) -> bool
Return True iff the given term is a subclass of :class:`.Operation`.
6.873792
3.346395
2.054088
return isinstance(term, type) and issubclass(term, Symbol)
def is_symbol_wildcard(term: Any) -> bool
Return True iff the given term is a subclass of :class:`.Symbol`.
8.980778
3.654099
2.457727
return next((t for t in state.keys() if is_symbol_wildcard(t) and isinstance(symbol, t)), None)
def _get_symbol_wildcard_label(state: '_State', symbol: Symbol) -> Type[Symbol]
Return the symbol wildcard transition label matching the given symbol in the given state, or None if no such label exists.
5.329668
4.58464
1.162505
if is_operation(term):
    return term.name + '('
elif is_symbol_wildcard(term):
    return '*{!s}'.format(term.__name__)
elif isinstance(term, Wildcard):
    return '*{!s}{!s}'.format(term.min_count, (not term.fixed_size) and '+' or '')
elif term == Wildcard:
    return '*'
else:
    return str(term)
def _term_str(term: TermAtom) -> str
Return a string representation of a term atom.
4.924381
4.605997
1.069124
for term in self._terms:
    if isinstance(term, Wildcard) and not term.fixed_size:
        return False
    if is_operation(term) and issubclass(term, (AssociativeOperation, CommutativeOperation)):
        return False
return True
def is_syntactic(self)
True, iff the flatterm is :term:`syntactic`.
5.982324
5.369843
1.114059
return cls(cls._combined_wildcards_iter(sum(flatterms, cls.empty())))
def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm'
Concatenate the given flatterms to a single flatterm. Args: *flatterms: The flatterms which are concatenated. Returns: The concatenated flatterms.
15.701852
22.73834
0.690545
if isinstance(expression, Operation):
    yield type(expression)
    for operand in op_iter(expression):
        yield from cls._flatterm_iter(operand)
    yield OPERATION_END
elif isinstance(expression, SymbolWildcard):
    yield expression.symbol_type
elif isinstance(expression, (Symbol, Wildcard)):
    yield expression
else:
    assert False, "Unreachable unless a new unsupported expression type is added."
def _flatterm_iter(cls, expression: Expression) -> Iterator[TermAtom]
Generator that yields the atoms of the expression in prefix notation with operation end markers.
5.073122
4.509571
1.124968
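A hedged illustration of the flatterm (prefix) encoding described above, not from the source; the names f, a, b and the import path matchpy.matching.syntactic are assumptions based on matchpy's layout.

from matchpy import Operation, Symbol, Arity
from matchpy.matching.syntactic import FlatTerm, OPERATION_END  # assumed import path

f = Operation.new('f', Arity.binary)
a, b = Symbol('a'), Symbol('b')

# The flatterm of f(a, b) is the prefix sequence: the operation class,
# its operands, and an operation-end marker.
terms = list(FlatTerm(f(a, b)))
assert terms == [f, a, b, OPERATION_END]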
last_wildcard = None  # type: Optional[Wildcard]
for term in flatterm:
    if isinstance(term, Wildcard) and not isinstance(term, SymbolWildcard):
        if last_wildcard is not None:
            new_min_count = last_wildcard.min_count + term.min_count
            new_fixed_size = last_wildcard.fixed_size and term.fixed_size
            last_wildcard = Wildcard(new_min_count, new_fixed_size)
        else:
            last_wildcard = Wildcard(term.min_count, term.fixed_size)
    else:
        if last_wildcard is not None:
            yield last_wildcard
            last_wildcard = None
        yield term
if last_wildcard is not None:
    yield last_wildcard
def _combined_wildcards_iter(flatterm: Iterator[TermAtom]) -> Iterator[TermAtom]
Combine consecutive wildcards in a flatterm into a single one.
1.98983
1.891322
1.052085
labels = set()  # type: Set[TransitionLabel]
if self.state1 is not None and self.fixed != 1:
    labels.update(self.state1.keys())
if self.state2 is not None and self.fixed != 2:
    labels.update(self.state2.keys())
if self.fixed != 0:
    if self.fixed == 1 and self.state2 is None:
        labels.add(OPERATION_END)
    elif self.fixed == 2 and self.state1 is None:
        labels.add(OPERATION_END)
    labels.add(Wildcard)
return labels
def labels(self) -> Set[TransitionLabel]
Return the set of transition labels to examine for this queue state. This is the union of the transition label sets for both states. However, if one of the states is fixed, it is excluded from this union and a wildcard transition is included instead. If the other state has already failed (is ``None``), :const:`OPERATION_END` is included as well.
2.631496
2.11056
1.246824
index = len(self._patterns)
self._patterns.append((pattern, final_label))
flatterm = FlatTerm(pattern.expression) if not isinstance(pattern, FlatTerm) else pattern
if flatterm.is_syntactic or len(flatterm) == 1:
    net = self._generate_syntactic_net(flatterm, index)
else:
    net = self._generate_net(flatterm, index)
if self._root:
    self._root = self._product_net(self._root, net)
else:
    self._root = net
return index
def add(self, pattern: Union[Pattern, FlatTerm], final_label: T=None) -> int
Add a pattern to the discrimination net. Args: pattern: The pattern which is added to the DiscriminationNet. If an expression is given, it will be converted to a `FlatTerm` for internal processing. You can also pass a `FlatTerm` directly. final_label: A label that is returned if the pattern matches when using :meth:`match`. This will default to the pattern itself. Returns: The index of the newly added pattern. This is used internally to later get the pattern and its final label once a match is found.
3.330115
3.288321
1.01271
# Capture the last sequence wildcard for every level of operation nesting on a stack
# Used to add backtracking edges in case the "match" fails later
last_wildcards = [None]
# Generate a fail state for every level of nesting to backtrack to a sequence wildcard in a parent Expression
# in case no match can be found
fail_states = [None]
operand_counts = [0]
root = state = _State()
states = {root.id: root}
for term in flatterm:
    if operand_counts[-1] >= 0:
        operand_counts[-1] += 1
    # For wildcards, generate a chain of #min_count Wildcard edges
    # If the wildcard is unbounded (fixed_size = False),
    # add a wildcard self loop at the end
    if isinstance(term, Wildcard):
        # Generate a chain of #min_count Wildcard edges
        for _ in range(term.min_count):
            state = cls._create_child_state(state, Wildcard)
            states[state.id] = state
        # If it is a sequence wildcard, add a self loop
        if not term.fixed_size:
            state[Wildcard] = state
            last_wildcards[-1] = state
            operand_counts[-1] = -1
    else:
        state = cls._create_child_state(state, term)
        states[state.id] = state
        if is_operation(term):
            fail_state = None
            if last_wildcards[-1] or fail_states[-1]:
                last_fail_state = (
                    fail_states[-1]
                    if not isinstance(fail_states[-1], list) else fail_states[-1][operand_counts[-1]]
                )
                if term.arity.fixed_size:
                    fail_state = _State()
                    states[fail_state.id] = fail_state
                    new_fail_states = [fail_state]
                    for _ in range(term.arity.min_count):
                        new_fail_state = _State()
                        states[new_fail_state.id] = new_fail_state
                        fail_state[Wildcard] = new_fail_state
                        fail_state = new_fail_state
                        new_fail_states.append(new_fail_state)
                    fail_state[OPERATION_END] = last_wildcards[-1] or last_fail_state
                    fail_state = new_fail_states
                else:
                    fail_state = _State()
                    states[fail_state.id] = fail_state
                    fail_state[OPERATION_END] = last_wildcards[-1] or last_fail_state
                    fail_state[Wildcard] = fail_state
            fail_states.append(fail_state)
            last_wildcards.append(None)
            operand_counts.append(0)
        elif term == OPERATION_END:
            fail_states.pop()
            last_wildcards.pop()
            operand_counts.pop()
        if last_wildcards[-1] != state:
            if last_wildcards[-1]:
                state[EPSILON] = last_wildcards[-1]
            elif fail_states[-1]:
                last_fail_state = (
                    fail_states[-1]
                    if not isinstance(fail_states[-1], list) else fail_states[-1][operand_counts[-1]]
                )
                state[EPSILON] = last_fail_state
state.payload = [final_label]
return cls._convert_nfa_to_dfa(root, states)
def _generate_net(cls, flatterm: FlatTerm, final_label: T) -> _State[T]
Generates a DFA matching the given pattern.
3.021951
2.991003
1.010347
for index in self._match(subject):
    pattern, label = self._patterns[index]
    subst = Substitution()
    if subst.extract_substitution(subject, pattern.expression):
        for constraint in pattern.constraints:
            if not constraint(subst):
                break
        else:
            yield label, subst
def match(self, subject: Union[Expression, FlatTerm]) -> Iterator[Tuple[T, Substitution]]
Match the given subject against all patterns in the net. Args: subject: The subject that is matched. Must be constant. Yields: A tuple :code:`(final label, substitution)`, where the first component is the final label associated with the pattern as given when using :meth:`add()` and the second one is the match substitution.
5.441216
5.550204
0.980363
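A hedged end-to-end sketch for the ``add``/``match`` pair above, not from the source; the import path matchpy.matching.syntactic and the example names (f, a, b, the labels) are assumptions. The patterns must be syntactic, i.e. without sequence variables.

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern
from matchpy.matching.syntactic import DiscriminationNet  # assumed import path

f = Operation.new('f', Arity.binary)
a, b = Symbol('a'), Symbol('b')
x_ = Wildcard.dot('x')

net = DiscriminationNet()
net.add(Pattern(f(x_, b)), 'ends with b')     # syntactic pattern, final label
net.add(Pattern(f(a, x_)), 'starts with a')

# Both patterns should match the subject f(a, b), each with its own substitution.
for label, substitution in net.match(f(a, b)):
    print(label, substitution)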
try:
    next(self.match(subject))
except StopIteration:
    return False
return True
def is_match(self, subject: Union[Expression, FlatTerm]) -> bool
Check if the given subject matches any pattern in the net. Args: subject: The subject that is matched. Must be constant. Returns: True, if any pattern matches the subject.
5.941684
5.193182
1.144132
if Digraph is None:
    raise ImportError('The graphviz package is required to draw the graph.')
dot = Digraph()
nodes = set()
queue = [self._root]
while queue:
    state = queue.pop(0)
    if not state.payload:
        dot.node('n{!s}'.format(state.id), '', {'shape': ('circle' if state else 'doublecircle')})
    else:
        dot.node('n{!s}'.format(state.id), '\n'.join(map(str, state.payload)), {'shape': 'box'})
    for next_state in state.values():
        if next_state.id not in nodes:
            queue.append(next_state)
    nodes.add(state.id)
nodes = set()
queue = [self._root]
while queue:
    state = queue.pop(0)
    if state.id in nodes:
        continue
    nodes.add(state.id)
    for (label, other) in state.items():
        dot.edge('n{!s}'.format(state.id), 'n{!s}'.format(other.id), _term_str(label))
        if other.id not in nodes:
            queue.append(other)
return dot
def as_graph(self) -> Digraph
Renders the discrimination net as a graphviz digraph.
2.632157
2.607529
1.009445
inner = pattern.expression
if self.operation is None:
    if not isinstance(inner, Operation) or isinstance(inner, CommutativeOperation):
        raise TypeError("Pattern must be a non-commutative operation.")
    self.operation = type(inner)
elif not isinstance(inner, self.operation):
    raise TypeError(
        "All patterns must be the same operation, expected {} but got {}".format(self.operation, type(inner))
    )
if op_len(inner) < 3:
    raise ValueError("Pattern has not enough operands.")
operands = list(op_iter(inner))
first_name = self._check_wildcard_and_get_name(operands[0])
last_name = self._check_wildcard_and_get_name(operands[-1])
index = len(self._patterns)
self._patterns.append((pattern, first_name, last_name))
flatterm = FlatTerm.merged(*(FlatTerm(o) for o in operands[1:-1]))
self._net.add(flatterm, index)
return index
def add(self, pattern: Pattern) -> int
Add a pattern that will be recognized by the matcher. Args: pattern: The pattern to add. Returns: An internal index for the pattern. Raises: ValueError: If the pattern does not have the correct form. TypeError: If the pattern is not a non-commutative operation.
4.110425
3.830736
1.073012
if not isinstance(pattern.expression, Operation) or isinstance(pattern.expression, CommutativeOperation):
    return False
if op_len(pattern.expression) < 3:
    return False
first, *_, last = op_iter(pattern.expression)
try:
    cls._check_wildcard_and_get_name(first)
    cls._check_wildcard_and_get_name(last)
except ValueError:
    return False
return True
def can_match(cls, pattern: Pattern) -> bool
Check if a pattern can be matched with a sequence matcher. Args: pattern: The pattern to check. Returns: True, iff the pattern can be matched with a sequence matcher.
4.771484
4.757958
1.002843
if not isinstance(subject, self.operation):
    return
subjects = list(op_iter(subject))
flatterms = [FlatTerm(o) for o in subjects]
for i in range(len(flatterms)):
    flatterm = FlatTerm.merged(*flatterms[i:])
    for index in self._net._match(flatterm, collect=True):
        match_index = self._net._patterns[index][1]
        pattern, first_name, last_name = self._patterns[match_index]
        operand_count = op_len(pattern.expression) - 2
        expr_operands = subjects[i:i + operand_count]
        patt_operands = list(op_iter(pattern.expression))[1:-1]
        substitution = Substitution()
        if not all(itertools.starmap(substitution.extract_substitution, zip(expr_operands, patt_operands))):
            continue
        try:
            if first_name is not None:
                substitution.try_add_variable(first_name, tuple(subjects[:i]))
            if last_name is not None:
                substitution.try_add_variable(last_name, tuple(subjects[i + operand_count:]))
        except ValueError:
            continue
        for constraint in pattern.constraints:
            if not constraint(substitution):
                break
        else:
            yield pattern, substitution
def match(self, subject: Expression) -> Iterator[Tuple[Pattern, Substitution]]
Match the given subject against all patterns in the sequence matcher. Args: subject: The subject that is matched. Must be constant. Yields: A tuple :code:`(pattern, substitution)` for every matching pattern.
4.245328
4.402036
0.964401
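A hedged usage sketch for the sequence matcher rows above (``add``, ``can_match``, ``match``), not from the source; the import path matchpy.matching.syntactic and the example names are assumptions. The pattern must be a non-commutative operation with at least three operands whose first and last operands are sequence (star) wildcards.

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern
from matchpy.matching.syntactic import SequenceMatcher  # assumed import path

f = Operation.new('f', Arity.variadic)
a, b, c = Symbol('a'), Symbol('b'), Symbol('c')
h___, t___ = Wildcard.star('h'), Wildcard.star('t')

pattern = Pattern(f(h___, a, b, t___))
assert SequenceMatcher.can_match(pattern)

matcher = SequenceMatcher(pattern)
for matched_pattern, substitution in matcher.match(f(c, a, b, c)):
    print(substitution)  # 'h' and 't' should capture the operands around a, b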
if not is_constant(subject):
    raise ValueError("The subject for matching must be constant.")
global_constraints = [c for c in pattern.constraints if not c.variables]
local_constraints = set(c for c in pattern.constraints if c.variables)
for subst in _match([subject], pattern.expression, Substitution(), local_constraints):
    for constraint in global_constraints:
        if not constraint(subst):
            break
    else:
        yield subst
def match(subject: Expression, pattern: Pattern) -> Iterator[Substitution]
Tries to match the given *pattern* to the given *subject*. Yields each match in form of a substitution.

Parameters:
    subject: A subject to match.
    pattern: The pattern to match.

Yields:
    All possible match substitutions.

Raises:
    ValueError: If the subject is not constant.
4.507358
4.336548
1.039388
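A minimal one-to-one matching sketch, not from the source; it mirrors matchpy's documented top-level API, with the names f, a, b and the wildcards chosen only for illustration.

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern, match

f = Operation.new('f', Arity.binary)
a, b = Symbol('a'), Symbol('b')
x_, y_ = Wildcard.dot('x'), Wildcard.dot('y')

subject = f(a, b)
pattern = Pattern(f(x_, y_))
for substitution in match(subject, pattern):
    print(substitution)  # expected: {x ↦ a, y ↦ b}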
if not is_constant(subject):
    raise ValueError("The subject for matching must be constant.")
for child, pos in preorder_iter_with_position(subject):
    if match_head(child, pattern):
        for subst in match(child, pattern):
            yield subst, pos
def match_anywhere(subject: Expression, pattern: Pattern) -> Iterator[Tuple[Substitution, Tuple[int, ...]]]
Tries to match the given *pattern* to any subexpression of the given *subject*. Yields each match in form of a substitution and a position tuple.

The position is a tuple of indices, e.g. the empty tuple refers to the *subject* itself, :code:`(0, )` refers to the first child (operand) of the subject, :code:`(0, 0)` to the first child of the first child etc.

Parameters:
    subject: A subject to match.
    pattern: The pattern to match.

Yields:
    All possible substitution and position pairs.

Raises:
    ValueError: If the subject is not constant.
5.505833
5.127566
1.073771
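A hedged sketch of subexpression matching, not from the source; it assumes matchpy's top-level ``match_anywhere`` and uses hypothetical operations f and g for illustration.

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern, match_anywhere

f = Operation.new('f', Arity.binary)
g = Operation.new('g', Arity.unary)
a, b = Symbol('a'), Symbol('b')
x_ = Wildcard.dot('x')

subject = f(a, g(b))
for substitution, position in match_anywhere(subject, Pattern(g(x_))):
    print(substitution, position)  # the inner g(b) should be found at position (1,)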
i = 0
var_index = 0
opt_index = 0
result = []
for operand in op_iter(operation):
    wrap_associative = False
    if isinstance(operand, Wildcard):
        count = operand.min_count if operand.optional is None else 0
        if not operand.fixed_size or isinstance(operation, AssociativeOperation):
            count += sequence_var_partition[var_index]
            var_index += 1
            wrap_associative = operand.fixed_size and operand.min_count
    elif operand.optional is not None:
        count = optional_parts[opt_index]
        opt_index += 1
    else:
        count = 1
    operand_expressions = list(op_iter(subjects))[i:i + count]
    i += count
    if wrap_associative and len(operand_expressions) > wrap_associative:
        fixed = wrap_associative - 1
        operand_expressions = tuple(operand_expressions[:fixed]) + (
            create_operation_expression(operation, operand_expressions[fixed:]),
        )
    result.append(operand_expressions)
return result
def _build_full_partition( optional_parts, sequence_var_partition: Sequence[int], subjects: Sequence[Expression], operation: Operation ) -> List[Sequence[Expression]]
Distribute subject operands among pattern operands, given a partitioning for the variable part of the operands (i.e. a list of how many extra operands each sequence variable gets assigned).
3.525845
3.71761
0.948417
for _ in self._match(self.matcher.root):
    yield list(self._internal_iter())
def grouped(self)
Yield the matches grouped by their final state in the automaton, i.e. structurally identical patterns only differing in constraints will be yielded together. Each group is yielded as a list of tuples consisting of a pattern and a match substitution. Yields: The grouped matches.
23.196154
21.159632
1.096246
if label is None:
    label = pattern
for i, (p, l, _) in enumerate(self.patterns):
    if pattern == p and label == l:
        return i
# TODO: Avoid renaming in the pattern, use variable indices instead
renaming = self._collect_variable_renaming(pattern.expression) if self.rename else {}
self._internal_add(pattern, label, renaming)
def add(self, pattern: Pattern, label=None) -> None
Add a new pattern to the matcher. The optional label defaults to the pattern itself and is yielded during matching. The same pattern can be added with different labels which means that every match for the pattern will result in every associated label being yielded with that match individually. Equivalent patterns with the same label are not added again. However, patterns that are structurally equivalent, but have different constraints or different variable names are distinguished by the matcher. Args: pattern: The pattern to add. label: An optional label for the pattern. Defaults to the pattern itself.
7.323541
7.072753
1.035458
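A hedged many-to-one matching sketch for the ``add`` method above, not from the source; ``ManyToOneMatcher`` is matchpy's documented top-level class, while the example operations and the labels 'rule-1'/'rule-2' are assumptions for illustration.

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern, ManyToOneMatcher

f = Operation.new('f', Arity.binary)
a, b = Symbol('a'), Symbol('b')
x_ = Wildcard.dot('x')

matcher = ManyToOneMatcher()
matcher.add(Pattern(f(x_, b)), 'rule-1')  # label defaults to the pattern if omitted
matcher.add(Pattern(f(a, x_)), 'rule-2')

# All patterns are matched against the subject in a single pass;
# each match is yielded as (label, substitution).
for label, substitution in matcher.match(f(a, b)):
    print(label, substitution)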