code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
if semitone > 0 and semitone < 128:
    self.pianoroll[:, semitone:] = self.pianoroll[:, :(128 - semitone)]
    self.pianoroll[:, :semitone] = 0
elif semitone < 0 and semitone > -128:
    self.pianoroll[:, :(128 + semitone)] = self.pianoroll[:, -semitone:]
    self.pianoroll[:, (128 + semitone):] = 0
def transpose(self, semitone)
Transpose the pianoroll by a number of semitones, where positive values transpose to a higher key and negative values to a lower key.

Parameters
----------
semitone : int
    The number of semitones to transpose the pianoroll.
1.815887
1.756106
1.034042
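To make the shift semantics concrete, here is a minimal standalone sketch of the same slice arithmetic on a toy array; the array and note values are illustrative, not part of the library:

```
import numpy as np

# Toy pianoroll: 4 time steps x 128 pitches, one active note at pitch 60 (C4).
pianoroll = np.zeros((4, 128), dtype=np.uint8)
pianoroll[:, 60] = 100

semitone = 2  # transpose up a whole tone
shifted = np.zeros_like(pianoroll)
shifted[:, semitone:] = pianoroll[:, :128 - semitone]

assert shifted[0, 62] == 100  # the note moved from pitch 60 to pitch 62
```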
length = self.get_active_length()
self.pianoroll = self.pianoroll[:length]
def trim_trailing_silence(self)
Trim the trailing silence of the pianoroll.
6.649867
4.331671
1.535174
W = layer.W.get_value()
shape = W.shape
nrows = np.ceil(np.sqrt(shape[0])).astype(int)
ncols = nrows

for feature_map in range(shape[1]):
    figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)

    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')

    for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
        if i >= shape[0]:
            break
        axes[r, c].imshow(W[i, feature_map], cmap='gray', interpolation='none')
return plt
def plot_conv_weights(layer, figsize=(6, 6))
Plot the weights of a specific layer.

Only really makes sense with convolutional layers.

Parameters
----------
layer : lasagne.layers.Layer
2.272535
2.451734
0.92691
if x.shape[0] != 1:
    raise ValueError("Only one sample can be plotted at a time.")

# compile theano function
xs = T.tensor4('xs').astype(theano.config.floatX)
get_activity = theano.function([xs], get_output(layer, xs))

activity = get_activity(x)
shape = activity.shape
nrows = np.ceil(np.sqrt(shape[1])).astype(int)
ncols = nrows

figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize, squeeze=False)
axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray', interpolation='none')
axes[0, ncols // 2].set_title('original')

for ax in axes.flatten():
    ax.set_xticks([])
    ax.set_yticks([])
    ax.axis('off')

for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
    if i >= shape[1]:
        break
    ndim = activity[0][i].ndim
    if ndim != 2:
        raise ValueError("Wrong number of dimensions, image data should "
                         "have 2, instead got {}".format(ndim))
    axes[r + 1, c].imshow(-activity[0][i], cmap='gray', interpolation='none')
return plt
def plot_conv_activity(layer, x, figsize=(6, 8))
Plot the activities of a specific layer.

Only really makes sense with layers that work on 2D data (2D convolutional layers, 2D pooling layers, ...).

Parameters
----------
layer : lasagne.layers.Layer
x : numpy.ndarray
    Only takes one sample at a time, i.e. x.shape[0] == 1.
2.865567
2.825922
1.014029
if (x.ndim != 4) or x.shape[0] != 1:
    raise ValueError("This function requires the input data to be of "
                     "shape (1, c, x, y), instead got {}".format(x.shape))
if square_length % 2 == 0:
    raise ValueError("Square length has to be an odd number, instead "
                     "got {}.".format(square_length))

num_classes = get_output_shape(net.layers_[-1])[1]
img = x[0].copy()
bs, col, s0, s1 = x.shape

heat_array = np.zeros((s0, s1))
pad = square_length // 2 + 1
x_occluded = np.zeros((s1, col, s0, s1), dtype=img.dtype)
probs = np.zeros((s0, s1, num_classes))

# generate occluded images
for i in range(s0):
    # batch s1 occluded images for faster prediction
    for j in range(s1):
        x_pad = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
        x_pad[:, i:i + square_length, j:j + square_length] = 0.
        x_occluded[j] = x_pad[:, pad:-pad, pad:-pad]
    y_proba = net.predict_proba(x_occluded)
    probs[i] = y_proba.reshape(s1, num_classes)

# from predicted probabilities, pick only those of target class
for i in range(s0):
    for j in range(s1):
        heat_array[i, j] = probs[i, j, target]
return heat_array
def occlusion_heatmap(net, x, target, square_length=7)
An occlusion test that checks an image for its critical parts.

In this function, a square part of the image is occluded (i.e. set to 0) and then the net is tested for its propensity to predict the correct label. One should expect that this propensity shrinks if critical parts of the image are occluded. If not, this indicates overfitting.

Depending on the depth of the net and the size of the image, this function may take a while to finish, since one prediction for each pixel of the image is made.

Currently, all color channels are occluded at the same time. Also, this does not really work if images are randomly distorted by the batch iterator.

See paper: Zeiler, Fergus 2013

Parameters
----------
net : NeuralNet instance
    The neural net to test.
x : np.array
    The input data, should be of shape (1, c, x, y). Only makes sense with image data.
target : int
    The true value of the image. If the net makes several predictions, say 10 classes, this indicates which one to look at.
square_length : int (default=7)
    The length of the side of the square that occludes the image. Must be an odd number.

Returns
-------
heat_array : np.array (with same size as image)
    A 2D np.array that at each point (i, j) contains the predicted probability of the correct class if the image is occluded by a square with center (i, j).
2.884256
2.756663
1.046285
return _plot_heat_map(
    net, X, figsize, lambda net, X, n: occlusion_heatmap(
        net, X, target[n], square_length))
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None))
Plot which parts of an image are particularly important for the net to classify the image correctly.

See paper: Zeiler, Fergus 2013

Parameters
----------
net : NeuralNet instance
    The neural net to test.
X : numpy.array
    The input data, should be of shape (b, c, x, y). Only makes sense with image data.
target : list or numpy.array of ints
    The true values of the image. If the net makes several predictions, say 10 classes, this indicates which one to look at. If more than one sample is passed to X, each of them needs its own target.
square_length : int (default=7)
    The length of the side of the square that occludes the image. Must be an odd number.
figsize : tuple (int, int)
    Size of the figure.

Plots
-----
Figure with 3 subplots: the original image, the occlusion heatmap, and both images super-imposed.
6.338583
8.628243
0.734632
COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
          '#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
          '#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
          '#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
hashed = int(hash(layer_type)) % 5
if "conv" in layer_type.lower():
    return COLORS[:5][hashed]
if layer_type in lasagne.layers.pool.__all__:
    return COLORS[5:10][hashed]
if layer_type in lasagne.layers.recurrent.__all__:
    return COLORS[10:15][hashed]
else:
    return COLORS[15:20][hashed]
def get_hex_color(layer_type)
Determines the hex color for a layer.

:parameters:
    - layer_type : string
        Class name of the layer

:returns:
    - color : string containing a hex color for filling block.
5.432846
5.624865
0.965862
import pydotplus as pydot
pydot_graph = pydot.Dot('Network', graph_type='digraph')
pydot_nodes = {}
pydot_edges = []
for i, layer in enumerate(layers):
    layer_name = getattr(layer, 'name', None)
    if layer_name is None:
        layer_name = layer.__class__.__name__
    layer_type = '{0}'.format(layer_name)
    key = repr(layer)
    label = layer_type
    color = get_hex_color(layer_type)
    if verbose:
        for attr in ['num_filters', 'num_units', 'ds',
                     'filter_shape', 'stride', 'strides', 'p']:
            if hasattr(layer, attr):
                label += '\n{0}: {1}'.format(attr, getattr(layer, attr))
        if hasattr(layer, 'nonlinearity'):
            try:
                nonlinearity = layer.nonlinearity.__name__
            except AttributeError:
                nonlinearity = layer.nonlinearity.__class__.__name__
            label += '\nnonlinearity: {0}'.format(nonlinearity)

    if output_shape:
        label += '\nOutput shape: {0}'.format(layer.output_shape)

    pydot_nodes[key] = pydot.Node(
        key, label=label, shape='record', fillcolor=color, style='filled')

    if hasattr(layer, 'input_layers'):
        for input_layer in layer.input_layers:
            pydot_edges.append([repr(input_layer), key])

    if hasattr(layer, 'input_layer'):
        pydot_edges.append([repr(layer.input_layer), key])

for node in pydot_nodes.values():
    pydot_graph.add_node(node)
for edges in pydot_edges:
    pydot_graph.add_edge(
        pydot.Edge(pydot_nodes[edges[0]], pydot_nodes[edges[1]]))
return pydot_graph
def make_pydot_graph(layers, output_shape=True, verbose=False)
:parameters:
    - layers : list
        List of the layers, as obtained from lasagne.layers.get_all_layers
    - output_shape : (default `True`)
        If `True`, the output shape of each layer will be displayed.
    - verbose : (default `False`)
        If `True`, layer attributes like filter shape, stride, etc. will be displayed.

:returns:
    - pydot_graph : PyDot object containing the graph
1.925423
1.858983
1.03574
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
          else layers)
dot = make_pydot_graph(layers, **kwargs)
ext = filename[filename.rfind('.') + 1:]
with io.open(filename, 'wb') as fid:
    fid.write(dot.create(format=ext))
def draw_to_file(layers, filename, **kwargs)
Draws a network diagram to a file

:parameters:
    - layers : list or NeuralNet instance
        List of layers or the neural net to draw.
    - filename : string
        The filename to save output to
    - **kwargs : see docstring of make_pydot_graph for other options
3.234382
2.991383
1.081233
from IPython.display import Image
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
          else layers)
dot = make_pydot_graph(layers, **kwargs)
return Image(dot.create_png())
def draw_to_notebook(layers, **kwargs)
Draws a network diagram in an IPython notebook

:parameters:
    - layers : list or NeuralNet instance
        List of layers or the neural net to draw.
    - **kwargs : see the docstring of make_pydot_graph for other options
3.869769
3.186751
1.21433
real_filter = np.zeros((len(layers), 2))
conv_mode = True
first_conv_layer = True
expon = np.ones((1, 2))

for i, layer in enumerate(layers[1:]):
    j = i + 1
    if not conv_mode:
        real_filter[j] = img_size
        continue

    if is_conv2d(layer):
        # the first and subsequent conv layers are handled the same way:
        # the filter size is scaled by the pooling accumulated so far
        real_filter[j] = np.array(layer.filter_size) * expon
        first_conv_layer = False
    elif is_maxpool2d(layer):
        real_filter[j] = real_filter[i]
        expon *= np.array(layer.pool_size)
    else:
        conv_mode = False
        real_filter[j] = img_size

real_filter[0] = img_size
return real_filter
def get_real_filter(layers, img_size)
Get the real filter sizes of each layer involved in convolution.

See Xudong Cao: https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code

This does not yet take into consideration feature pooling, padding, striding and similar gimmicks.
2.404897
2.381938
1.009639
receptive_field = np.zeros((len(layers), 2))
conv_mode = True
first_conv_layer = True
expon = np.ones((1, 2))

for i, layer in enumerate(layers[1:]):
    j = i + 1
    if not conv_mode:
        receptive_field[j] = img_size
        continue

    if is_conv2d(layer):
        if not first_conv_layer:
            last_field = receptive_field[i]
            new_field = (last_field + expon *
                         (np.array(layer.filter_size) - 1))
            receptive_field[j] = new_field
        else:
            receptive_field[j] = layer.filter_size
            first_conv_layer = False
    elif is_maxpool2d(layer):
        receptive_field[j] = receptive_field[i]
        expon *= np.array(layer.pool_size)
    else:
        conv_mode = False
        receptive_field[j] = img_size

receptive_field[0] = img_size
return receptive_field
def get_receptive_field(layers, img_size)
Get the receptive field of each layer involved in convolution.

See Xudong Cao: https://www.kaggle.com/c/datasciencebowl/forums/t/13166/happy-lantern-festival-report-and-code

This does not yet take into consideration feature pooling, padding, striding and similar gimmicks.
2.357003
2.372477
0.993478
from decaf.util import transform  # soft dep

_JEFFNET_FLIP = True

# first, extract the 256x256 center.
image = transform.scale_and_extract(transform.as_rgb(image), 256)
# convert to [0,255] float32
image = image.astype(np.float32) * 255.
if _JEFFNET_FLIP:
    # Flip the image if necessary, maintaining the c_contiguous order
    image = image[::-1, :].copy()
# subtract the mean
image -= self.net_._data_mean
return image
def prepare_image(self, image)
Returns image of shape `(256, 256, 3)`, as expected by `transform` when `classify_direct = True`.
8.974966
8.861794
1.012771
# Convert 'actual' to a binary array if it's not already:
if len(actual.shape) == 1:
    actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
    for i, val in enumerate(actual):
        actual2[i, val] = 1
    actual = actual2

clip = np.clip(predicted, eps, 1 - eps)
rows = actual.shape[0]
vsota = np.sum(actual * np.log(clip))
return -1.0 / rows * vsota
def multiclass_logloss(actual, predicted, eps=1e-15)
Multiclass version of the Logarithmic Loss metric.

:param actual: Array containing the actual target classes
:param predicted: Matrix with class predictions, one probability per class
2.695222
2.863923
0.941094
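A quick worked example, assuming `multiclass_logloss` above is in scope; the probabilities are illustrative:

```
import numpy as np

actual = np.array([0, 1])             # two samples, true classes 0 and 1
predicted = np.array([[0.9, 0.1],
                      [0.2, 0.8]])
loss = multiclass_logloss(actual, predicted)
print(round(loss, 3))                 # -(log(0.9) + log(0.8)) / 2 ~= 0.164
```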
if get_output_kw is None:
    get_output_kw = {}
output_layer = layers[-1]
network_output = get_output(
    output_layer, deterministic=deterministic, **get_output_kw)
loss = aggregate(loss_function(network_output, target))

if l1:
    loss += regularization.regularize_layer_params(
        layers.values(), regularization.l1) * l1
if l2:
    loss += regularization.regularize_layer_params(
        layers.values(), regularization.l2) * l2
return loss
def objective(layers, loss_function, target, aggregate=aggregate, deterministic=False, l1=0, l2=0, get_output_kw=None)
Default implementation of the NeuralNet objective.

:param layers: The underlying layers of the NeuralNetwork
:param loss_function: The callable loss function to use
:param target: the expected output
:param aggregate: the aggregation function to use
:param deterministic: Whether or not to get a deterministic output
:param l1: Optional l1 regularization parameter
:param l2: Optional l2 regularization parameter
:param get_output_kw: optional kwargs to pass to :meth:`NeuralNetwork.get_output`
:return: The total calculated loss
2.111102
2.269402
0.930246
if getattr(self, '_initialized', False):
    return

out = getattr(self, '_output_layers', None)
if out is None:
    self.initialize_layers()
self._check_for_unused_kwargs()

iter_funcs = self._create_iter_funcs(
    self.layers_, self.objective, self.update,
    self.y_tensor_type,
)
self.train_iter_, self.eval_iter_, self.predict_iter_ = iter_funcs
self._initialized = True
def initialize(self)
Initializes the network. Checks that no extra kwargs were passed to the constructor, and compiles the train, predict, and evaluation functions. Subsequent calls to this function will return without any action.
5.457115
4.611377
1.183402
if layers is not None:
    self.layers = layers
self.layers_ = Layers()

# If a Layer, or a list of Layers was passed in
if isinstance(self.layers[0], Layer):
    for out_layer in self.layers:
        for i, layer in enumerate(get_all_layers(out_layer)):
            if layer not in self.layers_.values():
                name = layer.name or self._layer_name(layer.__class__, i)
                self.layers_[name] = layer
                if self._get_params_for(name) != {}:
                    raise ValueError(
                        "You can't use keyword params when passing a Lasagne "
                        "instance object as the 'layers' parameter of "
                        "'NeuralNet'."
                        )
    self._output_layers = self.layers
    return self.layers

# 'self.layers' are a list of '(Layer class, kwargs)', so
# we'll have to actually instantiate the layers given the
# arguments:
layer = None
for i, layer_def in enumerate(self.layers):
    if isinstance(layer_def[1], dict):
        # Newer format: (Layer, {'layer': 'kwargs'})
        layer_factory, layer_kw = layer_def
        layer_kw = layer_kw.copy()
    else:
        # The legacy format: ('name', Layer)
        layer_name, layer_factory = layer_def
        layer_kw = {'name': layer_name}

    if isinstance(layer_factory, str):
        layer_factory = locate(layer_factory)
        assert layer_factory is not None

    if 'name' not in layer_kw:
        layer_kw['name'] = self._layer_name(layer_factory, i)

    more_params = self._get_params_for(layer_kw['name'])
    layer_kw.update(more_params)

    if layer_kw['name'] in self.layers_:
        raise ValueError(
            "Two layers with name {}.".format(layer_kw['name']))

    # Any layers that aren't subclasses of InputLayer are
    # assumed to require an 'incoming' parameter. By default,
    # we'll use the previous layer as input:
    try:
        is_input_layer = issubclass(layer_factory, InputLayer)
    except TypeError:
        is_input_layer = False
    if not is_input_layer:
        if 'incoming' in layer_kw:
            layer_kw['incoming'] = self.layers_[
                layer_kw['incoming']]
        elif 'incomings' in layer_kw:
            layer_kw['incomings'] = [
                self.layers_[name] for name in layer_kw['incomings']]
        else:
            layer_kw['incoming'] = layer

    # Deal with additional string parameters that may
    # reference other layers; currently only 'mask_input'.
    for param in self.layer_reference_params:
        if param in layer_kw:
            val = layer_kw[param]
            if isinstance(val, basestring):
                layer_kw[param] = self.layers_[val]

    for attr in ('W', 'b'):
        if isinstance(layer_kw.get(attr), str):
            name = layer_kw[attr]
            layer_kw[attr] = getattr(self.layers_[name], attr, None)

    try:
        layer_wrapper = layer_kw.pop('layer_wrapper', None)
        layer = layer_factory(**layer_kw)
    except TypeError as e:
        msg = ("Failed to instantiate {} with args {}.\n"
               "Maybe parameter names have changed?".format(
                   layer_factory, layer_kw))
        chain_exception(TypeError(msg), e)
    self.layers_[layer_kw['name']] = layer
    if layer_wrapper is not None:
        layer = layer_wrapper(layer)
        self.layers_["LW_%s" % layer_kw['name']] = layer

self._output_layers = [layer]
return [layer]
def initialize_layers(self, layers=None)
Sets up the Lasagne layers

:param layers: The dictionary of layers, or a :class:`lasagne.Layers` instance, describing the underlying network
:return: the output layer of the underlying lasagne network.
:seealso: :ref:`layer-def`
3.187181
3.177203
1.00314
if self.check_input:
    X, y = self._check_good_input(X, y)

if self.use_label_encoder:
    self.enc_ = LabelEncoder()
    y = self.enc_.fit_transform(y).astype(np.int32)
    self.classes_ = self.enc_.classes_
self.initialize()

try:
    self.train_loop(X, y, epochs=epochs)
except KeyboardInterrupt:
    pass
return self
def fit(self, X, y, epochs=None)
Runs the training loop for a given number of epochs

:param X: The input data
:param y: The ground truth
:param epochs: The number of epochs to run, if `None` runs for the network's :attr:`max_epochs`
:return: This instance
3.036812
3.190042
0.951966
return self.fit(X, y, epochs=1)
def partial_fit(self, X, y, classes=None)
Runs a single epoch using the provided data :return: This instance
8.373415
15.985553
0.523811
f = lambda: self.update_or_create(defaults=defaults, **kwargs)[0]
ret = SimpleLazyObject(f)
self._lazy_entries.append(ret)
return ret
def _register(self, defaults=None, **kwargs)
Fetch (update or create) an instance, lazily.

We're doing this lazily, so that it becomes possible to define custom enums in your code, even before the Django ORM is fully initialized.

Domain.objects.SHOPPING = Domain.objects.register(
    ref='shopping',
    name='Webshop')

Domain.objects.USERS = Domain.objects.register(
    ref='users',
    name='User Accounts')
6.178538
5.671319
1.089436
from_date = kwargs.pop('from_date', None)
to_date = kwargs.pop('to_date', None)
date = kwargs.pop('date', None)
qs = self
if from_date:
    qs = qs.filter(date__gte=from_date)
if to_date:
    qs = qs.filter(date__lte=to_date)
if date:
    qs = qs.filter(date=date)
return super(ByDateQuerySetMixin, qs).narrow(**kwargs)
def narrow(self, **kwargs)
Narrow the queryset by `from_date`, `to_date` (up to and including), or an exact `date`.
2.098675
2.075589
1.011122
if json_file_path:
    with open(json_file_path) as json_file:
        env_vars = json.loads(json_file.read())
    export_variables(env_vars)
def set_environment_variables(json_file_path)
Read and set environment variables from a flat json file.

Bear in mind that env vars set this way and later on read using `os.getenv` function will be strings since after all env vars are just that - plain strings.

Json file example:
```
{
    "FOO": "bar",
    "BAZ": true
}
```

:param json_file_path: path to flat json file
:type json_file_path: str
2.543538
3.115645
0.816376
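A minimal usage sketch; `export_variables` is the module's own helper (not shown here), so this assumes it copies each key/value pair onto `os.environ` as strings:

```
import json
import os
import tempfile

# Write a flat JSON file and load its entries as environment variables.
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({"FOO": "bar", "BAZ": True}, f)

set_environment_variables(f.name)
print(os.getenv("FOO"))  # 'bar'
print(os.getenv("BAZ"))  # a string as well -- env vars are always plain strings
```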
diff = end - start
millis = diff.days * 24 * 60 * 60 * 1000
millis += diff.seconds * 1000
millis += diff.microseconds / 1000
return millis
def millis_interval(start, end)
Return the interval between datetime instances ``start`` and ``end`` in milliseconds.
1.597928
1.613813
0.990157
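A quick worked example, assuming `millis_interval` above is in scope:

```
from datetime import datetime

start = datetime(2020, 1, 1, 12, 0, 0)
end = datetime(2020, 1, 1, 12, 0, 1, 500000)  # 1.5 seconds later

print(millis_interval(start, end))  # 1500.0 = seconds * 1000 + microseconds / 1000
```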
lua, lua_globals = Script._import_lua(self.load_dependencies)
lua_globals.KEYS = self._python_to_lua(keys)
lua_globals.ARGV = self._python_to_lua(args)

def _call(*call_args):
    # redis-py and native redis commands are mostly compatible argument
    # wise, but some exceptions need to be handled here:
    if str(call_args[0]).lower() == 'lrem':
        response = client.call(
            call_args[0], call_args[1],
            call_args[3],  # "count", default is 0
            call_args[2])
    else:
        response = client.call(*call_args)
    return self._python_to_lua(response)

lua_globals.redis = {"call": _call}
return self._lua_to_python(lua.execute(self.script), return_status=True)
def _execute_lua(self, keys, args, client)
Sets KEYS and ARGV along with the redis.call() function in the Lua globals, then executes the Lua redis script.
5.42215
5.129649
1.057022
try:
    import lua
except ImportError:
    raise RuntimeError("Lua not installed")

lua_globals = lua.globals()
if load_dependencies:
    Script._import_lua_dependencies(lua, lua_globals)
return lua, lua_globals
def _import_lua(load_dependencies=True)
Import lua and dependencies.

:param load_dependencies: should Lua library dependencies be loaded?
:raises: RuntimeError if Lua is not available
4.223494
4.508201
0.936847
if sys.platform not in ('darwin', 'windows'):
    import ctypes
    ctypes.CDLL('liblua5.2.so', mode=ctypes.RTLD_GLOBAL)

try:
    lua_globals.cjson = lua.eval('require "cjson"')
except RuntimeError:
    raise RuntimeError("cjson not installed")
def _import_lua_dependencies(lua, lua_globals)
Imports lua dependencies that are supported by redis lua scripts.

The current implementation is fragile to the target platform and lua version and may be disabled if these imports are not needed.

Included:
    - cjson lib.

Pending:
    - base lib.
    - table lib.
    - string lib.
    - math lib.
    - debug lib.
    - cmsgpack lib.
4.748038
4.961277
0.957019
import lua
lua_globals = lua.globals()
if lval is None:
    # Lua None --> Python None
    return None
if lua_globals.type(lval) == "table":
    # Lua table --> Python list
    pval = []
    for i in lval:
        if return_status:
            if i == 'ok':
                return lval[i]
            if i == 'err':
                raise ResponseError(lval[i])
        pval.append(Script._lua_to_python(lval[i]))
    return pval
elif isinstance(lval, long):
    # Lua number --> Python long
    return long(lval)
elif isinstance(lval, float):
    # Lua number --> Python float
    return float(lval)
elif lua_globals.type(lval) == "userdata":
    # Lua userdata --> Python string
    return str(lval)
elif lua_globals.type(lval) == "string":
    # Lua string --> Python string
    return lval
elif lua_globals.type(lval) == "boolean":
    # Lua boolean --> Python bool
    return bool(lval)
raise RuntimeError("Invalid Lua type: " + str(lua_globals.type(lval)))
def _lua_to_python(lval, return_status=False)
Convert Lua object(s) into Python object(s), as at times Lua object(s) are not compatible with Python functions
2.141753
2.173976
0.985178
import lua
if pval is None:
    # Python None --> Lua None
    return lua.eval("")
if isinstance(pval, (list, tuple, set)):
    # Python list --> Lua table
    # e.g.: in lrange
    #   in Python returns: [v1, v2, v3]
    #   in Lua returns: {v1, v2, v3}
    lua_list = lua.eval("{}")
    lua_table = lua.eval("table")
    for item in pval:
        lua_table.insert(lua_list, Script._python_to_lua(item))
    return lua_list
elif isinstance(pval, dict):
    # Python dict --> Lua dict
    # e.g.: in hgetall
    #   in Python returns: {k1:v1, k2:v2, k3:v3}
    #   in Lua returns: {k1, v1, k2, v2, k3, v3}
    lua_dict = lua.eval("{}")
    lua_table = lua.eval("table")
    for k, v in pval.iteritems():
        lua_table.insert(lua_dict, Script._python_to_lua(k))
        lua_table.insert(lua_dict, Script._python_to_lua(v))
    return lua_dict
elif isinstance(pval, str):
    # Python string --> Lua userdata
    return pval
elif isinstance(pval, bool):
    # Python bool --> Lua boolean
    return lua.eval(str(pval).lower())
elif isinstance(pval, (int, long, float)):
    # Python int --> Lua number
    lua_globals = lua.globals()
    return lua_globals.tonumber(str(pval))

raise RuntimeError("Invalid Python type: " + str(type(pval)))
def _python_to_lua(pval)
Convert Python object(s) into Lua object(s), as at times Python object(s) are not compatible with Lua functions
2.293434
2.287284
1.002689
return MockRedisLock(self, key, timeout, sleep)
def lock(self, key, timeout=0, sleep=0)
Emulate lock.
11.895388
10.62402
1.119669
# making sure the pattern is unicode/str.
try:
    pattern = pattern.decode('utf-8')
    # This throws an AttributeError in python 3, or an
    # UnicodeEncodeError in python 2
except (AttributeError, UnicodeEncodeError):
    pass

# Make regex out of glob styled pattern.
regex = fnmatch.translate(pattern)
regex = re.compile(re.sub(r'(^|[^\\])\.', r'\1[^/]', regex))

# Find every key that matches the pattern
return [key for key in self.redis.keys() if regex.match(key.decode('utf-8'))]
def keys(self, pattern='*')
Emulate keys.
4.95464
4.969916
0.996926
key_counter = 0
for key in map(self._encode, keys):
    if key in self.redis:
        del self.redis[key]
        key_counter += 1
    if key in self.timeouts:
        del self.timeouts[key]
return key_counter
def delete(self, *keys)
Emulate delete.
3.115037
2.992405
1.040981
delta = delta if isinstance(delta, timedelta) else timedelta(seconds=delta)
return self._expire(self._encode(key), delta)
def expire(self, key, delta)
Emulate expire
4.022678
4.305753
0.934257
return self._expire(self._encode(key), timedelta(milliseconds=milliseconds))
def pexpire(self, key, milliseconds)
Emulate pexpire
8.790903
8.940207
0.9833
expire_time = datetime.fromtimestamp(when)
key = self._encode(key)
if key in self.redis:
    self.timeouts[key] = expire_time
    return True
return False
def expireat(self, key, when)
Emulate expireat
4.048809
4.139026
0.978203
value = self.pttl(key)
if value is None or value < 0:
    return value
return value // 1000
def ttl(self, key)
Emulate ttl.

Even though the official redis commands documentation at http://redis.io/commands/ttl states "Return value: Integer reply: TTL in seconds, -2 when key does not exist or -1 when key does not have a timeout.", the redis-py lib returns None for both these cases. The lib behavior has been emulated here.

:param key: key for which ttl is requested.
:returns: the number of seconds till timeout, None if the key does not exist or if the key has no timeout (as per the redis-py lib behavior).
4.544522
5.001854
0.908567
key = self._encode(key)
if key not in self.redis:
    # as of redis 2.8, -2 is returned if the key does not exist
    return long(-2) if self.strict else None
if key not in self.timeouts:
    # as of redis 2.8, -1 is returned if the key is persistent
    # redis-py returns None; command docs say -1
    return long(-1) if self.strict else None

time_to_live = get_total_milliseconds(self.timeouts[key] - self.clock.now())
return long(max(-1, time_to_live))
def pttl(self, key)
Emulate pttl.

:param key: key for which pttl is requested.
:returns: the number of milliseconds till timeout, None if the key does not exist or if the key has no timeout (as per the redis-py lib behavior).
5.221857
4.555845
1.146188
# Deep copy to avoid RuntimeError: dictionary changed size during iteration
_timeouts = deepcopy(self.timeouts)
for key, value in _timeouts.items():
    if value - self.clock.now() < timedelta(0):
        del self.timeouts[key]
        # removing the expired key
        if key in self.redis:
            self.redis.pop(key, None)
def do_expire(self)
Expire any keys whose timeout has passed, as of the clock's current time.
5.216288
5.128189
1.017179
key = self._encode(key)
value = self._encode(value)

if nx and xx:
    return None
mode = "nx" if nx else "xx" if xx else None
if self._should_set(key, mode):
    expire = None
    if ex is not None:
        expire = ex if isinstance(ex, timedelta) else timedelta(seconds=ex)
    if px is not None:
        expire = px if isinstance(px, timedelta) else timedelta(milliseconds=px)

    if expire is not None and expire.total_seconds() <= 0:
        raise ResponseError("invalid expire time in SETEX")

    result = self._set(key, value)
    if expire:
        self._expire(key, expire)
    return result
def set(self, key, value, ex=None, px=None, nx=False, xx=False)
Set the ``value`` for the ``key`` in the context of the provided kwargs.

As per the behavior of the redis-py lib:
If nx and xx are both set, the function does nothing and None is returned.
If px and ex are both set, the preference is given to px.
If the key is not set for some reason, the lib function returns None.
2.665718
2.827184
0.942888
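A short sketch of the nx/xx/expiry semantics, assuming `r` is an instance of this mock client (the variable name is illustrative):

```
r.set('greeting', 'hello')          # plain set
r.set('greeting', 'hi', nx=True)    # returns None: key already exists
r.set('missing', 'x', xx=True)      # returns None: key does not exist
r.set('session', 'abc', ex=30)      # set with a 30 second expiry
r.set('flag', '1', ex=10, px=1500)  # px (milliseconds) takes precedence over ex
```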
if mode is None or mode not in ["nx", "xx"]:
    return True

if mode == "nx":
    if key in self.redis:
        # nx means set only if key is absent
        # false if the key already exists
        return False
elif key not in self.redis:
    # at this point mode can only be xx
    # xx means set only if the key already exists
    # false if is absent
    return False
# for all other cases, return true
return True
def _should_set(self, key, mode)
Determine if it is okay to set a key.

If the mode is None, returns True. Otherwise, returns True or False based on the value of ``key`` and the ``mode`` (nx | xx).
5.901371
4.791352
1.231671
if not self.strict:
    # when not in strict mode, swap the order of the value and time arguments
    time, value = value, time
return self.set(name, value, ex=time)
def setex(self, name, time, value)
Set the value of ``name`` to ``value`` that expires in ``time`` seconds. ``time`` can be represented by an integer or a Python timedelta object.
9.733255
9.721261
1.001234
return self.set(key, value, px=time)
def psetex(self, key, time, value)
Set the value of ``key`` to ``value`` that expires in ``time`` milliseconds. ``time`` can be represented by an integer or a Python timedelta object.
8.876455
10.73619
0.826779
return self.set(key, value, nx=True)
def setnx(self, key, value)
Set the value of ``key`` to ``value`` if key doesn't exist
4.900398
6.110189
0.802004
mapping = kwargs
if args:
    if len(args) != 1 or not isinstance(args[0], dict):
        raise RedisError('MSET requires **kwargs or a single dict arg')
    mapping.update(args[0])
if len(mapping) == 0:
    raise ResponseError("wrong number of arguments for 'mset' command")
for key, value in mapping.items():
    self.set(key, value)
return True
def mset(self, *args, **kwargs)
Sets key/values based on a mapping. Mapping can be supplied as a single dictionary argument or as kwargs.
3.485207
3.135663
1.111474
if args:
    if len(args) != 1 or not isinstance(args[0], dict):
        raise RedisError('MSETNX requires **kwargs or a single dict arg')
    mapping = args[0]
else:
    mapping = kwargs
if len(mapping) == 0:
    raise ResponseError("wrong number of arguments for 'msetnx' command")

for key in mapping.keys():
    if self._encode(key) in self.redis:
        return False

for key, value in mapping.items():
    self.set(key, value)
return True
def msetnx(self, *args, **kwargs)
Sets key/values based on a mapping if none of the keys are already set. Mapping can be supplied as a single dictionary argument or as kwargs. Returns a boolean indicating if the operation was successful.
3.052188
2.85584
1.068753
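A small sketch of the all-or-nothing behavior, again assuming a mock client instance `r`:

```
r.msetnx({'a': 1, 'b': 2})  # True: neither key existed, both are set
r.msetnx({'b': 3, 'c': 4})  # False: 'b' already exists, so nothing is written
print(r.get('c'))           # None
```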
key = self._encode(key)
index, bits, mask = self._get_bits_and_offset(key, offset)

if index >= len(bits):
    bits.extend(b"\x00" * (index + 1 - len(bits)))

prev_val = 1 if (bits[index] & mask) else 0

if value:
    bits[index] |= mask
else:
    bits[index] &= ~mask

self.redis[key] = bytes(bits)

return prev_val
def setbit(self, key, offset, value)
Set the bit at ``offset`` in ``key`` to ``value``.
2.882514
2.779822
1.036942
key = self._encode(key)
index, bits, mask = self._get_bits_and_offset(key, offset)

if index >= len(bits):
    return 0

return 1 if (bits[index] & mask) else 0
def getbit(self, key, offset)
Returns the bit value at ``offset`` in ``key``.
4.131922
3.82451
1.08038
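A brief sketch of the bit-level behavior, assuming a mock client instance `r`; as in redis, offsets past the stored bytes read as 0:

```
r.setbit('bits', 7, 1)        # returns the previous bit value: 0
print(r.getbit('bits', 7))    # 1
print(r.getbit('bits', 100))  # 0 -- beyond the stored bytes, so it reads as 0
```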
redis_hash = self._get_hash(hashkey, 'HEXISTS')
return self._encode(attribute) in redis_hash
def hexists(self, hashkey, attribute)
Emulate hexists.
8.409638
7.622225
1.103305
redis_hash = self._get_hash(hashkey, 'HGET')
return redis_hash.get(self._encode(attribute))
def hget(self, hashkey, attribute)
Emulate hget.
5.449056
5.206554
1.046576
redis_hash = self._get_hash(hashkey, 'HDEL')
count = 0
for key in keys:
    attribute = self._encode(key)
    if attribute in redis_hash:
        count += 1
        del redis_hash[attribute]
        if not redis_hash:
            self.delete(hashkey)
return count
def hdel(self, hashkey, *keys)
Emulate hdel
3.318301
3.187784
1.040943
redis_hash = self._get_hash(hashkey, 'HMSET', create=True)
for key, val in value.items():  # 'value' is the mapping; avoid shadowing it
    attribute = self._encode(key)
    redis_hash[attribute] = self._encode(val)
return True
def hmset(self, hashkey, value)
Emulate hmset.
4.189156
3.975833
1.053655
redis_hash = self._get_hash(hashkey, 'HMGET')
attributes = self._list_or_args(keys, args)
return [redis_hash.get(self._encode(attribute)) for attribute in attributes]
def hmget(self, hashkey, keys, *args)
Emulate hmget.
4.869767
4.63193
1.051347
redis_hash = self._get_hash(hashkey, 'HSET', create=True)
attribute = self._encode(attribute)
attribute_present = attribute in redis_hash
redis_hash[attribute] = self._encode(value)
return long(0) if attribute_present else long(1)
def hset(self, hashkey, attribute, value)
Emulate hset.
4.136513
3.921566
1.054811
redis_hash = self._get_hash(hashkey, 'HSETNX', create=True)
attribute = self._encode(attribute)
if attribute in redis_hash:
    return long(0)
else:
    redis_hash[attribute] = self._encode(value)
    return long(1)
def hsetnx(self, hashkey, attribute, value)
Emulate hsetnx.
3.307741
3.145225
1.051671
return self._hincrby(hashkey, attribute, 'HINCRBY', long, increment)
def hincrby(self, hashkey, attribute, increment=1)
Emulate hincrby.
8.208976
7.064982
1.161925
return self._hincrby(hashkey, attribute, 'HINCRBYFLOAT', float, increment)
def hincrbyfloat(self, hashkey, attribute, increment=1.0)
Emulate hincrbyfloat.
5.769592
5.114338
1.128121
redis_hash = self._get_hash(hashkey, command, create=True)
attribute = self._encode(attribute)
previous_value = type_(redis_hash.get(attribute, '0'))
redis_hash[attribute] = self._encode(previous_value + increment)
return type_(redis_hash[attribute])
def _hincrby(self, hashkey, attribute, command, type_, increment)
Shared hincrby and hincrbyfloat routine
3.180192
3.148496
1.010067
redis_list = self._get_list(key, 'LRANGE')
start, stop = self._translate_range(len(redis_list), start, stop)
return redis_list[start:stop + 1]
def lrange(self, key, start, stop)
Emulate lrange.
3.672624
3.500614
1.049137
redis_list = self._get_list(key, 'LINDEX')
if self._encode(key) not in self.redis:
    return None
try:
    return redis_list[index]
except IndexError:
    # Redis returns nil if the index doesn't exist
    return None
def lindex(self, key, index)
Emulate lindex.
5.711464
5.458762
1.046293
if not isinstance(timeout, (int, long)):
    raise RuntimeError('timeout is not an integer or out of range')

if timeout is None or timeout == 0:
    timeout = self.blocking_timeout

if isinstance(keys, basestring):
    keys = [keys]
else:
    keys = list(keys)

elapsed_time = 0
start = time.time()
while elapsed_time < timeout:
    key, val = self._pop_first_available(pop_func, keys)
    if val:
        return key, val
    # small delay to avoid high cpu utilization
    time.sleep(self.blocking_sleep_interval)
    elapsed_time = time.time() - start
return None
def _blocking_pop(self, pop_func, keys, timeout)
Emulate blocking pop functionality
2.853331
2.739295
1.04163
return self._blocking_pop(self.lpop, keys, timeout)
def blpop(self, keys, timeout=0)
Emulate blpop
6.968836
6.48158
1.075176
return self._blocking_pop(self.rpop, keys, timeout)
def brpop(self, keys, timeout=0)
Emulate brpop
7.048976
6.816422
1.034117
redis_list = self._get_list(key, 'LPUSH', create=True)

# Creates the list at this key if it doesn't exist, and appends args to its beginning
args_reversed = [self._encode(arg) for arg in args]
args_reversed.reverse()
updated_list = args_reversed + redis_list
self.redis[self._encode(key)] = updated_list

# Return the length of the list after the push operation
return len(updated_list)
def lpush(self, key, *args)
Emulate lpush.
4.502924
4.479955
1.005127
redis_list = self._get_list(key, 'RPOP')
if self._encode(key) not in self.redis:
    return None
try:
    value = redis_list.pop()
    if len(redis_list) == 0:
        self.delete(key)
    return value
except IndexError:
    # Redis returns nil if popping from an empty list
    return None
def rpop(self, key)
Emulate rpop.
3.937569
3.890657
1.012058
redis_list = self._get_list(key, 'RPUSH', create=True)

# Creates the list at this key if it doesn't exist, and appends args to it
redis_list.extend(map(self._encode, args))

# Return the length of the list after the push operation
return len(redis_list)
def rpush(self, key, *args)
Emulate rpush.
4.684162
4.629417
1.011825
value = self._encode(value)
redis_list = self._get_list(key, 'LREM')
removed_count = 0
if self._encode(key) in self.redis:
    if count == 0:
        # Remove all occurrences
        while redis_list.count(value):
            redis_list.remove(value)
            removed_count += 1
    elif count > 0:
        counter = 0
        # remove first 'count' occurrences
        while redis_list.count(value):
            redis_list.remove(value)
            counter += 1
            removed_count += 1
            if counter >= count:
                break
    elif count < 0:
        # remove last 'count' occurrences
        counter = -count
        new_list = []
        for v in reversed(redis_list):
            if v == value and counter > 0:
                counter -= 1
                removed_count += 1
            else:
                new_list.append(v)
        redis_list[:] = list(reversed(new_list))
if removed_count > 0 and len(redis_list) == 0:
    self.delete(key)
return removed_count
def lrem(self, key, value, count=0)
Emulate lrem.
2.264975
2.24281
1.009883
redis_list = self._get_list(key, 'LTRIM')
if redis_list:
    start, stop = self._translate_range(len(redis_list), start, stop)
    self.redis[self._encode(key)] = redis_list[start:stop + 1]
return True
def ltrim(self, key, start, stop)
Emulate ltrim.
4.16575
4.063553
1.02515
transfer_item = self.rpop(source)
if transfer_item is not None:
    self.lpush(destination, transfer_item)
return transfer_item
def rpoplpush(self, source, destination)
Emulate rpoplpush
3.11776
2.93223
1.063272
transfer_item = self.brpop(source, timeout)
if transfer_item is None:
    return None

key, val = transfer_item
self.lpush(destination, val)
return val
def brpoplpush(self, source, destination, timeout=0)
Emulate brpoplpush
3.648807
3.779923
0.965313
redis_list = self._get_list(key, 'LSET')
if redis_list is None:
    raise ResponseError("no such key")
try:
    redis_list[index] = self._encode(value)
except IndexError:
    raise ResponseError("index out of range")
def lset(self, key, index, value)
Emulate lset.
3.561448
3.615141
0.985148
if count is None:
    count = 10
cursor = int(cursor)
count = int(count)
if not count:
    raise ValueError('if specified, count must be > 0: %s' % count)

values = values_function()
if cursor + count >= len(values):
    # we reached the end, back to zero
    result_cursor = 0
else:
    result_cursor = cursor + count

values = values[cursor:cursor + count]

if match is not None:
    regex = re.compile(b'^' + re.escape(self._encode(match)).replace(b'\\*', b'.*') + b'$')
    if not key:
        key = lambda v: v
    values = [v for v in values if regex.match(key(v))]

return [result_cursor, values]
def _common_scan(self, values_function, cursor='0', match=None, count=10, key=None)
Common scanning skeleton.

:param key: optional function used to identify what 'match' is applied to
2.997206
3.242826
0.924258
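A typical caller loops until the returned cursor wraps back to zero; a minimal sketch, assuming a mock client instance `r`:

```
cursor = '0'
while True:
    cursor, keys = r.scan(cursor=cursor, match='user:*', count=10)
    for k in keys:
        print(k)
    if cursor == 0:  # the scan wrapped around: iteration is complete
        break
```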
def value_function():
    return sorted(self.redis.keys())  # sorted list for consistent order

return self._common_scan(value_function, cursor=cursor, match=match, count=count)
def scan(self, cursor='0', match=None, count=10)
Emulate scan.
8.749838
8.221672
1.064241
def value_function():
    members = list(self.smembers(name))
    members.sort()  # sort for consistent order
    return members

return self._common_scan(value_function, cursor=cursor, match=match, count=count)
def sscan(self, name, cursor='0', match=None, count=10)
Emulate sscan.
5.712489
5.73743
0.995653
def value_function():
    values = self.zrange(name, 0, -1, withscores=True)
    values.sort(key=lambda x: x[1])  # sort for consistent order
    return values

return self._common_scan(value_function, cursor=cursor, match=match, count=count, key=lambda v: v[0])
def zscan(self, name, cursor='0', match=None, count=10)
Emulate zscan.
4.093981
3.988491
1.026449
def value_function():
    values = self.hgetall(name)
    values = list(values.items())  # list of tuples for sorting and matching
    values.sort(key=lambda x: x[0])  # sort for consistent order
    return values

scanned = self._common_scan(value_function, cursor=cursor, match=match, count=count, key=lambda v: v[0])  # noqa
scanned[1] = dict(scanned[1])  # from list of tuples back to dict
return scanned
def hscan(self, name, cursor='0', match=None, count=10)
Emulate hscan.
4.901102
4.820451
1.016731
cursor = '0'
while cursor != 0:  # hscan returns the integer cursor 0 once the scan wraps around
    cursor, data = self.hscan(name, cursor=cursor, match=match, count=count)
    for item in data.items():
        yield item
def hscan_iter(self, name, match=None, count=10)
Emulate hscan_iter.
3.028274
3.082029
0.982559
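Because it is a generator, the iterator variant hides the cursor bookkeeping entirely; a usage sketch, assuming a mock client instance `r`:

```
r.hmset('user:1', {'name': 'ada', 'lang': 'python'})
for field, value in r.hscan_iter('user:1'):
    print(field, value)
```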
if len(values) == 0:
    raise ResponseError("wrong number of arguments for 'sadd' command")
redis_set = self._get_set(key, 'SADD', create=True)
before_count = len(redis_set)
redis_set.update(map(self._encode, values))
after_count = len(redis_set)
return after_count - before_count
def sadd(self, key, *values)
Emulate sadd.
3.108409
3.00435
1.034636
func = lambda left, right: left.difference(right)
return self._apply_to_sets(func, "SDIFF", keys, *args)
def sdiff(self, keys, *args)
Emulate sdiff.
6.329595
6.209756
1.019298
result = self.sdiff(keys, *args)
self.redis[self._encode(dest)] = result
return len(result)
def sdiffstore(self, dest, keys, *args)
Emulate sdiffstore.
5.21082
5.179725
1.006003
func = lambda left, right: left.intersection(right)
return self._apply_to_sets(func, "SINTER", keys, *args)
def sinter(self, keys, *args)
Emulate sinter.
5.793647
5.585682
1.037232
result = self.sinter(keys, *args)
self.redis[self._encode(dest)] = result
return len(result)
def sinterstore(self, dest, keys, *args)
Emulate sinterstore.
5.022069
5.106255
0.983513
redis_set = self._get_set(name, 'SISMEMBER')
if not redis_set:
    return 0

result = self._encode(value) in redis_set
return 1 if result else 0
def sismember(self, name, value)
Emulate sismember.
4.105098
4.122867
0.99569
src_set = self._get_set(src, 'SMOVE')
dst_set = self._get_set(dst, 'SMOVE')
value = self._encode(value)

if value not in src_set:
    return False

src_set.discard(value)
dst_set.add(value)
self.redis[self._encode(src)], self.redis[self._encode(dst)] = src_set, dst_set
return True
def smove(self, src, dst, value)
Emulate smove.
2.540918
2.59336
0.979778
redis_set = self._get_set(name, 'SPOP')
if not redis_set:
    return None
member = choice(list(redis_set))
redis_set.remove(member)
if len(redis_set) == 0:
    self.delete(name)
return member
def spop(self, name)
Emulate spop.
3.358571
3.387132
0.991568
redis_set = self._get_set(name, 'SRANDMEMBER')
if not redis_set:
    return None if number is None else []
if number is None:
    return choice(list(redis_set))
elif number > 0:
    return sample(list(redis_set), min(number, len(redis_set)))
else:
    return [choice(list(redis_set)) for _ in xrange(abs(number))]
def srandmember(self, name, number=None)
Emulate srandmember.
2.389296
2.424817
0.985351
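The sign of `number` mirrors redis: positive asks for distinct members, negative allows repeats. A sketch, assuming a mock client instance `r`:

```
r.sadd('colors', 'red', 'green', 'blue')
r.srandmember('colors')      # one random member
r.srandmember('colors', 2)   # two distinct members
r.srandmember('colors', -5)  # five members, duplicates allowed
```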
redis_set = self._get_set(key, 'SREM')
if not redis_set:
    return 0
before_count = len(redis_set)
for value in values:
    redis_set.discard(self._encode(value))
after_count = len(redis_set)
if before_count > 0 and len(redis_set) == 0:
    self.delete(key)
return before_count - after_count
def srem(self, key, *values)
Emulate srem.
2.577483
2.530292
1.01865
func = lambda left, right: left.union(right)
return self._apply_to_sets(func, "SUNION", keys, *args)
def sunion(self, keys, *args)
Emulate sunion.
5.924787
5.896973
1.004717
result = self.sunion(keys, *args)
self.redis[self._encode(dest)] = result
return len(result)
def sunionstore(self, dest, keys, *args)
Emulate sunionstore.
6.560392
6.468973
1.014132
sha = self.script_load(script)
return self.evalsha(sha, numkeys, *keys_and_args)
def eval(self, script, numkeys, *keys_and_args)
Emulate eval
4.966303
5.412577
0.917549
if not self.script_exists(sha)[0]:
    raise RedisError("Sha not registered")

script_callable = Script(self, self.shas[sha], self.load_lua_dependencies)
numkeys = max(numkeys, 0)
keys = keys_and_args[:numkeys]
args = keys_and_args[numkeys:]
return script_callable(keys, args)
def evalsha(self, sha, numkeys, *keys_and_args)
Emulate evalsha.
4.612409
4.234754
1.08918
sha_digest = sha1(script.encode("utf-8")).hexdigest()
self.shas[sha_digest] = script
return sha_digest
def script_load(self, script)
Emulate script_load
4.346419
4.509873
0.963756
command = self._normalize_command_name(command)
args = self._normalize_command_args(command, *args)

redis_function = getattr(self, command)
value = redis_function(*args)
return self._normalize_command_response(command, value)
def call(self, command, *args)
Sends the call to the function whose name is specified by ``command``.

Used by Script invocations; normalizes calls made with standard Redis arguments into the arguments expected by redis-py.
3.463098
2.858663
1.21144
if command == 'zadd' and not self.strict and len(args) >= 3:
    # Reorder score and name
    zadd_args = [x for tup in zip(args[2::2], args[1::2]) for x in tup]
    return [args[0]] + zadd_args

if command in ('zrangebyscore', 'zrevrangebyscore'):
    # expected format is:
    # <command> name min max start num with_scores score_cast_func
    if len(args) <= 3:
        # just plain min/max
        return args

    start, num = None, None
    withscores = False

    for i, arg in enumerate(args[3:], 3):
        # keywords are case-insensitive
        lower_arg = self._encode(arg).lower()

        # handle "limit"
        if lower_arg == b"limit" and i + 2 < len(args):
            start, num = args[i + 1], args[i + 2]

        # handle "withscores"
        if lower_arg == b"withscores":
            withscores = True

    # do not expect to set score_cast_func
    return args[:3] + (start, num, withscores)

return args
def _normalize_command_args(self, command, *args)
Modifies the command arguments to match the strictness of the redis client.
4.231186
4.057319
1.042853
result = {}
for name, value in self.redis_config.items():
    if fnmatch.fnmatch(name, pattern):
        try:
            result[name] = int(value)
        except ValueError:
            result[name] = value
return result
def config_get(self, pattern='*')
Get one or more configuration parameters.
2.548526
2.459568
1.036168
return self._get_by_type(key, operation, create, b'list', [])
def _get_list(self, key, operation, create=False)
Get (and maybe create) a list by name.
11.059985
9.866437
1.120971
return self._get_by_type(key, operation, create, b'set', set())
def _get_set(self, key, operation, create=False)
Get (and maybe create) a set by name.
12.735645
11.257001
1.131353
return self._get_by_type(name, operation, create, b'hash', {})
def _get_hash(self, name, operation, create=False)
Get (and maybe create) a hash by name.
12.868874
11.624179
1.107078
return self._get_by_type(name, operation, create, b'zset', SortedSet(), return_default=False)
def _get_zset(self, name, operation, create=False)
Get (and maybe create) a sorted set by name.
11.975964
10.343861
1.157785
key = self._encode(key)
if self.type(key) in [type_, b'none']:
    if create:
        return self.redis.setdefault(key, default)
    else:
        return self.redis.get(key, default if return_default else None)

raise TypeError("{} requires a {}".format(operation, type_))
def _get_by_type(self, key, operation, create, type_, default, return_default=True)
Get (and maybe create) a redis data structure by name and type.
4.409762
4.066925
1.084299
if start < 0:
    start += len_
start = max(0, min(start, len_))
if end < 0:
    end += len_
end = max(-1, min(end, len_ - 1))
return start, end
def _translate_range(self, len_, start, end)
Translate range to valid bounds.
2.382325
2.065995
1.153113
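A worked example of the clamping, written as a standalone copy of the method above for illustration:

```
def translate_range(len_, start, end):
    # standalone version of _translate_range, for illustration
    if start < 0:
        start += len_
    start = max(0, min(start, len_))
    if end < 0:
        end += len_
    end = max(-1, min(end, len_ - 1))
    return start, end

assert translate_range(5, 0, -1) == (0, 4)   # whole list
assert translate_range(5, -2, 10) == (3, 4)  # clamped to the last two elements
assert translate_range(5, 7, 9) == (5, 4)    # start past the end: empty slice
```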
if start > len_ or num <= 0:
    return 0, 0
return min(start, len_), num
def _translate_limit(self, len_, start, num)
Translate limit to valid bounds.
5.199772
4.012846
1.295782
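And the analogous check for LIMIT-style offsets, again as a standalone copy for illustration:

```
def translate_limit(len_, start, num):
    # standalone version of _translate_limit, for illustration
    if start > len_ or num <= 0:
        return 0, 0
    return min(start, len_), num

assert translate_limit(5, 2, 3) == (2, 3)  # in-bounds limit is unchanged
assert translate_limit(5, 6, 3) == (0, 0)  # offset beyond the list yields nothing
```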