repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
sequencelengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
sequencelengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
hovren/crisp
crisp/camera.py
AtanCameraModel.invert
def invert(self, points):
    """Invert the distortion

    Parameters
    ------------------
    points : ndarray
        Input image points

    Returns
    -----------------
    ndarray
        Undistorted points
    """
    if points.ndim == 1:
        X = points.reshape((points.size, 1))
    else:
        X = points
    wx, wy = self.wc

    # Polar coordinates relative to the distortion center
    dx = X[0, :] - wx
    dy = X[1, :] - wy
    rn = np.sqrt(dx ** 2 + dy ** 2)
    phi = np.arctan2(dy, dx)

    # Inverse of the 'atan' distortion model
    r = np.tan(rn * self.lgamma) / self.lgamma

    # Back to rectangular coordinates; any extra (homogeneous) rows stay at 1
    Y = np.ones(X.shape)
    Y[0, :] = wx + r * np.cos(phi)
    Y[1, :] = wy + r * np.sin(phi)
    return Y
python
def invert(self, points): """Invert the distortion Parameters ------------------ points : ndarray Input image points Returns ----------------- ndarray Undistorted points """ X = points if not points.ndim == 1 else points.reshape((points.size, 1)) wx, wy = self.wc # Switch to polar coordinates rn = np.sqrt((X[0,:] - wx)**2 + (X[1,:] - wy)**2) phi = np.arctan2(X[1,:] - wy, X[0,:]-wx) # 'atan' method r = np.tan(rn * self.lgamma) / self.lgamma; # Switch back to rectangular coordinates Y = np.ones(X.shape) Y[0,:] = wx + r * np.cos(phi) Y[1,:]= wy + r * np.sin(phi) return Y
[ "def", "invert", "(", "self", ",", "points", ")", ":", "X", "=", "points", "if", "not", "points", ".", "ndim", "==", "1", "else", "points", ".", "reshape", "(", "(", "points", ".", "size", ",", "1", ")", ")", "wx", ",", "wy", "=", "self", ".", "wc", "# Switch to polar coordinates", "rn", "=", "np", ".", "sqrt", "(", "(", "X", "[", "0", ",", ":", "]", "-", "wx", ")", "**", "2", "+", "(", "X", "[", "1", ",", ":", "]", "-", "wy", ")", "**", "2", ")", "phi", "=", "np", ".", "arctan2", "(", "X", "[", "1", ",", ":", "]", "-", "wy", ",", "X", "[", "0", ",", ":", "]", "-", "wx", ")", "# 'atan' method", "r", "=", "np", ".", "tan", "(", "rn", "*", "self", ".", "lgamma", ")", "/", "self", ".", "lgamma", "# Switch back to rectangular coordinates", "Y", "=", "np", ".", "ones", "(", "X", ".", "shape", ")", "Y", "[", "0", ",", ":", "]", "=", "wx", "+", "r", "*", "np", ".", "cos", "(", "phi", ")", "Y", "[", "1", ",", ":", "]", "=", "wy", "+", "r", "*", "np", ".", "sin", "(", "phi", ")", "return", "Y" ]
Invert the distortion Parameters ------------------ points : ndarray Input image points Returns ----------------- ndarray Undistorted points
[ "Invert", "the", "distortion" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L160-L187
hovren/crisp
crisp/camera.py
AtanCameraModel.project
def project(self, points):
    """Project 3D points to image coordinates.

    This projects 3D points expressed in the camera coordinate system to
    image points.

    Parameters
    --------------------
    points : (3, N) ndarray
        3D points

    Returns
    --------------------
    image_points : (2, N) ndarray
        The world points projected to the image plane
    """
    # Normalize onto the z = 1 plane before applying the lens distortion
    normalized = points / np.tile(points[2], (3, 1))
    distorted = self.apply(normalized)
    # Apply the intrinsic matrix and drop the homogeneous coordinate
    homogeneous = np.dot(self.camera_matrix, distorted)
    return from_homogeneous(homogeneous)
python
def project(self, points): """Project 3D points to image coordinates. This projects 3D points expressed in the camera coordinate system to image points. Parameters -------------------- points : (3, N) ndarray 3D points Returns -------------------- image_points : (2, N) ndarray The world points projected to the image plane """ K = self.camera_matrix XU = points XU = XU / np.tile(XU[2], (3,1)) X = self.apply(XU) x2d = np.dot(K, X) return from_homogeneous(x2d)
[ "def", "project", "(", "self", ",", "points", ")", ":", "K", "=", "self", ".", "camera_matrix", "XU", "=", "points", "XU", "=", "XU", "/", "np", ".", "tile", "(", "XU", "[", "2", "]", ",", "(", "3", ",", "1", ")", ")", "X", "=", "self", ".", "apply", "(", "XU", ")", "x2d", "=", "np", ".", "dot", "(", "K", ",", "X", ")", "return", "from_homogeneous", "(", "x2d", ")" ]
Project 3D points to image coordinates. This projects 3D points expressed in the camera coordinate system to image points. Parameters -------------------- points : (3, N) ndarray 3D points Returns -------------------- image_points : (2, N) ndarray The world points projected to the image plane
[ "Project", "3D", "points", "to", "image", "coordinates", "." ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L219-L239
hovren/crisp
crisp/camera.py
AtanCameraModel.unproject
def unproject(self, image_points):
    """Find (up to scale) 3D coordinate of an image point

    This is the inverse of the `project` function.
    The resulting 3D points are only valid up to an unknown scale.

    Parameters
    ----------------------
    image_points : (2, N) ndarray
        Image points

    Returns
    ----------------------
    points : (3, N) ndarray
        3D coordinates (valid up to scale)
    """
    # Map pixels through the inverse intrinsics to normalized coordinates
    rays = np.dot(self.inv_camera_matrix, to_homogeneous(image_points))
    rays = rays / rays[2]
    # Undo the lens distortion to obtain the undistorted directions
    return self.invert(rays)
python
def unproject(self, image_points): """Find (up to scale) 3D coordinate of an image point This is the inverse of the `project` function. The resulting 3D points are only valid up to an unknown scale. Parameters ---------------------- image_points : (2, N) ndarray Image points Returns ---------------------- points : (3, N) ndarray 3D coordinates (valid up to scale) """ Ki = self.inv_camera_matrix X = np.dot(Ki, to_homogeneous(image_points)) X = X / X[2] XU = self.invert(X) return XU
[ "def", "unproject", "(", "self", ",", "image_points", ")", ":", "Ki", "=", "self", ".", "inv_camera_matrix", "X", "=", "np", ".", "dot", "(", "Ki", ",", "to_homogeneous", "(", "image_points", ")", ")", "X", "=", "X", "/", "X", "[", "2", "]", "XU", "=", "self", ".", "invert", "(", "X", ")", "return", "XU" ]
Find (up to scale) 3D coordinate of an image point This is the inverse of the `project` function. The resulting 3D points are only valid up to an unknown scale. Parameters ---------------------- image_points : (2, N) ndarray Image points Returns ---------------------- points : (3, N) ndarray 3D coordinates (valid up to scale)
[ "Find", "(", "up", "to", "scale", ")", "3D", "coordinate", "of", "an", "image", "point" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L241-L261
hovren/crisp
crisp/camera.py
OpenCVCameraModel.project
def project(self, points):
    """Project 3D points to image coordinates.

    This projects 3D points expressed in the camera coordinate system to
    image points.

    Parameters
    --------------------
    points : (3, N) ndarray
        3D points

    Returns
    --------------------
    image_points : (2, N) ndarray
        The world points projected to the image plane
    """
    # Identity pose: the points are already expressed in the camera frame
    zero_pose = np.zeros(3)
    projected, _ = cv2.projectPoints(points.T.reshape(-1, 1, 3),
                                     zero_pose, zero_pose,
                                     self.camera_matrix, self.dist_coefs)
    return projected.reshape(-1, 2).T
python
def project(self, points): """Project 3D points to image coordinates. This projects 3D points expressed in the camera coordinate system to image points. Parameters -------------------- points : (3, N) ndarray 3D points Returns -------------------- image_points : (2, N) ndarray The world points projected to the image plane """ rvec = tvec = np.zeros(3) image_points, jac = cv2.projectPoints(points.T.reshape(-1,1,3), rvec, tvec, self.camera_matrix, self.dist_coefs) return image_points.reshape(-1,2).T
[ "def", "project", "(", "self", ",", "points", ")", ":", "rvec", "=", "tvec", "=", "np", ".", "zeros", "(", "3", ")", "image_points", ",", "jac", "=", "cv2", ".", "projectPoints", "(", "points", ".", "T", ".", "reshape", "(", "-", "1", ",", "1", ",", "3", ")", ",", "rvec", ",", "tvec", ",", "self", ".", "camera_matrix", ",", "self", ".", "dist_coefs", ")", "return", "image_points", ".", "reshape", "(", "-", "1", ",", "2", ")", ".", "T" ]
Project 3D points to image coordinates. This projects 3D points expressed in the camera coordinate system to image points. Parameters -------------------- points : (3, N) ndarray 3D points Returns -------------------- image_points : (2, N) ndarray The world points projected to the image plane
[ "Project", "3D", "points", "to", "image", "coordinates", "." ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L292-L309
hovren/crisp
crisp/camera.py
OpenCVCameraModel.unproject
def unproject(self, image_points):
    """Find (up to scale) 3D coordinate of an image point

    This is the inverse of the `project` function.
    The resulting 3D points are only valid up to an unknown scale.

    Parameters
    ----------------------
    image_points : (2, N) ndarray
        Image points

    Returns
    ----------------------
    points : (3, N) ndarray
        3D coordinates (valid up to scale)
    """
    # Undistort in pixel space; P=camera_matrix keeps the result in pixel units
    undistorted = cv2.undistortPoints(image_points.T.reshape(1, -1, 2),
                                      self.camera_matrix, self.dist_coefs,
                                      P=self.camera_matrix)
    pixels = undistorted.reshape(-1, 2).T
    # Back-project through the inverse intrinsics (valid up to scale)
    return np.dot(self.inv_camera_matrix, to_homogeneous(pixels))
python
def unproject(self, image_points): """Find (up to scale) 3D coordinate of an image point This is the inverse of the `project` function. The resulting 3D points are only valid up to an unknown scale. Parameters ---------------------- image_points : (2, N) ndarray Image points Returns ---------------------- points : (3, N) ndarray 3D coordinates (valid up to scale) """ undist_image_points = cv2.undistortPoints(image_points.T.reshape(1,-1,2), self.camera_matrix, self.dist_coefs, P=self.camera_matrix) world_points = np.dot(self.inv_camera_matrix, to_homogeneous(undist_image_points.reshape(-1,2).T)) return world_points
[ "def", "unproject", "(", "self", ",", "image_points", ")", ":", "undist_image_points", "=", "cv2", ".", "undistortPoints", "(", "image_points", ".", "T", ".", "reshape", "(", "1", ",", "-", "1", ",", "2", ")", ",", "self", ".", "camera_matrix", ",", "self", ".", "dist_coefs", ",", "P", "=", "self", ".", "camera_matrix", ")", "world_points", "=", "np", ".", "dot", "(", "self", ".", "inv_camera_matrix", ",", "to_homogeneous", "(", "undist_image_points", ".", "reshape", "(", "-", "1", ",", "2", ")", ".", "T", ")", ")", "return", "world_points" ]
Find (up to scale) 3D coordinate of an image point This is the inverse of the `project` function. The resulting 3D points are only valid up to an unknown scale. Parameters ---------------------- image_points : (2, N) ndarray Image points Returns ---------------------- points : (3, N) ndarray 3D coordinates (valid up to scale)
[ "Find", "(", "up", "to", "scale", ")", "3D", "coordinate", "of", "an", "image", "point" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L311-L329
hovren/crisp
crisp/camera.py
Kinect.timestamps_from_file_list
def timestamps_from_file_list(file_list):
    """Take list of Kinect filenames (without path) and extract timestamps,
    accounting for timestamp overflow (returns linear timestamps)."""
    WRAP = 2 ** 32  # the raw timestamp counter wraps at 32 bits
    raw = np.array([Kinect.timestamp_from_filename(fname) for fname in file_list])
    # Every negative jump marks one wrap-around; unwrap everything after it
    for wrap_idx in np.flatnonzero(np.diff(raw) < 0):
        raw[wrap_idx + 1:] += WRAP
    return raw.flatten()
python
def timestamps_from_file_list(file_list): "Take list of Kinect filenames (without path) and extracts timestamps while accounting for timestamp overflow (returns linear timestamps)." timestamps = np.array([Kinect.timestamp_from_filename(fname) for fname in file_list]) # Handle overflow diff = np.diff(timestamps) idxs = np.flatnonzero(diff < 0) ITEM_SIZE = 2**32 for i in idxs: timestamps[i+1:] += ITEM_SIZE return timestamps.flatten()
[ "def", "timestamps_from_file_list", "(", "file_list", ")", ":", "timestamps", "=", "np", ".", "array", "(", "[", "Kinect", ".", "timestamp_from_filename", "(", "fname", ")", "for", "fname", "in", "file_list", "]", ")", "# Handle overflow", "diff", "=", "np", ".", "diff", "(", "timestamps", ")", "idxs", "=", "np", ".", "flatnonzero", "(", "diff", "<", "0", ")", "ITEM_SIZE", "=", "2", "**", "32", "for", "i", "in", "idxs", ":", "timestamps", "[", "i", "+", "1", ":", "]", "+=", "ITEM_SIZE", "return", "timestamps", ".", "flatten", "(", ")" ]
Take list of Kinect filenames (without path) and extracts timestamps while accounting for timestamp overflow (returns linear timestamps).
[ "Take", "list", "of", "Kinect", "filenames", "(", "without", "path", ")", "and", "extracts", "timestamps", "while", "accounting", "for", "timestamp", "overflow", "(", "returns", "linear", "timestamps", ")", "." ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L472-L483
hovren/crisp
crisp/camera.py
Kinect.purge_bad_timestamp_files
def purge_bad_timestamp_files(file_list):
    """Given a list of image files, find bad frames, remove them from disk
    and return the purged file list.

    Only an initial run of bad frames is tolerated; everything up to and
    including the last bad frame is deleted from disk.

    Parameters
    ----------------
    file_list : list of str
        Image filenames, in timestamp order

    Returns
    ----------------
    list of str
        The input list with the removed files purged

    Raises
    ----------------
    ValueError
        If a bad frame occurs after the initial allowed window
    """
    MAX_INITIAL_BAD_FRAMES = 15
    bad_ts = Kinect.detect_bad_timestamps(Kinect.timestamps_from_file_list(file_list))

    # Trivial case: nothing to purge
    if not bad_ts:
        return file_list

    # Bad frames are only allowed at the very beginning of the sequence.
    last_bad = max(bad_ts)
    if last_bad >= MAX_INITIAL_BAD_FRAMES:
        # BUG FIX: the message previously hardcoded 15 instead of using the constant
        raise ValueError('Only %d initial bad frames are allowed, but last bad frame is %d'
                         % (MAX_INITIAL_BAD_FRAMES, last_bad))

    # Remove all frames up to and including the last bad frame
    for i in range(last_bad + 1):
        os.remove(file_list[i])

    # Purge the removed entries from the list
    return file_list[last_bad + 1:]
python
def purge_bad_timestamp_files(file_list): "Given a list of image files, find bad frames, remove them and modify file_list" MAX_INITIAL_BAD_FRAMES = 15 bad_ts = Kinect.detect_bad_timestamps(Kinect.timestamps_from_file_list(file_list)) # Trivial case if not bad_ts: return file_list # No bad frames after the initial allowed last_bad = max(bad_ts) if last_bad >= MAX_INITIAL_BAD_FRAMES: raise Exception('Only 15 initial bad frames are allowed, but last bad frame is %d' % last_bad) # Remove all frames up to the last bad frame for i in range(last_bad + 1): os.remove(file_list[i]) # Purge from the list file_list = file_list[last_bad+1:] return file_list
[ "def", "purge_bad_timestamp_files", "(", "file_list", ")", ":", "MAX_INITIAL_BAD_FRAMES", "=", "15", "bad_ts", "=", "Kinect", ".", "detect_bad_timestamps", "(", "Kinect", ".", "timestamps_from_file_list", "(", "file_list", ")", ")", "# Trivial case", "if", "not", "bad_ts", ":", "return", "file_list", "# No bad frames after the initial allowed", "last_bad", "=", "max", "(", "bad_ts", ")", "if", "last_bad", ">=", "MAX_INITIAL_BAD_FRAMES", ":", "raise", "Exception", "(", "'Only 15 initial bad frames are allowed, but last bad frame is %d'", "%", "last_bad", ")", "# Remove all frames up to the last bad frame", "for", "i", "in", "range", "(", "last_bad", "+", "1", ")", ":", "os", ".", "remove", "(", "file_list", "[", "i", "]", ")", "# Purge from the list", "file_list", "=", "file_list", "[", "last_bad", "+", "1", ":", "]", "return", "file_list" ]
Given a list of image files, find bad frames, remove them and modify file_list
[ "Given", "a", "list", "of", "image", "files", "find", "bad", "frames", "remove", "them", "and", "modify", "file_list" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L498-L519
hovren/crisp
crisp/camera.py
Kinect.depth_file_for_nir_file
def depth_file_for_nir_file(video_filename, depth_file_list):
    """Returns the corresponding depth filename given a NIR filename"""
    filename = os.path.split(video_filename)[1]
    # Timestamp is the third dash-separated field, before the file extension
    wanted_ts = int(filename.split('-')[2].split('.')[0])
    depth_ts = np.array(Kinect.timestamps_from_file_list(depth_file_list))
    # An IndexError here means no depth frame carries this exact timestamp
    match_idx = np.flatnonzero(depth_ts == wanted_ts)[0]
    return depth_file_list[match_idx]
python
def depth_file_for_nir_file(video_filename, depth_file_list): """Returns the corresponding depth filename given a NIR filename""" (root, filename) = os.path.split(video_filename) needle_ts = int(filename.split('-')[2].split('.')[0]) haystack_ts_list = np.array(Kinect.timestamps_from_file_list(depth_file_list)) haystack_idx = np.flatnonzero(haystack_ts_list == needle_ts)[0] depth_filename = depth_file_list[haystack_idx] return depth_filename
[ "def", "depth_file_for_nir_file", "(", "video_filename", ",", "depth_file_list", ")", ":", "(", "root", ",", "filename", ")", "=", "os", ".", "path", ".", "split", "(", "video_filename", ")", "needle_ts", "=", "int", "(", "filename", ".", "split", "(", "'-'", ")", "[", "2", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", "haystack_ts_list", "=", "np", ".", "array", "(", "Kinect", ".", "timestamps_from_file_list", "(", "depth_file_list", ")", ")", "haystack_idx", "=", "np", ".", "flatnonzero", "(", "haystack_ts_list", "==", "needle_ts", ")", "[", "0", "]", "depth_filename", "=", "depth_file_list", "[", "haystack_idx", "]", "return", "depth_filename" ]
Returns the corresponding depth filename given a NIR filename
[ "Returns", "the", "corresponding", "depth", "filename", "given", "a", "NIR", "filename" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L522-L529
hovren/crisp
crisp/camera.py
Kinect.depth_file_for_rgb_file
def depth_file_for_rgb_file(rgb_filename, rgb_file_list, depth_file_list):
    """Returns the *closest* depth file from an RGB filename

    Parameters
    ----------------
    rgb_filename : str
        Query RGB filename (must be a member of rgb_file_list)
    rgb_file_list : list of str
        All RGB filenames
    depth_file_list : list of str
        All depth filenames

    Returns
    ----------------
    str
        The depth filename whose timestamp is nearest the query's
    """
    # NOTE: the original also called os.path.split(rgb_filename) but never
    # used the result; that dead code is removed here.
    rgb_timestamps = np.array(Kinect.timestamps_from_file_list(rgb_file_list))
    depth_timestamps = np.array(Kinect.timestamps_from_file_list(depth_file_list))
    # Timestamp of the query file, then the nearest depth frame by absolute difference
    needle_ts = rgb_timestamps[rgb_file_list.index(rgb_filename)]
    haystack_idx = np.argmin(np.abs(depth_timestamps - needle_ts))
    return depth_file_list[haystack_idx]
python
def depth_file_for_rgb_file(rgb_filename, rgb_file_list, depth_file_list): """Returns the *closest* depth file from an RGB filename""" (root, filename) = os.path.split(rgb_filename) rgb_timestamps = np.array(Kinect.timestamps_from_file_list(rgb_file_list)) depth_timestamps = np.array(Kinect.timestamps_from_file_list(depth_file_list)) needle_ts = rgb_timestamps[rgb_file_list.index(rgb_filename)] haystack_idx = np.argmin(np.abs(depth_timestamps - needle_ts)) depth_filename = depth_file_list[haystack_idx] return depth_filename
[ "def", "depth_file_for_rgb_file", "(", "rgb_filename", ",", "rgb_file_list", ",", "depth_file_list", ")", ":", "(", "root", ",", "filename", ")", "=", "os", ".", "path", ".", "split", "(", "rgb_filename", ")", "rgb_timestamps", "=", "np", ".", "array", "(", "Kinect", ".", "timestamps_from_file_list", "(", "rgb_file_list", ")", ")", "depth_timestamps", "=", "np", ".", "array", "(", "Kinect", ".", "timestamps_from_file_list", "(", "depth_file_list", ")", ")", "needle_ts", "=", "rgb_timestamps", "[", "rgb_file_list", ".", "index", "(", "rgb_filename", ")", "]", "haystack_idx", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "depth_timestamps", "-", "needle_ts", ")", ")", "depth_filename", "=", "depth_file_list", "[", "haystack_idx", "]", "return", "depth_filename" ]
Returns the *closest* depth file from an RGB filename
[ "Returns", "the", "*", "closest", "*", "depth", "file", "from", "an", "RGB", "filename" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L532-L540
hovren/crisp
crisp/camera.py
Kinect.find_nir_file_with_missing_depth
def find_nir_file_with_missing_depth(video_file_list, depth_file_list):
    """Remove all files without its own counterpart. Returns new lists of files

    Parameters
    ----------------
    video_file_list : list of str
        NIR image filenames
    depth_file_list : list of str
        Depth image filenames

    Returns
    ----------------
    tuple of list
        (matched NIR files, matched depth files,
         unmatched NIR files, unmatched depth files)
    """
    new_video_list = []
    new_depth_list = []
    for fname in video_file_list:
        try:
            depth_file = Kinect.depth_file_for_nir_file(fname, depth_file_list)
        except IndexError:
            continue  # no depth frame with a matching timestamp
        new_video_list.append(fname)
        new_depth_list.append(depth_file)

    # PERF FIX: set lookups replace the original quadratic "f not in list" scans
    matched_video = set(new_video_list)
    matched_depth = set(new_depth_list)
    bad_nir = [f for f in video_file_list if f not in matched_video]
    bad_depth = [f for f in depth_file_list if f not in matched_depth]
    return (new_video_list, new_depth_list, bad_nir, bad_depth)
python
def find_nir_file_with_missing_depth(video_file_list, depth_file_list): "Remove all files without its own counterpart. Returns new lists of files" new_video_list = [] new_depth_list = [] for fname in video_file_list: try: depth_file = Kinect.depth_file_for_nir_file(fname, depth_file_list) new_video_list.append(fname) new_depth_list.append(depth_file) except IndexError: # Missing file pass # Purge bad files bad_nir = [f for f in video_file_list if f not in new_video_list] bad_depth = [f for f in depth_file_list if f not in new_depth_list] return (new_video_list, new_depth_list, bad_nir, bad_depth)
[ "def", "find_nir_file_with_missing_depth", "(", "video_file_list", ",", "depth_file_list", ")", ":", "new_video_list", "=", "[", "]", "new_depth_list", "=", "[", "]", "for", "fname", "in", "video_file_list", ":", "try", ":", "depth_file", "=", "Kinect", ".", "depth_file_for_nir_file", "(", "fname", ",", "depth_file_list", ")", "new_video_list", ".", "append", "(", "fname", ")", "new_depth_list", ".", "append", "(", "depth_file", ")", "except", "IndexError", ":", "# Missing file", "pass", "# Purge bad files", "bad_nir", "=", "[", "f", "for", "f", "in", "video_file_list", "if", "f", "not", "in", "new_video_list", "]", "bad_depth", "=", "[", "f", "for", "f", "in", "depth_file_list", "if", "f", "not", "in", "new_depth_list", "]", "return", "(", "new_video_list", ",", "new_depth_list", ",", "bad_nir", ",", "bad_depth", ")" ]
Remove all files without its own counterpart. Returns new lists of files
[ "Remove", "all", "files", "without", "its", "own", "counterpart", ".", "Returns", "new", "lists", "of", "files" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L543-L559
hovren/crisp
crisp/camera.py
Kinect.disparity_image_to_distance
def disparity_image_to_distance(self, dval_img):
    """Convert image of Kinect disparity values to distance (linear method)"""
    alpha, beta = self.opars[0], self.opars[1]
    # Normalize raw 11-bit disparity values to [0, 1)
    normalized = dval_img / 2048.0
    # Linear model: distance = 1 / (alpha * disparity + beta)
    return 1 / (alpha * normalized + beta)
python
def disparity_image_to_distance(self, dval_img): "Convert image of Kinect disparity values to distance (linear method)" dist_img = dval_img / 2048.0 dist_img = 1 / (self.opars[0]*dist_img + self.opars[1]) return dist_img
[ "def", "disparity_image_to_distance", "(", "self", ",", "dval_img", ")", ":", "dist_img", "=", "dval_img", "/", "2048.0", "dist_img", "=", "1", "/", "(", "self", ".", "opars", "[", "0", "]", "*", "dist_img", "+", "self", ".", "opars", "[", "1", "]", ")", "return", "dist_img" ]
Convert image of Kinect disparity values to distance (linear method)
[ "Convert", "image", "of", "Kinect", "disparity", "values", "to", "distance", "(", "linear", "method", ")" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L561-L565
hovren/crisp
crisp/videoslice.py
fill_sampling
def fill_sampling(slice_list, N):
    """Given a list of slices, draw N samples such that each slice contributes as much as possible

    Parameters
    --------------------------
    slice_list : list of Slice
        List of slices
    N : int
        Number of samples to draw

    Returns
    --------------------------
    list of ndarray
        Per-slice arrays of chosen inlier indices; their sizes sum to N

    Raises
    --------------------------
    ValueError
        If N exceeds the total number of available inliers
    """
    A = [len(s.inliers) for s in slice_list]
    N_max = np.sum(A)
    if N > N_max:
        raise ValueError("Tried to draw {:d} samples from a pool of only {:d} items".format(N, N_max))

    samples_from = np.zeros((len(A),), dtype='int')  # Number of samples to draw from each group

    remaining = N
    while remaining > 0:
        remaining_groups = np.flatnonzero(samples_from - np.array(A))
        if remaining < len(remaining_groups):
            # Fewer samples than non-full groups: give one each to a random subset
            np.random.shuffle(remaining_groups)
            for g in remaining_groups[:remaining]:
                samples_from[g] += 1
        else:
            # Give each group the allowed number of samples. Constrain to their max size.
            to_each = max(1, int(remaining / len(remaining_groups)))
            samples_from = np.min(np.vstack((samples_from + to_each, A)), axis=0)

        # Update remaining count
        remaining = int(N - np.sum(samples_from))

    if not remaining == 0:
        # BUG FIX: the original never called .format(), so the message kept the
        # literal "{:d}" placeholder instead of the actual count
        raise ValueError("Still {:d} samples left! This is an error in the selection.".format(remaining))

    # Construct index list of selected samples
    samples = []
    for s, a, n in zip(slice_list, A, samples_from):
        if a == n:
            samples.append(np.array(s.inliers))  # all
        elif a == 0:
            # BUG FIX: np.arange([]) raises TypeError; an empty index array was intended
            samples.append(np.array([], dtype='int'))
        else:
            chosen = np.random.choice(s.inliers, n, replace=False)
            samples.append(np.array(chosen))

    return samples
python
def fill_sampling(slice_list, N): """Given a list of slices, draw N samples such that each slice contributes as much as possible Parameters -------------------------- slice_list : list of Slice List of slices N : int Number of samples to draw """ A = [len(s.inliers) for s in slice_list] N_max = np.sum(A) if N > N_max: raise ValueError("Tried to draw {:d} samples from a pool of only {:d} items".format(N, N_max)) samples_from = np.zeros((len(A),), dtype='int') # Number of samples to draw from each group remaining = N while remaining > 0: remaining_groups = np.flatnonzero(samples_from - np.array(A)) if remaining < len(remaining_groups): np.random.shuffle(remaining_groups) for g in remaining_groups[:remaining]: samples_from[g] += 1 else: # Give each group the allowed number of samples. Constrain to their max size. to_each = max(1, int(remaining / len(remaining_groups))) samples_from = np.min(np.vstack((samples_from + to_each, A)), axis=0) # Update remaining count remaining = int(N - np.sum(samples_from)) if not remaining == 0: raise ValueError("Still {:d} samples left! This is an error in the selection.") # Construct index list of selected samples samples = [] for s, a, n in zip(slice_list, A, samples_from): if a == n: samples.append(np.array(s.inliers)) # all elif a == 0: samples.append(np.arange([])) else: chosen = np.random.choice(s.inliers, n, replace=False) samples.append(np.array(chosen)) return samples
[ "def", "fill_sampling", "(", "slice_list", ",", "N", ")", ":", "A", "=", "[", "len", "(", "s", ".", "inliers", ")", "for", "s", "in", "slice_list", "]", "N_max", "=", "np", ".", "sum", "(", "A", ")", "if", "N", ">", "N_max", ":", "raise", "ValueError", "(", "\"Tried to draw {:d} samples from a pool of only {:d} items\"", ".", "format", "(", "N", ",", "N_max", ")", ")", "samples_from", "=", "np", ".", "zeros", "(", "(", "len", "(", "A", ")", ",", ")", ",", "dtype", "=", "'int'", ")", "# Number of samples to draw from each group", "remaining", "=", "N", "while", "remaining", ">", "0", ":", "remaining_groups", "=", "np", ".", "flatnonzero", "(", "samples_from", "-", "np", ".", "array", "(", "A", ")", ")", "if", "remaining", "<", "len", "(", "remaining_groups", ")", ":", "np", ".", "random", ".", "shuffle", "(", "remaining_groups", ")", "for", "g", "in", "remaining_groups", "[", ":", "remaining", "]", ":", "samples_from", "[", "g", "]", "+=", "1", "else", ":", "# Give each group the allowed number of samples. Constrain to their max size.", "to_each", "=", "max", "(", "1", ",", "int", "(", "remaining", "/", "len", "(", "remaining_groups", ")", ")", ")", "samples_from", "=", "np", ".", "min", "(", "np", ".", "vstack", "(", "(", "samples_from", "+", "to_each", ",", "A", ")", ")", ",", "axis", "=", "0", ")", "# Update remaining count", "remaining", "=", "int", "(", "N", "-", "np", ".", "sum", "(", "samples_from", ")", ")", "if", "not", "remaining", "==", "0", ":", "raise", "ValueError", "(", "\"Still {:d} samples left! 
This is an error in the selection.\"", ")", "# Construct index list of selected samples", "samples", "=", "[", "]", "for", "s", ",", "a", ",", "n", "in", "zip", "(", "slice_list", ",", "A", ",", "samples_from", ")", ":", "if", "a", "==", "n", ":", "samples", ".", "append", "(", "np", ".", "array", "(", "s", ".", "inliers", ")", ")", "# all", "elif", "a", "==", "0", ":", "samples", ".", "append", "(", "np", ".", "arange", "(", "[", "]", ")", ")", "else", ":", "chosen", "=", "np", ".", "random", ".", "choice", "(", "s", ".", "inliers", ",", "n", ",", "replace", "=", "False", ")", "samples", ".", "append", "(", "np", ".", "array", "(", "chosen", ")", ")", "return", "samples" ]
Given a list of slices, draw N samples such that each slice contributes as much as possible Parameters -------------------------- slice_list : list of Slice List of slices N : int Number of samples to draw
[ "Given", "a", "list", "of", "slices", "draw", "N", "samples", "such", "that", "each", "slice", "contributes", "as", "much", "as", "possible" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/videoslice.py#L117-L162
hovren/crisp
crisp/videoslice.py
Slice.estimate_rotation
def estimate_rotation(self, camera, ransac_threshold=7.0):
    """Estimate the rotation between first and last frame

    It uses RANSAC where the error metric is the reprojection error of the
    points from the last frame to the first frame.

    Parameters
    -----------------
    camera : CameraModel
        Camera model
    ransac_threshold : float
        Distance threshold (in pixels) for a reprojected point to count as an inlier

    Returns
    -----------------
    bool
        True if a rotation estimate is available after the call
    """
    # Only estimate when no cached result exists on this slice
    if self.axis is None:
        first = self.points[:, 0, :].T
        last = self.points[:, -1, :].T
        R, t, dist, idx = rotations.estimate_rotation_procrustes_ransac(
            first, last, camera, ransac_threshold,
            inlier_ratio=0.5, do_translation=False)
        if R is not None:
            axis, angle = rotations.rotation_matrix_to_axis_angle(R)
            # Constrain to positive angles (flip the axis to compensate)
            if angle < 0:
                angle, axis = -angle, -axis
            self.axis = axis
            self.angle = angle
            self.inliers = idx
    return self.axis is not None
python
def estimate_rotation(self, camera, ransac_threshold=7.0): """Estimate the rotation between first and last frame It uses RANSAC where the error metric is the reprojection error of the points from the last frame to the first frame. Parameters ----------------- camera : CameraModel Camera model ransac_threshold : float Distance threshold (in pixels) for a reprojected point to count as an inlier """ if self.axis is None: x = self.points[:, 0, :].T y = self.points[:, -1, :].T inlier_ratio = 0.5 R, t, dist, idx = rotations.estimate_rotation_procrustes_ransac(x, y, camera, ransac_threshold, inlier_ratio=inlier_ratio, do_translation=False) if R is not None: self.axis, self.angle = rotations.rotation_matrix_to_axis_angle(R) if self.angle < 0: # Constrain to positive angles self.angle = -self.angle self.axis = -self.axis self.inliers = idx return self.axis is not None
[ "def", "estimate_rotation", "(", "self", ",", "camera", ",", "ransac_threshold", "=", "7.0", ")", ":", "if", "self", ".", "axis", "is", "None", ":", "x", "=", "self", ".", "points", "[", ":", ",", "0", ",", ":", "]", ".", "T", "y", "=", "self", ".", "points", "[", ":", ",", "-", "1", ",", ":", "]", ".", "T", "inlier_ratio", "=", "0.5", "R", ",", "t", ",", "dist", ",", "idx", "=", "rotations", ".", "estimate_rotation_procrustes_ransac", "(", "x", ",", "y", ",", "camera", ",", "ransac_threshold", ",", "inlier_ratio", "=", "inlier_ratio", ",", "do_translation", "=", "False", ")", "if", "R", "is", "not", "None", ":", "self", ".", "axis", ",", "self", ".", "angle", "=", "rotations", ".", "rotation_matrix_to_axis_angle", "(", "R", ")", "if", "self", ".", "angle", "<", "0", ":", "# Constrain to positive angles", "self", ".", "angle", "=", "-", "self", ".", "angle", "self", ".", "axis", "=", "-", "self", ".", "axis", "self", ".", "inliers", "=", "idx", "return", "self", ".", "axis", "is", "not", "None" ]
Estimate the rotation between first and last frame It uses RANSAC where the error metric is the reprojection error of the points from the last frame to the first frame. Parameters ----------------- camera : CameraModel Camera model ransac_threshold : float Distance threshold (in pixels) for a reprojected point to count as an inlier
[ "Estimate", "the", "rotation", "between", "first", "and", "last", "frame" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/videoslice.py#L31-L61
hovren/crisp
crisp/videoslice.py
Slice.from_stream_randomly
def from_stream_randomly(video_stream, step_bounds=(5, 15), length_bounds=(2, 15), max_start=None, min_distance=10, min_slice_points=10): """Create slices from a video stream using random sampling Parameters ----------------- video_stream : VideoStream A video stream step_bounds : tuple Range bounds (inclusive) of possible step lengths length_bounds : tuple Range bounds (inclusive) of possible slice lengths max_start : int Maximum frame number to start from min_distance : float Minimum (initial) distance between tracked points min_slice_points : int Minimum number of points to keep a slice Returns ------------------- list of Slice List of slices """ new_step = lambda: int(np.random.uniform(low=step_bounds[0], high=step_bounds[1])) new_length = lambda: int(np.random.uniform(low=length_bounds[0], high=length_bounds[1])) seq_frames = [] slices = [] seq_start_points = None next_seq_start = new_step() if max_start is None else min(new_step(), max_start) next_seq_length = new_length() for i, im in enumerate(video_stream): if next_seq_start <= i < next_seq_start + next_seq_length: im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) seq_frames.append(im) if len(seq_frames) == 1: max_corners = 400 quality_level = 0.07 seq_start_points = cv2.goodFeaturesToTrack(im, max_corners, quality_level, min_distance) elif len(seq_frames) == next_seq_length: points, status = tracking.track_retrack(seq_frames, seq_start_points) if points.shape[0] >= min_slice_points: s = Slice(next_seq_start, i, points) slices.append(s) logger.debug('{0:4d} {1:3d} {2:5d} {3:>5d}-{4:<5d}'.format(len(slices)-1, points.shape[1], points.shape[0], next_seq_start, i)) seq_frames = [] next_seq_start = i + new_step() next_seq_length = new_length() return slices
python
def from_stream_randomly(video_stream, step_bounds=(5, 15), length_bounds=(2, 15), max_start=None, min_distance=10, min_slice_points=10): """Create slices from a video stream using random sampling Parameters ----------------- video_stream : VideoStream A video stream step_bounds : tuple Range bounds (inclusive) of possible step lengths length_bounds : tuple Range bounds (inclusive) of possible slice lengths max_start : int Maximum frame number to start from min_distance : float Minimum (initial) distance between tracked points min_slice_points : int Minimum number of points to keep a slice Returns ------------------- list of Slice List of slices """ new_step = lambda: int(np.random.uniform(low=step_bounds[0], high=step_bounds[1])) new_length = lambda: int(np.random.uniform(low=length_bounds[0], high=length_bounds[1])) seq_frames = [] slices = [] seq_start_points = None next_seq_start = new_step() if max_start is None else min(new_step(), max_start) next_seq_length = new_length() for i, im in enumerate(video_stream): if next_seq_start <= i < next_seq_start + next_seq_length: im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) seq_frames.append(im) if len(seq_frames) == 1: max_corners = 400 quality_level = 0.07 seq_start_points = cv2.goodFeaturesToTrack(im, max_corners, quality_level, min_distance) elif len(seq_frames) == next_seq_length: points, status = tracking.track_retrack(seq_frames, seq_start_points) if points.shape[0] >= min_slice_points: s = Slice(next_seq_start, i, points) slices.append(s) logger.debug('{0:4d} {1:3d} {2:5d} {3:>5d}-{4:<5d}'.format(len(slices)-1, points.shape[1], points.shape[0], next_seq_start, i)) seq_frames = [] next_seq_start = i + new_step() next_seq_length = new_length() return slices
[ "def", "from_stream_randomly", "(", "video_stream", ",", "step_bounds", "=", "(", "5", ",", "15", ")", ",", "length_bounds", "=", "(", "2", ",", "15", ")", ",", "max_start", "=", "None", ",", "min_distance", "=", "10", ",", "min_slice_points", "=", "10", ")", ":", "new_step", "=", "lambda", ":", "int", "(", "np", ".", "random", ".", "uniform", "(", "low", "=", "step_bounds", "[", "0", "]", ",", "high", "=", "step_bounds", "[", "1", "]", ")", ")", "new_length", "=", "lambda", ":", "int", "(", "np", ".", "random", ".", "uniform", "(", "low", "=", "length_bounds", "[", "0", "]", ",", "high", "=", "length_bounds", "[", "1", "]", ")", ")", "seq_frames", "=", "[", "]", "slices", "=", "[", "]", "seq_start_points", "=", "None", "next_seq_start", "=", "new_step", "(", ")", "if", "max_start", "is", "None", "else", "min", "(", "new_step", "(", ")", ",", "max_start", ")", "next_seq_length", "=", "new_length", "(", ")", "for", "i", ",", "im", "in", "enumerate", "(", "video_stream", ")", ":", "if", "next_seq_start", "<=", "i", "<", "next_seq_start", "+", "next_seq_length", ":", "im", "=", "cv2", ".", "cvtColor", "(", "im", ",", "cv2", ".", "COLOR_BGR2GRAY", ")", "seq_frames", ".", "append", "(", "im", ")", "if", "len", "(", "seq_frames", ")", "==", "1", ":", "max_corners", "=", "400", "quality_level", "=", "0.07", "seq_start_points", "=", "cv2", ".", "goodFeaturesToTrack", "(", "im", ",", "max_corners", ",", "quality_level", ",", "min_distance", ")", "elif", "len", "(", "seq_frames", ")", "==", "next_seq_length", ":", "points", ",", "status", "=", "tracking", ".", "track_retrack", "(", "seq_frames", ",", "seq_start_points", ")", "if", "points", ".", "shape", "[", "0", "]", ">=", "min_slice_points", ":", "s", "=", "Slice", "(", "next_seq_start", ",", "i", ",", "points", ")", "slices", ".", "append", "(", "s", ")", "logger", ".", "debug", "(", "'{0:4d} {1:3d} {2:5d} {3:>5d}-{4:<5d}'", ".", "format", "(", "len", "(", "slices", ")", "-", "1", ",", "points", ".", "shape", "[", "1", 
"]", ",", "points", ".", "shape", "[", "0", "]", ",", "next_seq_start", ",", "i", ")", ")", "seq_frames", "=", "[", "]", "next_seq_start", "=", "i", "+", "new_step", "(", ")", "next_seq_length", "=", "new_length", "(", ")", "return", "slices" ]
Create slices from a video stream using random sampling Parameters ----------------- video_stream : VideoStream A video stream step_bounds : tuple Range bounds (inclusive) of possible step lengths length_bounds : tuple Range bounds (inclusive) of possible slice lengths max_start : int Maximum frame number to start from min_distance : float Minimum (initial) distance between tracked points min_slice_points : int Minimum number of points to keep a slice Returns ------------------- list of Slice List of slices
[ "Create", "slices", "from", "a", "video", "stream", "using", "random", "sampling" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/videoslice.py#L64-L115
hovren/crisp
crisp/rotations.py
procrustes
def procrustes(X, Y, remove_mean=False): """Orthogonal procrustes problem solver The procrustes problem finds the best rotation R, and translation t where X = R*Y + t The number of points in X and Y must be at least 2. For the minimal case of two points, a third point is temporarily created and used for the estimation. Parameters ----------------- X : (3, N) ndarray First set of points Y : (3, N) ndarray Second set of points remove_mean : bool If true, the mean is removed from X and Y before solving the procrustes problem. Can yield better results in some applications. Returns ----------------- R : (3,3) ndarray Rotation component t : (3,) ndarray Translation component (None if remove_mean is False) """ assert X.shape == Y.shape assert X.shape[0] > 1 # Minimal case, create third point using cross product if X.shape[0] == 2: X3 = np.cross(X[:,0], X[:,1], axis=0) X = np.hstack((X, X3 / np.linalg.norm(X3))) Y3 = np.cross(Y[:,0], Y[:,1], axis=0) Y = np.hstack((Y, Y3 / np.linalg.norm(Y3))) D, N = X.shape[:2] if remove_mean: mx = np.mean(X, axis=1).reshape(D, 1) my = np.mean(Y, axis=1).reshape(D, 1) Xhat = X - mx Yhat = Y - my else: Xhat = X Yhat = Y (U, S, V) = np.linalg.svd((Xhat).dot(Yhat.T)) Dtmp = np.eye(Xhat.shape[0]) Dtmp[-1,-1] = np.linalg.det(U.dot(V)) R_est = U.dot(Dtmp).dot(V) # Now X=R_est*(Y-my)+mx=R_est*Y+t_est if remove_mean: t_est= mx - R_est.dot(my) else: t_est = None return (R_est, t_est)
python
def procrustes(X, Y, remove_mean=False): """Orthogonal procrustes problem solver The procrustes problem finds the best rotation R, and translation t where X = R*Y + t The number of points in X and Y must be at least 2. For the minimal case of two points, a third point is temporarily created and used for the estimation. Parameters ----------------- X : (3, N) ndarray First set of points Y : (3, N) ndarray Second set of points remove_mean : bool If true, the mean is removed from X and Y before solving the procrustes problem. Can yield better results in some applications. Returns ----------------- R : (3,3) ndarray Rotation component t : (3,) ndarray Translation component (None if remove_mean is False) """ assert X.shape == Y.shape assert X.shape[0] > 1 # Minimal case, create third point using cross product if X.shape[0] == 2: X3 = np.cross(X[:,0], X[:,1], axis=0) X = np.hstack((X, X3 / np.linalg.norm(X3))) Y3 = np.cross(Y[:,0], Y[:,1], axis=0) Y = np.hstack((Y, Y3 / np.linalg.norm(Y3))) D, N = X.shape[:2] if remove_mean: mx = np.mean(X, axis=1).reshape(D, 1) my = np.mean(Y, axis=1).reshape(D, 1) Xhat = X - mx Yhat = Y - my else: Xhat = X Yhat = Y (U, S, V) = np.linalg.svd((Xhat).dot(Yhat.T)) Dtmp = np.eye(Xhat.shape[0]) Dtmp[-1,-1] = np.linalg.det(U.dot(V)) R_est = U.dot(Dtmp).dot(V) # Now X=R_est*(Y-my)+mx=R_est*Y+t_est if remove_mean: t_est= mx - R_est.dot(my) else: t_est = None return (R_est, t_est)
[ "def", "procrustes", "(", "X", ",", "Y", ",", "remove_mean", "=", "False", ")", ":", "assert", "X", ".", "shape", "==", "Y", ".", "shape", "assert", "X", ".", "shape", "[", "0", "]", ">", "1", "# Minimal case, create third point using cross product", "if", "X", ".", "shape", "[", "0", "]", "==", "2", ":", "X3", "=", "np", ".", "cross", "(", "X", "[", ":", ",", "0", "]", ",", "X", "[", ":", ",", "1", "]", ",", "axis", "=", "0", ")", "X", "=", "np", ".", "hstack", "(", "(", "X", ",", "X3", "/", "np", ".", "linalg", ".", "norm", "(", "X3", ")", ")", ")", "Y3", "=", "np", ".", "cross", "(", "Y", "[", ":", ",", "0", "]", ",", "Y", "[", ":", ",", "1", "]", ",", "axis", "=", "0", ")", "Y", "=", "np", ".", "hstack", "(", "(", "Y", ",", "Y3", "/", "np", ".", "linalg", ".", "norm", "(", "Y3", ")", ")", ")", "D", ",", "N", "=", "X", ".", "shape", "[", ":", "2", "]", "if", "remove_mean", ":", "mx", "=", "np", ".", "mean", "(", "X", ",", "axis", "=", "1", ")", ".", "reshape", "(", "D", ",", "1", ")", "my", "=", "np", ".", "mean", "(", "Y", ",", "axis", "=", "1", ")", ".", "reshape", "(", "D", ",", "1", ")", "Xhat", "=", "X", "-", "mx", "Yhat", "=", "Y", "-", "my", "else", ":", "Xhat", "=", "X", "Yhat", "=", "Y", "(", "U", ",", "S", ",", "V", ")", "=", "np", ".", "linalg", ".", "svd", "(", "(", "Xhat", ")", ".", "dot", "(", "Yhat", ".", "T", ")", ")", "Dtmp", "=", "np", ".", "eye", "(", "Xhat", ".", "shape", "[", "0", "]", ")", "Dtmp", "[", "-", "1", ",", "-", "1", "]", "=", "np", ".", "linalg", ".", "det", "(", "U", ".", "dot", "(", "V", ")", ")", "R_est", "=", "U", ".", "dot", "(", "Dtmp", ")", ".", "dot", "(", "V", ")", "# Now X=R_est*(Y-my)+mx=R_est*Y+t_est", "if", "remove_mean", ":", "t_est", "=", "mx", "-", "R_est", ".", "dot", "(", "my", ")", "else", ":", "t_est", "=", "None", "return", "(", "R_est", ",", "t_est", ")" ]
Orthogonal procrustes problem solver The procrustes problem finds the best rotation R, and translation t where X = R*Y + t The number of points in X and Y must be at least 2. For the minimal case of two points, a third point is temporarily created and used for the estimation. Parameters ----------------- X : (3, N) ndarray First set of points Y : (3, N) ndarray Second set of points remove_mean : bool If true, the mean is removed from X and Y before solving the procrustes problem. Can yield better results in some applications. Returns ----------------- R : (3,3) ndarray Rotation component t : (3,) ndarray Translation component (None if remove_mean is False)
[ "Orthogonal", "procrustes", "problem", "solver", "The", "procrustes", "problem", "finds", "the", "best", "rotation", "R", "and", "translation", "t", "where", "X", "=", "R", "*", "Y", "+", "t", "The", "number", "of", "points", "in", "X", "and", "Y", "must", "be", "at", "least", "2", ".", "For", "the", "minimal", "case", "of", "two", "points", "a", "third", "point", "is", "temporarily", "created", "and", "used", "for", "the", "estimation", ".", "Parameters", "-----------------", "X", ":", "(", "3", "N", ")", "ndarray", "First", "set", "of", "points", "Y", ":", "(", "3", "N", ")", "ndarray", "Second", "set", "of", "points", "remove_mean", ":", "bool", "If", "true", "the", "mean", "is", "removed", "from", "X", "and", "Y", "before", "solving", "the", "procrustes", "problem", ".", "Can", "yield", "better", "results", "in", "some", "applications", ".", "Returns", "-----------------", "R", ":", "(", "3", "3", ")", "ndarray", "Rotation", "component", "t", ":", "(", "3", ")", "ndarray", "Translation", "component", "(", "None", "if", "remove_mean", "is", "False", ")" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/rotations.py#L21-L84
hovren/crisp
crisp/rotations.py
rotation_matrix_to_axis_angle
def rotation_matrix_to_axis_angle(R): """Convert a 3D rotation matrix to a 3D axis angle representation Parameters --------------- R : (3,3) array Rotation matrix Returns ---------------- v : (3,) array (Unit-) rotation angle theta : float Angle of rotations, in radians Note -------------- This uses the algorithm as described in Multiple View Geometry, p. 584 """ assert R.shape == (3,3) assert_almost_equal(np.linalg.det(R), 1.0, err_msg="Not a rotation matrix: determinant was not 1") S, V = np.linalg.eig(R) k = np.argmin(np.abs(S - 1.)) s = S[k] assert_almost_equal(s, 1.0, err_msg="Not a rotation matrix: No eigen value s=1") v = np.real(V[:, k]) # Result is generally complex vhat = np.array([R[2,1] - R[1,2], R[0,2] - R[2,0], R[1,0] - R[0,1]]) sintheta = 0.5 * np.dot(v, vhat) costheta = 0.5 * (np.trace(R) - 1) theta = np.arctan2(sintheta, costheta) return (v, theta)
python
def rotation_matrix_to_axis_angle(R): """Convert a 3D rotation matrix to a 3D axis angle representation Parameters --------------- R : (3,3) array Rotation matrix Returns ---------------- v : (3,) array (Unit-) rotation angle theta : float Angle of rotations, in radians Note -------------- This uses the algorithm as described in Multiple View Geometry, p. 584 """ assert R.shape == (3,3) assert_almost_equal(np.linalg.det(R), 1.0, err_msg="Not a rotation matrix: determinant was not 1") S, V = np.linalg.eig(R) k = np.argmin(np.abs(S - 1.)) s = S[k] assert_almost_equal(s, 1.0, err_msg="Not a rotation matrix: No eigen value s=1") v = np.real(V[:, k]) # Result is generally complex vhat = np.array([R[2,1] - R[1,2], R[0,2] - R[2,0], R[1,0] - R[0,1]]) sintheta = 0.5 * np.dot(v, vhat) costheta = 0.5 * (np.trace(R) - 1) theta = np.arctan2(sintheta, costheta) return (v, theta)
[ "def", "rotation_matrix_to_axis_angle", "(", "R", ")", ":", "assert", "R", ".", "shape", "==", "(", "3", ",", "3", ")", "assert_almost_equal", "(", "np", ".", "linalg", ".", "det", "(", "R", ")", ",", "1.0", ",", "err_msg", "=", "\"Not a rotation matrix: determinant was not 1\"", ")", "S", ",", "V", "=", "np", ".", "linalg", ".", "eig", "(", "R", ")", "k", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "S", "-", "1.", ")", ")", "s", "=", "S", "[", "k", "]", "assert_almost_equal", "(", "s", ",", "1.0", ",", "err_msg", "=", "\"Not a rotation matrix: No eigen value s=1\"", ")", "v", "=", "np", ".", "real", "(", "V", "[", ":", ",", "k", "]", ")", "# Result is generally complex", "vhat", "=", "np", ".", "array", "(", "[", "R", "[", "2", ",", "1", "]", "-", "R", "[", "1", ",", "2", "]", ",", "R", "[", "0", ",", "2", "]", "-", "R", "[", "2", ",", "0", "]", ",", "R", "[", "1", ",", "0", "]", "-", "R", "[", "0", ",", "1", "]", "]", ")", "sintheta", "=", "0.5", "*", "np", ".", "dot", "(", "v", ",", "vhat", ")", "costheta", "=", "0.5", "*", "(", "np", ".", "trace", "(", "R", ")", "-", "1", ")", "theta", "=", "np", ".", "arctan2", "(", "sintheta", ",", "costheta", ")", "return", "(", "v", ",", "theta", ")" ]
Convert a 3D rotation matrix to a 3D axis angle representation Parameters --------------- R : (3,3) array Rotation matrix Returns ---------------- v : (3,) array (Unit-) rotation angle theta : float Angle of rotations, in radians Note -------------- This uses the algorithm as described in Multiple View Geometry, p. 584
[ "Convert", "a", "3D", "rotation", "matrix", "to", "a", "3D", "axis", "angle", "representation", "Parameters", "---------------", "R", ":", "(", "3", "3", ")", "array", "Rotation", "matrix", "Returns", "----------------", "v", ":", "(", "3", ")", "array", "(", "Unit", "-", ")", "rotation", "angle", "theta", ":", "float", "Angle", "of", "rotations", "in", "radians", "Note", "--------------", "This", "uses", "the", "algorithm", "as", "described", "in", "Multiple", "View", "Geometry", "p", ".", "584" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/rotations.py#L88-L120
hovren/crisp
crisp/rotations.py
axis_angle_to_rotation_matrix
def axis_angle_to_rotation_matrix(v, theta): """Convert rotation from axis-angle to rotation matrix Parameters --------------- v : (3,) ndarray Rotation axis (normalized) theta : float Rotation angle (radians) Returns ---------------- R : (3,3) ndarray Rotation matrix """ if np.abs(theta) < np.spacing(1): return np.eye(3) else: v = v.reshape(3,1) np.testing.assert_almost_equal(np.linalg.norm(v), 1.) vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]) vvt = np.dot(v, v.T) R = np.eye(3)*np.cos(theta) + (1 - np.cos(theta))*vvt + vx * np.sin(theta) return R
python
def axis_angle_to_rotation_matrix(v, theta): """Convert rotation from axis-angle to rotation matrix Parameters --------------- v : (3,) ndarray Rotation axis (normalized) theta : float Rotation angle (radians) Returns ---------------- R : (3,3) ndarray Rotation matrix """ if np.abs(theta) < np.spacing(1): return np.eye(3) else: v = v.reshape(3,1) np.testing.assert_almost_equal(np.linalg.norm(v), 1.) vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]) vvt = np.dot(v, v.T) R = np.eye(3)*np.cos(theta) + (1 - np.cos(theta))*vvt + vx * np.sin(theta) return R
[ "def", "axis_angle_to_rotation_matrix", "(", "v", ",", "theta", ")", ":", "if", "np", ".", "abs", "(", "theta", ")", "<", "np", ".", "spacing", "(", "1", ")", ":", "return", "np", ".", "eye", "(", "3", ")", "else", ":", "v", "=", "v", ".", "reshape", "(", "3", ",", "1", ")", "np", ".", "testing", ".", "assert_almost_equal", "(", "np", ".", "linalg", ".", "norm", "(", "v", ")", ",", "1.", ")", "vx", "=", "np", ".", "array", "(", "[", "[", "0", ",", "-", "v", "[", "2", "]", ",", "v", "[", "1", "]", "]", ",", "[", "v", "[", "2", "]", ",", "0", ",", "-", "v", "[", "0", "]", "]", ",", "[", "-", "v", "[", "1", "]", ",", "v", "[", "0", "]", ",", "0", "]", "]", ")", "vvt", "=", "np", ".", "dot", "(", "v", ",", "v", ".", "T", ")", "R", "=", "np", ".", "eye", "(", "3", ")", "*", "np", ".", "cos", "(", "theta", ")", "+", "(", "1", "-", "np", ".", "cos", "(", "theta", ")", ")", "*", "vvt", "+", "vx", "*", "np", ".", "sin", "(", "theta", ")", "return", "R" ]
Convert rotation from axis-angle to rotation matrix Parameters --------------- v : (3,) ndarray Rotation axis (normalized) theta : float Rotation angle (radians) Returns ---------------- R : (3,3) ndarray Rotation matrix
[ "Convert", "rotation", "from", "axis", "-", "angle", "to", "rotation", "matrix", "Parameters", "---------------", "v", ":", "(", "3", ")", "ndarray", "Rotation", "axis", "(", "normalized", ")", "theta", ":", "float", "Rotation", "angle", "(", "radians", ")" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/rotations.py#L124-L149
hovren/crisp
crisp/rotations.py
quat_to_rotation_matrix
def quat_to_rotation_matrix(q): """Convert unit quaternion to rotation matrix Parameters ------------- q : (4,) ndarray Unit quaternion, scalar as first element Returns ---------------- R : (3,3) ndarray Rotation matrix """ q = q.flatten() assert q.size == 4 assert_almost_equal(np.linalg.norm(q), 1.0, err_msg="Not a unit quaternion!") qq = q ** 2 R = np.array([[qq[0] + qq[1] - qq[2] - qq[3], 2*q[1]*q[2] - 2*q[0]*q[3], 2*q[1]*q[3] + 2*q[0]*q[2]], [2*q[1]*q[2] + 2*q[0]*q[3], qq[0] - qq[1] + qq[2] - qq[3], 2*q[2]*q[3] - 2*q[0]*q[1]], [2*q[1]*q[3] - 2*q[0]*q[2], 2*q[2]*q[3] + 2*q[0]*q[1], qq[0] - qq[1] - qq[2] + qq[3]]]) return R
python
def quat_to_rotation_matrix(q): """Convert unit quaternion to rotation matrix Parameters ------------- q : (4,) ndarray Unit quaternion, scalar as first element Returns ---------------- R : (3,3) ndarray Rotation matrix """ q = q.flatten() assert q.size == 4 assert_almost_equal(np.linalg.norm(q), 1.0, err_msg="Not a unit quaternion!") qq = q ** 2 R = np.array([[qq[0] + qq[1] - qq[2] - qq[3], 2*q[1]*q[2] - 2*q[0]*q[3], 2*q[1]*q[3] + 2*q[0]*q[2]], [2*q[1]*q[2] + 2*q[0]*q[3], qq[0] - qq[1] + qq[2] - qq[3], 2*q[2]*q[3] - 2*q[0]*q[1]], [2*q[1]*q[3] - 2*q[0]*q[2], 2*q[2]*q[3] + 2*q[0]*q[1], qq[0] - qq[1] - qq[2] + qq[3]]]) return R
[ "def", "quat_to_rotation_matrix", "(", "q", ")", ":", "q", "=", "q", ".", "flatten", "(", ")", "assert", "q", ".", "size", "==", "4", "assert_almost_equal", "(", "np", ".", "linalg", ".", "norm", "(", "q", ")", ",", "1.0", ",", "err_msg", "=", "\"Not a unit quaternion!\"", ")", "qq", "=", "q", "**", "2", "R", "=", "np", ".", "array", "(", "[", "[", "qq", "[", "0", "]", "+", "qq", "[", "1", "]", "-", "qq", "[", "2", "]", "-", "qq", "[", "3", "]", ",", "2", "*", "q", "[", "1", "]", "*", "q", "[", "2", "]", "-", "2", "*", "q", "[", "0", "]", "*", "q", "[", "3", "]", ",", "2", "*", "q", "[", "1", "]", "*", "q", "[", "3", "]", "+", "2", "*", "q", "[", "0", "]", "*", "q", "[", "2", "]", "]", ",", "[", "2", "*", "q", "[", "1", "]", "*", "q", "[", "2", "]", "+", "2", "*", "q", "[", "0", "]", "*", "q", "[", "3", "]", ",", "qq", "[", "0", "]", "-", "qq", "[", "1", "]", "+", "qq", "[", "2", "]", "-", "qq", "[", "3", "]", ",", "2", "*", "q", "[", "2", "]", "*", "q", "[", "3", "]", "-", "2", "*", "q", "[", "0", "]", "*", "q", "[", "1", "]", "]", ",", "[", "2", "*", "q", "[", "1", "]", "*", "q", "[", "3", "]", "-", "2", "*", "q", "[", "0", "]", "*", "q", "[", "2", "]", ",", "2", "*", "q", "[", "2", "]", "*", "q", "[", "3", "]", "+", "2", "*", "q", "[", "0", "]", "*", "q", "[", "1", "]", ",", "qq", "[", "0", "]", "-", "qq", "[", "1", "]", "-", "qq", "[", "2", "]", "+", "qq", "[", "3", "]", "]", "]", ")", "return", "R" ]
Convert unit quaternion to rotation matrix Parameters ------------- q : (4,) ndarray Unit quaternion, scalar as first element Returns ---------------- R : (3,3) ndarray Rotation matrix
[ "Convert", "unit", "quaternion", "to", "rotation", "matrix", "Parameters", "-------------", "q", ":", "(", "4", ")", "ndarray", "Unit", "quaternion", "scalar", "as", "first", "element" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/rotations.py#L153-L177
hovren/crisp
crisp/rotations.py
integrate_gyro_quaternion
def integrate_gyro_quaternion(gyro_ts, gyro_data): """Integrate angular velocities to rotations Parameters --------------- gyro_ts : ndarray Timestamps gyro_data : (3, N) ndarray Angular velocity measurements Returns --------------- rotations : (4, N) ndarray Rotation sequence as unit quaternions (first element scalar) """ #NB: Quaternion q = [a, n1, n2, n3], scalar first q_list = np.zeros((gyro_ts.shape[0], 4)) # Nx4 quaternion list q_list[0,:] = np.array([1, 0, 0, 0]) # Initial rotation (no rotation) # Iterate over all (except first) for i in range(1, gyro_ts.size): w = gyro_data[i] dt = gyro_ts[i] - gyro_ts[i - 1] qprev = q_list[i - 1] A = np.array([[0, -w[0], -w[1], -w[2]], [w[0], 0, w[2], -w[1]], [w[1], -w[2], 0, w[0]], [w[2], w[1], -w[0], 0]]) qnew = (np.eye(4) + (dt/2.0) * A).dot(qprev) qnorm = np.sqrt(np.sum(qnew ** 2)) qnew /= qnorm q_list[i] = qnew return q_list
python
def integrate_gyro_quaternion(gyro_ts, gyro_data): """Integrate angular velocities to rotations Parameters --------------- gyro_ts : ndarray Timestamps gyro_data : (3, N) ndarray Angular velocity measurements Returns --------------- rotations : (4, N) ndarray Rotation sequence as unit quaternions (first element scalar) """ #NB: Quaternion q = [a, n1, n2, n3], scalar first q_list = np.zeros((gyro_ts.shape[0], 4)) # Nx4 quaternion list q_list[0,:] = np.array([1, 0, 0, 0]) # Initial rotation (no rotation) # Iterate over all (except first) for i in range(1, gyro_ts.size): w = gyro_data[i] dt = gyro_ts[i] - gyro_ts[i - 1] qprev = q_list[i - 1] A = np.array([[0, -w[0], -w[1], -w[2]], [w[0], 0, w[2], -w[1]], [w[1], -w[2], 0, w[0]], [w[2], w[1], -w[0], 0]]) qnew = (np.eye(4) + (dt/2.0) * A).dot(qprev) qnorm = np.sqrt(np.sum(qnew ** 2)) qnew /= qnorm q_list[i] = qnew return q_list
[ "def", "integrate_gyro_quaternion", "(", "gyro_ts", ",", "gyro_data", ")", ":", "#NB: Quaternion q = [a, n1, n2, n3], scalar first", "q_list", "=", "np", ".", "zeros", "(", "(", "gyro_ts", ".", "shape", "[", "0", "]", ",", "4", ")", ")", "# Nx4 quaternion list", "q_list", "[", "0", ",", ":", "]", "=", "np", ".", "array", "(", "[", "1", ",", "0", ",", "0", ",", "0", "]", ")", "# Initial rotation (no rotation)", "# Iterate over all (except first)", "for", "i", "in", "range", "(", "1", ",", "gyro_ts", ".", "size", ")", ":", "w", "=", "gyro_data", "[", "i", "]", "dt", "=", "gyro_ts", "[", "i", "]", "-", "gyro_ts", "[", "i", "-", "1", "]", "qprev", "=", "q_list", "[", "i", "-", "1", "]", "A", "=", "np", ".", "array", "(", "[", "[", "0", ",", "-", "w", "[", "0", "]", ",", "-", "w", "[", "1", "]", ",", "-", "w", "[", "2", "]", "]", ",", "[", "w", "[", "0", "]", ",", "0", ",", "w", "[", "2", "]", ",", "-", "w", "[", "1", "]", "]", ",", "[", "w", "[", "1", "]", ",", "-", "w", "[", "2", "]", ",", "0", ",", "w", "[", "0", "]", "]", ",", "[", "w", "[", "2", "]", ",", "w", "[", "1", "]", ",", "-", "w", "[", "0", "]", ",", "0", "]", "]", ")", "qnew", "=", "(", "np", ".", "eye", "(", "4", ")", "+", "(", "dt", "/", "2.0", ")", "*", "A", ")", ".", "dot", "(", "qprev", ")", "qnorm", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "qnew", "**", "2", ")", ")", "qnew", "/=", "qnorm", "q_list", "[", "i", "]", "=", "qnew", "return", "q_list" ]
Integrate angular velocities to rotations Parameters --------------- gyro_ts : ndarray Timestamps gyro_data : (3, N) ndarray Angular velocity measurements Returns --------------- rotations : (4, N) ndarray Rotation sequence as unit quaternions (first element scalar)
[ "Integrate", "angular", "velocities", "to", "rotations", "Parameters", "---------------", "gyro_ts", ":", "ndarray", "Timestamps", "gyro_data", ":", "(", "3", "N", ")", "ndarray", "Angular", "velocity", "measurements", "Returns", "---------------", "rotations", ":", "(", "4", "N", ")", "ndarray", "Rotation", "sequence", "as", "unit", "quaternions", "(", "first", "element", "scalar", ")" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/rotations.py#L181-L216
hovren/crisp
crisp/rotations.py
slerp
def slerp(q1, q2, u): """SLERP: Spherical linear interpolation between two unit quaternions. Parameters ------------ q1 : (4, ) ndarray Unit quaternion (first element scalar) q2 : (4, ) ndarray Unit quaternion (first element scalar) u : float Interpolation factor in range [0,1] where 0 is first quaternion and 1 is second quaternion. Returns ----------- q : (4,) ndarray The interpolated unit quaternion """ q1 = q1.flatten() q2 = q2.flatten() assert q1.shape == q2.shape assert q1.size == 4 costheta = np.dot(q1, q2) if np.isclose(u, 0.): return q1 elif np.isclose(u, 1.): return q2 elif u > 1 or u < 0: raise ValueError("u must be in range [0, 1]") # Shortest path if costheta < 0: costheta = -costheta q2 = -q2 # Almost the same, we can return any of them? if np.isclose(costheta, 1.0): return q1 theta = np.arccos(costheta) f1 = np.sin((1.0 - u)*theta) / np.sin(theta) f2 = np.sin(u*theta) / np.sin(theta) q = f1*q1 + f2*q2 q = q / np.sqrt(np.sum(q**2)) # Normalize return q
python
def slerp(q1, q2, u): """SLERP: Spherical linear interpolation between two unit quaternions. Parameters ------------ q1 : (4, ) ndarray Unit quaternion (first element scalar) q2 : (4, ) ndarray Unit quaternion (first element scalar) u : float Interpolation factor in range [0,1] where 0 is first quaternion and 1 is second quaternion. Returns ----------- q : (4,) ndarray The interpolated unit quaternion """ q1 = q1.flatten() q2 = q2.flatten() assert q1.shape == q2.shape assert q1.size == 4 costheta = np.dot(q1, q2) if np.isclose(u, 0.): return q1 elif np.isclose(u, 1.): return q2 elif u > 1 or u < 0: raise ValueError("u must be in range [0, 1]") # Shortest path if costheta < 0: costheta = -costheta q2 = -q2 # Almost the same, we can return any of them? if np.isclose(costheta, 1.0): return q1 theta = np.arccos(costheta) f1 = np.sin((1.0 - u)*theta) / np.sin(theta) f2 = np.sin(u*theta) / np.sin(theta) q = f1*q1 + f2*q2 q = q / np.sqrt(np.sum(q**2)) # Normalize return q
[ "def", "slerp", "(", "q1", ",", "q2", ",", "u", ")", ":", "q1", "=", "q1", ".", "flatten", "(", ")", "q2", "=", "q2", ".", "flatten", "(", ")", "assert", "q1", ".", "shape", "==", "q2", ".", "shape", "assert", "q1", ".", "size", "==", "4", "costheta", "=", "np", ".", "dot", "(", "q1", ",", "q2", ")", "if", "np", ".", "isclose", "(", "u", ",", "0.", ")", ":", "return", "q1", "elif", "np", ".", "isclose", "(", "u", ",", "1.", ")", ":", "return", "q2", "elif", "u", ">", "1", "or", "u", "<", "0", ":", "raise", "ValueError", "(", "\"u must be in range [0, 1]\"", ")", "# Shortest path", "if", "costheta", "<", "0", ":", "costheta", "=", "-", "costheta", "q2", "=", "-", "q2", "# Almost the same, we can return any of them?", "if", "np", ".", "isclose", "(", "costheta", ",", "1.0", ")", ":", "return", "q1", "theta", "=", "np", ".", "arccos", "(", "costheta", ")", "f1", "=", "np", ".", "sin", "(", "(", "1.0", "-", "u", ")", "*", "theta", ")", "/", "np", ".", "sin", "(", "theta", ")", "f2", "=", "np", ".", "sin", "(", "u", "*", "theta", ")", "/", "np", ".", "sin", "(", "theta", ")", "q", "=", "f1", "*", "q1", "+", "f2", "*", "q2", "q", "=", "q", "/", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "q", "**", "2", ")", ")", "# Normalize", "return", "q" ]
SLERP: Spherical linear interpolation between two unit quaternions. Parameters ------------ q1 : (4, ) ndarray Unit quaternion (first element scalar) q2 : (4, ) ndarray Unit quaternion (first element scalar) u : float Interpolation factor in range [0,1] where 0 is first quaternion and 1 is second quaternion. Returns ----------- q : (4,) ndarray The interpolated unit quaternion
[ "SLERP", ":", "Spherical", "linear", "interpolation", "between", "two", "unit", "quaternions", ".", "Parameters", "------------", "q1", ":", "(", "4", ")", "ndarray", "Unit", "quaternion", "(", "first", "element", "scalar", ")", "q2", ":", "(", "4", ")", "ndarray", "Unit", "quaternion", "(", "first", "element", "scalar", ")", "u", ":", "float", "Interpolation", "factor", "in", "range", "[", "0", "1", "]", "where", "0", "is", "first", "quaternion", "and", "1", "is", "second", "quaternion", ".", "Returns", "-----------", "q", ":", "(", "4", ")", "ndarray", "The", "interpolated", "unit", "quaternion" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/rotations.py#L220-L266
hovren/crisp
crisp/rotations.py
estimate_rotation_procrustes_ransac
def estimate_rotation_procrustes_ransac(x, y, camera, threshold, inlier_ratio=0.75, do_translation=False): """Calculate rotation between two sets of image coordinates using ransac. Inlier criteria is the reprojection error of y into image 1. Parameters ------------------------- x : array 2xN image coordinates in image 1 y : array 2xN image coordinates in image 2 camera : Camera model threshold : float pixel distance threshold to accept as inlier do_translation : bool Try to estimate the translation as well Returns ------------------------ R : array 3x3 The rotation that best fulfills X = RY t : array 3x1 translation if do_translation is False residual : array pixel distances ||x - xhat|| where xhat ~ KRY (and lens distorsion) inliers : array Indices of the points (in X and Y) that are RANSAC inliers """ assert x.shape == y.shape assert x.shape[0] == 2 X = camera.unproject(x) Y = camera.unproject(y) data = np.vstack((X, Y, x)) assert data.shape[0] == 8 model_func = lambda data: procrustes(data[:3], data[3:6], remove_mean=do_translation) def eval_func(model, data): Y = data[3:6].reshape(3,-1) x = data[6:].reshape(2,-1) R, t = model Xhat = np.dot(R, Y) if t is None else np.dot(R, Y) + t xhat = camera.project(Xhat) dist = np.sqrt(np.sum((x-xhat)**2, axis=0)) return dist inlier_selection_prob = 0.99999 model_points = 2 ransac_iterations = int(np.log(1 - inlier_selection_prob) / np.log(1-inlier_ratio**model_points)) model_est, ransac_consensus_idx = ransac.RANSAC(model_func, eval_func, data, model_points, ransac_iterations, threshold, recalculate=True) if model_est is not None: (R, t) = model_est dist = eval_func((R, t), data) else: dist = None R, t = None, None ransac_consensus_idx = [] return R, t, dist, ransac_consensus_idx
python
def estimate_rotation_procrustes_ransac(x, y, camera, threshold, inlier_ratio=0.75, do_translation=False): """Calculate rotation between two sets of image coordinates using ransac. Inlier criteria is the reprojection error of y into image 1. Parameters ------------------------- x : array 2xN image coordinates in image 1 y : array 2xN image coordinates in image 2 camera : Camera model threshold : float pixel distance threshold to accept as inlier do_translation : bool Try to estimate the translation as well Returns ------------------------ R : array 3x3 The rotation that best fulfills X = RY t : array 3x1 translation if do_translation is False residual : array pixel distances ||x - xhat|| where xhat ~ KRY (and lens distorsion) inliers : array Indices of the points (in X and Y) that are RANSAC inliers """ assert x.shape == y.shape assert x.shape[0] == 2 X = camera.unproject(x) Y = camera.unproject(y) data = np.vstack((X, Y, x)) assert data.shape[0] == 8 model_func = lambda data: procrustes(data[:3], data[3:6], remove_mean=do_translation) def eval_func(model, data): Y = data[3:6].reshape(3,-1) x = data[6:].reshape(2,-1) R, t = model Xhat = np.dot(R, Y) if t is None else np.dot(R, Y) + t xhat = camera.project(Xhat) dist = np.sqrt(np.sum((x-xhat)**2, axis=0)) return dist inlier_selection_prob = 0.99999 model_points = 2 ransac_iterations = int(np.log(1 - inlier_selection_prob) / np.log(1-inlier_ratio**model_points)) model_est, ransac_consensus_idx = ransac.RANSAC(model_func, eval_func, data, model_points, ransac_iterations, threshold, recalculate=True) if model_est is not None: (R, t) = model_est dist = eval_func((R, t), data) else: dist = None R, t = None, None ransac_consensus_idx = [] return R, t, dist, ransac_consensus_idx
[ "def", "estimate_rotation_procrustes_ransac", "(", "x", ",", "y", ",", "camera", ",", "threshold", ",", "inlier_ratio", "=", "0.75", ",", "do_translation", "=", "False", ")", ":", "assert", "x", ".", "shape", "==", "y", ".", "shape", "assert", "x", ".", "shape", "[", "0", "]", "==", "2", "X", "=", "camera", ".", "unproject", "(", "x", ")", "Y", "=", "camera", ".", "unproject", "(", "y", ")", "data", "=", "np", ".", "vstack", "(", "(", "X", ",", "Y", ",", "x", ")", ")", "assert", "data", ".", "shape", "[", "0", "]", "==", "8", "model_func", "=", "lambda", "data", ":", "procrustes", "(", "data", "[", ":", "3", "]", ",", "data", "[", "3", ":", "6", "]", ",", "remove_mean", "=", "do_translation", ")", "def", "eval_func", "(", "model", ",", "data", ")", ":", "Y", "=", "data", "[", "3", ":", "6", "]", ".", "reshape", "(", "3", ",", "-", "1", ")", "x", "=", "data", "[", "6", ":", "]", ".", "reshape", "(", "2", ",", "-", "1", ")", "R", ",", "t", "=", "model", "Xhat", "=", "np", ".", "dot", "(", "R", ",", "Y", ")", "if", "t", "is", "None", "else", "np", ".", "dot", "(", "R", ",", "Y", ")", "+", "t", "xhat", "=", "camera", ".", "project", "(", "Xhat", ")", "dist", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "(", "x", "-", "xhat", ")", "**", "2", ",", "axis", "=", "0", ")", ")", "return", "dist", "inlier_selection_prob", "=", "0.99999", "model_points", "=", "2", "ransac_iterations", "=", "int", "(", "np", ".", "log", "(", "1", "-", "inlier_selection_prob", ")", "/", "np", ".", "log", "(", "1", "-", "inlier_ratio", "**", "model_points", ")", ")", "model_est", ",", "ransac_consensus_idx", "=", "ransac", ".", "RANSAC", "(", "model_func", ",", "eval_func", ",", "data", ",", "model_points", ",", "ransac_iterations", ",", "threshold", ",", "recalculate", "=", "True", ")", "if", "model_est", "is", "not", "None", ":", "(", "R", ",", "t", ")", "=", "model_est", "dist", "=", "eval_func", "(", "(", "R", ",", "t", ")", ",", "data", ")", "else", ":", "dist", "=", "None", "R", 
",", "t", "=", "None", ",", "None", "ransac_consensus_idx", "=", "[", "]", "return", "R", ",", "t", ",", "dist", ",", "ransac_consensus_idx" ]
Calculate rotation between two sets of image coordinates using ransac. Inlier criteria is the reprojection error of y into image 1. Parameters ------------------------- x : array 2xN image coordinates in image 1 y : array 2xN image coordinates in image 2 camera : Camera model threshold : float pixel distance threshold to accept as inlier do_translation : bool Try to estimate the translation as well Returns ------------------------ R : array 3x3 The rotation that best fulfills X = RY t : array 3x1 translation if do_translation is False residual : array pixel distances ||x - xhat|| where xhat ~ KRY (and lens distorsion) inliers : array Indices of the points (in X and Y) that are RANSAC inliers
[ "Calculate", "rotation", "between", "two", "sets", "of", "image", "coordinates", "using", "ransac", ".", "Inlier", "criteria", "is", "the", "reprojection", "error", "of", "y", "into", "image", "1", "." ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/rotations.py#L270-L325
hovren/crisp
crisp/ransac.py
RANSAC
def RANSAC(model_func, eval_func, data, num_points, num_iter, threshold, recalculate=False): """Apply RANSAC. This RANSAC implementation will choose the best model based on the number of points in the consensus set. At evaluation time the model is created using num_points points. Then it will be recalculated using the points in the consensus set. Parameters ------------ model_func: Takes a data parameter of size DxK where K is the number of points needed to construct the model and returns the model (Mx1 vector) eval_func: Takes a model parameter (Lx1) and one or more data points (DxC, C>=1) and calculates the score of the point(s) relative to the selected model data : array (DxN) where D is dimensionality and N number of samples """ M = None max_consensus = 0 all_idx = list(range(data.shape[1])) final_consensus = [] for k in range(num_iter): np.random.shuffle(all_idx) model_set = all_idx[:num_points] x = data[:, model_set] m = model_func(x) model_error = eval_func(m, data) assert model_error.ndim == 1 assert model_error.size == data.shape[1] consensus_idx = np.flatnonzero(model_error < threshold) if len(consensus_idx) > max_consensus: M = m max_consensus = len(consensus_idx) final_consensus = consensus_idx # Recalculate using current consensus set? if recalculate and len(final_consensus) > 0: final_consensus_set = data[:, final_consensus] M = model_func(final_consensus_set) return (M, final_consensus)
python
def RANSAC(model_func, eval_func, data, num_points, num_iter, threshold, recalculate=False): """Apply RANSAC. This RANSAC implementation will choose the best model based on the number of points in the consensus set. At evaluation time the model is created using num_points points. Then it will be recalculated using the points in the consensus set. Parameters ------------ model_func: Takes a data parameter of size DxK where K is the number of points needed to construct the model and returns the model (Mx1 vector) eval_func: Takes a model parameter (Lx1) and one or more data points (DxC, C>=1) and calculates the score of the point(s) relative to the selected model data : array (DxN) where D is dimensionality and N number of samples """ M = None max_consensus = 0 all_idx = list(range(data.shape[1])) final_consensus = [] for k in range(num_iter): np.random.shuffle(all_idx) model_set = all_idx[:num_points] x = data[:, model_set] m = model_func(x) model_error = eval_func(m, data) assert model_error.ndim == 1 assert model_error.size == data.shape[1] consensus_idx = np.flatnonzero(model_error < threshold) if len(consensus_idx) > max_consensus: M = m max_consensus = len(consensus_idx) final_consensus = consensus_idx # Recalculate using current consensus set? if recalculate and len(final_consensus) > 0: final_consensus_set = data[:, final_consensus] M = model_func(final_consensus_set) return (M, final_consensus)
[ "def", "RANSAC", "(", "model_func", ",", "eval_func", ",", "data", ",", "num_points", ",", "num_iter", ",", "threshold", ",", "recalculate", "=", "False", ")", ":", "M", "=", "None", "max_consensus", "=", "0", "all_idx", "=", "list", "(", "range", "(", "data", ".", "shape", "[", "1", "]", ")", ")", "final_consensus", "=", "[", "]", "for", "k", "in", "range", "(", "num_iter", ")", ":", "np", ".", "random", ".", "shuffle", "(", "all_idx", ")", "model_set", "=", "all_idx", "[", ":", "num_points", "]", "x", "=", "data", "[", ":", ",", "model_set", "]", "m", "=", "model_func", "(", "x", ")", "model_error", "=", "eval_func", "(", "m", ",", "data", ")", "assert", "model_error", ".", "ndim", "==", "1", "assert", "model_error", ".", "size", "==", "data", ".", "shape", "[", "1", "]", "consensus_idx", "=", "np", ".", "flatnonzero", "(", "model_error", "<", "threshold", ")", "if", "len", "(", "consensus_idx", ")", ">", "max_consensus", ":", "M", "=", "m", "max_consensus", "=", "len", "(", "consensus_idx", ")", "final_consensus", "=", "consensus_idx", "# Recalculate using current consensus set?", "if", "recalculate", "and", "len", "(", "final_consensus", ")", ">", "0", ":", "final_consensus_set", "=", "data", "[", ":", ",", "final_consensus", "]", "M", "=", "model_func", "(", "final_consensus_set", ")", "return", "(", "M", ",", "final_consensus", ")" ]
Apply RANSAC. This RANSAC implementation will choose the best model based on the number of points in the consensus set. At evaluation time the model is created using num_points points. Then it will be recalculated using the points in the consensus set. Parameters ------------ model_func: Takes a data parameter of size DxK where K is the number of points needed to construct the model and returns the model (Mx1 vector) eval_func: Takes a model parameter (Lx1) and one or more data points (DxC, C>=1) and calculates the score of the point(s) relative to the selected model data : array (DxN) where D is dimensionality and N number of samples
[ "Apply", "RANSAC", "." ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/ransac.py#L5-L41
hovren/crisp
crisp/tracking.py
track_points
def track_points(img1, img2, initial_points=None, gftt_params={}): """Track points between two images Parameters ----------------- img1 : (M, N) ndarray First image img2 : (M, N) ndarray Second image initial_points : ndarray Initial points. If empty, initial points will be calculated from img1 using goodFeaturesToTrack in OpenCV gftt_params : dict Keyword arguments for goodFeaturesToTrack Returns ----------------- points : ndarray Tracked points initial_points : ndarray Initial points used """ params = GFTT_DEFAULTS if gftt_params: params.update(gftt_params) if initial_points is None: initial_points = cv2.goodFeaturesToTrack(img1, params['max_corners'], params['quality_level'], params['min_distance']) [_points, status, err] = cv2.calcOpticalFlowPyrLK(img1, img2, initial_points, np.array([])) # Filter out valid points only points = _points[np.nonzero(status)] initial_points = initial_points[np.nonzero(status)] return (points, initial_points)
python
def track_points(img1, img2, initial_points=None, gftt_params={}): """Track points between two images Parameters ----------------- img1 : (M, N) ndarray First image img2 : (M, N) ndarray Second image initial_points : ndarray Initial points. If empty, initial points will be calculated from img1 using goodFeaturesToTrack in OpenCV gftt_params : dict Keyword arguments for goodFeaturesToTrack Returns ----------------- points : ndarray Tracked points initial_points : ndarray Initial points used """ params = GFTT_DEFAULTS if gftt_params: params.update(gftt_params) if initial_points is None: initial_points = cv2.goodFeaturesToTrack(img1, params['max_corners'], params['quality_level'], params['min_distance']) [_points, status, err] = cv2.calcOpticalFlowPyrLK(img1, img2, initial_points, np.array([])) # Filter out valid points only points = _points[np.nonzero(status)] initial_points = initial_points[np.nonzero(status)] return (points, initial_points)
[ "def", "track_points", "(", "img1", ",", "img2", ",", "initial_points", "=", "None", ",", "gftt_params", "=", "{", "}", ")", ":", "params", "=", "GFTT_DEFAULTS", "if", "gftt_params", ":", "params", ".", "update", "(", "gftt_params", ")", "if", "initial_points", "is", "None", ":", "initial_points", "=", "cv2", ".", "goodFeaturesToTrack", "(", "img1", ",", "params", "[", "'max_corners'", "]", ",", "params", "[", "'quality_level'", "]", ",", "params", "[", "'min_distance'", "]", ")", "[", "_points", ",", "status", ",", "err", "]", "=", "cv2", ".", "calcOpticalFlowPyrLK", "(", "img1", ",", "img2", ",", "initial_points", ",", "np", ".", "array", "(", "[", "]", ")", ")", "# Filter out valid points only", "points", "=", "_points", "[", "np", ".", "nonzero", "(", "status", ")", "]", "initial_points", "=", "initial_points", "[", "np", ".", "nonzero", "(", "status", ")", "]", "return", "(", "points", ",", "initial_points", ")" ]
Track points between two images Parameters ----------------- img1 : (M, N) ndarray First image img2 : (M, N) ndarray Second image initial_points : ndarray Initial points. If empty, initial points will be calculated from img1 using goodFeaturesToTrack in OpenCV gftt_params : dict Keyword arguments for goodFeaturesToTrack Returns ----------------- points : ndarray Tracked points initial_points : ndarray Initial points used
[ "Track", "points", "between", "two", "images", "Parameters", "-----------------", "img1", ":", "(", "M", "N", ")", "ndarray", "First", "image", "img2", ":", "(", "M", "N", ")", "ndarray", "Second", "image", "initial_points", ":", "ndarray", "Initial", "points", ".", "If", "empty", "initial", "points", "will", "be", "calculated", "from", "img1", "using", "goodFeaturesToTrack", "in", "OpenCV", "gftt_params", ":", "dict", "Keyword", "arguments", "for", "goodFeaturesToTrack", "Returns", "-----------------", "points", ":", "ndarray", "Tracked", "points", "initial_points", ":", "ndarray", "Initial", "points", "used" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/tracking.py#L32-L67
hovren/crisp
crisp/tracking.py
optical_flow_magnitude
def optical_flow_magnitude(image_sequence, max_diff=60, gftt_options={}): """Return optical flow magnitude for the given image sequence The flow magnitude is the mean value of the total (sparse) optical flow between two images. Crude outlier detection using the max_diff parameter is used. Parameters ---------------- image_sequence : sequence Sequence of image data (ndarrays) to calculate flow magnitude from max_diff : float Distance threshold for outlier rejection gftt_options : dict Keyword arguments to the OpenCV goodFeaturesToTrack function Returns ---------------- flow : ndarray The optical flow magnitude """ flow = [] prev_img = None for img in image_sequence: if img.ndim == 3 and img.shape[2] == 3: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if prev_img is None: prev_img = img continue (next_points, prev_points) = track_points(prev_img, img, gftt_params=gftt_options) distance = np.sqrt(np.sum((next_points - prev_points)**2, 1)) distance2 = distance[np.nonzero(distance < max_diff)] # Crude outlier rejection dm = np.mean(distance2) if np.isnan(dm): dm = 0 flow.append(dm) prev_img = img return np.array(flow)
python
def optical_flow_magnitude(image_sequence, max_diff=60, gftt_options={}): """Return optical flow magnitude for the given image sequence The flow magnitude is the mean value of the total (sparse) optical flow between two images. Crude outlier detection using the max_diff parameter is used. Parameters ---------------- image_sequence : sequence Sequence of image data (ndarrays) to calculate flow magnitude from max_diff : float Distance threshold for outlier rejection gftt_options : dict Keyword arguments to the OpenCV goodFeaturesToTrack function Returns ---------------- flow : ndarray The optical flow magnitude """ flow = [] prev_img = None for img in image_sequence: if img.ndim == 3 and img.shape[2] == 3: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if prev_img is None: prev_img = img continue (next_points, prev_points) = track_points(prev_img, img, gftt_params=gftt_options) distance = np.sqrt(np.sum((next_points - prev_points)**2, 1)) distance2 = distance[np.nonzero(distance < max_diff)] # Crude outlier rejection dm = np.mean(distance2) if np.isnan(dm): dm = 0 flow.append(dm) prev_img = img return np.array(flow)
[ "def", "optical_flow_magnitude", "(", "image_sequence", ",", "max_diff", "=", "60", ",", "gftt_options", "=", "{", "}", ")", ":", "flow", "=", "[", "]", "prev_img", "=", "None", "for", "img", "in", "image_sequence", ":", "if", "img", ".", "ndim", "==", "3", "and", "img", ".", "shape", "[", "2", "]", "==", "3", ":", "img", "=", "cv2", ".", "cvtColor", "(", "img", ",", "cv2", ".", "COLOR_BGR2GRAY", ")", "if", "prev_img", "is", "None", ":", "prev_img", "=", "img", "continue", "(", "next_points", ",", "prev_points", ")", "=", "track_points", "(", "prev_img", ",", "img", ",", "gftt_params", "=", "gftt_options", ")", "distance", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "(", "next_points", "-", "prev_points", ")", "**", "2", ",", "1", ")", ")", "distance2", "=", "distance", "[", "np", ".", "nonzero", "(", "distance", "<", "max_diff", ")", "]", "# Crude outlier rejection", "dm", "=", "np", ".", "mean", "(", "distance2", ")", "if", "np", ".", "isnan", "(", "dm", ")", ":", "dm", "=", "0", "flow", ".", "append", "(", "dm", ")", "prev_img", "=", "img", "return", "np", ".", "array", "(", "flow", ")" ]
Return optical flow magnitude for the given image sequence The flow magnitude is the mean value of the total (sparse) optical flow between two images. Crude outlier detection using the max_diff parameter is used. Parameters ---------------- image_sequence : sequence Sequence of image data (ndarrays) to calculate flow magnitude from max_diff : float Distance threshold for outlier rejection gftt_options : dict Keyword arguments to the OpenCV goodFeaturesToTrack function Returns ---------------- flow : ndarray The optical flow magnitude
[ "Return", "optical", "flow", "magnitude", "for", "the", "given", "image", "sequence", "The", "flow", "magnitude", "is", "the", "mean", "value", "of", "the", "total", "(", "sparse", ")", "optical", "flow", "between", "two", "images", ".", "Crude", "outlier", "detection", "using", "the", "max_diff", "parameter", "is", "used", ".", "Parameters", "----------------", "image_sequence", ":", "sequence", "Sequence", "of", "image", "data", "(", "ndarrays", ")", "to", "calculate", "flow", "magnitude", "from", "max_diff", ":", "float", "Distance", "threshold", "for", "outlier", "rejection", "gftt_options", ":", "dict", "Keyword", "arguments", "to", "the", "OpenCV", "goodFeaturesToTrack", "function", "Returns", "----------------", "flow", ":", "ndarray", "The", "optical", "flow", "magnitude" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/tracking.py#L71-L110
hovren/crisp
crisp/tracking.py
track
def track(image_list, initial_points, remove_bad=True): """Track points in image list Parameters ---------------- image_list : list List of images to track in initial_points : ndarray Initial points to use (in first image in image_list) remove_bad : bool If True, then the resulting list of tracks will only contain succesfully tracked points. Else, it will contain all points present in initial_points. Returns ----------------- tracks : (N, M, 2) ndarray N tracks over M images with (x,y) coordinates of points status : (N,) ndarray The status of each track. 1 means ok, while 0 means tracking failure """ # Precreate track array tracks = np.zeros((initial_points.shape[0], len(image_list), 2), dtype='float32') # NxMx2 tracks[:,0,:] = np.reshape(np.array(initial_points), [-1,2]) track_status = np.ones([np.size(initial_points,0),1]) # All initial points are OK empty = np.array([]) window_size = (5,5) for i in range(1, len(image_list)): img1 = image_list[i-1] img2 = image_list[i] prev_ok_track = np.flatnonzero(track_status) prev_points = tracks[prev_ok_track,i-1,:] [points, status, err] = cv2.calcOpticalFlowPyrLK(img1, img2, prev_points, empty, empty, empty, window_size) if status is None: track_status[:] = 0 # All tracks are bad break valid_set = np.flatnonzero(status) now_ok_tracks = prev_ok_track[valid_set] # Remap tracks[now_ok_tracks,i,:] = points[valid_set] track_status[prev_ok_track] = status if remove_bad: final_ok = np.flatnonzero(track_status) tracks = tracks[final_ok] # Only rows/tracks with nonzero status track_status = track_status[final_ok] return (tracks, track_status)
python
def track(image_list, initial_points, remove_bad=True): """Track points in image list Parameters ---------------- image_list : list List of images to track in initial_points : ndarray Initial points to use (in first image in image_list) remove_bad : bool If True, then the resulting list of tracks will only contain succesfully tracked points. Else, it will contain all points present in initial_points. Returns ----------------- tracks : (N, M, 2) ndarray N tracks over M images with (x,y) coordinates of points status : (N,) ndarray The status of each track. 1 means ok, while 0 means tracking failure """ # Precreate track array tracks = np.zeros((initial_points.shape[0], len(image_list), 2), dtype='float32') # NxMx2 tracks[:,0,:] = np.reshape(np.array(initial_points), [-1,2]) track_status = np.ones([np.size(initial_points,0),1]) # All initial points are OK empty = np.array([]) window_size = (5,5) for i in range(1, len(image_list)): img1 = image_list[i-1] img2 = image_list[i] prev_ok_track = np.flatnonzero(track_status) prev_points = tracks[prev_ok_track,i-1,:] [points, status, err] = cv2.calcOpticalFlowPyrLK(img1, img2, prev_points, empty, empty, empty, window_size) if status is None: track_status[:] = 0 # All tracks are bad break valid_set = np.flatnonzero(status) now_ok_tracks = prev_ok_track[valid_set] # Remap tracks[now_ok_tracks,i,:] = points[valid_set] track_status[prev_ok_track] = status if remove_bad: final_ok = np.flatnonzero(track_status) tracks = tracks[final_ok] # Only rows/tracks with nonzero status track_status = track_status[final_ok] return (tracks, track_status)
[ "def", "track", "(", "image_list", ",", "initial_points", ",", "remove_bad", "=", "True", ")", ":", "# Precreate track array", "tracks", "=", "np", ".", "zeros", "(", "(", "initial_points", ".", "shape", "[", "0", "]", ",", "len", "(", "image_list", ")", ",", "2", ")", ",", "dtype", "=", "'float32'", ")", "# NxMx2", "tracks", "[", ":", ",", "0", ",", ":", "]", "=", "np", ".", "reshape", "(", "np", ".", "array", "(", "initial_points", ")", ",", "[", "-", "1", ",", "2", "]", ")", "track_status", "=", "np", ".", "ones", "(", "[", "np", ".", "size", "(", "initial_points", ",", "0", ")", ",", "1", "]", ")", "# All initial points are OK", "empty", "=", "np", ".", "array", "(", "[", "]", ")", "window_size", "=", "(", "5", ",", "5", ")", "for", "i", "in", "range", "(", "1", ",", "len", "(", "image_list", ")", ")", ":", "img1", "=", "image_list", "[", "i", "-", "1", "]", "img2", "=", "image_list", "[", "i", "]", "prev_ok_track", "=", "np", ".", "flatnonzero", "(", "track_status", ")", "prev_points", "=", "tracks", "[", "prev_ok_track", ",", "i", "-", "1", ",", ":", "]", "[", "points", ",", "status", ",", "err", "]", "=", "cv2", ".", "calcOpticalFlowPyrLK", "(", "img1", ",", "img2", ",", "prev_points", ",", "empty", ",", "empty", ",", "empty", ",", "window_size", ")", "if", "status", "is", "None", ":", "track_status", "[", ":", "]", "=", "0", "# All tracks are bad", "break", "valid_set", "=", "np", ".", "flatnonzero", "(", "status", ")", "now_ok_tracks", "=", "prev_ok_track", "[", "valid_set", "]", "# Remap", "tracks", "[", "now_ok_tracks", ",", "i", ",", ":", "]", "=", "points", "[", "valid_set", "]", "track_status", "[", "prev_ok_track", "]", "=", "status", "if", "remove_bad", ":", "final_ok", "=", "np", ".", "flatnonzero", "(", "track_status", ")", "tracks", "=", "tracks", "[", "final_ok", "]", "# Only rows/tracks with nonzero status", "track_status", "=", "track_status", "[", "final_ok", "]", "return", "(", "tracks", ",", "track_status", ")" ]
Track points in image list Parameters ---------------- image_list : list List of images to track in initial_points : ndarray Initial points to use (in first image in image_list) remove_bad : bool If True, then the resulting list of tracks will only contain succesfully tracked points. Else, it will contain all points present in initial_points. Returns ----------------- tracks : (N, M, 2) ndarray N tracks over M images with (x,y) coordinates of points status : (N,) ndarray The status of each track. 1 means ok, while 0 means tracking failure
[ "Track", "points", "in", "image", "list", "Parameters", "----------------", "image_list", ":", "list", "List", "of", "images", "to", "track", "in", "initial_points", ":", "ndarray", "Initial", "points", "to", "use", "(", "in", "first", "image", "in", "image_list", ")", "remove_bad", ":", "bool", "If", "True", "then", "the", "resulting", "list", "of", "tracks", "will", "only", "contain", "succesfully", "tracked", "points", ".", "Else", "it", "will", "contain", "all", "points", "present", "in", "initial_points", ".", "Returns", "-----------------", "tracks", ":", "(", "N", "M", "2", ")", "ndarray", "N", "tracks", "over", "M", "images", "with", "(", "x", "y", ")", "coordinates", "of", "points", "status", ":", "(", "N", ")", "ndarray", "The", "status", "of", "each", "track", ".", "1", "means", "ok", "while", "0", "means", "tracking", "failure" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/tracking.py#L114-L159
hovren/crisp
crisp/tracking.py
track_retrack
def track_retrack(image_list, initial_points, max_retrack_distance=0.5, keep_bad=False): """Track-retracks points in image list Using track-retrack can help in only getting point tracks of high quality. The point is tracked forward, and then backwards in the image sequence. Points that end up further than max_retrack_distance from its starting point are marked as bad. Parameters ---------------- image_list : list List of images to track in initial_points : ndarray Initial points to use (in first image in image_list) max_retrack_distance : float The maximum distance of the retracked point from its starting point to still count as a succesful retrack. remove_bad : bool If True, then the resulting list of tracks will only contain succesfully tracked points. Else, it will contain all points present in initial_points. Returns ----------------- tracks : (N, M, 2) ndarray N tracks over M images with (x,y) coordinates of points Note that M is the number of image in the input, and is the track in the forward tracking step. status : (N,) ndarray The status of each track. 1 means ok, while 0 means tracking failure """ (forward_track, forward_status) = track(image_list, initial_points, remove_bad=False) # Reverse the order (backward_track, backward_status) = track(image_list[::-1], forward_track[:,-1,:], remove_bad=False) # Prune bad tracks ok_track = np.flatnonzero(forward_status * backward_status) # Only good if good in both forward_first = forward_track[ok_track,0,:] backward_last = backward_track[ok_track,-1,:] # Distance retrack_distance = np.sqrt(np.sum((forward_first - backward_last)**2, 1)) # Allowed retracked_ok = np.flatnonzero(retrack_distance <= max_retrack_distance) final_ok = ok_track[retracked_ok] if keep_bad: # Let caller check status status = np.zeros(forward_status.shape) status[final_ok] = 1 return (forward_track, status) else: # Remove tracks with faulty retrack return (forward_track[final_ok], forward_status[final_ok])
python
def track_retrack(image_list, initial_points, max_retrack_distance=0.5, keep_bad=False): """Track-retracks points in image list Using track-retrack can help in only getting point tracks of high quality. The point is tracked forward, and then backwards in the image sequence. Points that end up further than max_retrack_distance from its starting point are marked as bad. Parameters ---------------- image_list : list List of images to track in initial_points : ndarray Initial points to use (in first image in image_list) max_retrack_distance : float The maximum distance of the retracked point from its starting point to still count as a succesful retrack. remove_bad : bool If True, then the resulting list of tracks will only contain succesfully tracked points. Else, it will contain all points present in initial_points. Returns ----------------- tracks : (N, M, 2) ndarray N tracks over M images with (x,y) coordinates of points Note that M is the number of image in the input, and is the track in the forward tracking step. status : (N,) ndarray The status of each track. 1 means ok, while 0 means tracking failure """ (forward_track, forward_status) = track(image_list, initial_points, remove_bad=False) # Reverse the order (backward_track, backward_status) = track(image_list[::-1], forward_track[:,-1,:], remove_bad=False) # Prune bad tracks ok_track = np.flatnonzero(forward_status * backward_status) # Only good if good in both forward_first = forward_track[ok_track,0,:] backward_last = backward_track[ok_track,-1,:] # Distance retrack_distance = np.sqrt(np.sum((forward_first - backward_last)**2, 1)) # Allowed retracked_ok = np.flatnonzero(retrack_distance <= max_retrack_distance) final_ok = ok_track[retracked_ok] if keep_bad: # Let caller check status status = np.zeros(forward_status.shape) status[final_ok] = 1 return (forward_track, status) else: # Remove tracks with faulty retrack return (forward_track[final_ok], forward_status[final_ok])
[ "def", "track_retrack", "(", "image_list", ",", "initial_points", ",", "max_retrack_distance", "=", "0.5", ",", "keep_bad", "=", "False", ")", ":", "(", "forward_track", ",", "forward_status", ")", "=", "track", "(", "image_list", ",", "initial_points", ",", "remove_bad", "=", "False", ")", "# Reverse the order", "(", "backward_track", ",", "backward_status", ")", "=", "track", "(", "image_list", "[", ":", ":", "-", "1", "]", ",", "forward_track", "[", ":", ",", "-", "1", ",", ":", "]", ",", "remove_bad", "=", "False", ")", "# Prune bad tracks", "ok_track", "=", "np", ".", "flatnonzero", "(", "forward_status", "*", "backward_status", ")", "# Only good if good in both", "forward_first", "=", "forward_track", "[", "ok_track", ",", "0", ",", ":", "]", "backward_last", "=", "backward_track", "[", "ok_track", ",", "-", "1", ",", ":", "]", "# Distance", "retrack_distance", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "(", "forward_first", "-", "backward_last", ")", "**", "2", ",", "1", ")", ")", "# Allowed", "retracked_ok", "=", "np", ".", "flatnonzero", "(", "retrack_distance", "<=", "max_retrack_distance", ")", "final_ok", "=", "ok_track", "[", "retracked_ok", "]", "if", "keep_bad", ":", "# Let caller check status", "status", "=", "np", ".", "zeros", "(", "forward_status", ".", "shape", ")", "status", "[", "final_ok", "]", "=", "1", "return", "(", "forward_track", ",", "status", ")", "else", ":", "# Remove tracks with faulty retrack", "return", "(", "forward_track", "[", "final_ok", "]", ",", "forward_status", "[", "final_ok", "]", ")" ]
Track-retracks points in image list Using track-retrack can help in only getting point tracks of high quality. The point is tracked forward, and then backwards in the image sequence. Points that end up further than max_retrack_distance from its starting point are marked as bad. Parameters ---------------- image_list : list List of images to track in initial_points : ndarray Initial points to use (in first image in image_list) max_retrack_distance : float The maximum distance of the retracked point from its starting point to still count as a succesful retrack. remove_bad : bool If True, then the resulting list of tracks will only contain succesfully tracked points. Else, it will contain all points present in initial_points. Returns ----------------- tracks : (N, M, 2) ndarray N tracks over M images with (x,y) coordinates of points Note that M is the number of image in the input, and is the track in the forward tracking step. status : (N,) ndarray The status of each track. 1 means ok, while 0 means tracking failure
[ "Track", "-", "retracks", "points", "in", "image", "list", "Using", "track", "-", "retrack", "can", "help", "in", "only", "getting", "point", "tracks", "of", "high", "quality", ".", "The", "point", "is", "tracked", "forward", "and", "then", "backwards", "in", "the", "image", "sequence", ".", "Points", "that", "end", "up", "further", "than", "max_retrack_distance", "from", "its", "starting", "point", "are", "marked", "as", "bad", ".", "Parameters", "----------------", "image_list", ":", "list", "List", "of", "images", "to", "track", "in", "initial_points", ":", "ndarray", "Initial", "points", "to", "use", "(", "in", "first", "image", "in", "image_list", ")", "max_retrack_distance", ":", "float", "The", "maximum", "distance", "of", "the", "retracked", "point", "from", "its", "starting", "point", "to", "still", "count", "as", "a", "succesful", "retrack", ".", "remove_bad", ":", "bool", "If", "True", "then", "the", "resulting", "list", "of", "tracks", "will", "only", "contain", "succesfully", "tracked", "points", ".", "Else", "it", "will", "contain", "all", "points", "present", "in", "initial_points", ".", "Returns", "-----------------", "tracks", ":", "(", "N", "M", "2", ")", "ndarray", "N", "tracks", "over", "M", "images", "with", "(", "x", "y", ")", "coordinates", "of", "points", "Note", "that", "M", "is", "the", "number", "of", "image", "in", "the", "input", "and", "is", "the", "track", "in", "the", "forward", "tracking", "step", ".", "status", ":", "(", "N", ")", "ndarray", "The", "status", "of", "each", "track", ".", "1", "means", "ok", "while", "0", "means", "tracking", "failure" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/tracking.py#L163-L215
hovren/crisp
crisp/imu.py
IMU.from_mat_file
def from_mat_file(cls, matfilename): """Load gyro data from .mat file The MAT file should contain the following two arrays gyro : (3, N) float ndarray The angular velocity measurements. timestamps : (N, ) float ndarray Timestamps of the measurements. Parameters --------------- matfilename : string Name of the .mat file Returns ---------------- A new IMU class instance """ M = scipy.io.loadmat(matfilename) instance = cls() instance.gyro_data = M['gyro'] instance.timestamps = M['timestamps'] return instance
python
def from_mat_file(cls, matfilename): """Load gyro data from .mat file The MAT file should contain the following two arrays gyro : (3, N) float ndarray The angular velocity measurements. timestamps : (N, ) float ndarray Timestamps of the measurements. Parameters --------------- matfilename : string Name of the .mat file Returns ---------------- A new IMU class instance """ M = scipy.io.loadmat(matfilename) instance = cls() instance.gyro_data = M['gyro'] instance.timestamps = M['timestamps'] return instance
[ "def", "from_mat_file", "(", "cls", ",", "matfilename", ")", ":", "M", "=", "scipy", ".", "io", ".", "loadmat", "(", "matfilename", ")", "instance", "=", "cls", "(", ")", "instance", ".", "gyro_data", "=", "M", "[", "'gyro'", "]", "instance", ".", "timestamps", "=", "M", "[", "'timestamps'", "]", "return", "instance" ]
Load gyro data from .mat file The MAT file should contain the following two arrays gyro : (3, N) float ndarray The angular velocity measurements. timestamps : (N, ) float ndarray Timestamps of the measurements. Parameters --------------- matfilename : string Name of the .mat file Returns ---------------- A new IMU class instance
[ "Load", "gyro", "data", "from", ".", "mat", "file", "The", "MAT", "file", "should", "contain", "the", "following", "two", "arrays", "gyro", ":", "(", "3", "N", ")", "float", "ndarray", "The", "angular", "velocity", "measurements", ".", "timestamps", ":", "(", "N", ")", "float", "ndarray", "Timestamps", "of", "the", "measurements", ".", "Parameters", "---------------", "matfilename", ":", "string", "Name", "of", "the", ".", "mat", "file", "Returns", "----------------", "A", "new", "IMU", "class", "instance" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/imu.py#L39-L62
hovren/crisp
crisp/imu.py
IMU.rate
def rate(self): """Get the sample rate in Hz. Returns --------- rate : float The sample rate, in Hz, calculated from the timestamps """ N = len(self.timestamps) t = self.timestamps[-1] - self.timestamps[0] rate = 1.0 * N / t return rate
python
def rate(self): """Get the sample rate in Hz. Returns --------- rate : float The sample rate, in Hz, calculated from the timestamps """ N = len(self.timestamps) t = self.timestamps[-1] - self.timestamps[0] rate = 1.0 * N / t return rate
[ "def", "rate", "(", "self", ")", ":", "N", "=", "len", "(", "self", ".", "timestamps", ")", "t", "=", "self", ".", "timestamps", "[", "-", "1", "]", "-", "self", ".", "timestamps", "[", "0", "]", "rate", "=", "1.0", "*", "N", "/", "t", "return", "rate" ]
Get the sample rate in Hz. Returns --------- rate : float The sample rate, in Hz, calculated from the timestamps
[ "Get", "the", "sample", "rate", "in", "Hz", ".", "Returns", "---------", "rate", ":", "float", "The", "sample", "rate", "in", "Hz", "calculated", "from", "the", "timestamps" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/imu.py#L66-L77
hovren/crisp
crisp/imu.py
IMU.zero_level_calibrate
def zero_level_calibrate(self, duration, t0=0.0): """Performs zero-level calibration from the chosen time interval. This changes the previously lodaded data in-place. Parameters -------------------- duration : float Number of timeunits to use for calibration t0 : float Starting time for calibration Returns ---------------------- gyro_data : (3, N) float ndarray The calibrated data (note that it is also changed in-place!) """ t1 = t0 + duration indices = np.flatnonzero((self.timestamps >= t0) & (self.timestamps <= t1)) m = np.mean(self.gyro_data[:, indices], axis=1) self.gyro_data -= m.reshape(3,1) return self.gyro_data
python
def zero_level_calibrate(self, duration, t0=0.0): """Performs zero-level calibration from the chosen time interval. This changes the previously lodaded data in-place. Parameters -------------------- duration : float Number of timeunits to use for calibration t0 : float Starting time for calibration Returns ---------------------- gyro_data : (3, N) float ndarray The calibrated data (note that it is also changed in-place!) """ t1 = t0 + duration indices = np.flatnonzero((self.timestamps >= t0) & (self.timestamps <= t1)) m = np.mean(self.gyro_data[:, indices], axis=1) self.gyro_data -= m.reshape(3,1) return self.gyro_data
[ "def", "zero_level_calibrate", "(", "self", ",", "duration", ",", "t0", "=", "0.0", ")", ":", "t1", "=", "t0", "+", "duration", "indices", "=", "np", ".", "flatnonzero", "(", "(", "self", ".", "timestamps", ">=", "t0", ")", "&", "(", "self", ".", "timestamps", "<=", "t1", ")", ")", "m", "=", "np", ".", "mean", "(", "self", ".", "gyro_data", "[", ":", ",", "indices", "]", ",", "axis", "=", "1", ")", "self", ".", "gyro_data", "-=", "m", ".", "reshape", "(", "3", ",", "1", ")", "return", "self", ".", "gyro_data" ]
Performs zero-level calibration from the chosen time interval. This changes the previously lodaded data in-place. Parameters -------------------- duration : float Number of timeunits to use for calibration t0 : float Starting time for calibration Returns ---------------------- gyro_data : (3, N) float ndarray The calibrated data (note that it is also changed in-place!)
[ "Performs", "zero", "-", "level", "calibration", "from", "the", "chosen", "time", "interval", ".", "This", "changes", "the", "previously", "lodaded", "data", "in", "-", "place", ".", "Parameters", "--------------------", "duration", ":", "float", "Number", "of", "timeunits", "to", "use", "for", "calibration", "t0", ":", "float", "Starting", "time", "for", "calibration", "Returns", "----------------------", "gyro_data", ":", "(", "3", "N", ")", "float", "ndarray", "The", "calibrated", "data", "(", "note", "that", "it", "is", "also", "changed", "in", "-", "place!", ")" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/imu.py#L79-L102
hovren/crisp
crisp/imu.py
IMU.integrate
def integrate(self, pose_correction=np.eye(3), uniform=True): """Integrate angular velocity measurements to rotations. Parameters ------------- pose_correction : (3,3) ndarray, optional Rotation matrix that describes the relative pose between the IMU and something else (e.g. camera). uniform : bool If True (default), assume uniform sample rate. This will use a faster integration method. Returns ------------- rotations : (4, N) ndarray Rotations as unit quaternions with scalar as first element. """ if uniform: dt = float(self.timestamps[1]-self.timestamps[0]) # Must be python float for fastintegrate to work return fastintegrate.integrate_gyro_quaternion_uniform(self.gyro_data_corrected, dt) else: N = len(self.timestamps) integrated = np.zeros((4, N)) integrated[:,0] = np.array([1, 0, 0, 0]) # Initial rotation (no rotation) # Iterate over all for i in range(1, len(self.timestamps)): w = pose_correction.dot(self.gyro_data[:, i]) # Change to correct coordinate frame dt = float(self.timestamps[i] - self.timestamps[i - 1]) qprev = integrated[:, i - 1].flatten() A = np.array([[0, -w[0], -w[1], -w[2]], [w[0], 0, w[2], -w[1]], [w[1], -w[2], 0, w[0]], [w[2], w[1], -w[0], 0]]) qnew = (np.eye(4) + (dt/2.0) * A).dot(qprev) qnorm = np.sqrt(np.sum(qnew ** 2)) qnew = qnew / qnorm if qnorm > 0 else 0 integrated[:, i] = qnew #print "%d, %s, %s, %s, %s" % (i, w, dt, qprev, qnew) return integrated
python
def integrate(self, pose_correction=np.eye(3), uniform=True): """Integrate angular velocity measurements to rotations. Parameters ------------- pose_correction : (3,3) ndarray, optional Rotation matrix that describes the relative pose between the IMU and something else (e.g. camera). uniform : bool If True (default), assume uniform sample rate. This will use a faster integration method. Returns ------------- rotations : (4, N) ndarray Rotations as unit quaternions with scalar as first element. """ if uniform: dt = float(self.timestamps[1]-self.timestamps[0]) # Must be python float for fastintegrate to work return fastintegrate.integrate_gyro_quaternion_uniform(self.gyro_data_corrected, dt) else: N = len(self.timestamps) integrated = np.zeros((4, N)) integrated[:,0] = np.array([1, 0, 0, 0]) # Initial rotation (no rotation) # Iterate over all for i in range(1, len(self.timestamps)): w = pose_correction.dot(self.gyro_data[:, i]) # Change to correct coordinate frame dt = float(self.timestamps[i] - self.timestamps[i - 1]) qprev = integrated[:, i - 1].flatten() A = np.array([[0, -w[0], -w[1], -w[2]], [w[0], 0, w[2], -w[1]], [w[1], -w[2], 0, w[0]], [w[2], w[1], -w[0], 0]]) qnew = (np.eye(4) + (dt/2.0) * A).dot(qprev) qnorm = np.sqrt(np.sum(qnew ** 2)) qnew = qnew / qnorm if qnorm > 0 else 0 integrated[:, i] = qnew #print "%d, %s, %s, %s, %s" % (i, w, dt, qprev, qnew) return integrated
[ "def", "integrate", "(", "self", ",", "pose_correction", "=", "np", ".", "eye", "(", "3", ")", ",", "uniform", "=", "True", ")", ":", "if", "uniform", ":", "dt", "=", "float", "(", "self", ".", "timestamps", "[", "1", "]", "-", "self", ".", "timestamps", "[", "0", "]", ")", "# Must be python float for fastintegrate to work", "return", "fastintegrate", ".", "integrate_gyro_quaternion_uniform", "(", "self", ".", "gyro_data_corrected", ",", "dt", ")", "else", ":", "N", "=", "len", "(", "self", ".", "timestamps", ")", "integrated", "=", "np", ".", "zeros", "(", "(", "4", ",", "N", ")", ")", "integrated", "[", ":", ",", "0", "]", "=", "np", ".", "array", "(", "[", "1", ",", "0", ",", "0", ",", "0", "]", ")", "# Initial rotation (no rotation)", "# Iterate over all", "for", "i", "in", "range", "(", "1", ",", "len", "(", "self", ".", "timestamps", ")", ")", ":", "w", "=", "pose_correction", ".", "dot", "(", "self", ".", "gyro_data", "[", ":", ",", "i", "]", ")", "# Change to correct coordinate frame", "dt", "=", "float", "(", "self", ".", "timestamps", "[", "i", "]", "-", "self", ".", "timestamps", "[", "i", "-", "1", "]", ")", "qprev", "=", "integrated", "[", ":", ",", "i", "-", "1", "]", ".", "flatten", "(", ")", "A", "=", "np", ".", "array", "(", "[", "[", "0", ",", "-", "w", "[", "0", "]", ",", "-", "w", "[", "1", "]", ",", "-", "w", "[", "2", "]", "]", ",", "[", "w", "[", "0", "]", ",", "0", ",", "w", "[", "2", "]", ",", "-", "w", "[", "1", "]", "]", ",", "[", "w", "[", "1", "]", ",", "-", "w", "[", "2", "]", ",", "0", ",", "w", "[", "0", "]", "]", ",", "[", "w", "[", "2", "]", ",", "w", "[", "1", "]", ",", "-", "w", "[", "0", "]", ",", "0", "]", "]", ")", "qnew", "=", "(", "np", ".", "eye", "(", "4", ")", "+", "(", "dt", "/", "2.0", ")", "*", "A", ")", ".", "dot", "(", "qprev", ")", "qnorm", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "qnew", "**", "2", ")", ")", "qnew", "=", "qnew", "/", "qnorm", "if", "qnorm", ">", "0", "else", "0", "integrated", "[", 
":", ",", "i", "]", "=", "qnew", "#print \"%d, %s, %s, %s, %s\" % (i, w, dt, qprev, qnew)", "return", "integrated" ]
Integrate angular velocity measurements to rotations. Parameters ------------- pose_correction : (3,3) ndarray, optional Rotation matrix that describes the relative pose between the IMU and something else (e.g. camera). uniform : bool If True (default), assume uniform sample rate. This will use a faster integration method. Returns ------------- rotations : (4, N) ndarray Rotations as unit quaternions with scalar as first element.
[ "Integrate", "angular", "velocity", "measurements", "to", "rotations", "." ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/imu.py#L119-L157
hovren/crisp
crisp/imu.py
IMU.rotation_at_time
def rotation_at_time(t, timestamps, rotation_sequence): """Get the gyro rotation at time t using SLERP. Parameters ----------- t : float The query timestamp. timestamps : array_like float List of all timestamps rotation_sequence : (4, N) ndarray Rotation sequence as unit quaternions with scalar part as first element. Returns ----------- q : (4,) ndarray Unit quaternion representing the rotation at time t. """ idx = np.flatnonzero(timestamps >= (t - 0.0001))[0] t0 = timestamps[idx - 1] t1 = timestamps[idx] tau = (t - t0) / (t1 - t0) q1 = rotation_sequence[:, idx - 1] q2 = rotation_sequence[:, idx] q = rotations.slerp(q1, q2, tau) return q
python
def rotation_at_time(t, timestamps, rotation_sequence): """Get the gyro rotation at time t using SLERP. Parameters ----------- t : float The query timestamp. timestamps : array_like float List of all timestamps rotation_sequence : (4, N) ndarray Rotation sequence as unit quaternions with scalar part as first element. Returns ----------- q : (4,) ndarray Unit quaternion representing the rotation at time t. """ idx = np.flatnonzero(timestamps >= (t - 0.0001))[0] t0 = timestamps[idx - 1] t1 = timestamps[idx] tau = (t - t0) / (t1 - t0) q1 = rotation_sequence[:, idx - 1] q2 = rotation_sequence[:, idx] q = rotations.slerp(q1, q2, tau) return q
[ "def", "rotation_at_time", "(", "t", ",", "timestamps", ",", "rotation_sequence", ")", ":", "idx", "=", "np", ".", "flatnonzero", "(", "timestamps", ">=", "(", "t", "-", "0.0001", ")", ")", "[", "0", "]", "t0", "=", "timestamps", "[", "idx", "-", "1", "]", "t1", "=", "timestamps", "[", "idx", "]", "tau", "=", "(", "t", "-", "t0", ")", "/", "(", "t1", "-", "t0", ")", "q1", "=", "rotation_sequence", "[", ":", ",", "idx", "-", "1", "]", "q2", "=", "rotation_sequence", "[", ":", ",", "idx", "]", "q", "=", "rotations", ".", "slerp", "(", "q1", ",", "q2", ",", "tau", ")", "return", "q" ]
Get the gyro rotation at time t using SLERP. Parameters ----------- t : float The query timestamp. timestamps : array_like float List of all timestamps rotation_sequence : (4, N) ndarray Rotation sequence as unit quaternions with scalar part as first element. Returns ----------- q : (4,) ndarray Unit quaternion representing the rotation at time t.
[ "Get", "the", "gyro", "rotation", "at", "time", "t", "using", "SLERP", ".", "Parameters", "-----------", "t", ":", "float", "The", "query", "timestamp", ".", "timestamps", ":", "array_like", "float", "List", "of", "all", "timestamps", "rotation_sequence", ":", "(", "4", "N", ")", "ndarray", "Rotation", "sequence", "as", "unit", "quaternions", "with", "scalar", "part", "as", "first", "element", ".", "Returns", "-----------", "q", ":", "(", "4", ")", "ndarray", "Unit", "quaternion", "representing", "the", "rotation", "at", "time", "t", "." ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/imu.py#L160-L185
hovren/crisp
crisp/stream.py
GyroStream.from_csv
def from_csv(cls, filename): """Create gyro stream from CSV data Load data from a CSV file. The data must be formatted with three values per line: (x, y, z) where x, y, z is the measured angular velocity (in radians) of the specified axis. Parameters ------------------- filename : str Path to the CSV file Returns --------------------- GyroStream A gyroscope stream """ instance = cls() instance.data = np.loadtxt(filename, delimiter=',') return instance
python
def from_csv(cls, filename): """Create gyro stream from CSV data Load data from a CSV file. The data must be formatted with three values per line: (x, y, z) where x, y, z is the measured angular velocity (in radians) of the specified axis. Parameters ------------------- filename : str Path to the CSV file Returns --------------------- GyroStream A gyroscope stream """ instance = cls() instance.data = np.loadtxt(filename, delimiter=',') return instance
[ "def", "from_csv", "(", "cls", ",", "filename", ")", ":", "instance", "=", "cls", "(", ")", "instance", ".", "data", "=", "np", ".", "loadtxt", "(", "filename", ",", "delimiter", "=", "','", ")", "return", "instance" ]
Create gyro stream from CSV data Load data from a CSV file. The data must be formatted with three values per line: (x, y, z) where x, y, z is the measured angular velocity (in radians) of the specified axis. Parameters ------------------- filename : str Path to the CSV file Returns --------------------- GyroStream A gyroscope stream
[ "Create", "gyro", "stream", "from", "CSV", "data" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/stream.py#L36-L55
hovren/crisp
crisp/stream.py
GyroStream.from_data
def from_data(cls, data): """Create gyroscope stream from data array Parameters ------------------- data : (N, 3) ndarray Data array of angular velocities (rad/s) Returns ------------------- GyroStream Stream object """ if not data.shape[1] == 3: raise ValueError("Gyroscope data must have shape (N, 3)") instance = cls() instance.data = data return instance
python
def from_data(cls, data): """Create gyroscope stream from data array Parameters ------------------- data : (N, 3) ndarray Data array of angular velocities (rad/s) Returns ------------------- GyroStream Stream object """ if not data.shape[1] == 3: raise ValueError("Gyroscope data must have shape (N, 3)") instance = cls() instance.data = data return instance
[ "def", "from_data", "(", "cls", ",", "data", ")", ":", "if", "not", "data", ".", "shape", "[", "1", "]", "==", "3", ":", "raise", "ValueError", "(", "\"Gyroscope data must have shape (N, 3)\"", ")", "instance", "=", "cls", "(", ")", "instance", ".", "data", "=", "data", "return", "instance" ]
Create gyroscope stream from data array Parameters ------------------- data : (N, 3) ndarray Data array of angular velocities (rad/s) Returns ------------------- GyroStream Stream object
[ "Create", "gyroscope", "stream", "from", "data", "array" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/stream.py#L58-L76
hovren/crisp
crisp/stream.py
GyroStream.integrate
def integrate(self, dt): """Integrate gyro measurements to orientation using a uniform sample rate. Parameters ------------------- dt : float Sample distance in seconds Returns ---------------- orientation : (4, N) ndarray Gyroscope orientation in quaternion form (s, q1, q2, q3) """ if not dt == self.__last_dt: self.__last_q = fastintegrate.integrate_gyro_quaternion_uniform(self.data, dt) self.__last_dt = dt return self.__last_q
python
def integrate(self, dt): """Integrate gyro measurements to orientation using a uniform sample rate. Parameters ------------------- dt : float Sample distance in seconds Returns ---------------- orientation : (4, N) ndarray Gyroscope orientation in quaternion form (s, q1, q2, q3) """ if not dt == self.__last_dt: self.__last_q = fastintegrate.integrate_gyro_quaternion_uniform(self.data, dt) self.__last_dt = dt return self.__last_q
[ "def", "integrate", "(", "self", ",", "dt", ")", ":", "if", "not", "dt", "==", "self", ".", "__last_dt", ":", "self", ".", "__last_q", "=", "fastintegrate", ".", "integrate_gyro_quaternion_uniform", "(", "self", ".", "data", ",", "dt", ")", "self", ".", "__last_dt", "=", "dt", "return", "self", ".", "__last_q" ]
Integrate gyro measurements to orientation using a uniform sample rate. Parameters ------------------- dt : float Sample distance in seconds Returns ---------------- orientation : (4, N) ndarray Gyroscope orientation in quaternion form (s, q1, q2, q3)
[ "Integrate", "gyro", "measurements", "to", "orientation", "using", "a", "uniform", "sample", "rate", "." ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/stream.py#L83-L99
hovren/crisp
crisp/znccpyr.py
gaussian_kernel
def gaussian_kernel(gstd): """Generate odd sized truncated Gaussian The generated filter kernel has a cutoff at $3\sigma$ and is normalized to sum to 1 Parameters ------------- gstd : float Standard deviation of filter Returns ------------- g : ndarray Array with kernel coefficients """ Nc = np.ceil(gstd*3)*2+1 x = np.linspace(-(Nc-1)/2,(Nc-1)/2,Nc,endpoint=True) g = np.exp(-.5*((x/gstd)**2)) g = g/np.sum(g) return g
python
def gaussian_kernel(gstd): """Generate odd sized truncated Gaussian The generated filter kernel has a cutoff at $3\sigma$ and is normalized to sum to 1 Parameters ------------- gstd : float Standard deviation of filter Returns ------------- g : ndarray Array with kernel coefficients """ Nc = np.ceil(gstd*3)*2+1 x = np.linspace(-(Nc-1)/2,(Nc-1)/2,Nc,endpoint=True) g = np.exp(-.5*((x/gstd)**2)) g = g/np.sum(g) return g
[ "def", "gaussian_kernel", "(", "gstd", ")", ":", "Nc", "=", "np", ".", "ceil", "(", "gstd", "*", "3", ")", "*", "2", "+", "1", "x", "=", "np", ".", "linspace", "(", "-", "(", "Nc", "-", "1", ")", "/", "2", ",", "(", "Nc", "-", "1", ")", "/", "2", ",", "Nc", ",", "endpoint", "=", "True", ")", "g", "=", "np", ".", "exp", "(", "-", ".5", "*", "(", "(", "x", "/", "gstd", ")", "**", "2", ")", ")", "g", "=", "g", "/", "np", ".", "sum", "(", "g", ")", "return", "g" ]
Generate odd sized truncated Gaussian The generated filter kernel has a cutoff at $3\sigma$ and is normalized to sum to 1 Parameters ------------- gstd : float Standard deviation of filter Returns ------------- g : ndarray Array with kernel coefficients
[ "Generate", "odd", "sized", "truncated", "Gaussian" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/znccpyr.py#L18-L39
hovren/crisp
crisp/znccpyr.py
subsample
def subsample(time_series, downsample_factor): """Subsample with Gaussian prefilter The prefilter will have the filter size $\sigma_g=.5*ssfactor$ Parameters -------------- time_series : ndarray Input signal downsample_factor : float Downsampling factor Returns -------------- ts_out : ndarray The downsampled signal """ Ns = np.int(np.floor(np.size(time_series)/downsample_factor)) g = gaussian_kernel(0.5*downsample_factor) ts_blur = np.convolve(time_series,g,'same') ts_out = np.zeros((Ns,1), dtype='float64') for k in range(0,Ns): cpos = (k+.5)*downsample_factor-.5 cfrac = cpos-np.floor(cpos) cind = np.floor(cpos) if cfrac>0: ts_out[k]=ts_blur[cind]*(1-cfrac)+ts_blur[cind+1]*cfrac else: ts_out[k]=ts_blur[cind] return ts_out
python
def subsample(time_series, downsample_factor): """Subsample with Gaussian prefilter The prefilter will have the filter size $\sigma_g=.5*ssfactor$ Parameters -------------- time_series : ndarray Input signal downsample_factor : float Downsampling factor Returns -------------- ts_out : ndarray The downsampled signal """ Ns = np.int(np.floor(np.size(time_series)/downsample_factor)) g = gaussian_kernel(0.5*downsample_factor) ts_blur = np.convolve(time_series,g,'same') ts_out = np.zeros((Ns,1), dtype='float64') for k in range(0,Ns): cpos = (k+.5)*downsample_factor-.5 cfrac = cpos-np.floor(cpos) cind = np.floor(cpos) if cfrac>0: ts_out[k]=ts_blur[cind]*(1-cfrac)+ts_blur[cind+1]*cfrac else: ts_out[k]=ts_blur[cind] return ts_out
[ "def", "subsample", "(", "time_series", ",", "downsample_factor", ")", ":", "Ns", "=", "np", ".", "int", "(", "np", ".", "floor", "(", "np", ".", "size", "(", "time_series", ")", "/", "downsample_factor", ")", ")", "g", "=", "gaussian_kernel", "(", "0.5", "*", "downsample_factor", ")", "ts_blur", "=", "np", ".", "convolve", "(", "time_series", ",", "g", ",", "'same'", ")", "ts_out", "=", "np", ".", "zeros", "(", "(", "Ns", ",", "1", ")", ",", "dtype", "=", "'float64'", ")", "for", "k", "in", "range", "(", "0", ",", "Ns", ")", ":", "cpos", "=", "(", "k", "+", ".5", ")", "*", "downsample_factor", "-", ".5", "cfrac", "=", "cpos", "-", "np", ".", "floor", "(", "cpos", ")", "cind", "=", "np", ".", "floor", "(", "cpos", ")", "if", "cfrac", ">", "0", ":", "ts_out", "[", "k", "]", "=", "ts_blur", "[", "cind", "]", "*", "(", "1", "-", "cfrac", ")", "+", "ts_blur", "[", "cind", "+", "1", "]", "*", "cfrac", "else", ":", "ts_out", "[", "k", "]", "=", "ts_blur", "[", "cind", "]", "return", "ts_out" ]
Subsample with Gaussian prefilter The prefilter will have the filter size $\sigma_g=.5*ssfactor$ Parameters -------------- time_series : ndarray Input signal downsample_factor : float Downsampling factor Returns -------------- ts_out : ndarray The downsampled signal
[ "Subsample", "with", "Gaussian", "prefilter" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/znccpyr.py#L41-L71
hovren/crisp
crisp/znccpyr.py
upsample
def upsample(time_series, scaling_factor): """Upsample using linear interpolation The function uses replication of the value at edges Parameters -------------- time_series : ndarray Input signal scaling_factor : float The factor to upsample with Returns -------------- ts_out : ndarray The upsampled signal """ Ns0 = np.size(time_series) Ns = np.int(np.floor(np.size(time_series)*scaling_factor)) ts_out = np.zeros((Ns,1), dtype='float64') for k in range(0,Ns): cpos = int(np.min([Ns0-1,np.max([0.,(k+0.5)/scaling_factor-0.5])])) cfrac = cpos-np.floor(cpos) cind = int(np.floor(cpos)) #print "cpos=%f cfrac=%f cind=%d", (cpos,cfrac,cind) if cfrac>0: ts_out[k]=time_series[cind]*(1-cfrac)+time_series[cind+1]*cfrac else: ts_out[k]=time_series[cind] return ts_out
python
def upsample(time_series, scaling_factor): """Upsample using linear interpolation The function uses replication of the value at edges Parameters -------------- time_series : ndarray Input signal scaling_factor : float The factor to upsample with Returns -------------- ts_out : ndarray The upsampled signal """ Ns0 = np.size(time_series) Ns = np.int(np.floor(np.size(time_series)*scaling_factor)) ts_out = np.zeros((Ns,1), dtype='float64') for k in range(0,Ns): cpos = int(np.min([Ns0-1,np.max([0.,(k+0.5)/scaling_factor-0.5])])) cfrac = cpos-np.floor(cpos) cind = int(np.floor(cpos)) #print "cpos=%f cfrac=%f cind=%d", (cpos,cfrac,cind) if cfrac>0: ts_out[k]=time_series[cind]*(1-cfrac)+time_series[cind+1]*cfrac else: ts_out[k]=time_series[cind] return ts_out
[ "def", "upsample", "(", "time_series", ",", "scaling_factor", ")", ":", "Ns0", "=", "np", ".", "size", "(", "time_series", ")", "Ns", "=", "np", ".", "int", "(", "np", ".", "floor", "(", "np", ".", "size", "(", "time_series", ")", "*", "scaling_factor", ")", ")", "ts_out", "=", "np", ".", "zeros", "(", "(", "Ns", ",", "1", ")", ",", "dtype", "=", "'float64'", ")", "for", "k", "in", "range", "(", "0", ",", "Ns", ")", ":", "cpos", "=", "int", "(", "np", ".", "min", "(", "[", "Ns0", "-", "1", ",", "np", ".", "max", "(", "[", "0.", ",", "(", "k", "+", "0.5", ")", "/", "scaling_factor", "-", "0.5", "]", ")", "]", ")", ")", "cfrac", "=", "cpos", "-", "np", ".", "floor", "(", "cpos", ")", "cind", "=", "int", "(", "np", ".", "floor", "(", "cpos", ")", ")", "#print \"cpos=%f cfrac=%f cind=%d\", (cpos,cfrac,cind)", "if", "cfrac", ">", "0", ":", "ts_out", "[", "k", "]", "=", "time_series", "[", "cind", "]", "*", "(", "1", "-", "cfrac", ")", "+", "time_series", "[", "cind", "+", "1", "]", "*", "cfrac", "else", ":", "ts_out", "[", "k", "]", "=", "time_series", "[", "cind", "]", "return", "ts_out" ]
Upsample using linear interpolation The function uses replication of the value at edges Parameters -------------- time_series : ndarray Input signal scaling_factor : float The factor to upsample with Returns -------------- ts_out : ndarray The upsampled signal
[ "Upsample", "using", "linear", "interpolation" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/znccpyr.py#L73-L103
hovren/crisp
crisp/znccpyr.py
zncc
def zncc(ts1,ts2): """Zero mean normalised cross-correlation (ZNCC) This function does ZNCC of two signals, ts1 and ts2 Normalisation by very small values is avoided by doing max(nmin,nvalue) Parameters -------------- ts1 : ndarray Input signal 1 to be aligned with ts2 : ndarray Input signal 2 Returns -------------- best_shift : float The best shift of *ts1* to align it with *ts2* ts_out : ndarray The correlation result """ # Output is the same size as ts1 Ns1 = np.size(ts1) Ns2 = np.size(ts2) ts_out = np.zeros((Ns1,1), dtype='float64') ishift = int(np.floor(Ns2/2)) # origin of ts2 t1m = np.mean(ts1) t2m = np.mean(ts2) for k in range(0,Ns1): lstart = np.int(ishift-k) if lstart<0 : lstart=0 lend = np.int(ishift-k+Ns2) imax = np.int(np.min([Ns2,Ns1-k+ishift])) if lend>imax : lend=imax csum = 0 ts1sum = 0 ts1sum2 = 0 ts2sum = 0 ts2sum2 = 0 Nterms = lend-lstart for l in range(lstart,lend): csum += ts1[k+l-ishift]*ts2[l] ts1sum += ts1[k+l-ishift] ts1sum2 += ts1[k+l-ishift]*ts1[k+l-ishift] ts2sum += ts2[l] ts2sum2 += ts2[l]*ts2[l] ts1sum2 = np.max([t1m*t1m*100,ts1sum2])-ts1sum*ts1sum/Nterms ts2sum2 = np.max([t2m*t2m*100,ts2sum2])-ts2sum*ts2sum/Nterms #ts_out[k]=csum/np.sqrt(ts1sum2*ts2sum2) ts_out[k]=(csum-2.0*ts1sum*ts2sum/Nterms+ts1sum*ts2sum/Nterms/Nterms)/np.sqrt(ts1sum2*ts2sum2) best_shift = np.argmax(ts_out)-ishift return best_shift, ts_out
python
def zncc(ts1,ts2): """Zero mean normalised cross-correlation (ZNCC) This function does ZNCC of two signals, ts1 and ts2 Normalisation by very small values is avoided by doing max(nmin,nvalue) Parameters -------------- ts1 : ndarray Input signal 1 to be aligned with ts2 : ndarray Input signal 2 Returns -------------- best_shift : float The best shift of *ts1* to align it with *ts2* ts_out : ndarray The correlation result """ # Output is the same size as ts1 Ns1 = np.size(ts1) Ns2 = np.size(ts2) ts_out = np.zeros((Ns1,1), dtype='float64') ishift = int(np.floor(Ns2/2)) # origin of ts2 t1m = np.mean(ts1) t2m = np.mean(ts2) for k in range(0,Ns1): lstart = np.int(ishift-k) if lstart<0 : lstart=0 lend = np.int(ishift-k+Ns2) imax = np.int(np.min([Ns2,Ns1-k+ishift])) if lend>imax : lend=imax csum = 0 ts1sum = 0 ts1sum2 = 0 ts2sum = 0 ts2sum2 = 0 Nterms = lend-lstart for l in range(lstart,lend): csum += ts1[k+l-ishift]*ts2[l] ts1sum += ts1[k+l-ishift] ts1sum2 += ts1[k+l-ishift]*ts1[k+l-ishift] ts2sum += ts2[l] ts2sum2 += ts2[l]*ts2[l] ts1sum2 = np.max([t1m*t1m*100,ts1sum2])-ts1sum*ts1sum/Nterms ts2sum2 = np.max([t2m*t2m*100,ts2sum2])-ts2sum*ts2sum/Nterms #ts_out[k]=csum/np.sqrt(ts1sum2*ts2sum2) ts_out[k]=(csum-2.0*ts1sum*ts2sum/Nterms+ts1sum*ts2sum/Nterms/Nterms)/np.sqrt(ts1sum2*ts2sum2) best_shift = np.argmax(ts_out)-ishift return best_shift, ts_out
[ "def", "zncc", "(", "ts1", ",", "ts2", ")", ":", "# Output is the same size as ts1", "Ns1", "=", "np", ".", "size", "(", "ts1", ")", "Ns2", "=", "np", ".", "size", "(", "ts2", ")", "ts_out", "=", "np", ".", "zeros", "(", "(", "Ns1", ",", "1", ")", ",", "dtype", "=", "'float64'", ")", "ishift", "=", "int", "(", "np", ".", "floor", "(", "Ns2", "/", "2", ")", ")", "# origin of ts2", "t1m", "=", "np", ".", "mean", "(", "ts1", ")", "t2m", "=", "np", ".", "mean", "(", "ts2", ")", "for", "k", "in", "range", "(", "0", ",", "Ns1", ")", ":", "lstart", "=", "np", ".", "int", "(", "ishift", "-", "k", ")", "if", "lstart", "<", "0", ":", "lstart", "=", "0", "lend", "=", "np", ".", "int", "(", "ishift", "-", "k", "+", "Ns2", ")", "imax", "=", "np", ".", "int", "(", "np", ".", "min", "(", "[", "Ns2", ",", "Ns1", "-", "k", "+", "ishift", "]", ")", ")", "if", "lend", ">", "imax", ":", "lend", "=", "imax", "csum", "=", "0", "ts1sum", "=", "0", "ts1sum2", "=", "0", "ts2sum", "=", "0", "ts2sum2", "=", "0", "Nterms", "=", "lend", "-", "lstart", "for", "l", "in", "range", "(", "lstart", ",", "lend", ")", ":", "csum", "+=", "ts1", "[", "k", "+", "l", "-", "ishift", "]", "*", "ts2", "[", "l", "]", "ts1sum", "+=", "ts1", "[", "k", "+", "l", "-", "ishift", "]", "ts1sum2", "+=", "ts1", "[", "k", "+", "l", "-", "ishift", "]", "*", "ts1", "[", "k", "+", "l", "-", "ishift", "]", "ts2sum", "+=", "ts2", "[", "l", "]", "ts2sum2", "+=", "ts2", "[", "l", "]", "*", "ts2", "[", "l", "]", "ts1sum2", "=", "np", ".", "max", "(", "[", "t1m", "*", "t1m", "*", "100", ",", "ts1sum2", "]", ")", "-", "ts1sum", "*", "ts1sum", "/", "Nterms", "ts2sum2", "=", "np", ".", "max", "(", "[", "t2m", "*", "t2m", "*", "100", ",", "ts2sum2", "]", ")", "-", "ts2sum", "*", "ts2sum", "/", "Nterms", "#ts_out[k]=csum/np.sqrt(ts1sum2*ts2sum2)", "ts_out", "[", "k", "]", "=", "(", "csum", "-", "2.0", "*", "ts1sum", "*", "ts2sum", "/", "Nterms", "+", "ts1sum", "*", "ts2sum", "/", "Nterms", "/", "Nterms", ")", "/", "np", ".", 
"sqrt", "(", "ts1sum2", "*", "ts2sum2", ")", "best_shift", "=", "np", ".", "argmax", "(", "ts_out", ")", "-", "ishift", "return", "best_shift", ",", "ts_out" ]
Zero mean normalised cross-correlation (ZNCC) This function does ZNCC of two signals, ts1 and ts2 Normalisation by very small values is avoided by doing max(nmin,nvalue) Parameters -------------- ts1 : ndarray Input signal 1 to be aligned with ts2 : ndarray Input signal 2 Returns -------------- best_shift : float The best shift of *ts1* to align it with *ts2* ts_out : ndarray The correlation result
[ "Zero", "mean", "normalised", "cross", "-", "correlation", "(", "ZNCC", ")" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/znccpyr.py#L123-L180
hovren/crisp
crisp/znccpyr.py
find_shift_pyr
def find_shift_pyr(ts1,ts2,nlevels): """ Find shift that best aligns two time series The shift that aligns the timeseries ts1 with ts2. This is sought using zero mean normalized cross correlation (ZNCC) in a coarse to fine search with an octave pyramid on nlevels levels. Parameters ---------------- ts1 : list_like The first timeseries ts2 : list_like The seconds timeseries nlevels : int Number of levels in pyramid Returns ---------------- ts1_shift : float How many samples to shift ts1 to align with ts2 """ pyr1 = create_pyramid(ts1,nlevels) pyr2 = create_pyramid(ts2,nlevels) logger.debug("pyramid size = %d" % len(pyr1)) logger.debug("size of first element %d " % np.size(pyr1[0])) logger.debug("size of last element %d " % np.size(pyr1[-1])) ishift, corrfn = zncc(pyr1[-1],pyr2[-1]) for k in range(1,nlevels+1): ishift, corrfn = refine_correlation(pyr1[-k-1],pyr2[-k-1],ishift*2) return ishift
python
def find_shift_pyr(ts1,ts2,nlevels): """ Find shift that best aligns two time series The shift that aligns the timeseries ts1 with ts2. This is sought using zero mean normalized cross correlation (ZNCC) in a coarse to fine search with an octave pyramid on nlevels levels. Parameters ---------------- ts1 : list_like The first timeseries ts2 : list_like The seconds timeseries nlevels : int Number of levels in pyramid Returns ---------------- ts1_shift : float How many samples to shift ts1 to align with ts2 """ pyr1 = create_pyramid(ts1,nlevels) pyr2 = create_pyramid(ts2,nlevels) logger.debug("pyramid size = %d" % len(pyr1)) logger.debug("size of first element %d " % np.size(pyr1[0])) logger.debug("size of last element %d " % np.size(pyr1[-1])) ishift, corrfn = zncc(pyr1[-1],pyr2[-1]) for k in range(1,nlevels+1): ishift, corrfn = refine_correlation(pyr1[-k-1],pyr2[-k-1],ishift*2) return ishift
[ "def", "find_shift_pyr", "(", "ts1", ",", "ts2", ",", "nlevels", ")", ":", "pyr1", "=", "create_pyramid", "(", "ts1", ",", "nlevels", ")", "pyr2", "=", "create_pyramid", "(", "ts2", ",", "nlevels", ")", "logger", ".", "debug", "(", "\"pyramid size = %d\"", "%", "len", "(", "pyr1", ")", ")", "logger", ".", "debug", "(", "\"size of first element %d \"", "%", "np", ".", "size", "(", "pyr1", "[", "0", "]", ")", ")", "logger", ".", "debug", "(", "\"size of last element %d \"", "%", "np", ".", "size", "(", "pyr1", "[", "-", "1", "]", ")", ")", "ishift", ",", "corrfn", "=", "zncc", "(", "pyr1", "[", "-", "1", "]", ",", "pyr2", "[", "-", "1", "]", ")", "for", "k", "in", "range", "(", "1", ",", "nlevels", "+", "1", ")", ":", "ishift", ",", "corrfn", "=", "refine_correlation", "(", "pyr1", "[", "-", "k", "-", "1", "]", ",", "pyr2", "[", "-", "k", "-", "1", "]", ",", "ishift", "*", "2", ")", "return", "ishift" ]
Find shift that best aligns two time series The shift that aligns the timeseries ts1 with ts2. This is sought using zero mean normalized cross correlation (ZNCC) in a coarse to fine search with an octave pyramid on nlevels levels. Parameters ---------------- ts1 : list_like The first timeseries ts2 : list_like The seconds timeseries nlevels : int Number of levels in pyramid Returns ---------------- ts1_shift : float How many samples to shift ts1 to align with ts2
[ "Find", "shift", "that", "best", "aligns", "two", "time", "series" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/znccpyr.py#L245-L278
hovren/crisp
crisp/l3g4200d.py
load_L3G_arduino
def load_L3G_arduino(filename, remove_begin_spurious=False, return_parser=False): "Load gyro data collected by the arduino version of the L3G logging platform, and return the data (in rad/s), a time vector, and the sample rate (seconds)" file_data = open(filename, 'rb').read() parser = L3GArduinoParser() parser.parse(file_data[7:]) # Skip first "GYROLOG" header in file data = parser.data if parser.actual_data_rate: T = 1. / parser.actual_data_rate print("Found measured data rate %.3f ms (%.3f Hz)" % (1000*T, 1. / T)) else: T = 1. / parser.data_rate print("Using data rate provided by gyro (probably off by a few percent!) %.3f ms (%.3f Hz)" % (1000*T, 1. / T)) N = parser.data.shape[1] t = np.linspace(0, T*N, num=data.shape[1]) print(t.shape, data.shape) print("Loaded %d samples (%.2f seconds) with expected sample rate %.3f ms (%.3f Hz)" % (N, t[-1], T*1000.0, 1./T)) try: print("Actual sample rate is %.3f ms (%.3f Hz)" % (1000. / parser.actual_data_rate, parser.actual_data_rate, )) except TypeError: pass if remove_begin_spurious: to_remove = int(0.3/T) # Remove first three tenth of second data[:,:to_remove] = 0.0 if return_parser: return np.deg2rad(data), t, T, parser else: return np.deg2rad(data), t, T
python
def load_L3G_arduino(filename, remove_begin_spurious=False, return_parser=False): "Load gyro data collected by the arduino version of the L3G logging platform, and return the data (in rad/s), a time vector, and the sample rate (seconds)" file_data = open(filename, 'rb').read() parser = L3GArduinoParser() parser.parse(file_data[7:]) # Skip first "GYROLOG" header in file data = parser.data if parser.actual_data_rate: T = 1. / parser.actual_data_rate print("Found measured data rate %.3f ms (%.3f Hz)" % (1000*T, 1. / T)) else: T = 1. / parser.data_rate print("Using data rate provided by gyro (probably off by a few percent!) %.3f ms (%.3f Hz)" % (1000*T, 1. / T)) N = parser.data.shape[1] t = np.linspace(0, T*N, num=data.shape[1]) print(t.shape, data.shape) print("Loaded %d samples (%.2f seconds) with expected sample rate %.3f ms (%.3f Hz)" % (N, t[-1], T*1000.0, 1./T)) try: print("Actual sample rate is %.3f ms (%.3f Hz)" % (1000. / parser.actual_data_rate, parser.actual_data_rate, )) except TypeError: pass if remove_begin_spurious: to_remove = int(0.3/T) # Remove first three tenth of second data[:,:to_remove] = 0.0 if return_parser: return np.deg2rad(data), t, T, parser else: return np.deg2rad(data), t, T
[ "def", "load_L3G_arduino", "(", "filename", ",", "remove_begin_spurious", "=", "False", ",", "return_parser", "=", "False", ")", ":", "file_data", "=", "open", "(", "filename", ",", "'rb'", ")", ".", "read", "(", ")", "parser", "=", "L3GArduinoParser", "(", ")", "parser", ".", "parse", "(", "file_data", "[", "7", ":", "]", ")", "# Skip first \"GYROLOG\" header in file", "data", "=", "parser", ".", "data", "if", "parser", ".", "actual_data_rate", ":", "T", "=", "1.", "/", "parser", ".", "actual_data_rate", "print", "(", "\"Found measured data rate %.3f ms (%.3f Hz)\"", "%", "(", "1000", "*", "T", ",", "1.", "/", "T", ")", ")", "else", ":", "T", "=", "1.", "/", "parser", ".", "data_rate", "print", "(", "\"Using data rate provided by gyro (probably off by a few percent!) %.3f ms (%.3f Hz)\"", "%", "(", "1000", "*", "T", ",", "1.", "/", "T", ")", ")", "N", "=", "parser", ".", "data", ".", "shape", "[", "1", "]", "t", "=", "np", ".", "linspace", "(", "0", ",", "T", "*", "N", ",", "num", "=", "data", ".", "shape", "[", "1", "]", ")", "print", "(", "t", ".", "shape", ",", "data", ".", "shape", ")", "print", "(", "\"Loaded %d samples (%.2f seconds) with expected sample rate %.3f ms (%.3f Hz)\"", "%", "(", "N", ",", "t", "[", "-", "1", "]", ",", "T", "*", "1000.0", ",", "1.", "/", "T", ")", ")", "try", ":", "print", "(", "\"Actual sample rate is %.3f ms (%.3f Hz)\"", "%", "(", "1000.", "/", "parser", ".", "actual_data_rate", ",", "parser", ".", "actual_data_rate", ",", ")", ")", "except", "TypeError", ":", "pass", "if", "remove_begin_spurious", ":", "to_remove", "=", "int", "(", "0.3", "/", "T", ")", "# Remove first three tenth of second", "data", "[", ":", ",", ":", "to_remove", "]", "=", "0.0", "if", "return_parser", ":", "return", "np", ".", "deg2rad", "(", "data", ")", ",", "t", ",", "T", ",", "parser", "else", ":", "return", "np", ".", "deg2rad", "(", "data", ")", ",", "t", ",", "T" ]
Load gyro data collected by the arduino version of the L3G logging platform, and return the data (in rad/s), a time vector, and the sample rate (seconds)
[ "Load", "gyro", "data", "collected", "by", "the", "arduino", "version", "of", "the", "L3G", "logging", "platform", "and", "return", "the", "data", "(", "in", "rad", "/", "s", ")", "a", "time", "vector", "and", "the", "sample", "rate", "(", "seconds", ")" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/l3g4200d.py#L105-L134
hovren/crisp
examples/gopro_dataset_example.py
to_rot_matrix
def to_rot_matrix(r): "Convert combined axis angle vector to rotation matrix" theta = np.linalg.norm(r) v = r/theta R = crisp.rotations.axis_angle_to_rotation_matrix(v, theta) return R
python
def to_rot_matrix(r): "Convert combined axis angle vector to rotation matrix" theta = np.linalg.norm(r) v = r/theta R = crisp.rotations.axis_angle_to_rotation_matrix(v, theta) return R
[ "def", "to_rot_matrix", "(", "r", ")", ":", "theta", "=", "np", ".", "linalg", ".", "norm", "(", "r", ")", "v", "=", "r", "/", "theta", "R", "=", "crisp", ".", "rotations", ".", "axis_angle_to_rotation_matrix", "(", "v", ",", "theta", ")", "return", "R" ]
Convert combined axis angle vector to rotation matrix
[ "Convert", "combined", "axis", "angle", "vector", "to", "rotation", "matrix" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/examples/gopro_dataset_example.py#L42-L47
hovren/crisp
crisp/pose.py
estimate_pose
def estimate_pose(image_sequences, imu_sequences, K): """Estimate sync between IMU and camera based on gyro readings and optical flow. The user should first create at least two sequences of corresponding image and gyroscope data. From each sequence we calculate the rotation axis (one from images, one from IMU/gyro). The final set of len(image_sequences) corresponding rotation axes are then used to calculate the relative pose between the IMU and camera. The returned rotation is such that it transfers vectors in the gyroscope coordinate frame to the camera coordinate frame: X_camera = R * X_gyro Parameters ------------ image_sequences : list of list of ndarrays List of image sequences (list of ndarrays) to use. Must have at least two sequences. imu_sequences : list of (3, N) ndarray Sequence of gyroscope measurements (angular velocities). K : (3,3) ndarray Camera calibration matrix Returns ----------- R : (3,3) ndarray The relative pose (gyro-to-camera) such that X_camera = R * X_gyro """ assert len(image_sequences) == len(imu_sequences) assert len(image_sequences) >= 2 # Note: list(image_sequence) here makes sure any generator type input is expanded to an actual list sync_correspondences = [_get_point_correspondences(list(image_sequence)) for image_sequence in image_sequences] # ) Procrustes on corresponding pairs PROCRUSTES_MAX_POINTS = 15 # Number of tracks/points to use for procrustes logger.debug("Running procrustes on track-retrack results") image_rotation_axes = [] for i, points in enumerate(sync_correspondences): if points.size < 1: logger.error('Shape of points are %s', str(points.shape)) raise Exception("Did not get enough points when tracking") num_points_to_use = min(PROCRUSTES_MAX_POINTS, points.shape[0]) logger.debug("Using %d tracks to calculate procrustes", num_points_to_use) idxs_to_use = np.random.permutation(points.shape[0])[:num_points_to_use] assert points.shape[-1] == 2 x = points[idxs_to_use,0,:].T.reshape(2,-1) y = 
points[idxs_to_use,-1,:].T.reshape(2,-1) x = np.vstack((x, np.ones((1, x.shape[1])))) y = np.vstack((y, np.ones((1, y.shape[1])))) K_inv = np.linalg.inv(K) X = K_inv.dot(x) Y = K_inv.dot(y) # Depth must be positive (R, t) = rotations.procrustes(X, Y, remove_mean=False) # X = R * Y + t (v, theta) = rotations.rotation_matrix_to_axis_angle(R) image_rotation_axes.append(v) # Save rotation axis # Check the quality via the mean reprojection error mean_error = np.mean(np.sqrt(np.sum((X - R.dot(Y))**2, axis=0))) MEAN_ERROR_LIMIT = 0.1 # Arbitrarily chosen limit (in meters) logger.debug('Image sequence %d: Rotation axis %s, degrees %.2f, mean error %.3f', i, v, np.rad2deg(theta), mean_error) if mean_error > MEAN_ERROR_LIMIT: logger.warning("Procrustes solution mean error %.3f > %.3f", mean_error, MEAN_ERROR_LIMIT) # ) Gyro principal rotation axis gyro_rotation_axes = [] for i, gyro_seq in enumerate(imu_sequences): assert gyro_seq.shape[0] == 3 v = principal_rotation_axis(gyro_seq) logger.debug('Gyro sequence %d: Rotation axis %s', i, v) gyro_rotation_axes.append(v) # ) Procrustes to get rotation between coordinate frames X = np.vstack(image_rotation_axes).T Y = np.vstack(gyro_rotation_axes).T (R,t) = rotations.procrustes(X, Y, remove_mean=False) return (R, t)
python
def estimate_pose(image_sequences, imu_sequences, K): """Estimate sync between IMU and camera based on gyro readings and optical flow. The user should first create at least two sequences of corresponding image and gyroscope data. From each sequence we calculate the rotation axis (one from images, one from IMU/gyro). The final set of len(image_sequences) corresponding rotation axes are then used to calculate the relative pose between the IMU and camera. The returned rotation is such that it transfers vectors in the gyroscope coordinate frame to the camera coordinate frame: X_camera = R * X_gyro Parameters ------------ image_sequences : list of list of ndarrays List of image sequences (list of ndarrays) to use. Must have at least two sequences. imu_sequences : list of (3, N) ndarray Sequence of gyroscope measurements (angular velocities). K : (3,3) ndarray Camera calibration matrix Returns ----------- R : (3,3) ndarray The relative pose (gyro-to-camera) such that X_camera = R * X_gyro """ assert len(image_sequences) == len(imu_sequences) assert len(image_sequences) >= 2 # Note: list(image_sequence) here makes sure any generator type input is expanded to an actual list sync_correspondences = [_get_point_correspondences(list(image_sequence)) for image_sequence in image_sequences] # ) Procrustes on corresponding pairs PROCRUSTES_MAX_POINTS = 15 # Number of tracks/points to use for procrustes logger.debug("Running procrustes on track-retrack results") image_rotation_axes = [] for i, points in enumerate(sync_correspondences): if points.size < 1: logger.error('Shape of points are %s', str(points.shape)) raise Exception("Did not get enough points when tracking") num_points_to_use = min(PROCRUSTES_MAX_POINTS, points.shape[0]) logger.debug("Using %d tracks to calculate procrustes", num_points_to_use) idxs_to_use = np.random.permutation(points.shape[0])[:num_points_to_use] assert points.shape[-1] == 2 x = points[idxs_to_use,0,:].T.reshape(2,-1) y = 
points[idxs_to_use,-1,:].T.reshape(2,-1) x = np.vstack((x, np.ones((1, x.shape[1])))) y = np.vstack((y, np.ones((1, y.shape[1])))) K_inv = np.linalg.inv(K) X = K_inv.dot(x) Y = K_inv.dot(y) # Depth must be positive (R, t) = rotations.procrustes(X, Y, remove_mean=False) # X = R * Y + t (v, theta) = rotations.rotation_matrix_to_axis_angle(R) image_rotation_axes.append(v) # Save rotation axis # Check the quality via the mean reprojection error mean_error = np.mean(np.sqrt(np.sum((X - R.dot(Y))**2, axis=0))) MEAN_ERROR_LIMIT = 0.1 # Arbitrarily chosen limit (in meters) logger.debug('Image sequence %d: Rotation axis %s, degrees %.2f, mean error %.3f', i, v, np.rad2deg(theta), mean_error) if mean_error > MEAN_ERROR_LIMIT: logger.warning("Procrustes solution mean error %.3f > %.3f", mean_error, MEAN_ERROR_LIMIT) # ) Gyro principal rotation axis gyro_rotation_axes = [] for i, gyro_seq in enumerate(imu_sequences): assert gyro_seq.shape[0] == 3 v = principal_rotation_axis(gyro_seq) logger.debug('Gyro sequence %d: Rotation axis %s', i, v) gyro_rotation_axes.append(v) # ) Procrustes to get rotation between coordinate frames X = np.vstack(image_rotation_axes).T Y = np.vstack(gyro_rotation_axes).T (R,t) = rotations.procrustes(X, Y, remove_mean=False) return (R, t)
[ "def", "estimate_pose", "(", "image_sequences", ",", "imu_sequences", ",", "K", ")", ":", "assert", "len", "(", "image_sequences", ")", "==", "len", "(", "imu_sequences", ")", "assert", "len", "(", "image_sequences", ")", ">=", "2", "# Note: list(image_sequence) here makes sure any generator type input is expanded to an actual list", "sync_correspondences", "=", "[", "_get_point_correspondences", "(", "list", "(", "image_sequence", ")", ")", "for", "image_sequence", "in", "image_sequences", "]", "# ) Procrustes on corresponding pairs", "PROCRUSTES_MAX_POINTS", "=", "15", "# Number of tracks/points to use for procrustes", "logger", ".", "debug", "(", "\"Running procrustes on track-retrack results\"", ")", "image_rotation_axes", "=", "[", "]", "for", "i", ",", "points", "in", "enumerate", "(", "sync_correspondences", ")", ":", "if", "points", ".", "size", "<", "1", ":", "logger", ".", "error", "(", "'Shape of points are %s'", ",", "str", "(", "points", ".", "shape", ")", ")", "raise", "Exception", "(", "\"Did not get enough points when tracking\"", ")", "num_points_to_use", "=", "min", "(", "PROCRUSTES_MAX_POINTS", ",", "points", ".", "shape", "[", "0", "]", ")", "logger", ".", "debug", "(", "\"Using %d tracks to calculate procrustes\"", ",", "num_points_to_use", ")", "idxs_to_use", "=", "np", ".", "random", ".", "permutation", "(", "points", ".", "shape", "[", "0", "]", ")", "[", ":", "num_points_to_use", "]", "assert", "points", ".", "shape", "[", "-", "1", "]", "==", "2", "x", "=", "points", "[", "idxs_to_use", ",", "0", ",", ":", "]", ".", "T", ".", "reshape", "(", "2", ",", "-", "1", ")", "y", "=", "points", "[", "idxs_to_use", ",", "-", "1", ",", ":", "]", ".", "T", ".", "reshape", "(", "2", ",", "-", "1", ")", "x", "=", "np", ".", "vstack", "(", "(", "x", ",", "np", ".", "ones", "(", "(", "1", ",", "x", ".", "shape", "[", "1", "]", ")", ")", ")", ")", "y", "=", "np", ".", "vstack", "(", "(", "y", ",", "np", ".", "ones", "(", "(", "1", ",", "y", ".", "shape", 
"[", "1", "]", ")", ")", ")", ")", "K_inv", "=", "np", ".", "linalg", ".", "inv", "(", "K", ")", "X", "=", "K_inv", ".", "dot", "(", "x", ")", "Y", "=", "K_inv", ".", "dot", "(", "y", ")", "# Depth must be positive", "(", "R", ",", "t", ")", "=", "rotations", ".", "procrustes", "(", "X", ",", "Y", ",", "remove_mean", "=", "False", ")", "# X = R * Y + t", "(", "v", ",", "theta", ")", "=", "rotations", ".", "rotation_matrix_to_axis_angle", "(", "R", ")", "image_rotation_axes", ".", "append", "(", "v", ")", "# Save rotation axis", "# Check the quality via the mean reprojection error", "mean_error", "=", "np", ".", "mean", "(", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "(", "X", "-", "R", ".", "dot", "(", "Y", ")", ")", "**", "2", ",", "axis", "=", "0", ")", ")", ")", "MEAN_ERROR_LIMIT", "=", "0.1", "# Arbitrarily chosen limit (in meters)", "logger", ".", "debug", "(", "'Image sequence %d: Rotation axis %s, degrees %.2f, mean error %.3f'", ",", "i", ",", "v", ",", "np", ".", "rad2deg", "(", "theta", ")", ",", "mean_error", ")", "if", "mean_error", ">", "MEAN_ERROR_LIMIT", ":", "logger", ".", "warning", "(", "\"Procrustes solution mean error %.3f > %.3f\"", ",", "mean_error", ",", "MEAN_ERROR_LIMIT", ")", "# ) Gyro principal rotation axis", "gyro_rotation_axes", "=", "[", "]", "for", "i", ",", "gyro_seq", "in", "enumerate", "(", "imu_sequences", ")", ":", "assert", "gyro_seq", ".", "shape", "[", "0", "]", "==", "3", "v", "=", "principal_rotation_axis", "(", "gyro_seq", ")", "logger", ".", "debug", "(", "'Gyro sequence %d: Rotation axis %s'", ",", "i", ",", "v", ")", "gyro_rotation_axes", ".", "append", "(", "v", ")", "# ) Procrustes to get rotation between coordinate frames", "X", "=", "np", ".", "vstack", "(", "image_rotation_axes", ")", ".", "T", "Y", "=", "np", ".", "vstack", "(", "gyro_rotation_axes", ")", ".", "T", "(", "R", ",", "t", ")", "=", "rotations", ".", "procrustes", "(", "X", ",", "Y", ",", "remove_mean", "=", "False", ")", "return", "(", "R", ",", 
"t", ")" ]
Estimate sync between IMU and camera based on gyro readings and optical flow. The user should first create at least two sequences of corresponding image and gyroscope data. From each sequence we calculate the rotation axis (one from images, one from IMU/gyro). The final set of len(image_sequences) corresponding rotation axes are then used to calculate the relative pose between the IMU and camera. The returned rotation is such that it transfers vectors in the gyroscope coordinate frame to the camera coordinate frame: X_camera = R * X_gyro Parameters ------------ image_sequences : list of list of ndarrays List of image sequences (list of ndarrays) to use. Must have at least two sequences. imu_sequences : list of (3, N) ndarray Sequence of gyroscope measurements (angular velocities). K : (3,3) ndarray Camera calibration matrix Returns ----------- R : (3,3) ndarray The relative pose (gyro-to-camera) such that X_camera = R * X_gyro
[ "Estimate", "sync", "between", "IMU", "and", "camera", "based", "on", "gyro", "readings", "and", "optical", "flow", ".", "The", "user", "should", "first", "create", "at", "least", "two", "sequences", "of", "corresponding", "image", "and", "gyroscope", "data", ".", "From", "each", "sequence", "we", "calculate", "the", "rotation", "axis", "(", "one", "from", "images", "one", "from", "IMU", "/", "gyro", ")", ".", "The", "final", "set", "of", "len", "(", "image_sequences", ")", "corresponding", "rotation", "axes", "are", "then", "used", "to", "calculate", "the", "relative", "pose", "between", "the", "IMU", "and", "camera", ".", "The", "returned", "rotation", "is", "such", "that", "it", "transfers", "vectors", "in", "the", "gyroscope", "coordinate", "frame", "to", "the", "camera", "coordinate", "frame", ":", "X_camera", "=", "R", "*", "X_gyro", "Parameters", "------------", "image_sequences", ":", "list", "of", "list", "of", "ndarrays", "List", "of", "image", "sequences", "(", "list", "of", "ndarrays", ")", "to", "use", ".", "Must", "have", "at", "least", "two", "sequences", ".", "imu_sequences", ":", "list", "of", "(", "3", "N", ")", "ndarray", "Sequence", "of", "gyroscope", "measurements", "(", "angular", "velocities", ")", ".", "K", ":", "(", "3", "3", ")", "ndarray", "Camera", "calibration", "matrix", "Returns", "-----------", "R", ":", "(", "3", "3", ")", "ndarray", "The", "relative", "pose", "(", "gyro", "-", "to", "-", "camera", ")", "such", "that", "X_camera", "=", "R", "*", "X_gyro" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/pose.py#L24-L106
hovren/crisp
crisp/pose.py
pick_manual
def pick_manual(image_sequence, imu_gyro, num_sequences=2): """Select N matching sequences and return data indices. Parameters --------------- image_sequence : list_like A list, or generator, of image data imu_gyro : (3, N) ndarray Gyroscope data (angular velocities) num_sequences : int The number of matching sequences to pick Returns ---------------- sync_sequences : list List of (frame_pair, gyro_pair) tuples where each pair contains (a, b) which are indices of the (inclusive) range [a, b] that was chosen """ assert num_sequences >= 2 # Create optical flow for user to select parts in logger.info("Calculating optical flow") flow = tracking.optical_flow_magnitude(image_sequence) # ) Prompt user for sync slices logger.debug("Prompting user for %d sequences" % num_sequences) imu_fake_timestamps = np.linspace(0,1,num=imu_gyro.shape[1]) sync_sequences = [timesync.manual_sync_pick(flow, imu_fake_timestamps, imu_gyro) for i in range(num_sequences)] return sync_sequences
python
def pick_manual(image_sequence, imu_gyro, num_sequences=2): """Select N matching sequences and return data indices. Parameters --------------- image_sequence : list_like A list, or generator, of image data imu_gyro : (3, N) ndarray Gyroscope data (angular velocities) num_sequences : int The number of matching sequences to pick Returns ---------------- sync_sequences : list List of (frame_pair, gyro_pair) tuples where each pair contains (a, b) which are indices of the (inclusive) range [a, b] that was chosen """ assert num_sequences >= 2 # Create optical flow for user to select parts in logger.info("Calculating optical flow") flow = tracking.optical_flow_magnitude(image_sequence) # ) Prompt user for sync slices logger.debug("Prompting user for %d sequences" % num_sequences) imu_fake_timestamps = np.linspace(0,1,num=imu_gyro.shape[1]) sync_sequences = [timesync.manual_sync_pick(flow, imu_fake_timestamps, imu_gyro) for i in range(num_sequences)] return sync_sequences
[ "def", "pick_manual", "(", "image_sequence", ",", "imu_gyro", ",", "num_sequences", "=", "2", ")", ":", "assert", "num_sequences", ">=", "2", "# Create optical flow for user to select parts in", "logger", ".", "info", "(", "\"Calculating optical flow\"", ")", "flow", "=", "tracking", ".", "optical_flow_magnitude", "(", "image_sequence", ")", "# ) Prompt user for sync slices", "logger", ".", "debug", "(", "\"Prompting user for %d sequences\"", "%", "num_sequences", ")", "imu_fake_timestamps", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "num", "=", "imu_gyro", ".", "shape", "[", "1", "]", ")", "sync_sequences", "=", "[", "timesync", ".", "manual_sync_pick", "(", "flow", ",", "imu_fake_timestamps", ",", "imu_gyro", ")", "for", "i", "in", "range", "(", "num_sequences", ")", "]", "return", "sync_sequences" ]
Select N matching sequences and return data indices. Parameters --------------- image_sequence : list_like A list, or generator, of image data imu_gyro : (3, N) ndarray Gyroscope data (angular velocities) num_sequences : int The number of matching sequences to pick Returns ---------------- sync_sequences : list List of (frame_pair, gyro_pair) tuples where each pair contains (a, b) which are indices of the (inclusive) range [a, b] that was chosen
[ "Select", "N", "matching", "sequences", "and", "return", "data", "indices", ".", "Parameters", "---------------", "image_sequence", ":", "list_like", "A", "list", "or", "generator", "of", "image", "data", "imu_gyro", ":", "(", "3", "N", ")", "ndarray", "Gyroscope", "data", "(", "angular", "velocities", ")", "num_sequences", ":", "int", "The", "number", "of", "matching", "sequences", "to", "pick", "Returns", "----------------", "sync_sequences", ":", "list", "List", "of", "(", "frame_pair", "gyro_pair", ")", "tuples", "where", "each", "pair", "contains", "(", "a", "b", ")", "which", "are", "indices", "of", "the", "(", "inclusive", ")", "range", "[", "a", "b", "]", "that", "was", "chosen" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/pose.py#L110-L138
hovren/crisp
crisp/pose.py
principal_rotation_axis
def principal_rotation_axis(gyro_data): """Get the principal rotation axis of angular velocity measurements. Parameters ------------- gyro_data : (3, N) ndarray Angular velocity measurements Returns ------------- v : (3,1) ndarray The principal rotation axis for the chosen sequence """ N = np.zeros((3,3)) for x in gyro_data.T: # Transpose because samples are stored as columns y = x.reshape(3,1) N += y.dot(y.T) (eig_val, eig_vec) = np.linalg.eig(N) i = np.argmax(eig_val) v = eig_vec[:,i] # Make sure v has correct sign s = 0 for x in gyro_data.T: # Transpose because samples are stored as columns s += v.T.dot(x.reshape(3,1)) v *= np.sign(s) return v
python
def principal_rotation_axis(gyro_data): """Get the principal rotation axis of angular velocity measurements. Parameters ------------- gyro_data : (3, N) ndarray Angular velocity measurements Returns ------------- v : (3,1) ndarray The principal rotation axis for the chosen sequence """ N = np.zeros((3,3)) for x in gyro_data.T: # Transpose because samples are stored as columns y = x.reshape(3,1) N += y.dot(y.T) (eig_val, eig_vec) = np.linalg.eig(N) i = np.argmax(eig_val) v = eig_vec[:,i] # Make sure v has correct sign s = 0 for x in gyro_data.T: # Transpose because samples are stored as columns s += v.T.dot(x.reshape(3,1)) v *= np.sign(s) return v
[ "def", "principal_rotation_axis", "(", "gyro_data", ")", ":", "N", "=", "np", ".", "zeros", "(", "(", "3", ",", "3", ")", ")", "for", "x", "in", "gyro_data", ".", "T", ":", "# Transpose because samples are stored as columns", "y", "=", "x", ".", "reshape", "(", "3", ",", "1", ")", "N", "+=", "y", ".", "dot", "(", "y", ".", "T", ")", "(", "eig_val", ",", "eig_vec", ")", "=", "np", ".", "linalg", ".", "eig", "(", "N", ")", "i", "=", "np", ".", "argmax", "(", "eig_val", ")", "v", "=", "eig_vec", "[", ":", ",", "i", "]", "# Make sure v has correct sign", "s", "=", "0", "for", "x", "in", "gyro_data", ".", "T", ":", "# Transpose because samples are stored as columns", "s", "+=", "v", ".", "T", ".", "dot", "(", "x", ".", "reshape", "(", "3", ",", "1", ")", ")", "v", "*=", "np", ".", "sign", "(", "s", ")", "return", "v" ]
Get the principal rotation axis of angular velocity measurements. Parameters ------------- gyro_data : (3, N) ndarray Angular velocity measurements Returns ------------- v : (3,1) ndarray The principal rotation axis for the chosen sequence
[ "Get", "the", "principal", "rotation", "axis", "of", "angular", "velocity", "measurements", ".", "Parameters", "-------------", "gyro_data", ":", "(", "3", "N", ")", "ndarray", "Angular", "velocity", "measurements", "Returns", "-------------", "v", ":", "(", "3", "1", ")", "ndarray", "The", "principal", "rotation", "axis", "for", "the", "chosen", "sequence" ]
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/pose.py#L142-L171
codejamninja/sphinx-markdown-builder
sphinx_markdown_builder/markdown_writer.py
MarkdownTranslator.visit_image
def visit_image(self, node): """ Image directive """ uri = node.attributes['uri'] doc_folder = os.path.dirname(self.builder.current_docname) if uri.startswith(doc_folder): # drop docname prefix uri = uri[len(doc_folder):] if uri.startswith("/"): uri = "." + uri self.add('\n\n![image](%s)\n\n' % uri)
python
def visit_image(self, node): """ Image directive """ uri = node.attributes['uri'] doc_folder = os.path.dirname(self.builder.current_docname) if uri.startswith(doc_folder): # drop docname prefix uri = uri[len(doc_folder):] if uri.startswith("/"): uri = "." + uri self.add('\n\n![image](%s)\n\n' % uri)
[ "def", "visit_image", "(", "self", ",", "node", ")", ":", "uri", "=", "node", ".", "attributes", "[", "'uri'", "]", "doc_folder", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "builder", ".", "current_docname", ")", "if", "uri", ".", "startswith", "(", "doc_folder", ")", ":", "# drop docname prefix", "uri", "=", "uri", "[", "len", "(", "doc_folder", ")", ":", "]", "if", "uri", ".", "startswith", "(", "\"/\"", ")", ":", "uri", "=", "\".\"", "+", "uri", "self", ".", "add", "(", "'\\n\\n![image](%s)\\n\\n'", "%", "uri", ")" ]
Image directive
[ "Image", "directive" ]
train
https://github.com/codejamninja/sphinx-markdown-builder/blob/a28f48df937d4b0e158ba453e5e1c66824299196/sphinx_markdown_builder/markdown_writer.py#L203-L214
codejamninja/sphinx-markdown-builder
sphinx_markdown_builder/doctree2md.py
add_pref_suff
def add_pref_suff(pref_suff_map): """ Decorator adds visit, depart methods for prefix/suffix pairs """ def dec(cls): # Need _make_method to ensure new variable picked up for each iteration # of the loop. The defined method picks up this new variable in its # scope. for key, (prefix, suffix) in pref_suff_map.items(): setattr(cls, 'visit_' + key, _make_method(prefix)) setattr(cls, 'depart_' + key, _make_method(suffix)) return cls return dec
python
def add_pref_suff(pref_suff_map): """ Decorator adds visit, depart methods for prefix/suffix pairs """ def dec(cls): # Need _make_method to ensure new variable picked up for each iteration # of the loop. The defined method picks up this new variable in its # scope. for key, (prefix, suffix) in pref_suff_map.items(): setattr(cls, 'visit_' + key, _make_method(prefix)) setattr(cls, 'depart_' + key, _make_method(suffix)) return cls return dec
[ "def", "add_pref_suff", "(", "pref_suff_map", ")", ":", "def", "dec", "(", "cls", ")", ":", "# Need _make_method to ensure new variable picked up for each iteration", "# of the loop. The defined method picks up this new variable in its", "# scope.", "for", "key", ",", "(", "prefix", ",", "suffix", ")", "in", "pref_suff_map", ".", "items", "(", ")", ":", "setattr", "(", "cls", ",", "'visit_'", "+", "key", ",", "_make_method", "(", "prefix", ")", ")", "setattr", "(", "cls", ",", "'depart_'", "+", "key", ",", "_make_method", "(", "suffix", ")", ")", "return", "cls", "return", "dec" ]
Decorator adds visit, depart methods for prefix/suffix pairs
[ "Decorator", "adds", "visit", "depart", "methods", "for", "prefix", "/", "suffix", "pairs" ]
train
https://github.com/codejamninja/sphinx-markdown-builder/blob/a28f48df937d4b0e158ba453e5e1c66824299196/sphinx_markdown_builder/doctree2md.py#L241-L253
codejamninja/sphinx-markdown-builder
sphinx_markdown_builder/doctree2md.py
add_pass_thru
def add_pass_thru(pass_thrus): """ Decorator adds explicit pass-through visit and depart methods """ def meth(self, node): pass def dec(cls): for element_name in pass_thrus: for meth_prefix in ('visit_', 'depart_'): meth_name = meth_prefix + element_name if hasattr(cls, meth_name): raise ValueError('method name {} already defined' .format(meth_name)) setattr(cls, meth_name, meth) return cls return dec
python
def add_pass_thru(pass_thrus): """ Decorator adds explicit pass-through visit and depart methods """ def meth(self, node): pass def dec(cls): for element_name in pass_thrus: for meth_prefix in ('visit_', 'depart_'): meth_name = meth_prefix + element_name if hasattr(cls, meth_name): raise ValueError('method name {} already defined' .format(meth_name)) setattr(cls, meth_name, meth) return cls return dec
[ "def", "add_pass_thru", "(", "pass_thrus", ")", ":", "def", "meth", "(", "self", ",", "node", ")", ":", "pass", "def", "dec", "(", "cls", ")", ":", "for", "element_name", "in", "pass_thrus", ":", "for", "meth_prefix", "in", "(", "'visit_'", ",", "'depart_'", ")", ":", "meth_name", "=", "meth_prefix", "+", "element_name", "if", "hasattr", "(", "cls", ",", "meth_name", ")", ":", "raise", "ValueError", "(", "'method name {} already defined'", ".", "format", "(", "meth_name", ")", ")", "setattr", "(", "cls", ",", "meth_name", ",", "meth", ")", "return", "cls", "return", "dec" ]
Decorator adds explicit pass-through visit and depart methods
[ "Decorator", "adds", "explicit", "pass", "-", "through", "visit", "and", "depart", "methods" ]
train
https://github.com/codejamninja/sphinx-markdown-builder/blob/a28f48df937d4b0e158ba453e5e1c66824299196/sphinx_markdown_builder/doctree2md.py#L256-L272
codejamninja/sphinx-markdown-builder
sphinx_markdown_builder/doctree2md.py
IndentLevel.write
def write(self): """ Add ``self.contents`` with current ``prefix`` and ``first_prefix`` Add processed ``self.contents`` to ``self.base``. The first line has ``first_prefix`` prepended, further lines have ``prefix`` prepended. Empty (all whitespace) lines get written as bare carriage returns, to avoid ugly extra whitespace. """ string = ''.join(self.content) lines = string.splitlines(True) if len(lines) == 0: return texts = [self.first_prefix + lines[0]] for line in lines[1:]: if line.strip() == '': # avoid prefix for empty lines texts.append('\n') else: texts.append(self.prefix + line) self.base.append(''.join(texts))
python
def write(self): """ Add ``self.contents`` with current ``prefix`` and ``first_prefix`` Add processed ``self.contents`` to ``self.base``. The first line has ``first_prefix`` prepended, further lines have ``prefix`` prepended. Empty (all whitespace) lines get written as bare carriage returns, to avoid ugly extra whitespace. """ string = ''.join(self.content) lines = string.splitlines(True) if len(lines) == 0: return texts = [self.first_prefix + lines[0]] for line in lines[1:]: if line.strip() == '': # avoid prefix for empty lines texts.append('\n') else: texts.append(self.prefix + line) self.base.append(''.join(texts))
[ "def", "write", "(", "self", ")", ":", "string", "=", "''", ".", "join", "(", "self", ".", "content", ")", "lines", "=", "string", ".", "splitlines", "(", "True", ")", "if", "len", "(", "lines", ")", "==", "0", ":", "return", "texts", "=", "[", "self", ".", "first_prefix", "+", "lines", "[", "0", "]", "]", "for", "line", "in", "lines", "[", "1", ":", "]", ":", "if", "line", ".", "strip", "(", ")", "==", "''", ":", "# avoid prefix for empty lines", "texts", ".", "append", "(", "'\\n'", ")", "else", ":", "texts", ".", "append", "(", "self", ".", "prefix", "+", "line", ")", "self", ".", "base", ".", "append", "(", "''", ".", "join", "(", "texts", ")", ")" ]
Add ``self.contents`` with current ``prefix`` and ``first_prefix`` Add processed ``self.contents`` to ``self.base``. The first line has ``first_prefix`` prepended, further lines have ``prefix`` prepended. Empty (all whitespace) lines get written as bare carriage returns, to avoid ugly extra whitespace.
[ "Add", "self", ".", "contents", "with", "current", "prefix", "and", "first_prefix" ]
train
https://github.com/codejamninja/sphinx-markdown-builder/blob/a28f48df937d4b0e158ba453e5e1c66824299196/sphinx_markdown_builder/doctree2md.py#L206-L225
sbuss/bitmerchant
bitmerchant/wallet/bip32.py
Wallet.identifier
def identifier(self): """Get the identifier for this node. Extended keys can be identified by the Hash160 (RIPEMD160 after SHA256) of the public key's `key`. This corresponds exactly to the data used in traditional Bitcoin addresses. It is not advised to represent this data in base58 format though, as it may be interpreted as an address that way (and wallet software is not required to accept payment to the chain key itself). """ key = self.get_public_key_hex() return ensure_bytes(hexlify(hash160(unhexlify(ensure_bytes(key)))))
python
def identifier(self): """Get the identifier for this node. Extended keys can be identified by the Hash160 (RIPEMD160 after SHA256) of the public key's `key`. This corresponds exactly to the data used in traditional Bitcoin addresses. It is not advised to represent this data in base58 format though, as it may be interpreted as an address that way (and wallet software is not required to accept payment to the chain key itself). """ key = self.get_public_key_hex() return ensure_bytes(hexlify(hash160(unhexlify(ensure_bytes(key)))))
[ "def", "identifier", "(", "self", ")", ":", "key", "=", "self", ".", "get_public_key_hex", "(", ")", "return", "ensure_bytes", "(", "hexlify", "(", "hash160", "(", "unhexlify", "(", "ensure_bytes", "(", "key", ")", ")", ")", ")", ")" ]
Get the identifier for this node. Extended keys can be identified by the Hash160 (RIPEMD160 after SHA256) of the public key's `key`. This corresponds exactly to the data used in traditional Bitcoin addresses. It is not advised to represent this data in base58 format though, as it may be interpreted as an address that way (and wallet software is not required to accept payment to the chain key itself).
[ "Get", "the", "identifier", "for", "this", "node", "." ]
train
https://github.com/sbuss/bitmerchant/blob/901de06489805c396a922f955eeef2da04734e3e/bitmerchant/wallet/bip32.py#L153-L164
sbuss/bitmerchant
bitmerchant/wallet/bip32.py
Wallet.get_child
def get_child(self, child_number, is_prime=None, as_private=True): """Derive a child key. :param child_number: The number of the child key to compute :type child_number: int :param is_prime: If True, the child is calculated via private derivation. If False, then public derivation is used. If None, then it is figured out from the value of child_number. :type is_prime: bool, defaults to None :param as_private: If True, strips private key from the result. Defaults to False. If there is no private key present, this is ignored. :type as_private: bool Positive child_numbers (>= 0, < 2,147,483,648) produce publicly derived children. (prime=False) Negative numbers (> -2,147,483,648, < 0) use private derivation. (prime=True) NOTE: Python can't do -0, so if you want the privately derived 0th child you need to manually set is_prime=True. NOTE: negative numbered children are provided as a convenience because nobody wants to remember the above numbers. Negative numbers are considered 'prime children', which is described in the BIP32 spec as a leading 1 in a 32 bit unsigned int. This derivation is fully described at https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#child-key-derivation-functions # nopep8 """ boundary = 0x80000000 # Note: If this boundary check gets removed, then children above # the boundary should use private (prime) derivation. if abs(child_number) >= boundary: raise ValueError("Invalid child number %s" % child_number) # If is_prime isn't set, then we can infer it from the child_number if is_prime is None: # Prime children are either < 0 or > 0x80000000 if child_number < 0: child_number = abs(child_number) is_prime = True else: is_prime = False else: # Otherwise is_prime is set so the child_number should be between # 0 and 0x80000000 if child_number < 0 or child_number >= boundary: raise ValueError( "Invalid child number. 
Must be between 0 and %s" % boundary) if not self.private_key and is_prime: raise ValueError( "Cannot compute a prime child without a private key") if is_prime: # Even though we take child_number as an int < boundary, the # internal derivation needs it to be the larger number. child_number = child_number + boundary child_number_hex = long_to_hex(child_number, 8) if is_prime: # Let data = concat(0x00, self.key, child_number) data = b'00' + self.private_key.get_key() else: data = self.get_public_key_hex() data += child_number_hex # Compute a 64 Byte I that is the HMAC-SHA512, using self.chain_code # as the seed, and data as the message. I = hmac.new( unhexlify(ensure_bytes(self.chain_code)), msg=unhexlify(ensure_bytes(data)), digestmod=sha512).digest() # Split I into its 32 Byte components. I_L, I_R = I[:32], I[32:] if long_or_int(hexlify(I_L), 16) >= SECP256k1.order: raise InvalidPrivateKeyError("The derived key is too large.") c_i = hexlify(I_R) private_exponent = None public_pair = None if self.private_key: # Use private information for derivation # I_L is added to the current key's secret exponent (mod n), where # n is the order of the ECDSA curve in use. private_exponent = ( (long_or_int(hexlify(I_L), 16) + long_or_int(self.private_key.get_key(), 16)) % SECP256k1.order) # I_R is the child's chain code else: # Only use public information for this derivation g = SECP256k1.generator I_L_long = long_or_int(hexlify(I_L), 16) point = (_ECDSA_Public_key(g, g * I_L_long).point + self.public_key.to_point()) # I_R is the child's chain code public_pair = PublicPair(point.x(), point.y()) child = self.__class__( chain_code=c_i, depth=self.depth + 1, # we have to go deeper... 
parent_fingerprint=self.fingerprint, child_number=child_number_hex, private_exponent=private_exponent, public_pair=public_pair, network=self.network) if child.public_key.to_point() == INFINITY: raise InfinityPointException("The point at infinity is invalid.") if not as_private: return child.public_copy() return child
python
def get_child(self, child_number, is_prime=None, as_private=True): """Derive a child key. :param child_number: The number of the child key to compute :type child_number: int :param is_prime: If True, the child is calculated via private derivation. If False, then public derivation is used. If None, then it is figured out from the value of child_number. :type is_prime: bool, defaults to None :param as_private: If True, strips private key from the result. Defaults to False. If there is no private key present, this is ignored. :type as_private: bool Positive child_numbers (>= 0, < 2,147,483,648) produce publicly derived children. (prime=False) Negative numbers (> -2,147,483,648, < 0) use private derivation. (prime=True) NOTE: Python can't do -0, so if you want the privately derived 0th child you need to manually set is_prime=True. NOTE: negative numbered children are provided as a convenience because nobody wants to remember the above numbers. Negative numbers are considered 'prime children', which is described in the BIP32 spec as a leading 1 in a 32 bit unsigned int. This derivation is fully described at https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#child-key-derivation-functions # nopep8 """ boundary = 0x80000000 # Note: If this boundary check gets removed, then children above # the boundary should use private (prime) derivation. if abs(child_number) >= boundary: raise ValueError("Invalid child number %s" % child_number) # If is_prime isn't set, then we can infer it from the child_number if is_prime is None: # Prime children are either < 0 or > 0x80000000 if child_number < 0: child_number = abs(child_number) is_prime = True else: is_prime = False else: # Otherwise is_prime is set so the child_number should be between # 0 and 0x80000000 if child_number < 0 or child_number >= boundary: raise ValueError( "Invalid child number. 
Must be between 0 and %s" % boundary) if not self.private_key and is_prime: raise ValueError( "Cannot compute a prime child without a private key") if is_prime: # Even though we take child_number as an int < boundary, the # internal derivation needs it to be the larger number. child_number = child_number + boundary child_number_hex = long_to_hex(child_number, 8) if is_prime: # Let data = concat(0x00, self.key, child_number) data = b'00' + self.private_key.get_key() else: data = self.get_public_key_hex() data += child_number_hex # Compute a 64 Byte I that is the HMAC-SHA512, using self.chain_code # as the seed, and data as the message. I = hmac.new( unhexlify(ensure_bytes(self.chain_code)), msg=unhexlify(ensure_bytes(data)), digestmod=sha512).digest() # Split I into its 32 Byte components. I_L, I_R = I[:32], I[32:] if long_or_int(hexlify(I_L), 16) >= SECP256k1.order: raise InvalidPrivateKeyError("The derived key is too large.") c_i = hexlify(I_R) private_exponent = None public_pair = None if self.private_key: # Use private information for derivation # I_L is added to the current key's secret exponent (mod n), where # n is the order of the ECDSA curve in use. private_exponent = ( (long_or_int(hexlify(I_L), 16) + long_or_int(self.private_key.get_key(), 16)) % SECP256k1.order) # I_R is the child's chain code else: # Only use public information for this derivation g = SECP256k1.generator I_L_long = long_or_int(hexlify(I_L), 16) point = (_ECDSA_Public_key(g, g * I_L_long).point + self.public_key.to_point()) # I_R is the child's chain code public_pair = PublicPair(point.x(), point.y()) child = self.__class__( chain_code=c_i, depth=self.depth + 1, # we have to go deeper... 
parent_fingerprint=self.fingerprint, child_number=child_number_hex, private_exponent=private_exponent, public_pair=public_pair, network=self.network) if child.public_key.to_point() == INFINITY: raise InfinityPointException("The point at infinity is invalid.") if not as_private: return child.public_copy() return child
[ "def", "get_child", "(", "self", ",", "child_number", ",", "is_prime", "=", "None", ",", "as_private", "=", "True", ")", ":", "boundary", "=", "0x80000000", "# Note: If this boundary check gets removed, then children above", "# the boundary should use private (prime) derivation.", "if", "abs", "(", "child_number", ")", ">=", "boundary", ":", "raise", "ValueError", "(", "\"Invalid child number %s\"", "%", "child_number", ")", "# If is_prime isn't set, then we can infer it from the child_number", "if", "is_prime", "is", "None", ":", "# Prime children are either < 0 or > 0x80000000", "if", "child_number", "<", "0", ":", "child_number", "=", "abs", "(", "child_number", ")", "is_prime", "=", "True", "else", ":", "is_prime", "=", "False", "else", ":", "# Otherwise is_prime is set so the child_number should be between", "# 0 and 0x80000000", "if", "child_number", "<", "0", "or", "child_number", ">=", "boundary", ":", "raise", "ValueError", "(", "\"Invalid child number. Must be between 0 and %s\"", "%", "boundary", ")", "if", "not", "self", ".", "private_key", "and", "is_prime", ":", "raise", "ValueError", "(", "\"Cannot compute a prime child without a private key\"", ")", "if", "is_prime", ":", "# Even though we take child_number as an int < boundary, the", "# internal derivation needs it to be the larger number.", "child_number", "=", "child_number", "+", "boundary", "child_number_hex", "=", "long_to_hex", "(", "child_number", ",", "8", ")", "if", "is_prime", ":", "# Let data = concat(0x00, self.key, child_number)", "data", "=", "b'00'", "+", "self", ".", "private_key", ".", "get_key", "(", ")", "else", ":", "data", "=", "self", ".", "get_public_key_hex", "(", ")", "data", "+=", "child_number_hex", "# Compute a 64 Byte I that is the HMAC-SHA512, using self.chain_code", "# as the seed, and data as the message.", "I", "=", "hmac", ".", "new", "(", "unhexlify", "(", "ensure_bytes", "(", "self", ".", "chain_code", ")", ")", ",", "msg", "=", "unhexlify", "(", 
"ensure_bytes", "(", "data", ")", ")", ",", "digestmod", "=", "sha512", ")", ".", "digest", "(", ")", "# Split I into its 32 Byte components.", "I_L", ",", "I_R", "=", "I", "[", ":", "32", "]", ",", "I", "[", "32", ":", "]", "if", "long_or_int", "(", "hexlify", "(", "I_L", ")", ",", "16", ")", ">=", "SECP256k1", ".", "order", ":", "raise", "InvalidPrivateKeyError", "(", "\"The derived key is too large.\"", ")", "c_i", "=", "hexlify", "(", "I_R", ")", "private_exponent", "=", "None", "public_pair", "=", "None", "if", "self", ".", "private_key", ":", "# Use private information for derivation", "# I_L is added to the current key's secret exponent (mod n), where", "# n is the order of the ECDSA curve in use.", "private_exponent", "=", "(", "(", "long_or_int", "(", "hexlify", "(", "I_L", ")", ",", "16", ")", "+", "long_or_int", "(", "self", ".", "private_key", ".", "get_key", "(", ")", ",", "16", ")", ")", "%", "SECP256k1", ".", "order", ")", "# I_R is the child's chain code", "else", ":", "# Only use public information for this derivation", "g", "=", "SECP256k1", ".", "generator", "I_L_long", "=", "long_or_int", "(", "hexlify", "(", "I_L", ")", ",", "16", ")", "point", "=", "(", "_ECDSA_Public_key", "(", "g", ",", "g", "*", "I_L_long", ")", ".", "point", "+", "self", ".", "public_key", ".", "to_point", "(", ")", ")", "# I_R is the child's chain code", "public_pair", "=", "PublicPair", "(", "point", ".", "x", "(", ")", ",", "point", ".", "y", "(", ")", ")", "child", "=", "self", ".", "__class__", "(", "chain_code", "=", "c_i", ",", "depth", "=", "self", ".", "depth", "+", "1", ",", "# we have to go deeper...", "parent_fingerprint", "=", "self", ".", "fingerprint", ",", "child_number", "=", "child_number_hex", ",", "private_exponent", "=", "private_exponent", ",", "public_pair", "=", "public_pair", ",", "network", "=", "self", ".", "network", ")", "if", "child", ".", "public_key", ".", "to_point", "(", ")", "==", "INFINITY", ":", "raise", "InfinityPointException", "(", 
"\"The point at infinity is invalid.\"", ")", "if", "not", "as_private", ":", "return", "child", ".", "public_copy", "(", ")", "return", "child" ]
Derive a child key. :param child_number: The number of the child key to compute :type child_number: int :param is_prime: If True, the child is calculated via private derivation. If False, then public derivation is used. If None, then it is figured out from the value of child_number. :type is_prime: bool, defaults to None :param as_private: If True, strips private key from the result. Defaults to False. If there is no private key present, this is ignored. :type as_private: bool Positive child_numbers (>= 0, < 2,147,483,648) produce publicly derived children. (prime=False) Negative numbers (> -2,147,483,648, < 0) use private derivation. (prime=True) NOTE: Python can't do -0, so if you want the privately derived 0th child you need to manually set is_prime=True. NOTE: negative numbered children are provided as a convenience because nobody wants to remember the above numbers. Negative numbers are considered 'prime children', which is described in the BIP32 spec as a leading 1 in a 32 bit unsigned int. This derivation is fully described at https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#child-key-derivation-functions # nopep8
[ "Derive", "a", "child", "key", "." ]
train
https://github.com/sbuss/bitmerchant/blob/901de06489805c396a922f955eeef2da04734e3e/bitmerchant/wallet/bip32.py#L247-L363
sbuss/bitmerchant
bitmerchant/wallet/bip32.py
Wallet.crack_private_key
def crack_private_key(self, child_private_key): """Crack the parent private key given a child private key. BIP32 has a vulnerability/feature that allows you to recover the master private key if you're given a master public key and any of its publicly-derived child private keys. This is a pretty serious security vulnerability that looks as innocuous as this: >>> w = Wallet.new_random_wallet() >>> child = w.get_child(0, is_prime=False) >>> w_pub = w.public_copy() >>> assert w_pub.private_key is None >>> master_public_key = w_pub.serialize_b58(private=False) >>> # Now you put master_public_key on your website >>> # and give somebody a private key >>> public_master = Wallet.deserialize(master_public_key) >>> cracked_private_master = public_master.crack_private_key(child) >>> assert w == cracked_private_master # :( Implementation details from http://bitcoinmagazine.com/8396/deterministic-wallets-advantages-flaw/ # nopep8 """ if self.private_key: raise AssertionError("You already know the private key") if child_private_key.parent_fingerprint != self.fingerprint: raise ValueError("This is not a valid child") if child_private_key.child_number >= 0x80000000: raise ValueError( "Cannot crack private keys from private derivation") # Duplicate the public child derivation child_number_hex = long_to_hex(child_private_key.child_number, 8) data = self.get_public_key_hex() + child_number_hex I = hmac.new( unhexlify(ensure_bytes(self.chain_code)), msg=unhexlify(ensure_bytes(data)), digestmod=sha512).digest() I_L, I_R = I[:32], I[32:] # Public derivation is the same as private derivation plus some offset # knowing the child's private key allows us to find this offset just # by subtracting the child's private key from the parent I_L data privkey = PrivateKey(long_or_int(hexlify(I_L), 16), network=self.network) parent_private_key = child_private_key.private_key - privkey return self.__class__( chain_code=self.chain_code, depth=self.depth, parent_fingerprint=self.parent_fingerprint, 
child_number=self.child_number, private_key=parent_private_key, network=self.network)
python
def crack_private_key(self, child_private_key): """Crack the parent private key given a child private key. BIP32 has a vulnerability/feature that allows you to recover the master private key if you're given a master public key and any of its publicly-derived child private keys. This is a pretty serious security vulnerability that looks as innocuous as this: >>> w = Wallet.new_random_wallet() >>> child = w.get_child(0, is_prime=False) >>> w_pub = w.public_copy() >>> assert w_pub.private_key is None >>> master_public_key = w_pub.serialize_b58(private=False) >>> # Now you put master_public_key on your website >>> # and give somebody a private key >>> public_master = Wallet.deserialize(master_public_key) >>> cracked_private_master = public_master.crack_private_key(child) >>> assert w == cracked_private_master # :( Implementation details from http://bitcoinmagazine.com/8396/deterministic-wallets-advantages-flaw/ # nopep8 """ if self.private_key: raise AssertionError("You already know the private key") if child_private_key.parent_fingerprint != self.fingerprint: raise ValueError("This is not a valid child") if child_private_key.child_number >= 0x80000000: raise ValueError( "Cannot crack private keys from private derivation") # Duplicate the public child derivation child_number_hex = long_to_hex(child_private_key.child_number, 8) data = self.get_public_key_hex() + child_number_hex I = hmac.new( unhexlify(ensure_bytes(self.chain_code)), msg=unhexlify(ensure_bytes(data)), digestmod=sha512).digest() I_L, I_R = I[:32], I[32:] # Public derivation is the same as private derivation plus some offset # knowing the child's private key allows us to find this offset just # by subtracting the child's private key from the parent I_L data privkey = PrivateKey(long_or_int(hexlify(I_L), 16), network=self.network) parent_private_key = child_private_key.private_key - privkey return self.__class__( chain_code=self.chain_code, depth=self.depth, parent_fingerprint=self.parent_fingerprint, 
child_number=self.child_number, private_key=parent_private_key, network=self.network)
[ "def", "crack_private_key", "(", "self", ",", "child_private_key", ")", ":", "if", "self", ".", "private_key", ":", "raise", "AssertionError", "(", "\"You already know the private key\"", ")", "if", "child_private_key", ".", "parent_fingerprint", "!=", "self", ".", "fingerprint", ":", "raise", "ValueError", "(", "\"This is not a valid child\"", ")", "if", "child_private_key", ".", "child_number", ">=", "0x80000000", ":", "raise", "ValueError", "(", "\"Cannot crack private keys from private derivation\"", ")", "# Duplicate the public child derivation", "child_number_hex", "=", "long_to_hex", "(", "child_private_key", ".", "child_number", ",", "8", ")", "data", "=", "self", ".", "get_public_key_hex", "(", ")", "+", "child_number_hex", "I", "=", "hmac", ".", "new", "(", "unhexlify", "(", "ensure_bytes", "(", "self", ".", "chain_code", ")", ")", ",", "msg", "=", "unhexlify", "(", "ensure_bytes", "(", "data", ")", ")", ",", "digestmod", "=", "sha512", ")", ".", "digest", "(", ")", "I_L", ",", "I_R", "=", "I", "[", ":", "32", "]", ",", "I", "[", "32", ":", "]", "# Public derivation is the same as private derivation plus some offset", "# knowing the child's private key allows us to find this offset just", "# by subtracting the child's private key from the parent I_L data", "privkey", "=", "PrivateKey", "(", "long_or_int", "(", "hexlify", "(", "I_L", ")", ",", "16", ")", ",", "network", "=", "self", ".", "network", ")", "parent_private_key", "=", "child_private_key", ".", "private_key", "-", "privkey", "return", "self", ".", "__class__", "(", "chain_code", "=", "self", ".", "chain_code", ",", "depth", "=", "self", ".", "depth", ",", "parent_fingerprint", "=", "self", ".", "parent_fingerprint", ",", "child_number", "=", "self", ".", "child_number", ",", "private_key", "=", "parent_private_key", ",", "network", "=", "self", ".", "network", ")" ]
Crack the parent private key given a child private key. BIP32 has a vulnerability/feature that allows you to recover the master private key if you're given a master public key and any of its publicly-derived child private keys. This is a pretty serious security vulnerability that looks as innocuous as this: >>> w = Wallet.new_random_wallet() >>> child = w.get_child(0, is_prime=False) >>> w_pub = w.public_copy() >>> assert w_pub.private_key is None >>> master_public_key = w_pub.serialize_b58(private=False) >>> # Now you put master_public_key on your website >>> # and give somebody a private key >>> public_master = Wallet.deserialize(master_public_key) >>> cracked_private_master = public_master.crack_private_key(child) >>> assert w == cracked_private_master # :( Implementation details from http://bitcoinmagazine.com/8396/deterministic-wallets-advantages-flaw/ # nopep8
[ "Crack", "the", "parent", "private", "key", "given", "a", "child", "private", "key", "." ]
train
https://github.com/sbuss/bitmerchant/blob/901de06489805c396a922f955eeef2da04734e3e/bitmerchant/wallet/bip32.py#L375-L424
sbuss/bitmerchant
bitmerchant/wallet/bip32.py
Wallet.export_to_wif
def export_to_wif(self): """Export a key to WIF. See https://en.bitcoin.it/wiki/Wallet_import_format for a full description. """ # Add the network byte, creating the "extended key" extended_key_hex = self.private_key.get_extended_key() # BIP32 wallets have a trailing \01 byte extended_key_bytes = unhexlify(ensure_bytes(extended_key_hex)) + b'\01' # And return the base58-encoded result with a checksum return base58.b58encode_check(extended_key_bytes)
python
def export_to_wif(self): """Export a key to WIF. See https://en.bitcoin.it/wiki/Wallet_import_format for a full description. """ # Add the network byte, creating the "extended key" extended_key_hex = self.private_key.get_extended_key() # BIP32 wallets have a trailing \01 byte extended_key_bytes = unhexlify(ensure_bytes(extended_key_hex)) + b'\01' # And return the base58-encoded result with a checksum return base58.b58encode_check(extended_key_bytes)
[ "def", "export_to_wif", "(", "self", ")", ":", "# Add the network byte, creating the \"extended key\"", "extended_key_hex", "=", "self", ".", "private_key", ".", "get_extended_key", "(", ")", "# BIP32 wallets have a trailing \\01 byte", "extended_key_bytes", "=", "unhexlify", "(", "ensure_bytes", "(", "extended_key_hex", ")", ")", "+", "b'\\01'", "# And return the base58-encoded result with a checksum", "return", "base58", ".", "b58encode_check", "(", "extended_key_bytes", ")" ]
Export a key to WIF. See https://en.bitcoin.it/wiki/Wallet_import_format for a full description.
[ "Export", "a", "key", "to", "WIF", "." ]
train
https://github.com/sbuss/bitmerchant/blob/901de06489805c396a922f955eeef2da04734e3e/bitmerchant/wallet/bip32.py#L426-L437
CloverHealth/pytest-pgsql
deploy.py
_pypi_push
def _pypi_push(dist): """Push created package to PyPI. Requires the following defined environment variables: - TWINE_USERNAME: The PyPI username to upload this package under - TWINE_PASSWORD: The password to the user's account Args: dist (str): The distribution to push. Must be a valid directory; shell globs are NOT allowed. """ # Register all distributions and wheels with PyPI. We have to list the dist # directory and register each file individually because `twine` doesn't # handle globs. for filename in os.listdir(dist): full_path = os.path.join(dist, filename) if os.path.isfile(full_path): # This will fail if the project has never been uploaded, so use check=false _shell('twine register ' + shlex.quote(full_path), check=False) _shell('twine upload ' + shlex.quote(dist + '/*'))
python
def _pypi_push(dist): """Push created package to PyPI. Requires the following defined environment variables: - TWINE_USERNAME: The PyPI username to upload this package under - TWINE_PASSWORD: The password to the user's account Args: dist (str): The distribution to push. Must be a valid directory; shell globs are NOT allowed. """ # Register all distributions and wheels with PyPI. We have to list the dist # directory and register each file individually because `twine` doesn't # handle globs. for filename in os.listdir(dist): full_path = os.path.join(dist, filename) if os.path.isfile(full_path): # This will fail if the project has never been uploaded, so use check=false _shell('twine register ' + shlex.quote(full_path), check=False) _shell('twine upload ' + shlex.quote(dist + '/*'))
[ "def", "_pypi_push", "(", "dist", ")", ":", "# Register all distributions and wheels with PyPI. We have to list the dist", "# directory and register each file individually because `twine` doesn't", "# handle globs.", "for", "filename", "in", "os", ".", "listdir", "(", "dist", ")", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "dist", ",", "filename", ")", "if", "os", ".", "path", ".", "isfile", "(", "full_path", ")", ":", "# This will fail if the project has never been uploaded, so use check=false", "_shell", "(", "'twine register '", "+", "shlex", ".", "quote", "(", "full_path", ")", ",", "check", "=", "False", ")", "_shell", "(", "'twine upload '", "+", "shlex", ".", "quote", "(", "dist", "+", "'/*'", ")", ")" ]
Push created package to PyPI. Requires the following defined environment variables: - TWINE_USERNAME: The PyPI username to upload this package under - TWINE_PASSWORD: The password to the user's account Args: dist (str): The distribution to push. Must be a valid directory; shell globs are NOT allowed.
[ "Push", "created", "package", "to", "PyPI", "." ]
train
https://github.com/CloverHealth/pytest-pgsql/blob/a863ed4b652053e315dfa039d978b56f03664c07/deploy.py#L19-L40
CloverHealth/pytest-pgsql
deploy.py
deploy
def deploy(target): """Deploys the package and documentation. Proceeds in the following steps: 1. Ensures proper environment variables are set and checks that we are on Circle CI 2. Tags the repository with the new version 3. Creates a standard distribution and a wheel 4. Updates version.py to have the proper version 5. Commits the ChangeLog, AUTHORS, and version.py file 6. Pushes to PyPI 7. Pushes the tags and newly committed files Raises: `EnvironmentError`: - Not running on CircleCI - `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables are missing - Attempting to deploy to production from a branch that isn't master """ # Ensure proper environment if not os.getenv(CIRCLECI_ENV_VAR): # pragma: no cover raise EnvironmentError('Must be on CircleCI to run this script') current_branch = os.getenv('CIRCLE_BRANCH') if (target == 'PROD') and (current_branch != 'master'): raise EnvironmentError( f'Refusing to deploy to production from branch {current_branch!r}. ' f'Production deploys can only be made from master.') if target in ('PROD', 'TEST'): pypi_username = os.getenv(f'{target}_PYPI_USERNAME') pypi_password = os.getenv(f'{target}_PYPI_PASSWORD') else: raise ValueError(f"Deploy target must be 'PROD' or 'TEST', got {target!r}.") if not (pypi_username and pypi_password): # pragma: no cover raise EnvironmentError( f"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' " f"environment variables. These are required to push to PyPI.") # Twine requires these environment variables to be set. Subprocesses will # inherit these when we invoke them, so no need to pass them on the command # line. We want to avoid that in case something's logging each command run. 
os.environ['TWINE_USERNAME'] = pypi_username os.environ['TWINE_PASSWORD'] = pypi_password # Set up git on circle to push to the current branch _shell('git config --global user.email "[email protected]"') _shell('git config --global user.name "Circle CI"') _shell('git config push.default current') # Obtain the version to deploy ret = _shell('make version', stdout=subprocess.PIPE) version = ret.stdout.decode('utf-8').strip() print(f'Deploying version {version!r}...') # Tag the version _shell(f'git tag -f -a {version} -m "Version {version}"') # Update the version _shell( f'sed -i.bak "s/^__version__ = .*/__version__ = {version!r}/" */version.py') # Create a standard distribution and a wheel _shell('python setup.py sdist bdist_wheel') # Add the updated ChangeLog and AUTHORS _shell('git add ChangeLog AUTHORS */version.py') # Start the commit message with "Merge" so that PBR will ignore it in the # ChangeLog. Use [skip ci] to ensure CircleCI doesn't recursively deploy. _shell('git commit --no-verify -m "Merge autogenerated files [skip ci]"') # Push the distributions to PyPI. _pypi_push('dist') # Push the tag and AUTHORS / ChangeLog after successful PyPI deploy _shell('git push --follow-tags') print(f'Deployment complete. Latest version is {version}.')
python
def deploy(target): """Deploys the package and documentation. Proceeds in the following steps: 1. Ensures proper environment variables are set and checks that we are on Circle CI 2. Tags the repository with the new version 3. Creates a standard distribution and a wheel 4. Updates version.py to have the proper version 5. Commits the ChangeLog, AUTHORS, and version.py file 6. Pushes to PyPI 7. Pushes the tags and newly committed files Raises: `EnvironmentError`: - Not running on CircleCI - `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables are missing - Attempting to deploy to production from a branch that isn't master """ # Ensure proper environment if not os.getenv(CIRCLECI_ENV_VAR): # pragma: no cover raise EnvironmentError('Must be on CircleCI to run this script') current_branch = os.getenv('CIRCLE_BRANCH') if (target == 'PROD') and (current_branch != 'master'): raise EnvironmentError( f'Refusing to deploy to production from branch {current_branch!r}. ' f'Production deploys can only be made from master.') if target in ('PROD', 'TEST'): pypi_username = os.getenv(f'{target}_PYPI_USERNAME') pypi_password = os.getenv(f'{target}_PYPI_PASSWORD') else: raise ValueError(f"Deploy target must be 'PROD' or 'TEST', got {target!r}.") if not (pypi_username and pypi_password): # pragma: no cover raise EnvironmentError( f"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' " f"environment variables. These are required to push to PyPI.") # Twine requires these environment variables to be set. Subprocesses will # inherit these when we invoke them, so no need to pass them on the command # line. We want to avoid that in case something's logging each command run. 
os.environ['TWINE_USERNAME'] = pypi_username os.environ['TWINE_PASSWORD'] = pypi_password # Set up git on circle to push to the current branch _shell('git config --global user.email "[email protected]"') _shell('git config --global user.name "Circle CI"') _shell('git config push.default current') # Obtain the version to deploy ret = _shell('make version', stdout=subprocess.PIPE) version = ret.stdout.decode('utf-8').strip() print(f'Deploying version {version!r}...') # Tag the version _shell(f'git tag -f -a {version} -m "Version {version}"') # Update the version _shell( f'sed -i.bak "s/^__version__ = .*/__version__ = {version!r}/" */version.py') # Create a standard distribution and a wheel _shell('python setup.py sdist bdist_wheel') # Add the updated ChangeLog and AUTHORS _shell('git add ChangeLog AUTHORS */version.py') # Start the commit message with "Merge" so that PBR will ignore it in the # ChangeLog. Use [skip ci] to ensure CircleCI doesn't recursively deploy. _shell('git commit --no-verify -m "Merge autogenerated files [skip ci]"') # Push the distributions to PyPI. _pypi_push('dist') # Push the tag and AUTHORS / ChangeLog after successful PyPI deploy _shell('git push --follow-tags') print(f'Deployment complete. Latest version is {version}.')
[ "def", "deploy", "(", "target", ")", ":", "# Ensure proper environment", "if", "not", "os", ".", "getenv", "(", "CIRCLECI_ENV_VAR", ")", ":", "# pragma: no cover", "raise", "EnvironmentError", "(", "'Must be on CircleCI to run this script'", ")", "current_branch", "=", "os", ".", "getenv", "(", "'CIRCLE_BRANCH'", ")", "if", "(", "target", "==", "'PROD'", ")", "and", "(", "current_branch", "!=", "'master'", ")", ":", "raise", "EnvironmentError", "(", "f'Refusing to deploy to production from branch {current_branch!r}. '", "f'Production deploys can only be made from master.'", ")", "if", "target", "in", "(", "'PROD'", ",", "'TEST'", ")", ":", "pypi_username", "=", "os", ".", "getenv", "(", "f'{target}_PYPI_USERNAME'", ")", "pypi_password", "=", "os", ".", "getenv", "(", "f'{target}_PYPI_PASSWORD'", ")", "else", ":", "raise", "ValueError", "(", "f\"Deploy target must be 'PROD' or 'TEST', got {target!r}.\"", ")", "if", "not", "(", "pypi_username", "and", "pypi_password", ")", ":", "# pragma: no cover", "raise", "EnvironmentError", "(", "f\"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' \"", "f\"environment variables. These are required to push to PyPI.\"", ")", "# Twine requires these environment variables to be set. Subprocesses will", "# inherit these when we invoke them, so no need to pass them on the command", "# line. 
We want to avoid that in case something's logging each command run.", "os", ".", "environ", "[", "'TWINE_USERNAME'", "]", "=", "pypi_username", "os", ".", "environ", "[", "'TWINE_PASSWORD'", "]", "=", "pypi_password", "# Set up git on circle to push to the current branch", "_shell", "(", "'git config --global user.email \"[email protected]\"'", ")", "_shell", "(", "'git config --global user.name \"Circle CI\"'", ")", "_shell", "(", "'git config push.default current'", ")", "# Obtain the version to deploy", "ret", "=", "_shell", "(", "'make version'", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "version", "=", "ret", ".", "stdout", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", "print", "(", "f'Deploying version {version!r}...'", ")", "# Tag the version", "_shell", "(", "f'git tag -f -a {version} -m \"Version {version}\"'", ")", "# Update the version", "_shell", "(", "f'sed -i.bak \"s/^__version__ = .*/__version__ = {version!r}/\" */version.py'", ")", "# Create a standard distribution and a wheel", "_shell", "(", "'python setup.py sdist bdist_wheel'", ")", "# Add the updated ChangeLog and AUTHORS", "_shell", "(", "'git add ChangeLog AUTHORS */version.py'", ")", "# Start the commit message with \"Merge\" so that PBR will ignore it in the", "# ChangeLog. Use [skip ci] to ensure CircleCI doesn't recursively deploy.", "_shell", "(", "'git commit --no-verify -m \"Merge autogenerated files [skip ci]\"'", ")", "# Push the distributions to PyPI.", "_pypi_push", "(", "'dist'", ")", "# Push the tag and AUTHORS / ChangeLog after successful PyPI deploy", "_shell", "(", "'git push --follow-tags'", ")", "print", "(", "f'Deployment complete. Latest version is {version}.'", ")" ]
Deploys the package and documentation. Proceeds in the following steps: 1. Ensures proper environment variables are set and checks that we are on Circle CI 2. Tags the repository with the new version 3. Creates a standard distribution and a wheel 4. Updates version.py to have the proper version 5. Commits the ChangeLog, AUTHORS, and version.py file 6. Pushes to PyPI 7. Pushes the tags and newly committed files Raises: `EnvironmentError`: - Not running on CircleCI - `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables are missing - Attempting to deploy to production from a branch that isn't master
[ "Deploys", "the", "package", "and", "documentation", "." ]
train
https://github.com/CloverHealth/pytest-pgsql/blob/a863ed4b652053e315dfa039d978b56f03664c07/deploy.py#L43-L124
pyvisa/pyvisa-sim
pyvisa-sim/parser.py
_get_triplet
def _get_triplet(dd): """Return a triplet from a dialogue dictionary. :param dd: Dialogue dictionary. :type dd: Dict[str, str] :return: (query, response, error response) :rtype: (str, str | NoResponse, str | NoResponse) """ return _s(dd['q']), _s(dd.get('r', NoResponse)), _s(dd.get('e', NoResponse))
python
def _get_triplet(dd): """Return a triplet from a dialogue dictionary. :param dd: Dialogue dictionary. :type dd: Dict[str, str] :return: (query, response, error response) :rtype: (str, str | NoResponse, str | NoResponse) """ return _s(dd['q']), _s(dd.get('r', NoResponse)), _s(dd.get('e', NoResponse))
[ "def", "_get_triplet", "(", "dd", ")", ":", "return", "_s", "(", "dd", "[", "'q'", "]", ")", ",", "_s", "(", "dd", ".", "get", "(", "'r'", ",", "NoResponse", ")", ")", ",", "_s", "(", "dd", ".", "get", "(", "'e'", ",", "NoResponse", ")", ")" ]
Return a triplet from a dialogue dictionary. :param dd: Dialogue dictionary. :type dd: Dict[str, str] :return: (query, response, error response) :rtype: (str, str | NoResponse, str | NoResponse)
[ "Return", "a", "triplet", "from", "a", "dialogue", "dictionary", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L71-L79
pyvisa/pyvisa-sim
pyvisa-sim/parser.py
_load
def _load(content_or_fp): """YAML Parse a file or str and check version. """ try: data = yaml.load(content_or_fp, Loader=yaml.loader.BaseLoader) except Exception as e: raise type(e)('Malformed yaml file:\n%r' % format_exc()) try: ver = data['spec'] except: raise ValueError('The file does not specify a spec version') try: ver = tuple(map(int, (ver.split(".")))) except: raise ValueError("Invalid spec version format. Expect 'X.Y'" " (X and Y integers), found %s" % ver) if ver > SPEC_VERSION_TUPLE: raise ValueError('The spec version of the file is ' '%s but the parser is %s. ' 'Please update pyvisa-sim.' % (ver, SPEC_VERSION)) return data
python
def _load(content_or_fp): """YAML Parse a file or str and check version. """ try: data = yaml.load(content_or_fp, Loader=yaml.loader.BaseLoader) except Exception as e: raise type(e)('Malformed yaml file:\n%r' % format_exc()) try: ver = data['spec'] except: raise ValueError('The file does not specify a spec version') try: ver = tuple(map(int, (ver.split(".")))) except: raise ValueError("Invalid spec version format. Expect 'X.Y'" " (X and Y integers), found %s" % ver) if ver > SPEC_VERSION_TUPLE: raise ValueError('The spec version of the file is ' '%s but the parser is %s. ' 'Please update pyvisa-sim.' % (ver, SPEC_VERSION)) return data
[ "def", "_load", "(", "content_or_fp", ")", ":", "try", ":", "data", "=", "yaml", ".", "load", "(", "content_or_fp", ",", "Loader", "=", "yaml", ".", "loader", ".", "BaseLoader", ")", "except", "Exception", "as", "e", ":", "raise", "type", "(", "e", ")", "(", "'Malformed yaml file:\\n%r'", "%", "format_exc", "(", ")", ")", "try", ":", "ver", "=", "data", "[", "'spec'", "]", "except", ":", "raise", "ValueError", "(", "'The file does not specify a spec version'", ")", "try", ":", "ver", "=", "tuple", "(", "map", "(", "int", ",", "(", "ver", ".", "split", "(", "\".\"", ")", ")", ")", ")", "except", ":", "raise", "ValueError", "(", "\"Invalid spec version format. Expect 'X.Y'\"", "\" (X and Y integers), found %s\"", "%", "ver", ")", "if", "ver", ">", "SPEC_VERSION_TUPLE", ":", "raise", "ValueError", "(", "'The spec version of the file is '", "'%s but the parser is %s. '", "'Please update pyvisa-sim.'", "%", "(", "ver", ",", "SPEC_VERSION", ")", ")", "return", "data" ]
YAML Parse a file or str and check version.
[ "YAML", "Parse", "a", "file", "or", "str", "and", "check", "version", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L82-L106
pyvisa/pyvisa-sim
pyvisa-sim/parser.py
parse_resource
def parse_resource(name): """Parse a resource file """ with closing(pkg_resources.resource_stream(__name__, name)) as fp: rbytes = fp.read() return _load(StringIO(rbytes.decode('utf-8')))
python
def parse_resource(name): """Parse a resource file """ with closing(pkg_resources.resource_stream(__name__, name)) as fp: rbytes = fp.read() return _load(StringIO(rbytes.decode('utf-8')))
[ "def", "parse_resource", "(", "name", ")", ":", "with", "closing", "(", "pkg_resources", ".", "resource_stream", "(", "__name__", ",", "name", ")", ")", "as", "fp", ":", "rbytes", "=", "fp", ".", "read", "(", ")", "return", "_load", "(", "StringIO", "(", "rbytes", ".", "decode", "(", "'utf-8'", ")", ")", ")" ]
Parse a resource file
[ "Parse", "a", "resource", "file" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L109-L115
pyvisa/pyvisa-sim
pyvisa-sim/parser.py
update_component
def update_component(name, comp, component_dict): """Get a component from a component dict. """ for dia in component_dict.get('dialogues', ()): try: comp.add_dialogue(*_get_pair(dia)) except Exception as e: msg = 'In device %s, malformed dialogue %s\n%r' raise Exception(msg % (name, dia, e)) for prop_name, prop_dict in component_dict.get('properties', {}).items(): try: getter = (_get_pair(prop_dict['getter']) if 'getter' in prop_dict else None) setter = (_get_triplet(prop_dict['setter']) if 'setter' in prop_dict else None) comp.add_property(prop_name, prop_dict.get('default', ''), getter, setter, prop_dict.get('specs', {})) except Exception as e: msg = 'In device %s, malformed property %s\n%r' raise type(e)(msg % (name, prop_name, format_exc()))
python
def update_component(name, comp, component_dict): """Get a component from a component dict. """ for dia in component_dict.get('dialogues', ()): try: comp.add_dialogue(*_get_pair(dia)) except Exception as e: msg = 'In device %s, malformed dialogue %s\n%r' raise Exception(msg % (name, dia, e)) for prop_name, prop_dict in component_dict.get('properties', {}).items(): try: getter = (_get_pair(prop_dict['getter']) if 'getter' in prop_dict else None) setter = (_get_triplet(prop_dict['setter']) if 'setter' in prop_dict else None) comp.add_property(prop_name, prop_dict.get('default', ''), getter, setter, prop_dict.get('specs', {})) except Exception as e: msg = 'In device %s, malformed property %s\n%r' raise type(e)(msg % (name, prop_name, format_exc()))
[ "def", "update_component", "(", "name", ",", "comp", ",", "component_dict", ")", ":", "for", "dia", "in", "component_dict", ".", "get", "(", "'dialogues'", ",", "(", ")", ")", ":", "try", ":", "comp", ".", "add_dialogue", "(", "*", "_get_pair", "(", "dia", ")", ")", "except", "Exception", "as", "e", ":", "msg", "=", "'In device %s, malformed dialogue %s\\n%r'", "raise", "Exception", "(", "msg", "%", "(", "name", ",", "dia", ",", "e", ")", ")", "for", "prop_name", ",", "prop_dict", "in", "component_dict", ".", "get", "(", "'properties'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "try", ":", "getter", "=", "(", "_get_pair", "(", "prop_dict", "[", "'getter'", "]", ")", "if", "'getter'", "in", "prop_dict", "else", "None", ")", "setter", "=", "(", "_get_triplet", "(", "prop_dict", "[", "'setter'", "]", ")", "if", "'setter'", "in", "prop_dict", "else", "None", ")", "comp", ".", "add_property", "(", "prop_name", ",", "prop_dict", ".", "get", "(", "'default'", ",", "''", ")", ",", "getter", ",", "setter", ",", "prop_dict", ".", "get", "(", "'specs'", ",", "{", "}", ")", ")", "except", "Exception", "as", "e", ":", "msg", "=", "'In device %s, malformed property %s\\n%r'", "raise", "type", "(", "e", ")", "(", "msg", "%", "(", "name", ",", "prop_name", ",", "format_exc", "(", ")", ")", ")" ]
Get a component from a component dict.
[ "Get", "a", "component", "from", "a", "component", "dict", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L126-L147
pyvisa/pyvisa-sim
pyvisa-sim/parser.py
get_bases
def get_bases(definition_dict, loader): """Collect dependencies. """ bases = definition_dict.get('bases', ()) if bases: bases = (loader.get_comp_dict(required_version=SPEC_VERSION_TUPLE[0], **b) for b in bases) return SimpleChainmap(definition_dict, *bases) else: return definition_dict
python
def get_bases(definition_dict, loader): """Collect dependencies. """ bases = definition_dict.get('bases', ()) if bases: bases = (loader.get_comp_dict(required_version=SPEC_VERSION_TUPLE[0], **b) for b in bases) return SimpleChainmap(definition_dict, *bases) else: return definition_dict
[ "def", "get_bases", "(", "definition_dict", ",", "loader", ")", ":", "bases", "=", "definition_dict", ".", "get", "(", "'bases'", ",", "(", ")", ")", "if", "bases", ":", "bases", "=", "(", "loader", ".", "get_comp_dict", "(", "required_version", "=", "SPEC_VERSION_TUPLE", "[", "0", "]", ",", "*", "*", "b", ")", "for", "b", "in", "bases", ")", "return", "SimpleChainmap", "(", "definition_dict", ",", "*", "bases", ")", "else", ":", "return", "definition_dict" ]
Collect dependencies.
[ "Collect", "dependencies", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L150-L161
pyvisa/pyvisa-sim
pyvisa-sim/parser.py
get_channel
def get_channel(device, ch_name, channel_dict, loader, resource_dict): """Get a channels from a channels dictionary. :param name: name of the device :param device_dict: device dictionary :rtype: Device """ channel_dict = get_bases(channel_dict, loader) r_ids = resource_dict.get('channel_ids', {}).get(ch_name, []) ids = r_ids if r_ids else channel_dict.get('ids', {}) can_select = False if channel_dict.get('can_select') == 'False' else True channels = Channels(device, ids, can_select) update_component(ch_name, channels, channel_dict) return channels
python
def get_channel(device, ch_name, channel_dict, loader, resource_dict): """Get a channels from a channels dictionary. :param name: name of the device :param device_dict: device dictionary :rtype: Device """ channel_dict = get_bases(channel_dict, loader) r_ids = resource_dict.get('channel_ids', {}).get(ch_name, []) ids = r_ids if r_ids else channel_dict.get('ids', {}) can_select = False if channel_dict.get('can_select') == 'False' else True channels = Channels(device, ids, can_select) update_component(ch_name, channels, channel_dict) return channels
[ "def", "get_channel", "(", "device", ",", "ch_name", ",", "channel_dict", ",", "loader", ",", "resource_dict", ")", ":", "channel_dict", "=", "get_bases", "(", "channel_dict", ",", "loader", ")", "r_ids", "=", "resource_dict", ".", "get", "(", "'channel_ids'", ",", "{", "}", ")", ".", "get", "(", "ch_name", ",", "[", "]", ")", "ids", "=", "r_ids", "if", "r_ids", "else", "channel_dict", ".", "get", "(", "'ids'", ",", "{", "}", ")", "can_select", "=", "False", "if", "channel_dict", ".", "get", "(", "'can_select'", ")", "==", "'False'", "else", "True", "channels", "=", "Channels", "(", "device", ",", "ids", ",", "can_select", ")", "update_component", "(", "ch_name", ",", "channels", ",", "channel_dict", ")", "return", "channels" ]
Get a channels from a channels dictionary. :param name: name of the device :param device_dict: device dictionary :rtype: Device
[ "Get", "a", "channels", "from", "a", "channels", "dictionary", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L164-L181
pyvisa/pyvisa-sim
pyvisa-sim/parser.py
get_device
def get_device(name, device_dict, loader, resource_dict): """Get a device from a device dictionary. :param name: name of the device :param device_dict: device dictionary :rtype: Device """ device = Device(name, device_dict.get('delimiter', ';').encode('utf-8')) device_dict = get_bases(device_dict, loader) err = device_dict.get('error', {}) device.add_error_handler(err) for itype, eom_dict in device_dict.get('eom', {}).items(): device.add_eom(itype, *_get_pair(eom_dict)) update_component(name, device, device_dict) for ch_name, ch_dict in device_dict.get('channels', {}).items(): device.add_channels(ch_name, get_channel(device, ch_name, ch_dict, loader, resource_dict)) return device
python
def get_device(name, device_dict, loader, resource_dict): """Get a device from a device dictionary. :param name: name of the device :param device_dict: device dictionary :rtype: Device """ device = Device(name, device_dict.get('delimiter', ';').encode('utf-8')) device_dict = get_bases(device_dict, loader) err = device_dict.get('error', {}) device.add_error_handler(err) for itype, eom_dict in device_dict.get('eom', {}).items(): device.add_eom(itype, *_get_pair(eom_dict)) update_component(name, device, device_dict) for ch_name, ch_dict in device_dict.get('channels', {}).items(): device.add_channels(ch_name, get_channel(device, ch_name, ch_dict, loader, resource_dict)) return device
[ "def", "get_device", "(", "name", ",", "device_dict", ",", "loader", ",", "resource_dict", ")", ":", "device", "=", "Device", "(", "name", ",", "device_dict", ".", "get", "(", "'delimiter'", ",", "';'", ")", ".", "encode", "(", "'utf-8'", ")", ")", "device_dict", "=", "get_bases", "(", "device_dict", ",", "loader", ")", "err", "=", "device_dict", ".", "get", "(", "'error'", ",", "{", "}", ")", "device", ".", "add_error_handler", "(", "err", ")", "for", "itype", ",", "eom_dict", "in", "device_dict", ".", "get", "(", "'eom'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "device", ".", "add_eom", "(", "itype", ",", "*", "_get_pair", "(", "eom_dict", ")", ")", "update_component", "(", "name", ",", "device", ",", "device_dict", ")", "for", "ch_name", ",", "ch_dict", "in", "device_dict", ".", "get", "(", "'channels'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "device", ".", "add_channels", "(", "ch_name", ",", "get_channel", "(", "device", ",", "ch_name", ",", "ch_dict", ",", "loader", ",", "resource_dict", ")", ")", "return", "device" ]
Get a device from a device dictionary. :param name: name of the device :param device_dict: device dictionary :rtype: Device
[ "Get", "a", "device", "from", "a", "device", "dictionary", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L184-L207
pyvisa/pyvisa-sim
pyvisa-sim/parser.py
get_devices
def get_devices(filename, bundled): """Get a Devices object from a file. :param filename: full path of the file to parse or name of the resource. :param is_resource: boolean indicating if it is a resource. :rtype: Devices """ loader = Loader(filename, bundled) data = loader.data devices = Devices() # Iterate through the resources and generate each individual device # on demand. for resource_name, resource_dict in data.get('resources', {}).items(): device_name = resource_dict['device'] dd = loader.get_device_dict(device_name, resource_dict.get('filename', None), resource_dict.get('bundled', False), SPEC_VERSION_TUPLE[0]) devices.add_device(resource_name, get_device(device_name, dd, loader, resource_dict)) return devices
python
def get_devices(filename, bundled): """Get a Devices object from a file. :param filename: full path of the file to parse or name of the resource. :param is_resource: boolean indicating if it is a resource. :rtype: Devices """ loader = Loader(filename, bundled) data = loader.data devices = Devices() # Iterate through the resources and generate each individual device # on demand. for resource_name, resource_dict in data.get('resources', {}).items(): device_name = resource_dict['device'] dd = loader.get_device_dict(device_name, resource_dict.get('filename', None), resource_dict.get('bundled', False), SPEC_VERSION_TUPLE[0]) devices.add_device(resource_name, get_device(device_name, dd, loader, resource_dict)) return devices
[ "def", "get_devices", "(", "filename", ",", "bundled", ")", ":", "loader", "=", "Loader", "(", "filename", ",", "bundled", ")", "data", "=", "loader", ".", "data", "devices", "=", "Devices", "(", ")", "# Iterate through the resources and generate each individual device", "# on demand.", "for", "resource_name", ",", "resource_dict", "in", "data", ".", "get", "(", "'resources'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "device_name", "=", "resource_dict", "[", "'device'", "]", "dd", "=", "loader", ".", "get_device_dict", "(", "device_name", ",", "resource_dict", ".", "get", "(", "'filename'", ",", "None", ")", ",", "resource_dict", ".", "get", "(", "'bundled'", ",", "False", ")", ",", "SPEC_VERSION_TUPLE", "[", "0", "]", ")", "devices", ".", "add_device", "(", "resource_name", ",", "get_device", "(", "device_name", ",", "dd", ",", "loader", ",", "resource_dict", ")", ")", "return", "devices" ]
Get a Devices object from a file. :param filename: full path of the file to parse or name of the resource. :param is_resource: boolean indicating if it is a resource. :rtype: Devices
[ "Get", "a", "Devices", "object", "from", "a", "file", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L270-L298
pyvisa/pyvisa-sim
pyvisa-sim/channels.py
ChannelProperty.init_value
def init_value(self, string_value): """Create an empty defaultdict holding the default value. """ value = self.validate_value(string_value) self._value = defaultdict(lambda: value)
python
def init_value(self, string_value): """Create an empty defaultdict holding the default value. """ value = self.validate_value(string_value) self._value = defaultdict(lambda: value)
[ "def", "init_value", "(", "self", ",", "string_value", ")", ":", "value", "=", "self", ".", "validate_value", "(", "string_value", ")", "self", ".", "_value", "=", "defaultdict", "(", "lambda", ":", "value", ")" ]
Create an empty defaultdict holding the default value.
[ "Create", "an", "empty", "defaultdict", "holding", "the", "default", "value", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L30-L35
pyvisa/pyvisa-sim
pyvisa-sim/channels.py
ChannelProperty.set_value
def set_value(self, string_value): """Set the current value for a channel. """ value = self.validate_value(string_value) self._value[self._channel._selected] = value
python
def set_value(self, string_value): """Set the current value for a channel. """ value = self.validate_value(string_value) self._value[self._channel._selected] = value
[ "def", "set_value", "(", "self", ",", "string_value", ")", ":", "value", "=", "self", ".", "validate_value", "(", "string_value", ")", "self", ".", "_value", "[", "self", ".", "_channel", ".", "_selected", "]", "=", "value" ]
Set the current value for a channel.
[ "Set", "the", "current", "value", "for", "a", "channel", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L43-L48
pyvisa/pyvisa-sim
pyvisa-sim/channels.py
Channels.add_dialogue
def add_dialogue(self, query, response): """Add dialogue to channel. :param query: query string :param response: response string """ self._dialogues['__default__'][to_bytes(query)] = to_bytes(response)
python
def add_dialogue(self, query, response): """Add dialogue to channel. :param query: query string :param response: response string """ self._dialogues['__default__'][to_bytes(query)] = to_bytes(response)
[ "def", "add_dialogue", "(", "self", ",", "query", ",", "response", ")", ":", "self", ".", "_dialogues", "[", "'__default__'", "]", "[", "to_bytes", "(", "query", ")", "]", "=", "to_bytes", "(", "response", ")" ]
Add dialogue to channel. :param query: query string :param response: response string
[ "Add", "dialogue", "to", "channel", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L93-L99
pyvisa/pyvisa-sim
pyvisa-sim/channels.py
Channels.add_property
def add_property(self, name, default_value, getter_pair, setter_triplet, specs): """Add property to channel :param name: property name :param default_value: default value as string :param getter_pair: (query, response) :param setter_triplet: (query, response, error) :param specs: specification of the Property """ self._properties[name] = ChannelProperty(self, name, default_value, specs) if getter_pair: query, response = getter_pair self._getters['__default__'][to_bytes(query)] = name, response if setter_triplet: query, response, error = setter_triplet self._setters.append((name, stringparser.Parser(query), to_bytes(response), to_bytes(error)))
python
def add_property(self, name, default_value, getter_pair, setter_triplet, specs): """Add property to channel :param name: property name :param default_value: default value as string :param getter_pair: (query, response) :param setter_triplet: (query, response, error) :param specs: specification of the Property """ self._properties[name] = ChannelProperty(self, name, default_value, specs) if getter_pair: query, response = getter_pair self._getters['__default__'][to_bytes(query)] = name, response if setter_triplet: query, response, error = setter_triplet self._setters.append((name, stringparser.Parser(query), to_bytes(response), to_bytes(error)))
[ "def", "add_property", "(", "self", ",", "name", ",", "default_value", ",", "getter_pair", ",", "setter_triplet", ",", "specs", ")", ":", "self", ".", "_properties", "[", "name", "]", "=", "ChannelProperty", "(", "self", ",", "name", ",", "default_value", ",", "specs", ")", "if", "getter_pair", ":", "query", ",", "response", "=", "getter_pair", "self", ".", "_getters", "[", "'__default__'", "]", "[", "to_bytes", "(", "query", ")", "]", "=", "name", ",", "response", "if", "setter_triplet", ":", "query", ",", "response", ",", "error", "=", "setter_triplet", "self", ".", "_setters", ".", "append", "(", "(", "name", ",", "stringparser", ".", "Parser", "(", "query", ")", ",", "to_bytes", "(", "response", ")", ",", "to_bytes", "(", "error", ")", ")", ")" ]
Add property to channel :param name: property name :param default_value: default value as string :param getter_pair: (query, response) :param setter_triplet: (query, response, error) :param specs: specification of the Property
[ "Add", "property", "to", "channel" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L101-L123
pyvisa/pyvisa-sim
pyvisa-sim/channels.py
Channels.match
def match(self, query): """Try to find a match for a query in the channel commands. """ if not self.can_select: ch_id = self._device._properties['selected_channel'].get_value() if ch_id in self._ids: self._selected = ch_id else: return response = self._match_dialog(query, self._dialogues['__default__']) if response is not None: return response response = self._match_getters(query, self._getters['__default__']) if response is not None: return response else: for ch_id in self._ids: self._selected = ch_id response = self._match_dialog(query, self._dialogues[ch_id]) if response is not None: return response response = self._match_getters(query, self._getters[ch_id]) if response is not None: return response return self._match_setters(query)
python
def match(self, query): """Try to find a match for a query in the channel commands. """ if not self.can_select: ch_id = self._device._properties['selected_channel'].get_value() if ch_id in self._ids: self._selected = ch_id else: return response = self._match_dialog(query, self._dialogues['__default__']) if response is not None: return response response = self._match_getters(query, self._getters['__default__']) if response is not None: return response else: for ch_id in self._ids: self._selected = ch_id response = self._match_dialog(query, self._dialogues[ch_id]) if response is not None: return response response = self._match_getters(query, self._getters[ch_id]) if response is not None: return response return self._match_setters(query)
[ "def", "match", "(", "self", ",", "query", ")", ":", "if", "not", "self", ".", "can_select", ":", "ch_id", "=", "self", ".", "_device", ".", "_properties", "[", "'selected_channel'", "]", ".", "get_value", "(", ")", "if", "ch_id", "in", "self", ".", "_ids", ":", "self", ".", "_selected", "=", "ch_id", "else", ":", "return", "response", "=", "self", ".", "_match_dialog", "(", "query", ",", "self", ".", "_dialogues", "[", "'__default__'", "]", ")", "if", "response", "is", "not", "None", ":", "return", "response", "response", "=", "self", ".", "_match_getters", "(", "query", ",", "self", ".", "_getters", "[", "'__default__'", "]", ")", "if", "response", "is", "not", "None", ":", "return", "response", "else", ":", "for", "ch_id", "in", "self", ".", "_ids", ":", "self", ".", "_selected", "=", "ch_id", "response", "=", "self", ".", "_match_dialog", "(", "query", ",", "self", ".", "_dialogues", "[", "ch_id", "]", ")", "if", "response", "is", "not", "None", ":", "return", "response", "response", "=", "self", ".", "_match_getters", "(", "query", ",", "self", ".", "_getters", "[", "ch_id", "]", ")", "if", "response", "is", "not", "None", ":", "return", "response", "return", "self", ".", "_match_setters", "(", "query", ")" ]
Try to find a match for a query in the channel commands.
[ "Try", "to", "find", "a", "match", "for", "a", "query", "in", "the", "channel", "commands", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L125-L160
pyvisa/pyvisa-sim
pyvisa-sim/channels.py
Channels._match_setters
def _match_setters(self, query): """Try to find a match """ q = query.decode('utf-8') for name, parser, response, error_response in self._setters: try: parsed = parser(q) logger.debug('Found response in setter of %s' % name) except ValueError: continue try: if isinstance(parsed, dict) and 'ch_id' in parsed: self._selected = parsed['ch_id'] self._properties[name].set_value(parsed['0']) else: self._properties[name].set_value(parsed) return response except ValueError: if isinstance(error_response, bytes): return error_response return self._device.error_response('command_error') return None
python
def _match_setters(self, query): """Try to find a match """ q = query.decode('utf-8') for name, parser, response, error_response in self._setters: try: parsed = parser(q) logger.debug('Found response in setter of %s' % name) except ValueError: continue try: if isinstance(parsed, dict) and 'ch_id' in parsed: self._selected = parsed['ch_id'] self._properties[name].set_value(parsed['0']) else: self._properties[name].set_value(parsed) return response except ValueError: if isinstance(error_response, bytes): return error_response return self._device.error_response('command_error') return None
[ "def", "_match_setters", "(", "self", ",", "query", ")", ":", "q", "=", "query", ".", "decode", "(", "'utf-8'", ")", "for", "name", ",", "parser", ",", "response", ",", "error_response", "in", "self", ".", "_setters", ":", "try", ":", "parsed", "=", "parser", "(", "q", ")", "logger", ".", "debug", "(", "'Found response in setter of %s'", "%", "name", ")", "except", "ValueError", ":", "continue", "try", ":", "if", "isinstance", "(", "parsed", ",", "dict", ")", "and", "'ch_id'", "in", "parsed", ":", "self", ".", "_selected", "=", "parsed", "[", "'ch_id'", "]", "self", ".", "_properties", "[", "name", "]", ".", "set_value", "(", "parsed", "[", "'0'", "]", ")", "else", ":", "self", ".", "_properties", "[", "name", "]", ".", "set_value", "(", "parsed", ")", "return", "response", "except", "ValueError", ":", "if", "isinstance", "(", "error_response", ",", "bytes", ")", ":", "return", "error_response", "return", "self", ".", "_device", ".", "error_response", "(", "'command_error'", ")", "return", "None" ]
Try to find a match
[ "Try", "to", "find", "a", "match" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L162-L185
pyvisa/pyvisa-sim
pyvisa-sim/highlevel.py
SimVisaLibrary.get_debug_info
def get_debug_info(): """Return a list of lines with backend info. """ from . import __version__ from .parser import SPEC_VERSION d = OrderedDict() d['Version'] = '%s' % __version__ d['Spec version'] = SPEC_VERSION return d
python
def get_debug_info(): """Return a list of lines with backend info. """ from . import __version__ from .parser import SPEC_VERSION d = OrderedDict() d['Version'] = '%s' % __version__ d['Spec version'] = SPEC_VERSION return d
[ "def", "get_debug_info", "(", ")", ":", "from", ".", "import", "__version__", "from", ".", "parser", "import", "SPEC_VERSION", "d", "=", "OrderedDict", "(", ")", "d", "[", "'Version'", "]", "=", "'%s'", "%", "__version__", "d", "[", "'Spec version'", "]", "=", "SPEC_VERSION", "return", "d" ]
Return a list of lines with backend info.
[ "Return", "a", "list", "of", "lines", "with", "backend", "info", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/highlevel.py#L44-L53
pyvisa/pyvisa-sim
pyvisa-sim/highlevel.py
SimVisaLibrary.open
def open(self, session, resource_name, access_mode=constants.AccessModes.no_lock, open_timeout=constants.VI_TMO_IMMEDIATE): """Opens a session to the specified resource. Corresponds to viOpen function of the VISA library. :param session: Resource Manager session (should always be a session returned from open_default_resource_manager()). :param resource_name: Unique symbolic name of a resource. :param access_mode: Specifies the mode by which the resource is to be accessed. (constants.AccessModes) :param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits before returning an error. :return: Unique logical identifier reference to a session, return value of the library call. :rtype: session, :class:`pyvisa.constants.StatusCode` """ try: open_timeout = int(open_timeout) except ValueError: raise ValueError('open_timeout (%r) must be an integer (or compatible type)' % open_timeout) try: parsed = rname.parse_resource_name(resource_name) except rname.InvalidResourceName: return 0, constants.StatusCode.error_invalid_resource_name # Loops through all session types, tries to parse the resource name and if ok, open it. cls = sessions.Session.get_session_class(parsed.interface_type_const, parsed.resource_class) sess = cls(session, resource_name, parsed) try: sess.device = self.devices[sess.attrs[constants.VI_ATTR_RSRC_NAME]] except KeyError: return 0, constants.StatusCode.error_resource_not_found return self._register(sess), constants.StatusCode.success
python
def open(self, session, resource_name, access_mode=constants.AccessModes.no_lock, open_timeout=constants.VI_TMO_IMMEDIATE): """Opens a session to the specified resource. Corresponds to viOpen function of the VISA library. :param session: Resource Manager session (should always be a session returned from open_default_resource_manager()). :param resource_name: Unique symbolic name of a resource. :param access_mode: Specifies the mode by which the resource is to be accessed. (constants.AccessModes) :param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits before returning an error. :return: Unique logical identifier reference to a session, return value of the library call. :rtype: session, :class:`pyvisa.constants.StatusCode` """ try: open_timeout = int(open_timeout) except ValueError: raise ValueError('open_timeout (%r) must be an integer (or compatible type)' % open_timeout) try: parsed = rname.parse_resource_name(resource_name) except rname.InvalidResourceName: return 0, constants.StatusCode.error_invalid_resource_name # Loops through all session types, tries to parse the resource name and if ok, open it. cls = sessions.Session.get_session_class(parsed.interface_type_const, parsed.resource_class) sess = cls(session, resource_name, parsed) try: sess.device = self.devices[sess.attrs[constants.VI_ATTR_RSRC_NAME]] except KeyError: return 0, constants.StatusCode.error_resource_not_found return self._register(sess), constants.StatusCode.success
[ "def", "open", "(", "self", ",", "session", ",", "resource_name", ",", "access_mode", "=", "constants", ".", "AccessModes", ".", "no_lock", ",", "open_timeout", "=", "constants", ".", "VI_TMO_IMMEDIATE", ")", ":", "try", ":", "open_timeout", "=", "int", "(", "open_timeout", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'open_timeout (%r) must be an integer (or compatible type)'", "%", "open_timeout", ")", "try", ":", "parsed", "=", "rname", ".", "parse_resource_name", "(", "resource_name", ")", "except", "rname", ".", "InvalidResourceName", ":", "return", "0", ",", "constants", ".", "StatusCode", ".", "error_invalid_resource_name", "# Loops through all session types, tries to parse the resource name and if ok, open it.", "cls", "=", "sessions", ".", "Session", ".", "get_session_class", "(", "parsed", ".", "interface_type_const", ",", "parsed", ".", "resource_class", ")", "sess", "=", "cls", "(", "session", ",", "resource_name", ",", "parsed", ")", "try", ":", "sess", ".", "device", "=", "self", ".", "devices", "[", "sess", ".", "attrs", "[", "constants", ".", "VI_ATTR_RSRC_NAME", "]", "]", "except", "KeyError", ":", "return", "0", ",", "constants", ".", "StatusCode", ".", "error_resource_not_found", "return", "self", ".", "_register", "(", "sess", ")", ",", "constants", ".", "StatusCode", ".", "success" ]
Opens a session to the specified resource. Corresponds to viOpen function of the VISA library. :param session: Resource Manager session (should always be a session returned from open_default_resource_manager()). :param resource_name: Unique symbolic name of a resource. :param access_mode: Specifies the mode by which the resource is to be accessed. (constants.AccessModes) :param open_timeout: Specifies the maximum time period (in milliseconds) that this operation waits before returning an error. :return: Unique logical identifier reference to a session, return value of the library call. :rtype: session, :class:`pyvisa.constants.StatusCode`
[ "Opens", "a", "session", "to", "the", "specified", "resource", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/highlevel.py#L87-L124
pyvisa/pyvisa-sim
pyvisa-sim/highlevel.py
SimVisaLibrary.close
def close(self, session): """Closes the specified session, event, or find list. Corresponds to viClose function of the VISA library. :param session: Unique logical identifier to a session, event, or find list. :return: return value of the library call. :rtype: :class:`pyvisa.constants.StatusCode` """ try: del self.sessions[session] return constants.StatusCode.success except KeyError: return constants.StatusCode.error_invalid_object
python
def close(self, session): """Closes the specified session, event, or find list. Corresponds to viClose function of the VISA library. :param session: Unique logical identifier to a session, event, or find list. :return: return value of the library call. :rtype: :class:`pyvisa.constants.StatusCode` """ try: del self.sessions[session] return constants.StatusCode.success except KeyError: return constants.StatusCode.error_invalid_object
[ "def", "close", "(", "self", ",", "session", ")", ":", "try", ":", "del", "self", ".", "sessions", "[", "session", "]", "return", "constants", ".", "StatusCode", ".", "success", "except", "KeyError", ":", "return", "constants", ".", "StatusCode", ".", "error_invalid_object" ]
Closes the specified session, event, or find list. Corresponds to viClose function of the VISA library. :param session: Unique logical identifier to a session, event, or find list. :return: return value of the library call. :rtype: :class:`pyvisa.constants.StatusCode`
[ "Closes", "the", "specified", "session", "event", "or", "find", "list", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/highlevel.py#L126-L139
pyvisa/pyvisa-sim
pyvisa-sim/highlevel.py
SimVisaLibrary.list_resources
def list_resources(self, session, query='?*::INSTR'): """Returns a tuple of all connected devices matching query. :param query: regular expression used to match devices. """ # For each session type, ask for the list of connected resources and merge them into a single list. resources = self.devices.list_resources() resources = rname.filter(resources, query) if resources: return resources raise errors.VisaIOError(errors.StatusCode.error_resource_not_found.value)
python
def list_resources(self, session, query='?*::INSTR'): """Returns a tuple of all connected devices matching query. :param query: regular expression used to match devices. """ # For each session type, ask for the list of connected resources and merge them into a single list. resources = self.devices.list_resources() resources = rname.filter(resources, query) if resources: return resources raise errors.VisaIOError(errors.StatusCode.error_resource_not_found.value)
[ "def", "list_resources", "(", "self", ",", "session", ",", "query", "=", "'?*::INSTR'", ")", ":", "# For each session type, ask for the list of connected resources and merge them into a single list.", "resources", "=", "self", ".", "devices", ".", "list_resources", "(", ")", "resources", "=", "rname", ".", "filter", "(", "resources", ",", "query", ")", "if", "resources", ":", "return", "resources", "raise", "errors", ".", "VisaIOError", "(", "errors", ".", "StatusCode", ".", "error_resource_not_found", ".", "value", ")" ]
Returns a tuple of all connected devices matching query. :param query: regular expression used to match devices.
[ "Returns", "a", "tuple", "of", "all", "connected", "devices", "matching", "query", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/highlevel.py#L151-L166
pyvisa/pyvisa-sim
pyvisa-sim/highlevel.py
SimVisaLibrary.read
def read(self, session, count): """Reads data from device or interface synchronously. Corresponds to viRead function of the VISA library. :param session: Unique logical identifier to a session. :param count: Number of bytes to be read. :return: data read, return value of the library call. :rtype: bytes, :class:`pyvisa.constants.StatusCode` """ try: sess = self.sessions[session] except KeyError: return b'', constants.StatusCode.error_invalid_object try: chunk, status = sess.read(count) if status == constants.StatusCode.error_timeout: raise errors.VisaIOError(constants.VI_ERROR_TMO) return chunk, status except AttributeError: return b'', constants.StatusCode.error_nonsupported_operation
python
def read(self, session, count): """Reads data from device or interface synchronously. Corresponds to viRead function of the VISA library. :param session: Unique logical identifier to a session. :param count: Number of bytes to be read. :return: data read, return value of the library call. :rtype: bytes, :class:`pyvisa.constants.StatusCode` """ try: sess = self.sessions[session] except KeyError: return b'', constants.StatusCode.error_invalid_object try: chunk, status = sess.read(count) if status == constants.StatusCode.error_timeout: raise errors.VisaIOError(constants.VI_ERROR_TMO) return chunk, status except AttributeError: return b'', constants.StatusCode.error_nonsupported_operation
[ "def", "read", "(", "self", ",", "session", ",", "count", ")", ":", "try", ":", "sess", "=", "self", ".", "sessions", "[", "session", "]", "except", "KeyError", ":", "return", "b''", ",", "constants", ".", "StatusCode", ".", "error_invalid_object", "try", ":", "chunk", ",", "status", "=", "sess", ".", "read", "(", "count", ")", "if", "status", "==", "constants", ".", "StatusCode", ".", "error_timeout", ":", "raise", "errors", ".", "VisaIOError", "(", "constants", ".", "VI_ERROR_TMO", ")", "return", "chunk", ",", "status", "except", "AttributeError", ":", "return", "b''", ",", "constants", ".", "StatusCode", ".", "error_nonsupported_operation" ]
Reads data from device or interface synchronously. Corresponds to viRead function of the VISA library. :param session: Unique logical identifier to a session. :param count: Number of bytes to be read. :return: data read, return value of the library call. :rtype: bytes, :class:`pyvisa.constants.StatusCode`
[ "Reads", "data", "from", "device", "or", "interface", "synchronously", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/highlevel.py#L168-L190
pyvisa/pyvisa-sim
pyvisa-sim/highlevel.py
SimVisaLibrary.write
def write(self, session, data): """Writes data to device or interface synchronously. Corresponds to viWrite function of the VISA library. :param session: Unique logical identifier to a session. :param data: data to be written. :type data: str :return: Number of bytes actually transferred, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode` """ try: sess = self.sessions[session] except KeyError: return constants.StatusCode.error_invalid_object try: return sess.write(data) except AttributeError: return constants.StatusCode.error_nonsupported_operation
python
def write(self, session, data): """Writes data to device or interface synchronously. Corresponds to viWrite function of the VISA library. :param session: Unique logical identifier to a session. :param data: data to be written. :type data: str :return: Number of bytes actually transferred, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode` """ try: sess = self.sessions[session] except KeyError: return constants.StatusCode.error_invalid_object try: return sess.write(data) except AttributeError: return constants.StatusCode.error_nonsupported_operation
[ "def", "write", "(", "self", ",", "session", ",", "data", ")", ":", "try", ":", "sess", "=", "self", ".", "sessions", "[", "session", "]", "except", "KeyError", ":", "return", "constants", ".", "StatusCode", ".", "error_invalid_object", "try", ":", "return", "sess", ".", "write", "(", "data", ")", "except", "AttributeError", ":", "return", "constants", ".", "StatusCode", ".", "error_nonsupported_operation" ]
Writes data to device or interface synchronously. Corresponds to viWrite function of the VISA library. :param session: Unique logical identifier to a session. :param data: data to be written. :type data: str :return: Number of bytes actually transferred, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode`
[ "Writes", "data", "to", "device", "or", "interface", "synchronously", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/highlevel.py#L192-L212
pyvisa/pyvisa-sim
pyvisa-sim/sessions.py
Session.get_session_class
def get_session_class(cls, interface_type, resource_class): """Return the session class for a given interface type and resource class. :type interface_type: constants.InterfaceType :type resource_class: str :return: Session """ try: return cls._session_classes[(interface_type, resource_class)] except KeyError: raise ValueError('No class registered for %s, %s' % (interface_type, resource_class))
python
def get_session_class(cls, interface_type, resource_class): """Return the session class for a given interface type and resource class. :type interface_type: constants.InterfaceType :type resource_class: str :return: Session """ try: return cls._session_classes[(interface_type, resource_class)] except KeyError: raise ValueError('No class registered for %s, %s' % (interface_type, resource_class))
[ "def", "get_session_class", "(", "cls", ",", "interface_type", ",", "resource_class", ")", ":", "try", ":", "return", "cls", ".", "_session_classes", "[", "(", "interface_type", ",", "resource_class", ")", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'No class registered for %s, %s'", "%", "(", "interface_type", ",", "resource_class", ")", ")" ]
Return the session class for a given interface type and resource class. :type interface_type: constants.InterfaceType :type resource_class: str :return: Session
[ "Return", "the", "session", "class", "for", "a", "given", "interface", "type", "and", "resource", "class", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/sessions.py#L42-L52
pyvisa/pyvisa-sim
pyvisa-sim/sessions.py
Session.register
def register(cls, interface_type, resource_class): """Register a session class for a given interface type and resource class. :type interface_type: constants.InterfaceType :type resource_class: str """ def _internal(python_class): if (interface_type, resource_class) in cls._session_classes: logger.warning('%s is already registered in the ResourceManager. ' 'Overwriting with %s' % ((interface_type, resource_class), python_class)) python_class.session_type = (interface_type, resource_class) cls._session_classes[(interface_type, resource_class)] = python_class return python_class return _internal
python
def register(cls, interface_type, resource_class): """Register a session class for a given interface type and resource class. :type interface_type: constants.InterfaceType :type resource_class: str """ def _internal(python_class): if (interface_type, resource_class) in cls._session_classes: logger.warning('%s is already registered in the ResourceManager. ' 'Overwriting with %s' % ((interface_type, resource_class), python_class)) python_class.session_type = (interface_type, resource_class) cls._session_classes[(interface_type, resource_class)] = python_class return python_class return _internal
[ "def", "register", "(", "cls", ",", "interface_type", ",", "resource_class", ")", ":", "def", "_internal", "(", "python_class", ")", ":", "if", "(", "interface_type", ",", "resource_class", ")", "in", "cls", ".", "_session_classes", ":", "logger", ".", "warning", "(", "'%s is already registered in the ResourceManager. '", "'Overwriting with %s'", "%", "(", "(", "interface_type", ",", "resource_class", ")", ",", "python_class", ")", ")", "python_class", ".", "session_type", "=", "(", "interface_type", ",", "resource_class", ")", "cls", ".", "_session_classes", "[", "(", "interface_type", ",", "resource_class", ")", "]", "=", "python_class", "return", "python_class", "return", "_internal" ]
Register a session class for a given interface type and resource class. :type interface_type: constants.InterfaceType :type resource_class: str
[ "Register", "a", "session", "class", "for", "a", "given", "interface", "type", "and", "resource", "class", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/sessions.py#L55-L69
pyvisa/pyvisa-sim
pyvisa-sim/sessions.py
Session.get_attribute
def get_attribute(self, attribute): """Get an attribute from the session. :param attribute: :return: attribute value, status code :rtype: object, constants.StatusCode """ # Check that the attribute exists. try: attr = attributes.AttributesByID[attribute] except KeyError: return 0, constants.StatusCode.error_nonsupported_attribute # Check that the attribute is valid for this session type. if not attr.in_resource(self.session_type): return 0, constants.StatusCode.error_nonsupported_attribute # Check that the attribute is readable. if not attr.read: raise Exception('Do not now how to handle write only attributes.') # Return the current value of the default according the VISA spec return self.attrs.setdefault(attribute, attr.default), constants.StatusCode.success
python
def get_attribute(self, attribute): """Get an attribute from the session. :param attribute: :return: attribute value, status code :rtype: object, constants.StatusCode """ # Check that the attribute exists. try: attr = attributes.AttributesByID[attribute] except KeyError: return 0, constants.StatusCode.error_nonsupported_attribute # Check that the attribute is valid for this session type. if not attr.in_resource(self.session_type): return 0, constants.StatusCode.error_nonsupported_attribute # Check that the attribute is readable. if not attr.read: raise Exception('Do not now how to handle write only attributes.') # Return the current value of the default according the VISA spec return self.attrs.setdefault(attribute, attr.default), constants.StatusCode.success
[ "def", "get_attribute", "(", "self", ",", "attribute", ")", ":", "# Check that the attribute exists.", "try", ":", "attr", "=", "attributes", ".", "AttributesByID", "[", "attribute", "]", "except", "KeyError", ":", "return", "0", ",", "constants", ".", "StatusCode", ".", "error_nonsupported_attribute", "# Check that the attribute is valid for this session type.", "if", "not", "attr", ".", "in_resource", "(", "self", ".", "session_type", ")", ":", "return", "0", ",", "constants", ".", "StatusCode", ".", "error_nonsupported_attribute", "# Check that the attribute is readable.", "if", "not", "attr", ".", "read", ":", "raise", "Exception", "(", "'Do not now how to handle write only attributes.'", ")", "# Return the current value of the default according the VISA spec", "return", "self", ".", "attrs", ".", "setdefault", "(", "attribute", ",", "attr", ".", "default", ")", ",", "constants", ".", "StatusCode", ".", "success" ]
Get an attribute from the session. :param attribute: :return: attribute value, status code :rtype: object, constants.StatusCode
[ "Get", "an", "attribute", "from", "the", "session", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/sessions.py#L90-L113
pyvisa/pyvisa-sim
pyvisa-sim/sessions.py
Session.set_attribute
def set_attribute(self, attribute, attribute_state): """Get an attribute from the session. :param attribute: :return: attribute value, status code :rtype: object, constants.StatusCode """ # Check that the attribute exists. try: attr = attributes.AttributesByID[attribute] except KeyError: return constants.StatusCode.error_nonsupported_attribute # Check that the attribute is valid for this session type. if not attr.in_resource(self.session_type): return constants.StatusCode.error_nonsupported_attribute # Check that the attribute is writable. if not attr.write: return constants.StatusCode.error_attribute_read_only try: self.attrs[attribute] = attribute_state except ValueError: return constants.StatusCode.error_nonsupported_attribute_state return constants.StatusCode.success
python
def set_attribute(self, attribute, attribute_state): """Get an attribute from the session. :param attribute: :return: attribute value, status code :rtype: object, constants.StatusCode """ # Check that the attribute exists. try: attr = attributes.AttributesByID[attribute] except KeyError: return constants.StatusCode.error_nonsupported_attribute # Check that the attribute is valid for this session type. if not attr.in_resource(self.session_type): return constants.StatusCode.error_nonsupported_attribute # Check that the attribute is writable. if not attr.write: return constants.StatusCode.error_attribute_read_only try: self.attrs[attribute] = attribute_state except ValueError: return constants.StatusCode.error_nonsupported_attribute_state return constants.StatusCode.success
[ "def", "set_attribute", "(", "self", ",", "attribute", ",", "attribute_state", ")", ":", "# Check that the attribute exists.", "try", ":", "attr", "=", "attributes", ".", "AttributesByID", "[", "attribute", "]", "except", "KeyError", ":", "return", "constants", ".", "StatusCode", ".", "error_nonsupported_attribute", "# Check that the attribute is valid for this session type.", "if", "not", "attr", ".", "in_resource", "(", "self", ".", "session_type", ")", ":", "return", "constants", ".", "StatusCode", ".", "error_nonsupported_attribute", "# Check that the attribute is writable.", "if", "not", "attr", ".", "write", ":", "return", "constants", ".", "StatusCode", ".", "error_attribute_read_only", "try", ":", "self", ".", "attrs", "[", "attribute", "]", "=", "attribute_state", "except", "ValueError", ":", "return", "constants", ".", "StatusCode", ".", "error_nonsupported_attribute_state", "return", "constants", ".", "StatusCode", ".", "success" ]
Get an attribute from the session. :param attribute: :return: attribute value, status code :rtype: object, constants.StatusCode
[ "Get", "an", "attribute", "from", "the", "session", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/sessions.py#L115-L142
pyvisa/pyvisa-sim
pyvisa-sim/component.py
to_bytes
def to_bytes(val): """Takes a text message and return a tuple """ if val is NoResponse: return val val = val.replace('\\r', '\r').replace('\\n', '\n') return val.encode()
python
def to_bytes(val): """Takes a text message and return a tuple """ if val is NoResponse: return val val = val.replace('\\r', '\r').replace('\\n', '\n') return val.encode()
[ "def", "to_bytes", "(", "val", ")", ":", "if", "val", "is", "NoResponse", ":", "return", "val", "val", "=", "val", ".", "replace", "(", "'\\\\r'", ",", "'\\r'", ")", ".", "replace", "(", "'\\\\n'", ",", "'\\n'", ")", "return", "val", ".", "encode", "(", ")" ]
Takes a text message and return a tuple
[ "Takes", "a", "text", "message", "and", "return", "a", "tuple" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/component.py#L16-L22
pyvisa/pyvisa-sim
pyvisa-sim/component.py
Property.validate_value
def validate_value(self, string_value): """Validate that a value match the Property specs. """ specs = self.specs if 'type' in specs: value = specs['type'](string_value) else: value = string_value if 'min' in specs and value < specs['min']: raise ValueError if 'max' in specs and value > specs['max']: raise ValueError if 'valid' in specs and value not in specs['valid']: raise ValueError return value
python
def validate_value(self, string_value): """Validate that a value match the Property specs. """ specs = self.specs if 'type' in specs: value = specs['type'](string_value) else: value = string_value if 'min' in specs and value < specs['min']: raise ValueError if 'max' in specs and value > specs['max']: raise ValueError if 'valid' in specs and value not in specs['valid']: raise ValueError return value
[ "def", "validate_value", "(", "self", ",", "string_value", ")", ":", "specs", "=", "self", ".", "specs", "if", "'type'", "in", "specs", ":", "value", "=", "specs", "[", "'type'", "]", "(", "string_value", ")", "else", ":", "value", "=", "string_value", "if", "'min'", "in", "specs", "and", "value", "<", "specs", "[", "'min'", "]", ":", "raise", "ValueError", "if", "'max'", "in", "specs", "and", "value", ">", "specs", "[", "'max'", "]", ":", "raise", "ValueError", "if", "'valid'", "in", "specs", "and", "value", "not", "in", "specs", "[", "'valid'", "]", ":", "raise", "ValueError", "return", "value" ]
Validate that a value match the Property specs.
[ "Validate", "that", "a", "value", "match", "the", "Property", "specs", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/component.py#L77-L92
pyvisa/pyvisa-sim
pyvisa-sim/component.py
Component._match_dialog
def _match_dialog(self, query, dialogues=None): """Tries to match in dialogues :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if dialogues is None: dialogues = self._dialogues # Try to match in the queries if query in dialogues: response = dialogues[query] logger.debug('Found response in queries: %s' % repr(response)) return response
python
def _match_dialog(self, query, dialogues=None): """Tries to match in dialogues :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if dialogues is None: dialogues = self._dialogues # Try to match in the queries if query in dialogues: response = dialogues[query] logger.debug('Found response in queries: %s' % repr(response)) return response
[ "def", "_match_dialog", "(", "self", ",", "query", ",", "dialogues", "=", "None", ")", ":", "if", "dialogues", "is", "None", ":", "dialogues", "=", "self", ".", "_dialogues", "# Try to match in the queries", "if", "query", "in", "dialogues", ":", "response", "=", "dialogues", "[", "query", "]", "logger", ".", "debug", "(", "'Found response in queries: %s'", "%", "repr", "(", "response", ")", ")", "return", "response" ]
Tries to match in dialogues :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None
[ "Tries", "to", "match", "in", "dialogues" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/component.py#L158-L174
pyvisa/pyvisa-sim
pyvisa-sim/component.py
Component._match_getters
def _match_getters(self, query, getters=None): """Tries to match in getters :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if getters is None: getters = self._getters if query in getters: name, response = getters[query] logger.debug('Found response in getter of %s' % name) response = response.format(self._properties[name].get_value()) return response.encode('utf-8')
python
def _match_getters(self, query, getters=None): """Tries to match in getters :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if getters is None: getters = self._getters if query in getters: name, response = getters[query] logger.debug('Found response in getter of %s' % name) response = response.format(self._properties[name].get_value()) return response.encode('utf-8')
[ "def", "_match_getters", "(", "self", ",", "query", ",", "getters", "=", "None", ")", ":", "if", "getters", "is", "None", ":", "getters", "=", "self", ".", "_getters", "if", "query", "in", "getters", ":", "name", ",", "response", "=", "getters", "[", "query", "]", "logger", ".", "debug", "(", "'Found response in getter of %s'", "%", "name", ")", "response", "=", "response", ".", "format", "(", "self", ".", "_properties", "[", "name", "]", ".", "get_value", "(", ")", ")", "return", "response", ".", "encode", "(", "'utf-8'", ")" ]
Tries to match in getters :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None
[ "Tries", "to", "match", "in", "getters" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/component.py#L176-L191
pyvisa/pyvisa-sim
pyvisa-sim/component.py
Component._match_setters
def _match_setters(self, query): """Tries to match in setters :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ q = query.decode('utf-8') for name, parser, response, error_response in self._setters: try: value = parser(q) logger.debug('Found response in setter of %s' % name) except ValueError: continue try: self._properties[name].set_value(value) return response except ValueError: if isinstance(error_response, bytes): return error_response return self.error_response('command_error') return None
python
def _match_setters(self, query): """Tries to match in setters :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ q = query.decode('utf-8') for name, parser, response, error_response in self._setters: try: value = parser(q) logger.debug('Found response in setter of %s' % name) except ValueError: continue try: self._properties[name].set_value(value) return response except ValueError: if isinstance(error_response, bytes): return error_response return self.error_response('command_error') return None
[ "def", "_match_setters", "(", "self", ",", "query", ")", ":", "q", "=", "query", ".", "decode", "(", "'utf-8'", ")", "for", "name", ",", "parser", ",", "response", ",", "error_response", "in", "self", ".", "_setters", ":", "try", ":", "value", "=", "parser", "(", "q", ")", "logger", ".", "debug", "(", "'Found response in setter of %s'", "%", "name", ")", "except", "ValueError", ":", "continue", "try", ":", "self", ".", "_properties", "[", "name", "]", ".", "set_value", "(", "value", ")", "return", "response", "except", "ValueError", ":", "if", "isinstance", "(", "error_response", ",", "bytes", ")", ":", "return", "error_response", "return", "self", ".", "error_response", "(", "'command_error'", ")", "return", "None" ]
Tries to match in setters :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None
[ "Tries", "to", "match", "in", "setters" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/component.py#L193-L217
pyvisa/pyvisa-sim
pyvisa-sim/devices.py
Device.add_error_handler
def add_error_handler(self, error_input): """Add error handler to the device """ if isinstance(error_input, dict): error_response = error_input.get('response', {}) cerr = error_response.get('command_error', NoResponse) qerr = error_response.get('query_error', NoResponse) response_dict = {'command_error': cerr, 'query_error': qerr} register_list = error_input.get('status_register', []) for register_dict in register_list: query = register_dict['q'] register = StatusRegister(register_dict) self._status_registers[to_bytes(query)] = register for key in register.keys(): self._error_map[key] = register queue_list = error_input.get('error_queue', []) for queue_dict in queue_list: query = queue_dict['q'] err_queue = ErrorQueue(queue_dict) self._error_queues[to_bytes(query)] = err_queue else: response_dict = {'command_error': error_input, 'query_error': error_input} for key, value in response_dict.items(): self._error_response[key] = to_bytes(value)
python
def add_error_handler(self, error_input): """Add error handler to the device """ if isinstance(error_input, dict): error_response = error_input.get('response', {}) cerr = error_response.get('command_error', NoResponse) qerr = error_response.get('query_error', NoResponse) response_dict = {'command_error': cerr, 'query_error': qerr} register_list = error_input.get('status_register', []) for register_dict in register_list: query = register_dict['q'] register = StatusRegister(register_dict) self._status_registers[to_bytes(query)] = register for key in register.keys(): self._error_map[key] = register queue_list = error_input.get('error_queue', []) for queue_dict in queue_list: query = queue_dict['q'] err_queue = ErrorQueue(queue_dict) self._error_queues[to_bytes(query)] = err_queue else: response_dict = {'command_error': error_input, 'query_error': error_input} for key, value in response_dict.items(): self._error_response[key] = to_bytes(value)
[ "def", "add_error_handler", "(", "self", ",", "error_input", ")", ":", "if", "isinstance", "(", "error_input", ",", "dict", ")", ":", "error_response", "=", "error_input", ".", "get", "(", "'response'", ",", "{", "}", ")", "cerr", "=", "error_response", ".", "get", "(", "'command_error'", ",", "NoResponse", ")", "qerr", "=", "error_response", ".", "get", "(", "'query_error'", ",", "NoResponse", ")", "response_dict", "=", "{", "'command_error'", ":", "cerr", ",", "'query_error'", ":", "qerr", "}", "register_list", "=", "error_input", ".", "get", "(", "'status_register'", ",", "[", "]", ")", "for", "register_dict", "in", "register_list", ":", "query", "=", "register_dict", "[", "'q'", "]", "register", "=", "StatusRegister", "(", "register_dict", ")", "self", ".", "_status_registers", "[", "to_bytes", "(", "query", ")", "]", "=", "register", "for", "key", "in", "register", ".", "keys", "(", ")", ":", "self", ".", "_error_map", "[", "key", "]", "=", "register", "queue_list", "=", "error_input", ".", "get", "(", "'error_queue'", ",", "[", "]", ")", "for", "queue_dict", "in", "queue_list", ":", "query", "=", "queue_dict", "[", "'q'", "]", "err_queue", "=", "ErrorQueue", "(", "queue_dict", ")", "self", ".", "_error_queues", "[", "to_bytes", "(", "query", ")", "]", "=", "err_queue", "else", ":", "response_dict", "=", "{", "'command_error'", ":", "error_input", ",", "'query_error'", ":", "error_input", "}", "for", "key", ",", "value", "in", "response_dict", ".", "items", "(", ")", ":", "self", ".", "_error_response", "[", "key", "]", "=", "to_bytes", "(", "value", ")" ]
Add error handler to the device
[ "Add", "error", "handler", "to", "the", "device" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L156-L189
pyvisa/pyvisa-sim
pyvisa-sim/devices.py
Device.add_eom
def add_eom(self, type_class, query_termination, response_termination): """Add default end of message for a given interface type and resource class. :param type_class: interface type and resource class as strings joined by space :param query_termination: end of message used in queries. :param response_termination: end of message used in responses. """ interface_type, resource_class = type_class.split(' ') interface_type = getattr(constants.InterfaceType, interface_type.lower()) self._eoms[(interface_type, resource_class)] = (to_bytes(query_termination), to_bytes(response_termination))
python
def add_eom(self, type_class, query_termination, response_termination): """Add default end of message for a given interface type and resource class. :param type_class: interface type and resource class as strings joined by space :param query_termination: end of message used in queries. :param response_termination: end of message used in responses. """ interface_type, resource_class = type_class.split(' ') interface_type = getattr(constants.InterfaceType, interface_type.lower()) self._eoms[(interface_type, resource_class)] = (to_bytes(query_termination), to_bytes(response_termination))
[ "def", "add_eom", "(", "self", ",", "type_class", ",", "query_termination", ",", "response_termination", ")", ":", "interface_type", ",", "resource_class", "=", "type_class", ".", "split", "(", "' '", ")", "interface_type", "=", "getattr", "(", "constants", ".", "InterfaceType", ",", "interface_type", ".", "lower", "(", ")", ")", "self", ".", "_eoms", "[", "(", "interface_type", ",", "resource_class", ")", "]", "=", "(", "to_bytes", "(", "query_termination", ")", ",", "to_bytes", "(", "response_termination", ")", ")" ]
Add default end of message for a given interface type and resource class. :param type_class: interface type and resource class as strings joined by space :param query_termination: end of message used in queries. :param response_termination: end of message used in responses.
[ "Add", "default", "end", "of", "message", "for", "a", "given", "interface", "type", "and", "resource", "class", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L200-L212
pyvisa/pyvisa-sim
pyvisa-sim/devices.py
Device.write
def write(self, data): """Write data into the device input buffer. :param data: single element byte :type data: bytes """ logger.debug('Writing into device input buffer: %r' % data) if not isinstance(data, bytes): raise TypeError('data must be an instance of bytes') if len(data) != 1: msg = 'data must have a length of 1, not %d' raise ValueError(msg % len(data)) self._input_buffer.extend(data) l = len(self._query_eom) if not self._input_buffer.endswith(self._query_eom): return try: message = bytes(self._input_buffer[:-l]) queries = (message.split(self.delimiter) if self.delimiter else [message]) for query in queries: response = self._match(query) eom = self._response_eom if response is None: response = self.error_response('command_error') if response is not NoResponse: self._output_buffer.extend(response) self._output_buffer.extend(eom) finally: self._input_buffer = bytearray()
python
def write(self, data): """Write data into the device input buffer. :param data: single element byte :type data: bytes """ logger.debug('Writing into device input buffer: %r' % data) if not isinstance(data, bytes): raise TypeError('data must be an instance of bytes') if len(data) != 1: msg = 'data must have a length of 1, not %d' raise ValueError(msg % len(data)) self._input_buffer.extend(data) l = len(self._query_eom) if not self._input_buffer.endswith(self._query_eom): return try: message = bytes(self._input_buffer[:-l]) queries = (message.split(self.delimiter) if self.delimiter else [message]) for query in queries: response = self._match(query) eom = self._response_eom if response is None: response = self.error_response('command_error') if response is not NoResponse: self._output_buffer.extend(response) self._output_buffer.extend(eom) finally: self._input_buffer = bytearray()
[ "def", "write", "(", "self", ",", "data", ")", ":", "logger", ".", "debug", "(", "'Writing into device input buffer: %r'", "%", "data", ")", "if", "not", "isinstance", "(", "data", ",", "bytes", ")", ":", "raise", "TypeError", "(", "'data must be an instance of bytes'", ")", "if", "len", "(", "data", ")", "!=", "1", ":", "msg", "=", "'data must have a length of 1, not %d'", "raise", "ValueError", "(", "msg", "%", "len", "(", "data", ")", ")", "self", ".", "_input_buffer", ".", "extend", "(", "data", ")", "l", "=", "len", "(", "self", ".", "_query_eom", ")", "if", "not", "self", ".", "_input_buffer", ".", "endswith", "(", "self", ".", "_query_eom", ")", ":", "return", "try", ":", "message", "=", "bytes", "(", "self", ".", "_input_buffer", "[", ":", "-", "l", "]", ")", "queries", "=", "(", "message", ".", "split", "(", "self", ".", "delimiter", ")", "if", "self", ".", "delimiter", "else", "[", "message", "]", ")", "for", "query", "in", "queries", ":", "response", "=", "self", ".", "_match", "(", "query", ")", "eom", "=", "self", ".", "_response_eom", "if", "response", "is", "None", ":", "response", "=", "self", ".", "error_response", "(", "'command_error'", ")", "if", "response", "is", "not", "NoResponse", ":", "self", ".", "_output_buffer", ".", "extend", "(", "response", ")", "self", ".", "_output_buffer", ".", "extend", "(", "eom", ")", "finally", ":", "self", ".", "_input_buffer", "=", "bytearray", "(", ")" ]
Write data into the device input buffer. :param data: single element byte :type data: bytes
[ "Write", "data", "into", "the", "device", "input", "buffer", "." ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L214-L250
pyvisa/pyvisa-sim
pyvisa-sim/devices.py
Device.read
def read(self): """Return a single byte from the output buffer """ if self._output_buffer: b, self._output_buffer = (self._output_buffer[0:1], self._output_buffer[1:]) return b return b''
python
def read(self): """Return a single byte from the output buffer """ if self._output_buffer: b, self._output_buffer = (self._output_buffer[0:1], self._output_buffer[1:]) return b return b''
[ "def", "read", "(", "self", ")", ":", "if", "self", ".", "_output_buffer", ":", "b", ",", "self", ".", "_output_buffer", "=", "(", "self", ".", "_output_buffer", "[", "0", ":", "1", "]", ",", "self", ".", "_output_buffer", "[", "1", ":", "]", ")", "return", "b", "return", "b''" ]
Return a single byte from the output buffer
[ "Return", "a", "single", "byte", "from", "the", "output", "buffer" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L252-L260
pyvisa/pyvisa-sim
pyvisa-sim/devices.py
Device._match
def _match(self, query): """Tries to match in dialogues, getters and setters and subcomponents :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ response = self._match_dialog(query) if response is not None: return response response = self._match_getters(query) if response is not None: return response response = self._match_registers(query) if response is not None: return response response = self._match_errors_queues(query) if response is not None: return response response = self._match_setters(query) if response is not None: return response if response is None: for channel in self._channels.values(): response = channel.match(query) if response: return response return None
python
def _match(self, query): """Tries to match in dialogues, getters and setters and subcomponents :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ response = self._match_dialog(query) if response is not None: return response response = self._match_getters(query) if response is not None: return response response = self._match_registers(query) if response is not None: return response response = self._match_errors_queues(query) if response is not None: return response response = self._match_setters(query) if response is not None: return response if response is None: for channel in self._channels.values(): response = channel.match(query) if response: return response return None
[ "def", "_match", "(", "self", ",", "query", ")", ":", "response", "=", "self", ".", "_match_dialog", "(", "query", ")", "if", "response", "is", "not", "None", ":", "return", "response", "response", "=", "self", ".", "_match_getters", "(", "query", ")", "if", "response", "is", "not", "None", ":", "return", "response", "response", "=", "self", ".", "_match_registers", "(", "query", ")", "if", "response", "is", "not", "None", ":", "return", "response", "response", "=", "self", ".", "_match_errors_queues", "(", "query", ")", "if", "response", "is", "not", "None", ":", "return", "response", "response", "=", "self", ".", "_match_setters", "(", "query", ")", "if", "response", "is", "not", "None", ":", "return", "response", "if", "response", "is", "None", ":", "for", "channel", "in", "self", ".", "_channels", ".", "values", "(", ")", ":", "response", "=", "channel", ".", "match", "(", "query", ")", "if", "response", ":", "return", "response", "return", "None" ]
Tries to match in dialogues, getters and setters and subcomponents :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None
[ "Tries", "to", "match", "in", "dialogues", "getters", "and", "setters", "and", "subcomponents" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L262-L296
pyvisa/pyvisa-sim
pyvisa-sim/devices.py
Device._match_registers
def _match_registers(self, query): """Tries to match in status registers :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if query in self._status_registers: register = self._status_registers[query] response = register.value logger.debug('Found response in status register: %s', repr(response)) register.clear() return response
python
def _match_registers(self, query): """Tries to match in status registers :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if query in self._status_registers: register = self._status_registers[query] response = register.value logger.debug('Found response in status register: %s', repr(response)) register.clear() return response
[ "def", "_match_registers", "(", "self", ",", "query", ")", ":", "if", "query", "in", "self", ".", "_status_registers", ":", "register", "=", "self", ".", "_status_registers", "[", "query", "]", "response", "=", "register", ".", "value", "logger", ".", "debug", "(", "'Found response in status register: %s'", ",", "repr", "(", "response", ")", ")", "register", ".", "clear", "(", ")", "return", "response" ]
Tries to match in status registers :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None
[ "Tries", "to", "match", "in", "status", "registers" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L298-L313
pyvisa/pyvisa-sim
pyvisa-sim/devices.py
Device._match_errors_queues
def _match_errors_queues(self, query): """Tries to match in error queues :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if query in self._error_queues: queue = self._error_queues[query] response = queue.value logger.debug('Found response in error queue: %s', repr(response)) return response
python
def _match_errors_queues(self, query): """Tries to match in error queues :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if query in self._error_queues: queue = self._error_queues[query] response = queue.value logger.debug('Found response in error queue: %s', repr(response)) return response
[ "def", "_match_errors_queues", "(", "self", ",", "query", ")", ":", "if", "query", "in", "self", ".", "_error_queues", ":", "queue", "=", "self", ".", "_error_queues", "[", "query", "]", "response", "=", "queue", ".", "value", "logger", ".", "debug", "(", "'Found response in error queue: %s'", ",", "repr", "(", "response", ")", ")", "return", "response" ]
Tries to match in error queues :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None
[ "Tries", "to", "match", "in", "error", "queues" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L315-L329
pyvisa/pyvisa-sim
pyvisa-sim/devices.py
Devices.add_device
def add_device(self, resource_name, device): """Bind device to resource name """ if device.resource_name is not None: msg = 'The device %r is already assigned to %s' raise ValueError(msg % (device, device.resource_name)) device.resource_name = resource_name self._internal[device.resource_name] = device
python
def add_device(self, resource_name, device): """Bind device to resource name """ if device.resource_name is not None: msg = 'The device %r is already assigned to %s' raise ValueError(msg % (device, device.resource_name)) device.resource_name = resource_name self._internal[device.resource_name] = device
[ "def", "add_device", "(", "self", ",", "resource_name", ",", "device", ")", ":", "if", "device", ".", "resource_name", "is", "not", "None", ":", "msg", "=", "'The device %r is already assigned to %s'", "raise", "ValueError", "(", "msg", "%", "(", "device", ",", "device", ".", "resource_name", ")", ")", "device", ".", "resource_name", "=", "resource_name", "self", ".", "_internal", "[", "device", ".", "resource_name", "]", "=", "device" ]
Bind device to resource name
[ "Bind", "device", "to", "resource", "name" ]
train
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L343-L353
percipient/django-querysetsequence
queryset_sequence/pagination.py
SequenceCursorPagination.get_ordering
def get_ordering(self, *args, **kwargs): """Take whatever the expected ordering is and then first order by QuerySet.""" result = super(SequenceCursorPagination, self).get_ordering(*args, **kwargs) # Because paginate_queryset sets self.ordering after reading it...we # need to only modify it sometimes. (This allows re-use of the # paginator, which probably only happens in tests.) if result[0] != '#': result = ('#', ) + result return result
python
def get_ordering(self, *args, **kwargs): """Take whatever the expected ordering is and then first order by QuerySet.""" result = super(SequenceCursorPagination, self).get_ordering(*args, **kwargs) # Because paginate_queryset sets self.ordering after reading it...we # need to only modify it sometimes. (This allows re-use of the # paginator, which probably only happens in tests.) if result[0] != '#': result = ('#', ) + result return result
[ "def", "get_ordering", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "super", "(", "SequenceCursorPagination", ",", "self", ")", ".", "get_ordering", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# Because paginate_queryset sets self.ordering after reading it...we", "# need to only modify it sometimes. (This allows re-use of the", "# paginator, which probably only happens in tests.)", "if", "result", "[", "0", "]", "!=", "'#'", ":", "result", "=", "(", "'#'", ",", ")", "+", "result", "return", "result" ]
Take whatever the expected ordering is and then first order by QuerySet.
[ "Take", "whatever", "the", "expected", "ordering", "is", "and", "then", "first", "order", "by", "QuerySet", "." ]
train
https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/pagination.py#L155-L165
percipient/django-querysetsequence
queryset_sequence/pagination.py
SequenceCursorPagination._get_position_from_instance
def _get_position_from_instance(self, instance, ordering): """ The position will be a tuple of values: The QuerySet number inside of the QuerySetSequence. Whatever the normal value taken from the ordering property gives. """ # Get the QuerySet number of the current instance. qs_order = getattr(instance, '#') # Strip the '#' and call the standard _get_position_from_instance. result = super(SequenceCursorPagination, self)._get_position_from_instance(instance, ordering[1:]) # Return a tuple of these two elements. return (qs_order, result)
python
def _get_position_from_instance(self, instance, ordering): """ The position will be a tuple of values: The QuerySet number inside of the QuerySetSequence. Whatever the normal value taken from the ordering property gives. """ # Get the QuerySet number of the current instance. qs_order = getattr(instance, '#') # Strip the '#' and call the standard _get_position_from_instance. result = super(SequenceCursorPagination, self)._get_position_from_instance(instance, ordering[1:]) # Return a tuple of these two elements. return (qs_order, result)
[ "def", "_get_position_from_instance", "(", "self", ",", "instance", ",", "ordering", ")", ":", "# Get the QuerySet number of the current instance.", "qs_order", "=", "getattr", "(", "instance", ",", "'#'", ")", "# Strip the '#' and call the standard _get_position_from_instance.", "result", "=", "super", "(", "SequenceCursorPagination", ",", "self", ")", ".", "_get_position_from_instance", "(", "instance", ",", "ordering", "[", "1", ":", "]", ")", "# Return a tuple of these two elements.", "return", "(", "qs_order", ",", "result", ")" ]
The position will be a tuple of values: The QuerySet number inside of the QuerySetSequence. Whatever the normal value taken from the ordering property gives.
[ "The", "position", "will", "be", "a", "tuple", "of", "values", ":" ]
train
https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/pagination.py#L167-L182
percipient/django-querysetsequence
queryset_sequence/pagination.py
SequenceCursorPagination.decode_cursor
def decode_cursor(self, request): """ Given a request with a cursor, return a `Cursor` instance. Differs from the standard CursorPagination to handle a tuple in the position field. """ # Determine if we have a cursor, and if so then decode it. encoded = request.query_params.get(self.cursor_query_param) if encoded is None: return None try: querystring = b64decode(encoded.encode('ascii')).decode('ascii') tokens = urlparse.parse_qs(querystring, keep_blank_values=True) offset = tokens.get('o', ['0'])[0] offset = _positive_int(offset, cutoff=self.offset_cutoff) reverse = tokens.get('r', ['0'])[0] reverse = bool(int(reverse)) # The difference. Don't get just the 0th entry: get all entries. position = tokens.get('p', None) except (TypeError, ValueError): raise NotFound(self.invalid_cursor_message) return Cursor(offset=offset, reverse=reverse, position=position)
python
def decode_cursor(self, request): """ Given a request with a cursor, return a `Cursor` instance. Differs from the standard CursorPagination to handle a tuple in the position field. """ # Determine if we have a cursor, and if so then decode it. encoded = request.query_params.get(self.cursor_query_param) if encoded is None: return None try: querystring = b64decode(encoded.encode('ascii')).decode('ascii') tokens = urlparse.parse_qs(querystring, keep_blank_values=True) offset = tokens.get('o', ['0'])[0] offset = _positive_int(offset, cutoff=self.offset_cutoff) reverse = tokens.get('r', ['0'])[0] reverse = bool(int(reverse)) # The difference. Don't get just the 0th entry: get all entries. position = tokens.get('p', None) except (TypeError, ValueError): raise NotFound(self.invalid_cursor_message) return Cursor(offset=offset, reverse=reverse, position=position)
[ "def", "decode_cursor", "(", "self", ",", "request", ")", ":", "# Determine if we have a cursor, and if so then decode it.", "encoded", "=", "request", ".", "query_params", ".", "get", "(", "self", ".", "cursor_query_param", ")", "if", "encoded", "is", "None", ":", "return", "None", "try", ":", "querystring", "=", "b64decode", "(", "encoded", ".", "encode", "(", "'ascii'", ")", ")", ".", "decode", "(", "'ascii'", ")", "tokens", "=", "urlparse", ".", "parse_qs", "(", "querystring", ",", "keep_blank_values", "=", "True", ")", "offset", "=", "tokens", ".", "get", "(", "'o'", ",", "[", "'0'", "]", ")", "[", "0", "]", "offset", "=", "_positive_int", "(", "offset", ",", "cutoff", "=", "self", ".", "offset_cutoff", ")", "reverse", "=", "tokens", ".", "get", "(", "'r'", ",", "[", "'0'", "]", ")", "[", "0", "]", "reverse", "=", "bool", "(", "int", "(", "reverse", ")", ")", "# The difference. Don't get just the 0th entry: get all entries.", "position", "=", "tokens", ".", "get", "(", "'p'", ",", "None", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "NotFound", "(", "self", ".", "invalid_cursor_message", ")", "return", "Cursor", "(", "offset", "=", "offset", ",", "reverse", "=", "reverse", ",", "position", "=", "position", ")" ]
Given a request with a cursor, return a `Cursor` instance. Differs from the standard CursorPagination to handle a tuple in the position field.
[ "Given", "a", "request", "with", "a", "cursor", "return", "a", "Cursor", "instance", "." ]
train
https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/pagination.py#L184-L211
percipient/django-querysetsequence
queryset_sequence/__init__.py
multiply_iterables
def multiply_iterables(it1, it2): """ Element-wise iterables multiplications. """ assert len(it1) == len(it2),\ "Can not element-wise multiply iterables of different length." return list(map(mul, it1, it2))
python
def multiply_iterables(it1, it2): """ Element-wise iterables multiplications. """ assert len(it1) == len(it2),\ "Can not element-wise multiply iterables of different length." return list(map(mul, it1, it2))
[ "def", "multiply_iterables", "(", "it1", ",", "it2", ")", ":", "assert", "len", "(", "it1", ")", "==", "len", "(", "it2", ")", ",", "\"Can not element-wise multiply iterables of different length.\"", "return", "list", "(", "map", "(", "mul", ",", "it1", ",", "it2", ")", ")" ]
Element-wise iterables multiplications.
[ "Element", "-", "wise", "iterables", "multiplications", "." ]
train
https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/__init__.py#L29-L35
percipient/django-querysetsequence
queryset_sequence/__init__.py
ComparatorMixin._cmp
def _cmp(cls, value1, value2): """ Comparison method that takes into account Django's special rules when ordering by a field that is a model: 1. Try following the default ordering on the related model. 2. Order by the model's primary key, if there is no Meta.ordering. """ if isinstance(value1, Model) and isinstance(value2, Model): field_names = value1._meta.ordering # Assert that the ordering is the same between different models. if field_names != value2._meta.ordering: valid_field_names = (set(cls._get_field_names(value1)) & set(cls._get_field_names(value2))) raise FieldError( "Ordering differs between models. Choices are: %s" % ', '.join(valid_field_names)) # By default, order by the pk. if not field_names: field_names = ['pk'] # TODO Figure out if we don't need to generate this comparator every # time. return cls._generate_comparator(field_names)(value1, value2) return cmp(value1, value2)
python
def _cmp(cls, value1, value2): """ Comparison method that takes into account Django's special rules when ordering by a field that is a model: 1. Try following the default ordering on the related model. 2. Order by the model's primary key, if there is no Meta.ordering. """ if isinstance(value1, Model) and isinstance(value2, Model): field_names = value1._meta.ordering # Assert that the ordering is the same between different models. if field_names != value2._meta.ordering: valid_field_names = (set(cls._get_field_names(value1)) & set(cls._get_field_names(value2))) raise FieldError( "Ordering differs between models. Choices are: %s" % ', '.join(valid_field_names)) # By default, order by the pk. if not field_names: field_names = ['pk'] # TODO Figure out if we don't need to generate this comparator every # time. return cls._generate_comparator(field_names)(value1, value2) return cmp(value1, value2)
[ "def", "_cmp", "(", "cls", ",", "value1", ",", "value2", ")", ":", "if", "isinstance", "(", "value1", ",", "Model", ")", "and", "isinstance", "(", "value2", ",", "Model", ")", ":", "field_names", "=", "value1", ".", "_meta", ".", "ordering", "# Assert that the ordering is the same between different models.", "if", "field_names", "!=", "value2", ".", "_meta", ".", "ordering", ":", "valid_field_names", "=", "(", "set", "(", "cls", ".", "_get_field_names", "(", "value1", ")", ")", "&", "set", "(", "cls", ".", "_get_field_names", "(", "value2", ")", ")", ")", "raise", "FieldError", "(", "\"Ordering differs between models. Choices are: %s\"", "%", "', '", ".", "join", "(", "valid_field_names", ")", ")", "# By default, order by the pk.", "if", "not", "field_names", ":", "field_names", "=", "[", "'pk'", "]", "# TODO Figure out if we don't need to generate this comparator every", "# time.", "return", "cls", ".", "_generate_comparator", "(", "field_names", ")", "(", "value1", ",", "value2", ")", "return", "cmp", "(", "value1", ",", "value2", ")" ]
Comparison method that takes into account Django's special rules when ordering by a field that is a model: 1. Try following the default ordering on the related model. 2. Order by the model's primary key, if there is no Meta.ordering.
[ "Comparison", "method", "that", "takes", "into", "account", "Django", "s", "special", "rules", "when", "ordering", "by", "a", "field", "that", "is", "a", "model", ":" ]
train
https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/__init__.py#L52-L80
percipient/django-querysetsequence
queryset_sequence/__init__.py
ComparatorMixin._generate_comparator
def _generate_comparator(cls, field_names): """ Construct a comparator function based on the field names. The comparator returns the first non-zero comparison value. Inputs: field_names (iterable of strings): The field names to sort on. Returns: A comparator function. """ # Ensure that field names is a list and not a tuple. field_names = list(field_names) # For fields that start with a '-', reverse the ordering of the # comparison. reverses = [1] * len(field_names) for i, field_name in enumerate(field_names): if field_name[0] == '-': reverses[i] = -1 field_names[i] = field_name[1:] field_names = [f.replace(LOOKUP_SEP, '.') for f in field_names] def comparator(i1, i2): # Get a tuple of values for comparison. v1 = attrgetter(*field_names)(i1) v2 = attrgetter(*field_names)(i2) # If there's only one arg supplied, attrgetter returns a single # item, directly return the result in this case. if len(field_names) == 1: return cls._cmp(v1, v2) * reverses[0] # Compare each field for the two items, reversing if necessary. order = multiply_iterables(list(map(cls._cmp, v1, v2)), reverses) try: # The first non-zero element. return next(dropwhile(__not__, order)) except StopIteration: # Everything was equivalent. return 0 return comparator
python
def _generate_comparator(cls, field_names): """ Construct a comparator function based on the field names. The comparator returns the first non-zero comparison value. Inputs: field_names (iterable of strings): The field names to sort on. Returns: A comparator function. """ # Ensure that field names is a list and not a tuple. field_names = list(field_names) # For fields that start with a '-', reverse the ordering of the # comparison. reverses = [1] * len(field_names) for i, field_name in enumerate(field_names): if field_name[0] == '-': reverses[i] = -1 field_names[i] = field_name[1:] field_names = [f.replace(LOOKUP_SEP, '.') for f in field_names] def comparator(i1, i2): # Get a tuple of values for comparison. v1 = attrgetter(*field_names)(i1) v2 = attrgetter(*field_names)(i2) # If there's only one arg supplied, attrgetter returns a single # item, directly return the result in this case. if len(field_names) == 1: return cls._cmp(v1, v2) * reverses[0] # Compare each field for the two items, reversing if necessary. order = multiply_iterables(list(map(cls._cmp, v1, v2)), reverses) try: # The first non-zero element. return next(dropwhile(__not__, order)) except StopIteration: # Everything was equivalent. return 0 return comparator
[ "def", "_generate_comparator", "(", "cls", ",", "field_names", ")", ":", "# Ensure that field names is a list and not a tuple.", "field_names", "=", "list", "(", "field_names", ")", "# For fields that start with a '-', reverse the ordering of the", "# comparison.", "reverses", "=", "[", "1", "]", "*", "len", "(", "field_names", ")", "for", "i", ",", "field_name", "in", "enumerate", "(", "field_names", ")", ":", "if", "field_name", "[", "0", "]", "==", "'-'", ":", "reverses", "[", "i", "]", "=", "-", "1", "field_names", "[", "i", "]", "=", "field_name", "[", "1", ":", "]", "field_names", "=", "[", "f", ".", "replace", "(", "LOOKUP_SEP", ",", "'.'", ")", "for", "f", "in", "field_names", "]", "def", "comparator", "(", "i1", ",", "i2", ")", ":", "# Get a tuple of values for comparison.", "v1", "=", "attrgetter", "(", "*", "field_names", ")", "(", "i1", ")", "v2", "=", "attrgetter", "(", "*", "field_names", ")", "(", "i2", ")", "# If there's only one arg supplied, attrgetter returns a single", "# item, directly return the result in this case.", "if", "len", "(", "field_names", ")", "==", "1", ":", "return", "cls", ".", "_cmp", "(", "v1", ",", "v2", ")", "*", "reverses", "[", "0", "]", "# Compare each field for the two items, reversing if necessary.", "order", "=", "multiply_iterables", "(", "list", "(", "map", "(", "cls", ".", "_cmp", ",", "v1", ",", "v2", ")", ")", ",", "reverses", ")", "try", ":", "# The first non-zero element.", "return", "next", "(", "dropwhile", "(", "__not__", ",", "order", ")", ")", "except", "StopIteration", ":", "# Everything was equivalent.", "return", "0", "return", "comparator" ]
Construct a comparator function based on the field names. The comparator returns the first non-zero comparison value. Inputs: field_names (iterable of strings): The field names to sort on. Returns: A comparator function.
[ "Construct", "a", "comparator", "function", "based", "on", "the", "field", "names", ".", "The", "comparator", "returns", "the", "first", "non", "-", "zero", "comparison", "value", "." ]
train
https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/__init__.py#L83-L127
percipient/django-querysetsequence
queryset_sequence/__init__.py
QuerySequenceIterable._ordered_iterator
def _ordered_iterator(self): """ Interleave the values of each QuerySet in order to handle the requested ordering. Also adds the '#' property to each returned item. """ # A list of tuples, each with: # * The iterable # * The QuerySet number # * The next value # # (Remember that each QuerySet is already sorted.) iterables = [] for i, qs in zip(self._queryset_idxs, self._querysets): it = iter(qs) try: value = next(it) except StopIteration: # If this is already empty, just skip it. continue # Set the QuerySet number so that the comparison works properly. setattr(value, '#', i) iterables.append((it, i, value)) # The offset of items returned. index = 0 # Create a comparison function based on the requested ordering. _comparator = self._generate_comparator(self._order_by) def comparator(tuple_1, tuple_2): # The last element in each tuple is the actual item to compare. return _comparator(tuple_1[2], tuple_2[2]) comparator = functools.cmp_to_key(comparator) # If in reverse mode, get the last value instead of the first value from # ordered_values below. if self._standard_ordering: next_value_ind = 0 else: next_value_ind = -1 # Continue until all iterables are empty. while iterables: # If there's only one iterator left, don't bother sorting. if len(iterables) > 1: # Sort the current values for each iterable. iterables = sorted(iterables, key=comparator) # The next ordering item is in the first position, unless we're # in reverse mode. it, i, value = iterables[next_value_ind] else: it, i, value = iterables[0] # Return the next value if we're within the slice of interest. if self._low_mark <= index: yield value index += 1 # We've left the slice of interest, we're done. if index == self._high_mark: return # Iterate the iterable that just lost a value. try: value = next(it) # Set the QuerySet number so that the comparison works properly. setattr(value, '#', i) iterables[next_value_ind] = it, i, value except StopIteration: # This iterator is done, remove it. 
del iterables[next_value_ind]
python
def _ordered_iterator(self): """ Interleave the values of each QuerySet in order to handle the requested ordering. Also adds the '#' property to each returned item. """ # A list of tuples, each with: # * The iterable # * The QuerySet number # * The next value # # (Remember that each QuerySet is already sorted.) iterables = [] for i, qs in zip(self._queryset_idxs, self._querysets): it = iter(qs) try: value = next(it) except StopIteration: # If this is already empty, just skip it. continue # Set the QuerySet number so that the comparison works properly. setattr(value, '#', i) iterables.append((it, i, value)) # The offset of items returned. index = 0 # Create a comparison function based on the requested ordering. _comparator = self._generate_comparator(self._order_by) def comparator(tuple_1, tuple_2): # The last element in each tuple is the actual item to compare. return _comparator(tuple_1[2], tuple_2[2]) comparator = functools.cmp_to_key(comparator) # If in reverse mode, get the last value instead of the first value from # ordered_values below. if self._standard_ordering: next_value_ind = 0 else: next_value_ind = -1 # Continue until all iterables are empty. while iterables: # If there's only one iterator left, don't bother sorting. if len(iterables) > 1: # Sort the current values for each iterable. iterables = sorted(iterables, key=comparator) # The next ordering item is in the first position, unless we're # in reverse mode. it, i, value = iterables[next_value_ind] else: it, i, value = iterables[0] # Return the next value if we're within the slice of interest. if self._low_mark <= index: yield value index += 1 # We've left the slice of interest, we're done. if index == self._high_mark: return # Iterate the iterable that just lost a value. try: value = next(it) # Set the QuerySet number so that the comparison works properly. setattr(value, '#', i) iterables[next_value_ind] = it, i, value except StopIteration: # This iterator is done, remove it. 
del iterables[next_value_ind]
[ "def", "_ordered_iterator", "(", "self", ")", ":", "# A list of tuples, each with:", "# * The iterable", "# * The QuerySet number", "# * The next value", "#", "# (Remember that each QuerySet is already sorted.)", "iterables", "=", "[", "]", "for", "i", ",", "qs", "in", "zip", "(", "self", ".", "_queryset_idxs", ",", "self", ".", "_querysets", ")", ":", "it", "=", "iter", "(", "qs", ")", "try", ":", "value", "=", "next", "(", "it", ")", "except", "StopIteration", ":", "# If this is already empty, just skip it.", "continue", "# Set the QuerySet number so that the comparison works properly.", "setattr", "(", "value", ",", "'#'", ",", "i", ")", "iterables", ".", "append", "(", "(", "it", ",", "i", ",", "value", ")", ")", "# The offset of items returned.", "index", "=", "0", "# Create a comparison function based on the requested ordering.", "_comparator", "=", "self", ".", "_generate_comparator", "(", "self", ".", "_order_by", ")", "def", "comparator", "(", "tuple_1", ",", "tuple_2", ")", ":", "# The last element in each tuple is the actual item to compare.", "return", "_comparator", "(", "tuple_1", "[", "2", "]", ",", "tuple_2", "[", "2", "]", ")", "comparator", "=", "functools", ".", "cmp_to_key", "(", "comparator", ")", "# If in reverse mode, get the last value instead of the first value from", "# ordered_values below.", "if", "self", ".", "_standard_ordering", ":", "next_value_ind", "=", "0", "else", ":", "next_value_ind", "=", "-", "1", "# Continue until all iterables are empty.", "while", "iterables", ":", "# If there's only one iterator left, don't bother sorting.", "if", "len", "(", "iterables", ")", ">", "1", ":", "# Sort the current values for each iterable.", "iterables", "=", "sorted", "(", "iterables", ",", "key", "=", "comparator", ")", "# The next ordering item is in the first position, unless we're", "# in reverse mode.", "it", ",", "i", ",", "value", "=", "iterables", "[", "next_value_ind", "]", "else", ":", "it", ",", "i", ",", "value", "=", 
"iterables", "[", "0", "]", "# Return the next value if we're within the slice of interest.", "if", "self", ".", "_low_mark", "<=", "index", ":", "yield", "value", "index", "+=", "1", "# We've left the slice of interest, we're done.", "if", "index", "==", "self", ".", "_high_mark", ":", "return", "# Iterate the iterable that just lost a value.", "try", ":", "value", "=", "next", "(", "it", ")", "# Set the QuerySet number so that the comparison works properly.", "setattr", "(", "value", ",", "'#'", ",", "i", ")", "iterables", "[", "next_value_ind", "]", "=", "it", ",", "i", ",", "value", "except", "StopIteration", ":", "# This iterator is done, remove it.", "del", "iterables", "[", "next_value_ind", "]" ]
Interleave the values of each QuerySet in order to handle the requested ordering. Also adds the '#' property to each returned item.
[ "Interleave", "the", "values", "of", "each", "QuerySet", "in", "order", "to", "handle", "the", "requested", "ordering", ".", "Also", "adds", "the", "#", "property", "to", "each", "returned", "item", "." ]
train
https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/__init__.py#L140-L209
percipient/django-querysetsequence
queryset_sequence/__init__.py
QuerySequenceIterable._unordered_iterator
def _unordered_iterator(self): """ Return the value of each QuerySet, but also add the '#' property to each return item. """ for i, qs in zip(self._queryset_idxs, self._querysets): for item in qs: setattr(item, '#', i) yield item
python
def _unordered_iterator(self): """ Return the value of each QuerySet, but also add the '#' property to each return item. """ for i, qs in zip(self._queryset_idxs, self._querysets): for item in qs: setattr(item, '#', i) yield item
[ "def", "_unordered_iterator", "(", "self", ")", ":", "for", "i", ",", "qs", "in", "zip", "(", "self", ".", "_queryset_idxs", ",", "self", ".", "_querysets", ")", ":", "for", "item", "in", "qs", ":", "setattr", "(", "item", ",", "'#'", ",", "i", ")", "yield", "item" ]
Return the value of each QuerySet, but also add the '#' property to each return item.
[ "Return", "the", "value", "of", "each", "QuerySet", "but", "also", "add", "the", "#", "property", "to", "each", "return", "item", "." ]
train
https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/__init__.py#L211-L219
percipient/django-querysetsequence
queryset_sequence/__init__.py
QuerySetSequence._filter_or_exclude_querysets
def _filter_or_exclude_querysets(self, negate, **kwargs): """ Similar to QuerySet._filter_or_exclude, but run over the QuerySets in the QuerySetSequence instead of over each QuerySet's fields. """ # Ensure negate is a boolean. negate = bool(negate) for kwarg, value in kwargs.items(): parts = kwarg.split(LOOKUP_SEP) # Ensure this is being used to filter QuerySets. if parts[0] != '#': raise ValueError("Keyword '%s' is not a valid keyword to filter over, " "it must begin with '#'." % kwarg) # Don't allow __ multiple times. if len(parts) > 2: raise ValueError("Keyword '%s' must not contain multiple " "lookup seperators." % kwarg) # The actual lookup is the second part. try: lookup = parts[1] except IndexError: lookup = 'exact' # Math operators that all have the same logic. LOOKUP_TO_OPERATOR = { 'exact': eq, 'iexact': eq, 'gt': gt, 'gte': ge, 'lt': lt, 'lte': le, } try: operator = LOOKUP_TO_OPERATOR[lookup] # These expect integers, this matches the logic in # IntegerField.get_prep_value(). (Essentially treat the '#' # field as an IntegerField.) if value is not None: value = int(value) self._queryset_idxs = filter(lambda i: operator(i, value) != negate, self._queryset_idxs) continue except KeyError: # It wasn't one of the above operators, keep trying. pass # Some of these seem to get handled as bytes. if lookup in ('contains', 'icontains'): value = six.text_type(value) self._queryset_idxs = filter(lambda i: (value in six.text_type(i)) != negate, self._queryset_idxs) elif lookup == 'in': self._queryset_idxs = filter(lambda i: (i in value) != negate, self._queryset_idxs) elif lookup in ('startswith', 'istartswith'): value = six.text_type(value) self._queryset_idxs = filter(lambda i: six.text_type(i).startswith(value) != negate, self._queryset_idxs) elif lookup in ('endswith', 'iendswith'): value = six.text_type(value) self._queryset_idxs = filter(lambda i: six.text_type(i).endswith(value) != negate, self._queryset_idxs) elif lookup == 'range': # Inclusive include. 
start, end = value self._queryset_idxs = filter(lambda i: (start <= i <= end) != negate, self._queryset_idxs) else: # Any other field lookup is not supported, e.g. date, year, month, # day, week_day, hour, minute, second, isnull, search, regex, and # iregex. raise ValueError("Unsupported lookup '%s'" % lookup) # Convert back to a list on Python 3. self._queryset_idxs = list(self._queryset_idxs) # Finally, keep only the QuerySets we care about! self._querysets = [self._querysets[i] for i in self._queryset_idxs]
python
def _filter_or_exclude_querysets(self, negate, **kwargs):
    """
    Similar to QuerySet._filter_or_exclude, but run over the QuerySets in
    the QuerySetSequence instead of over each QuerySet's fields.

    Each keyword must be '#' or '#__<lookup>'; the value selects which of
    the underlying QuerySets (by position) are kept, or dropped when
    ``negate`` is true.

    Raises ValueError for keywords that do not start with '#', that
    contain more than one lookup separator, or that use an unsupported
    lookup.
    """
    # Ensure negate is a boolean.
    negate = bool(negate)

    # Math operators that all have the same logic. This mapping is
    # constant, so build it once instead of once per keyword.
    LOOKUP_TO_OPERATOR = {
        'exact': eq,
        'iexact': eq,
        'gt': gt,
        'gte': ge,
        'lt': lt,
        'lte': le,
    }

    for kwarg, value in kwargs.items():
        parts = kwarg.split(LOOKUP_SEP)

        # Ensure this is being used to filter QuerySets.
        if parts[0] != '#':
            raise ValueError("Keyword '%s' is not a valid keyword to filter over, "
                             "it must begin with '#'." % kwarg)

        # Don't allow __ multiple times.
        if len(parts) > 2:
            raise ValueError("Keyword '%s' must not contain multiple "
                             "lookup seperators." % kwarg)

        # The actual lookup is the second part; default to 'exact'.
        try:
            lookup = parts[1]
        except IndexError:
            lookup = 'exact'

        # NOTE: every branch below filters *eagerly* with a list
        # comprehension instead of a lazy filter(). A lazy filter's lambda
        # captures `value`, `operator`, and `negate` by reference, so with
        # multiple keywords the stacked filters would only be evaluated
        # after the loop finishes — applying the *last* keyword's values
        # to every filter.
        operator = LOOKUP_TO_OPERATOR.get(lookup)
        if operator is not None:
            # These expect integers, this matches the logic in
            # IntegerField.get_prep_value(). (Essentially treat the '#'
            # field as an IntegerField.)
            if value is not None:
                value = int(value)

            self._queryset_idxs = [
                i for i in self._queryset_idxs
                if operator(i, value) != negate]
            continue

        # Some of these seem to get handled as bytes.
        if lookup in ('contains', 'icontains'):
            value = six.text_type(value)
            self._queryset_idxs = [
                i for i in self._queryset_idxs
                if (value in six.text_type(i)) != negate]
        elif lookup == 'in':
            self._queryset_idxs = [
                i for i in self._queryset_idxs
                if (i in value) != negate]
        elif lookup in ('startswith', 'istartswith'):
            value = six.text_type(value)
            self._queryset_idxs = [
                i for i in self._queryset_idxs
                if six.text_type(i).startswith(value) != negate]
        elif lookup in ('endswith', 'iendswith'):
            value = six.text_type(value)
            self._queryset_idxs = [
                i for i in self._queryset_idxs
                if six.text_type(i).endswith(value) != negate]
        elif lookup == 'range':
            # Inclusive on both ends.
            start, end = value
            self._queryset_idxs = [
                i for i in self._queryset_idxs
                if (start <= i <= end) != negate]
        else:
            # Any other field lookup is not supported, e.g. date, year, month,
            # day, week_day, hour, minute, second, isnull, search, regex, and
            # iregex.
            raise ValueError("Unsupported lookup '%s'" % lookup)

    # The comprehensions above already yield lists, so no final list()
    # conversion is needed.

    # Finally, keep only the QuerySets we care about!
    self._querysets = [self._querysets[i] for i in self._queryset_idxs]
[ "def", "_filter_or_exclude_querysets", "(", "self", ",", "negate", ",", "*", "*", "kwargs", ")", ":", "# Ensure negate is a boolean.", "negate", "=", "bool", "(", "negate", ")", "for", "kwarg", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "parts", "=", "kwarg", ".", "split", "(", "LOOKUP_SEP", ")", "# Ensure this is being used to filter QuerySets.", "if", "parts", "[", "0", "]", "!=", "'#'", ":", "raise", "ValueError", "(", "\"Keyword '%s' is not a valid keyword to filter over, \"", "\"it must begin with '#'.\"", "%", "kwarg", ")", "# Don't allow __ multiple times.", "if", "len", "(", "parts", ")", ">", "2", ":", "raise", "ValueError", "(", "\"Keyword '%s' must not contain multiple \"", "\"lookup seperators.\"", "%", "kwarg", ")", "# The actual lookup is the second part.", "try", ":", "lookup", "=", "parts", "[", "1", "]", "except", "IndexError", ":", "lookup", "=", "'exact'", "# Math operators that all have the same logic.", "LOOKUP_TO_OPERATOR", "=", "{", "'exact'", ":", "eq", ",", "'iexact'", ":", "eq", ",", "'gt'", ":", "gt", ",", "'gte'", ":", "ge", ",", "'lt'", ":", "lt", ",", "'lte'", ":", "le", ",", "}", "try", ":", "operator", "=", "LOOKUP_TO_OPERATOR", "[", "lookup", "]", "# These expect integers, this matches the logic in", "# IntegerField.get_prep_value(). 
(Essentially treat the '#'", "# field as an IntegerField.)", "if", "value", "is", "not", "None", ":", "value", "=", "int", "(", "value", ")", "self", ".", "_queryset_idxs", "=", "filter", "(", "lambda", "i", ":", "operator", "(", "i", ",", "value", ")", "!=", "negate", ",", "self", ".", "_queryset_idxs", ")", "continue", "except", "KeyError", ":", "# It wasn't one of the above operators, keep trying.", "pass", "# Some of these seem to get handled as bytes.", "if", "lookup", "in", "(", "'contains'", ",", "'icontains'", ")", ":", "value", "=", "six", ".", "text_type", "(", "value", ")", "self", ".", "_queryset_idxs", "=", "filter", "(", "lambda", "i", ":", "(", "value", "in", "six", ".", "text_type", "(", "i", ")", ")", "!=", "negate", ",", "self", ".", "_queryset_idxs", ")", "elif", "lookup", "==", "'in'", ":", "self", ".", "_queryset_idxs", "=", "filter", "(", "lambda", "i", ":", "(", "i", "in", "value", ")", "!=", "negate", ",", "self", ".", "_queryset_idxs", ")", "elif", "lookup", "in", "(", "'startswith'", ",", "'istartswith'", ")", ":", "value", "=", "six", ".", "text_type", "(", "value", ")", "self", ".", "_queryset_idxs", "=", "filter", "(", "lambda", "i", ":", "six", ".", "text_type", "(", "i", ")", ".", "startswith", "(", "value", ")", "!=", "negate", ",", "self", ".", "_queryset_idxs", ")", "elif", "lookup", "in", "(", "'endswith'", ",", "'iendswith'", ")", ":", "value", "=", "six", ".", "text_type", "(", "value", ")", "self", ".", "_queryset_idxs", "=", "filter", "(", "lambda", "i", ":", "six", ".", "text_type", "(", "i", ")", ".", "endswith", "(", "value", ")", "!=", "negate", ",", "self", ".", "_queryset_idxs", ")", "elif", "lookup", "==", "'range'", ":", "# Inclusive include.", "start", ",", "end", "=", "value", "self", ".", "_queryset_idxs", "=", "filter", "(", "lambda", "i", ":", "(", "start", "<=", "i", "<=", "end", ")", "!=", "negate", ",", "self", ".", "_queryset_idxs", ")", "else", ":", "# Any other field lookup is not supported, e.g. 
date, year, month,", "# day, week_day, hour, minute, second, isnull, search, regex, and", "# iregex.", "raise", "ValueError", "(", "\"Unsupported lookup '%s'\"", "%", "lookup", ")", "# Convert back to a list on Python 3.", "self", ".", "_queryset_idxs", "=", "list", "(", "self", ".", "_queryset_idxs", ")", "# Finally, keep only the QuerySets we care about!", "self", ".", "_querysets", "=", "[", "self", ".", "_querysets", "[", "i", "]", "for", "i", "in", "self", ".", "_queryset_idxs", "]" ]
Similar to QuerySet._filter_or_exclude, but run over the QuerySets in the QuerySetSequence instead of over each QuerySet's fields.
[ "Similar", "to", "QuerySet", ".", "_filter_or_exclude", "but", "run", "over", "the", "QuerySets", "in", "the", "QuerySetSequence", "instead", "of", "over", "each", "QuerySet", "s", "fields", "." ]
train
https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/__init__.py#L445-L527