Dataset schema (one record per function): body (string, 26 to 98.2k characters), body_hash (int64), docstring (string, 1 to 16.8k characters), path (string, 5 to 230 characters), name (string, 1 to 96 characters), repository_name (string, 7 to 89 characters), lang (string, single class), body_without_docstring (string, 20 to 98.2k characters).
def autocontrast(image):
    'Implements Autocontrast function from PIL using TF ops.'

    def scale_channel(channel):
        'Scale the 2D image using the autocontrast rule.'
        lo = tf.cast(tf.reduce_min(channel), tf.float32)
        hi = tf.cast(tf.reduce_max(channel), tf.float32)

        def scale_values(im):
            scale = 255.0 / (hi - lo)
            offset = (-lo) * scale
            im = (tf.cast(im, tf.float32) * scale) + offset
            return tf.saturate_cast(im, tf.uint8)

        result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel)
        return result

    s1 = scale_channel(image[:, :, 0])
    s2 = scale_channel(image[:, :, 1])
    s3 = scale_channel(image[:, :, 2])
    image = tf.stack([s1, s2, s3], 2)
    return image
-809,971,772,111,610,000
Implements Autocontrast function from PIL using TF ops.
third_party/augment_ops.py
autocontrast
google-research/crest
python
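A minimal usage sketch for the op above, assuming TensorFlow 2.x is installed, the input is an unbatched uint8 RGB image, and the module is importable under the path field shown in this record (third_party.augment_ops); the pixel values are illustrative only:

import tensorflow as tf
from third_party.augment_ops import autocontrast  # assumed import path

# Hypothetical 2x2 RGB uint8 image; autocontrast stretches each channel so
# its per-channel minimum maps to 0 and its maximum maps to 255.
image = tf.constant(
    [[[10, 50, 90], [20, 60, 100]],
     [[30, 70, 110], [40, 80, 120]]], dtype=tf.uint8)

contrasted = autocontrast(image)
print(contrasted.shape, contrasted.dtype)  # (2, 2, 3) uint8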
def autocontrast_blend(image, factor):
    'Implements blend of autocontrast with original image.'
    return blend(autocontrast(image), image, factor)
837,949,824,050,951,700
Implements blend of autocontrast with original image.
third_party/augment_ops.py
autocontrast_blend
google-research/crest
python
def sharpness(image, factor):
    'Implements Sharpness function from PIL using TF ops.'
    orig_im = image
    image = tf.cast(image, tf.float32)
    image = tf.expand_dims(image, 0)
    kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]],
                         dtype=tf.float32, shape=[3, 3, 1, 1]) / 13.0
    kernel = tf.tile(kernel, [1, 1, 3, 1])
    strides = [1, 1, 1, 1]
    degenerate = tf.nn.depthwise_conv2d(image, kernel, strides,
                                        padding='VALID', dilations=[1, 1])
    degenerate = tf.squeeze(tf.saturate_cast(degenerate, tf.uint8), [0])
    mask = tf.ones_like(degenerate)
    padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
    padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
    result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_im)
    return blend(result, orig_im, factor)
-2,921,100,399,587,338,000
Implements Sharpness function from PIL using TF ops.
third_party/augment_ops.py
sharpness
google-research/crest
python
def equalize(image):
    'Implements Equalize function from PIL using TF ops.'

    def scale_channel(im, c):
        'Scale the data in the channel to implement equalize.'
        im = tf.cast(im[:, :, c], tf.int32)
        histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
        nonzero = tf.where(tf.not_equal(histo, 0))
        nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
        step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255

        def build_lut(histo, step):
            lut = (tf.cumsum(histo) + (step // 2)) // step
            lut = tf.concat([[0], lut[:-1]], 0)
            return tf.clip_by_value(lut, 0, 255)

        result = tf.cond(tf.equal(step, 0),
                         lambda: im,
                         lambda: tf.gather(build_lut(histo, step), im))
        return tf.cast(result, tf.uint8)

    s1 = scale_channel(image, 0)
    s2 = scale_channel(image, 1)
    s3 = scale_channel(image, 2)
    image = tf.stack([s1, s2, s3], 2)
    return image
-4,360,158,787,895,066,600
Implements Equalize function from PIL using TF ops.
third_party/augment_ops.py
equalize
google-research/crest
python
def equalize_blend(image, factor):
    'Implements blend of equalize with original image.'
    return blend(equalize(image), image, factor)
-6,565,587,435,892,967,000
Implements blend of equalize with original image.
third_party/augment_ops.py
equalize_blend
google-research/crest
python
def blur(image, factor):
    'Blur with the same kernel as ImageFilter.BLUR.'
    blur_kernel = tf.constant([[1.0, 1.0, 1.0, 1.0, 1.0],
                               [1.0, 0.0, 0.0, 0.0, 1.0],
                               [1.0, 0.0, 0.0, 0.0, 1.0],
                               [1.0, 0.0, 0.0, 0.0, 1.0],
                               [1.0, 1.0, 1.0, 1.0, 1.0]],
                              dtype=tf.float32, shape=[5, 5, 1, 1]) / 16.0
    blurred_im = _convolve_image_with_kernel(image, blur_kernel)
    return blend(image, blurred_im, factor)
1,411,185,370,369,190,700
Blur with the same kernel as ImageFilter.BLUR.
third_party/augment_ops.py
blur
google-research/crest
python
def smooth(image, factor):
    'Smooth with the same kernel as ImageFilter.SMOOTH.'
    smooth_kernel = tf.constant([[1.0, 1.0, 1.0],
                                 [1.0, 5.0, 1.0],
                                 [1.0, 1.0, 1.0]],
                                dtype=tf.float32, shape=[3, 3, 1, 1]) / 13.0
    smoothed_im = _convolve_image_with_kernel(image, smooth_kernel)
    return blend(image, smoothed_im, factor)
-3,157,272,762,969,639,400
Smooth with the same kernel as ImageFilter.SMOOTH.
third_party/augment_ops.py
smooth
google-research/crest
python
def rescale(image, level):
    'Rescales image and enlarges the corner.'
    size = image.shape[:2]
    scale = level * 0.25
    scale_height = tf.cast(scale * size[0], tf.int32)
    scale_width = tf.cast(scale * size[1], tf.int32)
    cropped_image = tf.image.crop_to_bounding_box(
        image,
        offset_height=scale_height,
        offset_width=scale_width,
        target_height=size[0] - scale_height,
        target_width=size[1] - scale_width)
    rescaled = tf.image.resize(cropped_image, size, tf.image.ResizeMethod.BICUBIC)
    return tf.saturate_cast(rescaled, tf.uint8)
6,311,872,618,237,034,000
Rescales image and enlarges the corner.
third_party/augment_ops.py
rescale
google-research/crest
python
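A small sketch of how rescale behaves, assuming an unbatched uint8 image and the same assumed import path as above; level is an assumed magnitude (the crop offset is level * 0.25 of each dimension), and the zero image is a placeholder:

import tensorflow as tf
from third_party.augment_ops import rescale  # assumed import path

image = tf.zeros([32, 32, 3], dtype=tf.uint8)  # placeholder image

# level=1 drops the top 25% of rows and the left 25% of columns, then resizes
# the remaining 24x24 crop back to 32x32 with bicubic interpolation.
zoomed = rescale(image, level=1)
print(zoomed.shape)  # (32, 32, 3)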
def scale_channel(channel):
    'Scale the 2D image using the autocontrast rule.'
    lo = tf.cast(tf.reduce_min(channel), tf.float32)
    hi = tf.cast(tf.reduce_max(channel), tf.float32)

    def scale_values(im):
        scale = 255.0 / (hi - lo)
        offset = (-lo) * scale
        im = (tf.cast(im, tf.float32) * scale) + offset
        return tf.saturate_cast(im, tf.uint8)

    result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel)
    return result
899,198,447,933,424,800
Scale the 2D image using the autocontrast rule.
third_party/augment_ops.py
scale_channel
google-research/crest
python
def scale_channel(im, c):
    'Scale the data in the channel to implement equalize.'
    im = tf.cast(im[:, :, c], tf.int32)
    histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
    nonzero = tf.where(tf.not_equal(histo, 0))
    nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
    step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255

    def build_lut(histo, step):
        lut = (tf.cumsum(histo) + (step // 2)) // step
        lut = tf.concat([[0], lut[:-1]], 0)
        return tf.clip_by_value(lut, 0, 255)

    result = tf.cond(tf.equal(step, 0),
                     lambda: im,
                     lambda: tf.gather(build_lut(histo, step), im))
    return tf.cast(result, tf.uint8)
6,353,019,026,156,998,000
Scale the data in the channel to implement equalize.
third_party/augment_ops.py
scale_channel
google-research/crest
python
def get_author_name(soup): "Get the author's name from its main page.\n\n Args:\n soup (bs4.element.Tag): connection to the author page.\n\n Returns:\n string: name of the author.\n\n Examples::\n >>> from scrapereads import connect\n >>> url = 'https://www.goodreads.com/author/show/1077326'\n >>> soup = connect(url)\n >>> get_author_name(soup)\n J.K. Rowling\n\n " author_h1 = soup.find('h1', attrs={'class': 'authorName'}) return author_h1.find('span').text
-2,567,605,535,743,929,300
Get the author's name from its main page. Args: soup (bs4.element.Tag): connection to the author page. Returns: string: name of the author. Examples:: >>> from scrapereads import connect >>> url = 'https://www.goodreads.com/author/show/1077326' >>> soup = connect(url) >>> get_author_name(soup) J.K. Rowling
scrapereads/scrape.py
get_author_name
arthurdjn/scrape-goodreads
python
def get_author_name(soup): "Get the author's name from its main page.\n\n Args:\n soup (bs4.element.Tag): connection to the author page.\n\n Returns:\n string: name of the author.\n\n Examples::\n >>> from scrapereads import connect\n >>> url = 'https://www.goodreads.com/author/show/1077326'\n >>> soup = connect(url)\n >>> get_author_name(soup)\n J.K. Rowling\n\n " author_h1 = soup.find('h1', attrs={'class': 'authorName'}) return author_h1.find('span').text
def get_author_desc(soup): "Get the author description / biography.\n\n Args:\n soup (bs4.element.Tag): connection to the author page.\n\n Returns:\n str: long description of the author.\n\n Examples::\n >>> from scrapereads import connect\n >>> url = 'https://www.goodreads.com/author/show/1077326'\n >>> soup = connect(url)\n >>> get_author_desc(soup)\n See also: Robert Galbraith\n Although she writes under the pen name J.K. Rowling, pronounced like rolling,\n her name when her first Harry Potter book was published was simply Joanne Rowling.\n ...\n\n " author_info_desc = soup.find('div', attrs={'class': 'aboutAuthorInfo'}) author_info_long = author_info_desc.findAll('span')[(- 1)] long_desc = '' for sentence in author_info_long.children: if isinstance(sentence, bs4.element.Tag): if (sentence.name == 'br'): long_desc += '\n' else: long_desc += sentence.text else: long_desc += sentence long_desc = long_desc.replace('’', "'") return long_desc
-7,998,133,341,309,047,000
Get the author description / biography. Args: soup (bs4.element.Tag): connection to the author page. Returns: str: long description of the author. Examples:: >>> from scrapereads import connect >>> url = 'https://www.goodreads.com/author/show/1077326' >>> soup = connect(url) >>> get_author_desc(soup) See also: Robert Galbraith Although she writes under the pen name J.K. Rowling, pronounced like rolling, her name when her first Harry Potter book was published was simply Joanne Rowling. ...
scrapereads/scrape.py
get_author_desc
arthurdjn/scrape-goodreads
python
def get_author_desc(soup): "Get the author description / biography.\n\n Args:\n soup (bs4.element.Tag): connection to the author page.\n\n Returns:\n str: long description of the author.\n\n Examples::\n >>> from scrapereads import connect\n >>> url = 'https://www.goodreads.com/author/show/1077326'\n >>> soup = connect(url)\n >>> get_author_desc(soup)\n See also: Robert Galbraith\n Although she writes under the pen name J.K. Rowling, pronounced like rolling,\n her name when her first Harry Potter book was published was simply Joanne Rowling.\n ...\n\n " author_info_desc = soup.find('div', attrs={'class': 'aboutAuthorInfo'}) author_info_long = author_info_desc.findAll('span')[(- 1)] long_desc = for sentence in author_info_long.children: if isinstance(sentence, bs4.element.Tag): if (sentence.name == 'br'): long_desc += '\n' else: long_desc += sentence.text else: long_desc += sentence long_desc = long_desc.replace('’', "'") return long_desc
def get_author_info(soup):
    """Get all information from an author (genres, influences, website etc.).

    Args:
        soup (bs4.element.Tag): author page connection.

    Returns:
        dict

    """
    container = soup.find('div', attrs={'class': 'rightContainer'})
    author_info = {}
    data_div = container.find('br', attrs={'class': 'clear'})
    while data_div:
        if data_div.name:
            data_class = data_div.get('class')[0]
            if data_class == 'aboutAuthorInfo':
                break
            elif data_class == 'dataTitle':
                key = data_div.text.strip()
                author_info[key] = []
                if data_div.text == 'Born':
                    data_div = data_div.next_sibling
                    author_info[key].append(data_div.strip())
                elif data_div.text == 'Influences':
                    data_div = data_div.next_sibling.next_sibling
                    data_items = data_div.findAll('span')[-1].findAll('a')
                    for data_a in data_items:
                        author_info[key].append(data_a.text.strip())
                elif data_div.text == 'Member Since':
                    data_div = data_div.next_sibling.next_sibling
                    author_info[key].append(data_div.text.strip())
            else:
                data_items = data_div.findAll('a')
                for data_a in data_items:
                    author_info[key].append(data_a.text.strip())
        data_div = data_div.next_sibling
    author_info.update({'Description': get_author_desc(soup)})
    return author_info
-19,904,277,778,161,580
Get all information from an author (genres, influences, website etc.). Args: soup (bs4.element.Tag): author page connection. Returns: dict
scrapereads/scrape.py
get_author_info
arthurdjn/scrape-goodreads
python
def scrape_quotes_container(soup):
    """Get the quote container from a quote page.

    Args:
        soup (bs4.element.Tag): connection to the quote page.

    Returns:
        bs4.element.Tag

    """
    return soup.findAll('div', attrs={'class': 'quotes'})
-2,257,911,790,353,518,800
Get the quote container from a quote page. Args: soup (bs4.element.Tag): connection to the quote page. Returns: bs4.element.Tag
scrapereads/scrape.py
scrape_quotes_container
arthurdjn/scrape-goodreads
python
def scrape_quotes(soup):
    """Retrieve all ``<div>`` quote element from a quote page.

    Args:
        soup (bs4.element.Tag): connection to the quote page.

    Returns:
        yield bs4.element.Tag

    """
    for container_div in scrape_quotes_container(soup):
        quote_div = container_div.find('div', attrs={'class': 'quote'})
        while quote_div:
            if (quote_div.name == 'div') and quote_div.get('class') and ('quote' in quote_div.get('class')):
                yield quote_div
            quote_div = quote_div.next_sibling
3,836,355,887,043,983,400
Retrieve all ``<div>`` quote element from a quote page. Args: soup (bs4.element.Tag): connection to the quote page. Returns: yield bs4.element.Tag
scrapereads/scrape.py
scrape_quotes
arthurdjn/scrape-goodreads
python
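A hedged end-to-end sketch of how these quote helpers compose, reusing the connect helper shown in the docstrings above; the quotes URL is illustrative, the module path is inferred from the path field (scrapereads/scrape.py), and Goodreads markup may have changed since this code was written:

from scrapereads import connect
from scrapereads.scrape import (scrape_quotes, get_quote_text,
                                get_quote_author_name, scrape_quote_tags)

# Hypothetical quotes page for an author (URL pattern adapted from the docstring examples).
url = 'https://www.goodreads.com/author/quotes/1077326'
soup = connect(url)

for quote_div in scrape_quotes(soup):
    text = get_quote_text(quote_div)
    author = get_quote_author_name(quote_div)
    tags = [tag.text for tag in scrape_quote_tags(quote_div)]
    print(author, text[:60], tags)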
def get_quote_text(quote_div):
    """Get the text from a ``<div>`` quote element.

    Args:
        quote_div (bs4.element.Tag): ``<div>`` quote element to extract the text.

    Returns:
        string

    """
    quote_text = ''
    text_iterator = quote_div.find('div', attrs={'class': 'quoteText'}).children
    for text in text_iterator:
        if text.name == 'br':
            quote_text += '\n'
        elif not text.name:
            quote_text += text.strip()
    quote_text = process_quote_text(quote_text)
    return quote_text
1,677,841,520,926,764,300
Get the text from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element to extract the text. Returns: string
scrapereads/scrape.py
get_quote_text
arthurdjn/scrape-goodreads
python
def scrape_quote_tags(quote_div):
    """Scrape tags from a ``<div>`` quote element.

    Args:
        quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.

    Returns:
        yield ``<a>`` tags

    """
    tags_container = quote_div.find('div', attrs={'class': 'greyText smallText left'})
    if tags_container:
        for tag in tags_container.children:
            if tag.name == 'a':
                yield tag
    return None
-8,704,284,436,144,329,000
Scrape tags from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: yield ``<a>`` tags
scrapereads/scrape.py
scrape_quote_tags
arthurdjn/scrape-goodreads
python
def get_quote_book(quote_div):
    """Get the reference (book) from a ``<div>`` quote element.

    Args:
        quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.

    Returns:
        bs4.element.Tag

    """
    quote_details = quote_div.find('div', attrs={'class': 'quoteText'})
    return quote_details.find('a', attrs={'class': 'authorOrTitle'})
325,821,846,411,896,200
Get the reference (book) from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: bs4.element.Tag
scrapereads/scrape.py
get_quote_book
arthurdjn/scrape-goodreads
python
def get_quote_author_name(quote_div): "Get the author's name from a ``<div>`` quote element.\n\n Args:\n quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.\n\n Returns:\n string\n\n " quote_text = quote_div.find('div', attrs={'class': 'quoteText '}) author_name = quote_text.find('span', attrs={'class': 'authorOrTitle'}).text return remove_punctuation(author_name).title()
-7,591,282,057,785,029,000
Get the author's name from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: string
scrapereads/scrape.py
get_quote_author_name
arthurdjn/scrape-goodreads
python
def get_quote_author_name(quote_div): "Get the author's name from a ``<div>`` quote element.\n\n Args:\n quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.\n\n Returns:\n string\n\n " quote_text = quote_div.find('div', attrs={'class': 'quoteText '}) author_name = quote_text.find('span', attrs={'class': 'authorOrTitle'}).text return remove_punctuation(author_name).title()
def get_quote_likes(quote_div):
    """Get the likes ``<a>`` tag from a ``<div>`` quote element.

    Args:
        quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.

    Returns:
        bs4.element.Tag: ``<a>`` tag for likes.

    """
    quote_footer = quote_div.find('div', attrs={'class': 'quoteFooter'})
    return quote_footer.find('a', attrs={'class': 'smallText'})
6,157,151,784,991,132,000
Get the likes ``<a>`` tag from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: bs4.element.Tag: ``<a>`` tag for likes.
scrapereads/scrape.py
get_quote_likes
arthurdjn/scrape-goodreads
python
def get_quote_name_id(quote_div):
    """Get the name and id of a ``<div>`` quote element.

    Args:
        quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.

    Returns:
        tuple: id and name.

    """
    quote_href = get_quote_likes(quote_div).get('href')
    quote_id = quote_href.split('/')[-1].split('-')[0]
    quote_name = '-'.join(quote_href.split('/')[-1].split('-')[1:])
    return (quote_id, quote_name)
-2,960,141,701,786,695,700
Get the name and id of a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: tuple: id and name.
scrapereads/scrape.py
get_quote_name_id
arthurdjn/scrape-goodreads
python
def scrape_author_books(soup): "Retrieve books from an author's page.\n\n Args:\n soup (bs4.element.Tag): connection to an author books page.\n\n Returns:\n yield bs4.element.Tag: ``<tr>`` element.\n\n " table_tr = soup.find('tr') while table_tr: if (table_tr.name == 'tr'): (yield table_tr) table_tr = table_tr.next_sibling
-1,126,821,855,199,378,000
Retrieve books from an author's page. Args: soup (bs4.element.Tag): connection to an author books page. Returns: yield bs4.element.Tag: ``<tr>`` element.
scrapereads/scrape.py
scrape_author_books
arthurdjn/scrape-goodreads
python
def scrape_author_books(soup): "Retrieve books from an author's page.\n\n Args:\n soup (bs4.element.Tag): connection to an author books page.\n\n Returns:\n yield bs4.element.Tag: ``<tr>`` element.\n\n " table_tr = soup.find('tr') while table_tr: if (table_tr.name == 'tr'): (yield table_tr) table_tr = table_tr.next_sibling
def get_author_book_title(book_tr): "Get the book title ``<a>`` element from a table ``<tr>`` element from an author page.\n\n Args:\n book_tr (bs4.element.Tag): ``<tr>`` book element.\n\n Returns:\n bs4.element.Tag: book title ``<a>`` element.\n\n Examples::\n >>> for book_tr in scrape_author_books(soup):\n ... book_title = get_author_book_title(book_tr)\n ... print(book_title.text.strip(), book_title.get('href'))\n The Bell Jar /book/show/6514.The_Bell_Jar\n Ariel /book/show/395090.Ariel\n The Collected Poems /book/show/31426.The_Collected_Poems\n The Unabridged Journals of Sylvia Plath /book/show/11623.The_Unabridged_Journals_of_Sylvia_Plath\n\n " return book_tr.find('a', attrs={'class': 'bookTitle'})
-1,115,418,599,705,648,800
Get the book title ``<a>`` element from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: book title ``<a>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... book_title = get_author_book_title(book_tr) ... print(book_title.text.strip(), book_title.get('href')) The Bell Jar /book/show/6514.The_Bell_Jar Ariel /book/show/395090.Ariel The Collected Poems /book/show/31426.The_Collected_Poems The Unabridged Journals of Sylvia Plath /book/show/11623.The_Unabridged_Journals_of_Sylvia_Plath
scrapereads/scrape.py
get_author_book_title
arthurdjn/scrape-goodreads
python
def get_author_book_title(book_tr): "Get the book title ``<a>`` element from a table ``<tr>`` element from an author page.\n\n Args:\n book_tr (bs4.element.Tag): ``<tr>`` book element.\n\n Returns:\n bs4.element.Tag: book title ``<a>`` element.\n\n Examples::\n >>> for book_tr in scrape_author_books(soup):\n ... book_title = get_author_book_title(book_tr)\n ... print(book_title.text.strip(), book_title.get('href'))\n The Bell Jar /book/show/6514.The_Bell_Jar\n Ariel /book/show/395090.Ariel\n The Collected Poems /book/show/31426.The_Collected_Poems\n The Unabridged Journals of Sylvia Plath /book/show/11623.The_Unabridged_Journals_of_Sylvia_Plath\n\n " return book_tr.find('a', attrs={'class': 'bookTitle'})
def get_author_book_author(book_tr): "Get the author ``<a>`` element from a table ``<tr>`` element.\n\n Args:\n book_tr (bs4.element.Tag): ``<tr>`` book element.\n\n Returns:\n bs4.element.Tag: author name ``<a>`` element.\n\n Examples::\n >>> for book_tr in scrape_author_books(soup):\n ... book_author = get_author_book_author(book_tr)\n ... print(book_author.text, book_author.get('href'))\n Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath\n Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath\n Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath\n Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath\n Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath\n\n " return book_tr.find('a', attrs={'class': 'authorName'})
4,352,936,690,821,773,300
Get the author ``<a>`` element from a table ``<tr>`` element. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: author name ``<a>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... book_author = get_author_book_author(book_tr) ... print(book_author.text, book_author.get('href')) Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
scrapereads/scrape.py
get_author_book_author
arthurdjn/scrape-goodreads
python
def get_author_book_author(book_tr): "Get the author ``<a>`` element from a table ``<tr>`` element.\n\n Args:\n book_tr (bs4.element.Tag): ``<tr>`` book element.\n\n Returns:\n bs4.element.Tag: author name ``<a>`` element.\n\n Examples::\n >>> for book_tr in scrape_author_books(soup):\n ... book_author = get_author_book_author(book_tr)\n ... print(book_author.text, book_author.get('href'))\n Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath\n Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath\n Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath\n Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath\n Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath\n\n " return book_tr.find('a', attrs={'class': 'authorName'})
def get_author_book_ratings(book_tr):
    """Get the ratings ``<span>`` element from a table ``<tr>`` element from an author page.

    Args:
        book_tr (bs4.element.Tag): ``<tr>`` book element.

    Returns:
        bs4.element.Tag: ratings ``<span>`` element.

    Examples::
        >>> for book_tr in scrape_author_books(soup):
        ...     ratings_span = get_author_book_ratings(book_tr)
        ...     print(ratings_span.contents[-1])
        4.55 avg rating — 2,414 ratings
        3.77 avg rating — 1,689 ratings
        4.28 avg rating — 892 ratings
        4.54 avg rating — 490 ratings
        ...

    """
    return book_tr.find('span', attrs={'class': 'minirating'})
-3,227,250,309,905,705,500
Get the ratings ``<span>`` element from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: ratings ``<span>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... ratings_span = get_author_book_ratings(book_tr) ... print(ratings_span.contents[-1]) 4.55 avg rating — 2,414 ratings 3.77 avg rating — 1,689 ratings 4.28 avg rating — 892 ratings 4.54 avg rating — 490 ratings ...
scrapereads/scrape.py
get_author_book_ratings
arthurdjn/scrape-goodreads
python
def get_author_book_edition(book_tr): "Get the edition ``<a>`` element from a table ``<tr>`` element from an author page.\n\n Args:\n book_tr (bs4.element.Tag): ``<tr>`` book element.\n\n Returns:\n bs4.element.Tag: book edition ``<a>`` element.\n\n Examples::\n >>> for book_tr in scrape_author_books(soup):\n ... book_edition = get_author_book_edition(book_tr)\n ... if book_edition:\n ... print(book_edition.text, book_edition.get('href'))\n ... print()\n 493 editions /work/editions/1385044-the-bell-jar\n 80 editions /work/editions/1185316-ariel\n 30 editions /work/editions/1003095-the-collected-poems\n 45 editions /work/editions/3094683-the-unabridged-journals-of-sylvia-plath\n ...\n\n " book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'}) return book_details.find('a', attrs={'class': 'greyText'})
8,646,870,124,449,000,000
Get the edition ``<a>`` element from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: book edition ``<a>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... book_edition = get_author_book_edition(book_tr) ... if book_edition: ... print(book_edition.text, book_edition.get('href')) ... print() 493 editions /work/editions/1385044-the-bell-jar 80 editions /work/editions/1185316-ariel 30 editions /work/editions/1003095-the-collected-poems 45 editions /work/editions/3094683-the-unabridged-journals-of-sylvia-plath ...
scrapereads/scrape.py
get_author_book_edition
arthurdjn/scrape-goodreads
python
def get_author_book_edition(book_tr): "Get the edition ``<a>`` element from a table ``<tr>`` element from an author page.\n\n Args:\n book_tr (bs4.element.Tag): ``<tr>`` book element.\n\n Returns:\n bs4.element.Tag: book edition ``<a>`` element.\n\n Examples::\n >>> for book_tr in scrape_author_books(soup):\n ... book_edition = get_author_book_edition(book_tr)\n ... if book_edition:\n ... print(book_edition.text, book_edition.get('href'))\n ... print()\n 493 editions /work/editions/1385044-the-bell-jar\n 80 editions /work/editions/1185316-ariel\n 30 editions /work/editions/1003095-the-collected-poems\n 45 editions /work/editions/3094683-the-unabridged-journals-of-sylvia-plath\n ...\n\n " book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'}) return book_details.find('a', attrs={'class': 'greyText'})
def get_author_book_date(book_tr):
    """Get the published date from a table ``<tr>`` element from an author page.

    Args:
        book_tr (bs4.element.Tag): ``<tr>`` book element.

    Returns:
        int: date of publication

    Examples::
        >>> for book_tr in scrape_author_books(soup):
        ...     book_date = get_author_book_date(book_tr)
        ...     print(book_date)
        None
        None
        1958
        2009
        ...

    """
    book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'})
    book_publish = book_details.contents[-1].replace('—', '').replace('\n', '')
    book_date = book_publish.replace('published', '').strip()
    book_date = eval(book_date) if (book_date != '') else None
    return book_date
5,196,297,577,235,133,000
Get the published date from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: int: date of publication Examples:: >>> for book_tr in scrape_author_books(soup): ... book_date = get_author_book_date(book_tr) ... print(book_date) None None 1958 2009 ...
scrapereads/scrape.py
get_author_book_date
arthurdjn/scrape-goodreads
python
def get_book_quote_page(soup):
    """Find the ``<a>`` element pointing to the quote page of a book.

    Args:
        soup (bs4.element.Tag):

    Returns:

    """
    quote_div = soup.findAll('div', attrs={'class': ' clearFloats bigBox'})
    if quote_div:
        return quote_div[-1].find('a')
    return None
-3,858,875,114,353,949,700
Find the ``<a>`` element pointing to the quote page of a book. Args: soup (bs4.element.Tag): Returns:
scrapereads/scrape.py
get_book_quote_page
arthurdjn/scrape-goodreads
python
def find_divisors(x):
    """
    This is the "function to find divisors in order to find generators" module.
    This DocTest verifies that the module is correctly calculating all divisors
    of a number x.

    >>> find_divisors(10)
    [1, 2, 5, 10]

    >>> find_divisors(112)
    [1, 2, 4, 7, 8, 14, 16, 28, 56, 112]
    """
    divisors = [i for i in range(1, x + 1) if (x % i) == 0]
    return divisors
-782,998,713,240,490,500
This is the "function to find divisors in order to find generators" module. This DocTest verifies that the module is correctly calculating all divisors of a number x. >>> find_divisors(10) [1, 2, 5, 10] >>> find_divisors(112) [1, 2, 4, 7, 8, 14, 16, 28, 56, 112]
libsig/FZZ_unique_ring_signature.py
find_divisors
vs-uulm/libsig_pets
python
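The doctests above can be exercised with the standard library's doctest module; a minimal sketch, assuming the file is importable as libsig.FZZ_unique_ring_signature (inferred from the path field, adjust to the actual package layout):

import doctest
import libsig.FZZ_unique_ring_signature as urs  # assumed import path

# Runs the >>> examples found in the module's docstrings and reports failures.
results = doctest.testmod(urs, verbose=True)
print(results.attempted, results.failed)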
def find_generator(p):
    """
    The order of any element in a group can be divided by p-1.
    Step 1: Calculate all Divisors.
    Step 2: Test for a random element e of G whether e to the power of a Divisor is 1.
            if neither is one but e to the power of p-1, a generator is found.
    """
    testGen = randint(1, p)
    listTested = []
    listTested.append(testGen)
    divisors = find_divisors(p)
    while len(listTested) < (p - 1):
        if testGen in listTested:
            for div in divisors:
                testPotency = math.pow(testGen, div) % (p + 1)
                if (testPotency == 1.0) and (div != divisors[-1]):
                    break
                elif (testPotency == 1.0) and (div == divisors[-1]):
                    return testGen
        testGen = randint(1, p)
        listTested.append(testGen)
3,486,228,835,377,068,500
The order of any element in a group can be divided by p-1. Step 1: Calculate all Divisors. Step 2: Test for a random element e of G whether e to the power of a Divisor is 1. if neither is one but e to the power of p-1, a generator is found.
libsig/FZZ_unique_ring_signature.py
find_generator
vs-uulm/libsig_pets
python
def list_to_string(input_list):
    """
    convert a list into a concatenated string of all its elements
    """
    result = ''.join(map(str, input_list))
    return result
3,923,680,371,048,306,000
convert a list into a concatenated string of all its elements
libsig/FZZ_unique_ring_signature.py
list_to_string
vs-uulm/libsig_pets
python
@staticmethod
def ringsign(x, pubkey, message, verbose=False):
    """
    input: x is the privkey from user i,
           | all public keys: pubkeys,
           | the message

    output: (R,m, (H(mR)^xi), c1,t1,...,cn,tn),
            | R: all the pubkeys concatenated,
            | cj,tj: random number within Zq
    """
    R = list_to_string(pubkey)
    g = UniqueRingSignature.g
    q = UniqueRingSignature.q
    h1 = UniqueRingSignature.h1
    h2 = UniqueRingSignature.h2
    mR = message + str(R)
    C = list()
    T = list()
    A = list()
    B = list()
    ri = -1
    for i in pubkey:
        a = 0
        b = 0
        c = 0
        t = 0
        if pow(g, x, q) != i:
            (c, t) = (randint(1, q), randint(1, q))
            a = (pow(g, t) * pow(int(i), c)) % q
            b = (pow(h1(mR), t) * pow(pow(h1(mR), x), c)) % q
        else:
            ri = randint(1, q)
            a = pow(g, ri, q)
            b = pow(h1(mR), ri, q)
            c = -1
            t = -1
        A.append(a)
        B.append(b)
        C.append(c)
        T.append(t)
    cj = 0
    ab = ''.join(('{}{}'.format(*t) for t in zip(A, B)))
    usernr = 0
    for i in range(len(pubkey)):
        if pubkey[i] != pow(g, x, q):
            cj = (cj + C[i]) % q
        else:
            usernr = i
    ci = h2((message + R) + ab) - (cj % (q - 1))
    C[usernr] = ci
    ti = (ri - (C[usernr] * x)) % (q - 1)
    if ti < 0:
        ti = (q - 1) + ti
    T[usernr] = ti
    ct = ','.join(('{},{}'.format(*t) for t in zip(C, T)))
    result = ((((R + ',') + message) + ',') + str(pow(h1(mR), x, q)) + ',') + ct
    if verbose == True:
        print('RingSign Result: ' + result)
        print('---- RingSign Completed ---- \n')
    return result
-5,456,669,802,386,871,000
input: x is the privkey from user i, | all public keys: pubkeys, | the message output: (R,m, (H(mR)^xi), c1,t1,...,cn,tn), | R: all the pubkeys concatenated, | cj,tj: random number within Zq
libsig/FZZ_unique_ring_signature.py
ringsign
vs-uulm/libsig_pets
python
@staticmethod
def verify(R, message, signature, verbose=False):
    """
    Input: the public keys R
           | the message
           | the signature computed with ringsign

    Output: whether the message was signed by R or not
    """
    g = UniqueRingSignature.g
    q = UniqueRingSignature.q
    h1 = UniqueRingSignature.h1
    h2 = UniqueRingSignature.h2
    parsed = signature.split(',')
    tt = int(parsed[2])
    cjs = list()
    tjs = list()
    for i in range(0, int((len(parsed) / 2) - 1)):
        cjs.append(int(parsed[3 + (2 * i)]))
        tjs.append(int(parsed[4 + (2 * i)]))
    mR = list_to_string(R)
    val1 = sum(cjs) % q
    gyh1 = ''
    for i in range(len(tjs)):
        if tjs[i] < 0:
            tjs[i] = (q - 1) + tjs[i]
        if cjs[i] < 0:
            cjs[i] = (q - 1) + cjs[i]
        gy = (pow(g, tjs[i], q) * pow(R[i], cjs[i], q)) % q
        h = (pow(int(h1(message + mR)), int(tjs[i])) * pow(tt, int(cjs[i]))) % q
        gyh1 = (gyh1 + str(gy)) + str(h)
    val2 = str(h2((message + list_to_string(R)) + gyh1))
    if int(val1) == int(val2):
        if verbose == True:
            print('Signature is valid!\n')
            print('Common Result: ' + str(val1))
            print('---- Validation Completed ---- \n')
        return True
    else:
        if verbose == True:
            print('Signature is not valid!\n')
            print((str(val1) + ' != ') + str(val2))
            print('---- Validation Completed ---- \n')
        return False
-5,893,289,199,147,430,000
Input: the public keys R | the message | the signature computed with ringsign Output: whether the message was signed by R or not
libsig/FZZ_unique_ring_signature.py
verify
vs-uulm/libsig_pets
python
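A heavily hedged usage sketch for the two static methods above, assuming the surrounding UniqueRingSignature class exposes the group parameters g, q and the hash functions h1, h2 (as the method bodies reference them), and that each ring member i holds a private key x_i with public key pow(g, x_i, q); the key sizes below are placeholders, not a secure parameter choice:

from random import randint

# Hypothetical ring of two members; UniqueRingSignature.g / .q are assumed
# to be configured elsewhere in the module.
x1, x2 = randint(1, 100), randint(1, 100)
y1 = pow(UniqueRingSignature.g, x1, UniqueRingSignature.q)
y2 = pow(UniqueRingSignature.g, x2, UniqueRingSignature.q)

sig = UniqueRingSignature.ringsign(x1, [y1, y2], 'hello ring', verbose=True)
print(UniqueRingSignature.verify([y1, y2], 'hello ring', sig, verbose=True))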
def current_flow_betweenness_centrality_subset(G, sources, targets,
                                               normalized=True, weight='weight',
                                               dtype=float, solver='lu'):
    """Compute current-flow betweenness centrality for subsets of nodes.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    sources: list of nodes
      Nodes to use as sources for current

    targets: list of nodes
      Nodes to use as sinks for current

    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by b=b/(n-1)(n-2) where
      n is the number of nodes in G.

    weight : string or None, optional (default='weight')
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
      Type of linear solver to use for computing the flow matrix.
      Options are "full" (uses most memory), "lu" (recommended), and
      "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
      Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    approximate_current_flow_betweenness_centrality
    betweenness_centrality
    edge_betweenness_centrality
    edge_current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
    time [1]_, where `I(n-1)` is the time needed to compute the
    inverse Laplacian. For a full matrix this is `O(n^3)` but using
    sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
    Laplacian matrix condition number.

    The space required is `O(nw)` where `w` is the width of the sparse
    Laplacian matrix. Worse case is `w=n` for `O(n^2)`.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm. Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires NumPy ',
                          'http://scipy.org/')
    try:
        import scipy
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires SciPy ',
                          'http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError('current_flow_betweenness_centrality() ',
                               'not defined for digraphs.')
    if not nx.is_connected(G):
        raise nx.NetworkXError('Graph not connected.')
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    mapping = dict(zip(ordering, range(n)))
    H = nx.relabel_nodes(G, mapping)
    betweenness = dict.fromkeys(H, 0.0)
    for (row, (s, t)) in flow_matrix_row(H, weight=weight, dtype=dtype,
                                         solver=solver):
        for ss in sources:
            i = mapping[ss]
            for tt in targets:
                j = mapping[tt]
                betweenness[s] += 0.5 * np.abs(row[i] - row[j])
                betweenness[t] += 0.5 * np.abs(row[i] - row[j])
    if normalized:
        nb = (n - 1.0) * (n - 2.0)
    else:
        nb = 2.0
    for v in H:
        betweenness[v] = (betweenness[v] / nb) + (1.0 / (2 - n))
    return dict((ordering[k], v) for (k, v) in betweenness.items())
8,374,067,259,139,407,000
Compute current-flow betweenness centrality for subsets of nodes. Current-flow betweenness centrality uses an electrical current model for information spreading in contrast to betweenness centrality which uses shortest paths. Current-flow betweenness centrality is also known as random-walk betweenness centrality [2]_. Parameters ---------- G : graph A NetworkX graph sources: list of nodes Nodes to use as sources for current targets: list of nodes Nodes to use as sinks for current normalized : bool, optional (default=True) If True the betweenness values are normalized by b=b/(n-1)(n-2) where n is the number of nodes in G. weight : string or None, optional (default='weight') Key for edge data used as the edge weight. If None, then use 1 as each edge weight. dtype: data type (float) Default data type for internal matrices. Set to np.float32 for lower memory consumption. solver: string (default='lu') Type of linear solver to use for computing the flow matrix. Options are "full" (uses most memory), "lu" (recommended), and "cg" (uses least memory). Returns ------- nodes : dictionary Dictionary of nodes with betweenness centrality as the value. See Also -------- approximate_current_flow_betweenness_centrality betweenness_centrality edge_betweenness_centrality edge_current_flow_betweenness_centrality Notes ----- Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)` time [1]_, where `I(n-1)` is the time needed to compute the inverse Laplacian. For a full matrix this is `O(n^3)` but using sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the Laplacian matrix condition number. The space required is `O(nw) where `w` is the width of the sparse Laplacian matrix. Worse case is `w=n` for `O(n^2)`. If the edges have a 'weight' attribute they will be used as weights in this algorithm. Unspecified weights are set to 1. References ---------- .. [1] Centrality Measures Based on Current Flow. Ulrik Brandes and Daniel Fleischer, Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). LNCS 3404, pp. 533-544. Springer-Verlag, 2005. http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf .. [2] A measure of betweenness centrality based on random walks, M. E. J. Newman, Social Networks 27, 39-54 (2005).
networkx/algorithms/centrality/current_flow_betweenness_subset.py
current_flow_betweenness_centrality_subset
AllenDowney/networkx
python
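A brief usage sketch relying only on the public networkx API, where this routine is exposed as networkx.current_flow_betweenness_centrality_subset; the graph and source/target nodes are arbitrary placeholders, and NumPy/SciPy must be installed:

import networkx as nx

G = nx.grid_2d_graph(3, 3)  # small connected, undirected graph
bc = nx.current_flow_betweenness_centrality_subset(
    G, sources=[(0, 0)], targets=[(2, 2)], normalized=True)
print(sorted(bc.items(), key=lambda kv: -kv[1])[:3])  # three highest-ranked nodes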
def edge_current_flow_betweenness_centrality_subset(G, sources, targets,
                                                    normalized=True, weight='weight',
                                                    dtype=float, solver='lu'):
    """Compute current-flow betweenness centrality for edges using subsets
    of nodes.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph

    sources: list of nodes
      Nodes to use as sources for current

    targets: list of nodes
      Nodes to use as sinks for current

    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by b=b/(n-1)(n-2) where
      n is the number of nodes in G.

    weight : string or None, optional (default='weight')
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.

    dtype: data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
      Type of linear solver to use for computing the flow matrix.
      Options are "full" (uses most memory), "lu" (recommended), and
      "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
      Dictionary of edge tuples with betweenness centrality as the value.

    See Also
    --------
    betweenness_centrality
    edge_betweenness_centrality
    current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
    time [1]_, where `I(n-1)` is the time needed to compute the
    inverse Laplacian. For a full matrix this is `O(n^3)` but using
    sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
    Laplacian matrix condition number.

    The space required is `O(nw)` where `w` is the width of the sparse
    Laplacian matrix. Worse case is `w=n` for `O(n^2)`.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm. Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires NumPy ',
                          'http://scipy.org/')
    try:
        import scipy
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires SciPy ',
                          'http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError('edge_current_flow_betweenness_centrality ',
                               'not defined for digraphs.')
    if not nx.is_connected(G):
        raise nx.NetworkXError('Graph not connected.')
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    mapping = dict(zip(ordering, range(n)))
    H = nx.relabel_nodes(G, mapping)
    betweenness = dict.fromkeys(H.edges(), 0.0)
    if normalized:
        nb = (n - 1.0) * (n - 2.0)
    else:
        nb = 2.0
    for (row, e) in flow_matrix_row(H, weight=weight, dtype=dtype,
                                    solver=solver):
        for ss in sources:
            i = mapping[ss]
            for tt in targets:
                j = mapping[tt]
                betweenness[e] += 0.5 * np.abs(row[i] - row[j])
        betweenness[e] /= nb
    return dict(((ordering[s], ordering[t]), v)
                for ((s, t), v) in betweenness.items())
-270,612,836,393,175,170
Compute current-flow betweenness centrality for edges using subsets of nodes. Current-flow betweenness centrality uses an electrical current model for information spreading in contrast to betweenness centrality which uses shortest paths. Current-flow betweenness centrality is also known as random-walk betweenness centrality [2]_. Parameters ---------- G : graph A NetworkX graph sources: list of nodes Nodes to use as sources for current targets: list of nodes Nodes to use as sinks for current normalized : bool, optional (default=True) If True the betweenness values are normalized by b=b/(n-1)(n-2) where n is the number of nodes in G. weight : string or None, optional (default='weight') Key for edge data used as the edge weight. If None, then use 1 as each edge weight. dtype: data type (float) Default data type for internal matrices. Set to np.float32 for lower memory consumption. solver: string (default='lu') Type of linear solver to use for computing the flow matrix. Options are "full" (uses most memory), "lu" (recommended), and "cg" (uses least memory). Returns ------- nodes : dictionary Dictionary of edge tuples with betweenness centrality as the value. See Also -------- betweenness_centrality edge_betweenness_centrality current_flow_betweenness_centrality Notes ----- Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)` time [1]_, where `I(n-1)` is the time needed to compute the inverse Laplacian. For a full matrix this is `O(n^3)` but using sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the Laplacian matrix condition number. The space required is `O(nw) where `w` is the width of the sparse Laplacian matrix. Worse case is `w=n` for `O(n^2)`. If the edges have a 'weight' attribute they will be used as weights in this algorithm. Unspecified weights are set to 1. References ---------- .. [1] Centrality Measures Based on Current Flow. Ulrik Brandes and Daniel Fleischer, Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). LNCS 3404, pp. 533-544. Springer-Verlag, 2005. http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf .. [2] A measure of betweenness centrality based on random walks, M. E. J. Newman, Social Networks 27, 39-54 (2005).
networkx/algorithms/centrality/current_flow_betweenness_subset.py
edge_current_flow_betweenness_centrality_subset
AllenDowney/networkx
python
def edge_current_flow_betweenness_centrality_subset(G, sources, targets, normalized=True, weight='weight', dtype=float, solver='lu'): 'Compute current-flow betweenness centrality for edges using subsets \n of nodes.\n\n Current-flow betweenness centrality uses an electrical current\n model for information spreading in contrast to betweenness\n centrality which uses shortest paths.\n\n Current-flow betweenness centrality is also known as\n random-walk betweenness centrality [2]_.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph \n\n sources: list of nodes\n Nodes to use as sources for current\n\n targets: list of nodes\n Nodes to use as sinks for current\n\n normalized : bool, optional (default=True)\n If True the betweenness values are normalized by b=b/(n-1)(n-2) where\n n is the number of nodes in G.\n\n weight : string or None, optional (default=\'weight\')\n Key for edge data used as the edge weight.\n If None, then use 1 as each edge weight.\n\n dtype: data type (float)\n Default data type for internal matrices.\n Set to np.float32 for lower memory consumption.\n\n solver: string (default=\'lu\')\n Type of linear solver to use for computing the flow matrix.\n Options are "full" (uses most memory), "lu" (recommended), and \n "cg" (uses least memory).\n\n Returns\n -------\n nodes : dictionary\n Dictionary of edge tuples with betweenness centrality as the value.\n \n See Also\n --------\n betweenness_centrality\n edge_betweenness_centrality\n current_flow_betweenness_centrality\n\n Notes\n -----\n Current-flow betweenness can be computed in `O(I(n-1)+mn \\log n)`\n time [1]_, where `I(n-1)` is the time needed to compute the \n inverse Laplacian. For a full matrix this is `O(n^3)` but using\n sparse methods you can achieve `O(nm{\\sqrt k})` where `k` is the\n Laplacian matrix condition number. \n\n The space required is `O(nw) where `w` is the width of the sparse\n Laplacian matrix. Worse case is `w=n` for `O(n^2)`.\n\n If the edges have a \'weight\' attribute they will be used as \n weights in this algorithm. Unspecified weights are set to 1.\n\n References\n ----------\n .. [1] Centrality Measures Based on Current Flow. \n Ulrik Brandes and Daniel Fleischer,\n Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS \'05). \n LNCS 3404, pp. 533-544. Springer-Verlag, 2005. \n http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf\n\n .. [2] A measure of betweenness centrality based on random walks, \n M. E. J. 
Newman, Social Networks 27, 39-54 (2005).\n ' from networkx.utils import reverse_cuthill_mckee_ordering try: import numpy as np except ImportError: raise ImportError('current_flow_betweenness_centrality requires NumPy ', 'http://scipy.org/') try: import scipy except ImportError: raise ImportError('current_flow_betweenness_centrality requires SciPy ', 'http://scipy.org/') if G.is_directed(): raise nx.NetworkXError('edge_current_flow_betweenness_centrality ', 'not defined for digraphs.') if (not nx.is_connected(G)): raise nx.NetworkXError('Graph not connected.') n = G.number_of_nodes() ordering = list(reverse_cuthill_mckee_ordering(G)) mapping = dict(zip(ordering, range(n))) H = nx.relabel_nodes(G, mapping) betweenness = dict.fromkeys(H.edges(), 0.0) if normalized: nb = ((n - 1.0) * (n - 2.0)) else: nb = 2.0 for (row, e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): for ss in sources: i = mapping[ss] for tt in targets: j = mapping[tt] betweenness[e] += (0.5 * np.abs((row[i] - row[j]))) betweenness[e] /= nb return dict((((ordering[s], ordering[t]), v) for ((s, t), v) in betweenness.items()))
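A minimal usage sketch for the edge_current_flow_betweenness_centrality_subset function above, run on a small hypothetical graph (assumes networkx with NumPy/SciPy available; the graph and node choices are illustrative only):
    import networkx as nx

    G = nx.grid_2d_graph(3, 3)          # small, undirected, connected graph
    sources = [(0, 0)]
    targets = [(2, 2)]
    ecf = nx.edge_current_flow_betweenness_centrality_subset(
        G, sources, targets, normalized=True, solver='lu')
    busiest_edge = max(ecf, key=ecf.get)  # edge carrying the most current between the subsets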
def is_inside_offset(inner, outer): 'Checks if the first offset is contained in the second offset\n\n Args:\n inner: inner offset tuple\n outer: outer offset tuple\n\n Returns: bool' return (outer[0] <= inner[0] <= inner[1] <= outer[1])
6,477,983,249,331,503,000
Checks if the first offset is contained in the second offset Args: inner: inner offset tuple outer: outer offset tuple Returns: bool
solcast/nodes.py
is_inside_offset
danhper/py-solc-ast
python
def is_inside_offset(inner, outer): 'Checks if the first offset is contained in the second offset\n\n Args:\n inner: inner offset tuple\n outer: outer offset tuple\n\n Returns: bool' return (outer[0] <= inner[0] <= inner[1] <= outer[1])
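A tiny illustrative check of is_inside_offset with made-up source offsets (assumes the solcast package is importable from the path listed above):
    from solcast.nodes import is_inside_offset

    assert is_inside_offset((10, 20), (0, 100))       # (10, 20) lies inside (0, 100)
    assert not is_inside_offset((10, 120), (0, 100))  # inner end extends past the outer offset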
def children(self, depth=None, include_self=False, include_parents=True, include_children=True, required_offset=None, offset_limits=None, filters=None, exclude_filter=None): "Get childen nodes of this node.\n\n Arguments:\n depth: Number of levels of children to traverse. 0 returns only this node.\n include_self: Includes this node in the results.\n include_parents: Includes nodes that match in the results, when they also have\n child nodes that match.\n include_children: If True, as soon as a match is found it's children will not\n be included in the search.\n required_offset: Only match nodes with a source offset that contains this offset.\n offset_limits: Only match nodes when their source offset is contained inside\n this source offset.\n filters: Dictionary of {attribute: value} that children must match. Can also\n be given as a list of dicts, children that match one of the dicts\n will be returned.\n exclude_filter: Dictionary of {attribute:value} that children cannot match.\n\n Returns:\n List of node objects." if (filters is None): filters = {} if (exclude_filter is None): exclude_filter = {} if isinstance(filters, dict): filters = [filters] filter_fn = functools.partial(_check_filters, required_offset, offset_limits, filters, exclude_filter) find_fn = functools.partial(_find_children, filter_fn, include_parents, include_children) result = find_fn(find_fn, depth, self) if (include_self or (not result) or (result[0] != self)): return result return result[1:]
870,645,974,598,897,000
Get children nodes of this node. Arguments: depth: Number of levels of children to traverse. 0 returns only this node. include_self: Includes this node in the results. include_parents: Includes nodes that match in the results, when they also have child nodes that match. include_children: If True, as soon as a match is found its children will not be included in the search. required_offset: Only match nodes with a source offset that contains this offset. offset_limits: Only match nodes when their source offset is contained inside this source offset. filters: Dictionary of {attribute: value} that children must match. Can also be given as a list of dicts, children that match one of the dicts will be returned. exclude_filter: Dictionary of {attribute:value} that children cannot match. Returns: List of node objects.
solcast/nodes.py
children
danhper/py-solc-ast
python
def children(self, depth=None, include_self=False, include_parents=True, include_children=True, required_offset=None, offset_limits=None, filters=None, exclude_filter=None): "Get childen nodes of this node.\n\n Arguments:\n depth: Number of levels of children to traverse. 0 returns only this node.\n include_self: Includes this node in the results.\n include_parents: Includes nodes that match in the results, when they also have\n child nodes that match.\n include_children: If True, as soon as a match is found it's children will not\n be included in the search.\n required_offset: Only match nodes with a source offset that contains this offset.\n offset_limits: Only match nodes when their source offset is contained inside\n this source offset.\n filters: Dictionary of {attribute: value} that children must match. Can also\n be given as a list of dicts, children that match one of the dicts\n will be returned.\n exclude_filter: Dictionary of {attribute:value} that children cannot match.\n\n Returns:\n List of node objects." if (filters is None): filters = {} if (exclude_filter is None): exclude_filter = {} if isinstance(filters, dict): filters = [filters] filter_fn = functools.partial(_check_filters, required_offset, offset_limits, filters, exclude_filter) find_fn = functools.partial(_find_children, filter_fn, include_parents, include_children) result = find_fn(find_fn, depth, self) if (include_self or (not result) or (result[0] != self)): return result return result[1:]
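A hypothetical sketch of the children filter semantics; source_node is assumed to be a solcast node obtained from a compiled Solidity AST, and the nodeType/name values are illustrative only:
    fn_defs = source_node.children(
        filters={'nodeType': 'FunctionDefinition'},   # attribute/value pairs a child must match
        exclude_filter={'name': 'constructor'},       # children matching this are skipped
        include_children=False,                       # once a node matches, do not descend further
    )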
def parents(self, depth=(- 1), filters=None): "Get parent nodes of this node.\n\n Arguments:\n depth: Depth limit. If given as a negative value, it will be subtracted\n from this object's depth.\n filters: Dictionary of {attribute: value} that parents must match.\n\n Returns: list of nodes" if (filters and (not isinstance(filters, dict))): raise TypeError('Filters must be a dict') if (depth < 0): depth = (self.depth + depth) if ((depth >= self.depth) or (depth < 0)): raise IndexError('Given depth exceeds node depth') node_list = [] parent = self while True: parent = parent._parent if ((not filters) or _check_filter(parent, filters, {})): node_list.append(parent) if (parent.depth == depth): return node_list
-8,765,994,480,727,989,000
Get parent nodes of this node. Arguments: depth: Depth limit. If given as a negative value, it will be subtracted from this object's depth. filters: Dictionary of {attribute: value} that parents must match. Returns: list of nodes
solcast/nodes.py
parents
danhper/py-solc-ast
python
def parents(self, depth=(- 1), filters=None): "Get parent nodes of this node.\n\n Arguments:\n depth: Depth limit. If given as a negative value, it will be subtracted\n from this object's depth.\n filters: Dictionary of {attribute: value} that parents must match.\n\n Returns: list of nodes" if (filters and (not isinstance(filters, dict))): raise TypeError('Filters must be a dict') if (depth < 0): depth = (self.depth + depth) if ((depth >= self.depth) or (depth < 0)): raise IndexError('Given depth exceeds node depth') node_list = [] parent = self while True: parent = parent._parent if ((not filters) or _check_filter(parent, filters, {})): node_list.append(parent) if (parent.depth == depth): return node_list
def parent(self, depth=(- 1), filters=None): "Get a parent node of this node.\n\n Arguments:\n depth: Depth limit. If given as a negative value, it will be subtracted\n from this object's depth. The parent at this exact depth is returned.\n filters: Dictionary of {attribute: value} that the parent must match.\n\n If a filter value is given, will return the first parent that meets the filters\n up to the given depth. If none is found, returns None.\n\n If no filter is given, returns the parent at the given depth." if (filters and (not isinstance(filters, dict))): raise TypeError('Filters must be a dict') if (depth < 0): depth = (self.depth + depth) if ((depth >= self.depth) or (depth < 0)): raise IndexError('Given depth exceeds node depth') parent = self while (parent.depth > depth): parent = parent._parent if ((parent.depth == depth) and (not filters)): return parent if (filters and _check_filter(parent, filters, {})): return parent return None
-2,546,208,489,780,907,000
Get a parent node of this node. Arguments: depth: Depth limit. If given as a negative value, it will be subtracted from this object's depth. The parent at this exact depth is returned. filters: Dictionary of {attribute: value} that the parent must match. If a filter value is given, will return the first parent that meets the filters up to the given depth. If none is found, returns None. If no filter is given, returns the parent at the given depth.
solcast/nodes.py
parent
danhper/py-solc-ast
python
def parent(self, depth=(- 1), filters=None): "Get a parent node of this node.\n\n Arguments:\n depth: Depth limit. If given as a negative value, it will be subtracted\n from this object's depth. The parent at this exact depth is returned.\n filters: Dictionary of {attribute: value} that the parent must match.\n\n If a filter value is given, will return the first parent that meets the filters\n up to the given depth. If none is found, returns None.\n\n If no filter is given, returns the parent at the given depth." if (filters and (not isinstance(filters, dict))): raise TypeError('Filters must be a dict') if (depth < 0): depth = (self.depth + depth) if ((depth >= self.depth) or (depth < 0)): raise IndexError('Given depth exceeds node depth') parent = self while (parent.depth > depth): parent = parent._parent if ((parent.depth == depth) and (not filters)): return parent if (filters and _check_filter(parent, filters, {})): return parent return None
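A hypothetical sketch of parent/parents on a nested solcast node; node is assumed to sit somewhere inside a ContractDefinition, and the filter value is illustrative:
    contract = node.parent(filters={'nodeType': 'ContractDefinition'})  # first ancestor matching the filter
    ancestors = node.parents(depth=0)                                   # every ancestor up to the source unit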
def is_child_of(self, node): 'Checks if this object is a child of the given node object.' if (node.depth >= self.depth): return False return (self.parent(node.depth) == node)
7,342,769,154,843,425,000
Checks if this object is a child of the given node object.
solcast/nodes.py
is_child_of
danhper/py-solc-ast
python
def is_child_of(self, node): if (node.depth >= self.depth): return False return (self.parent(node.depth) == node)
def is_parent_of(self, node): 'Checks if this object is a parent of the given node object.' if (node.depth <= self.depth): return False return (node.parent(self.depth) == self)
7,078,512,650,039,579,000
Checks if this object is a parent of the given node object.
solcast/nodes.py
is_parent_of
danhper/py-solc-ast
python
def is_parent_of(self, node): if (node.depth <= self.depth): return False return (node.parent(self.depth) == self)
def get(self, key, default=None): '\n Gets an attribute from this node, if that attribute exists.\n\n Arguments:\n key: Field name to return. May contain decimals to return a value\n from a child node.\n default: Default value to return.\n\n Returns: Field value if it exists. Default value if not.\n ' if (key is None): raise TypeError('Cannot match against None') obj = self for k in key.split('.'): if isinstance(obj, dict): obj = obj.get(k) else: obj = getattr(obj, k, None) return (obj or default)
1,159,391,490,741,558,500
Gets an attribute from this node, if that attribute exists. Arguments: key: Field name to return. May contain dots ('.') to return a value from a nested child node. default: Default value to return. Returns: Field value if it exists. Default value if not.
solcast/nodes.py
get
danhper/py-solc-ast
python
def get(self, key, default=None): '\n Gets an attribute from this node, if that attribute exists.\n\n Arguments:\n key: Field name to return. May contain decimals to return a value\n from a child node.\n default: Default value to return.\n\n Returns: Field value if it exists. Default value if not.\n ' if (key is None): raise TypeError('Cannot match against None') obj = self for k in key.split('.'): if isinstance(obj, dict): obj = obj.get(k) else: obj = getattr(obj, k, None) return (obj or default)
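A hypothetical sketch of the dotted-key lookup provided by get; node is assumed to be a solcast node and the attribute names are illustrative only:
    visibility = node.get('visibility', default='internal')  # plain attribute with a fallback
    base_name = node.get('typeName.name')                    # dots descend into child values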
def pqcost(gencost, ng, on=None): 'Splits the gencost variable into two pieces if costs are given for Qg.\n\n Checks whether C{gencost} has cost information for reactive power\n generation (rows C{ng+1} to C{2*ng}). If so, it returns the first C{ng}\n rows in C{pcost} and the last C{ng} rows in C{qcost}. Otherwise, leaves\n C{qcost} empty. Also does some error checking.\n If C{on} is specified (list of indices of generators which are on line)\n it only returns the rows corresponding to these generators.\n\n @author: Ray Zimmerman (PSERC Cornell)\n ' if (on is None): on = arange(ng) if (gencost.shape[0] == ng): pcost = gencost[on, :] qcost = array([]) elif (gencost.shape[0] == (2 * ng)): pcost = gencost[on, :] qcost = gencost[(on + ng), :] else: stderr.write('pqcost: gencost has wrong number of rows\n') return (pcost, qcost)
-4,256,423,973,439,093,000
Splits the gencost variable into two pieces if costs are given for Qg. Checks whether C{gencost} has cost information for reactive power generation (rows C{ng+1} to C{2*ng}). If so, it returns the first C{ng} rows in C{pcost} and the last C{ng} rows in C{qcost}. Otherwise, leaves C{qcost} empty. Also does some error checking. If C{on} is specified (list of indices of generators which are on line) it only returns the rows corresponding to these generators. @author: Ray Zimmerman (PSERC Cornell)
pandapower/pypower/pqcost.py
pqcost
AdrienGougeon/pandapower
python
def pqcost(gencost, ng, on=None): 'Splits the gencost variable into two pieces if costs are given for Qg.\n\n Checks whether C{gencost} has cost information for reactive power\n generation (rows C{ng+1} to C{2*ng}). If so, it returns the first C{ng}\n rows in C{pcost} and the last C{ng} rows in C{qcost}. Otherwise, leaves\n C{qcost} empty. Also does some error checking.\n If C{on} is specified (list of indices of generators which are on line)\n it only returns the rows corresponding to these generators.\n\n @author: Ray Zimmerman (PSERC Cornell)\n ' if (on is None): on = arange(ng) if (gencost.shape[0] == ng): pcost = gencost[on, :] qcost = array([]) elif (gencost.shape[0] == (2 * ng)): pcost = gencost[on, :] qcost = gencost[(on + ng), :] else: stderr.write('pqcost: gencost has wrong number of rows\n') return (pcost, qcost)
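A minimal numeric sketch of pqcost with a hypothetical two-generator polynomial cost table in PYPOWER gencost format (MODEL, STARTUP, SHUTDOWN, NCOST, c2, c1, c0); all values are made up:
    import numpy as np
    from pandapower.pypower.pqcost import pqcost

    ng = 2
    gencost = np.array([
        [2, 0, 0, 3, 0.01, 40.0, 0.0],   # P cost, generator 1
        [2, 0, 0, 3, 0.02, 25.0, 0.0],   # P cost, generator 2
        [2, 0, 0, 3, 0.00,  1.0, 0.0],   # Q cost, generator 1
        [2, 0, 0, 3, 0.00,  1.0, 0.0],   # Q cost, generator 2
    ])
    pcost, qcost = pqcost(gencost, ng)   # first ng rows -> pcost, last ng rows -> qcost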
def update_sarsa_table(sarsa, state, action, reward, next_state, next_action, alpha, gamma): '\n update sarsa state-action pair value, main difference from q learning is that it uses epsilon greedy policy\n return action\n ' next_max = sarsa[(next_state, next_action)] sarsa[(state, action)] = (sarsa[(state, action)] + (alpha * ((reward + (gamma * next_max)) - sarsa[(state, action)])))
5,557,110,078,792,144,000
update sarsa state-action pair value; the main difference from q learning is that the backup uses the value of the next action actually chosen by the epsilon greedy policy (on-policy) rather than the greedy maximum
TD/double_q_learning.py
update_sarsa_table
hadleyhzy34/reinforcement_learning
python
def update_sarsa_table(sarsa, state, action, reward, next_state, next_action, alpha, gamma): '\n update sarsa state-action pair value, main difference from q learning is that it uses epsilon greedy policy\n return action\n ' next_max = sarsa[(next_state, next_action)] sarsa[(state, action)] = (sarsa[(state, action)] + (alpha * ((reward + (gamma * next_max)) - sarsa[(state, action)])))
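A minimal sketch of one SARSA backup on a hypothetical tabular task (16 states, 4 actions); the transition values are made up and the table is a plain NumPy array:
    import numpy as np

    sarsa = np.zeros((16, 4))
    state, action, reward = 0, 1, -1.0
    next_state, next_action = 4, 2
    update_sarsa_table(sarsa, state, action, reward, next_state, next_action,
                       alpha=0.1, gamma=0.99)
    # sarsa[0, 1] is now alpha * (reward + gamma * sarsa[4, 2] - 0.0) = -0.1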
def epsilon_greedy_policy_sarsa(env, state, sarsa, epsilon): '\n epsilon greedy policy for q learning to generate actions\n ' if (random.uniform(0, 1) < epsilon): return env.action_space.sample() else: return np.argmax(sarsa[state])
-8,075,559,200,367,544,000
epsilon greedy policy for sarsa to generate actions
TD/double_q_learning.py
epsilon_greedy_policy_sarsa
hadleyhzy34/reinforcement_learning
python
def epsilon_greedy_policy_sarsa(env, state, sarsa, epsilon): '\n \n ' if (random.uniform(0, 1) < epsilon): return env.action_space.sample() else: return np.argmax(sarsa[state])
def epsilon_greedy_policy(env, state, q, epsilon): '\n epsilon greedy policy for q learning to generate actions\n ' if (random.uniform(0, 1) < epsilon): return env.action_space.sample() else: return np.argmax(q[state])
750,251,734,879,276,300
epsilon greedy policy for q learning to generate actions
TD/double_q_learning.py
epsilon_greedy_policy
hadleyhzy34/reinforcement_learning
python
def epsilon_greedy_policy(env, state, q, epsilon): '\n \n ' if (random.uniform(0, 1) < epsilon): return env.action_space.sample() else: return np.argmax(q[state])
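A self-contained sketch of epsilon_greedy_policy with a stub environment; only action_space.sample() is needed by the helper, and the table sizes are hypothetical:
    import random
    import numpy as np

    class _StubSpace:
        def __init__(self, n):
            self.n = n
        def sample(self):
            return random.randrange(self.n)

    class _StubEnv:
        action_space = _StubSpace(4)

    q = np.zeros((16, 4))
    action = epsilon_greedy_policy(_StubEnv(), 3, q, 0.1)  # greedy pick from q[3] with 10% exploration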
def extendMarkdown(self, md, md_globals): ' Add MetaPreprocessor to Markdown instance. ' md.preprocessors.add('meta', MetaPreprocessor(md), '>normalize_whitespace')
-7,437,616,332,312,968,000
Add MetaPreprocessor to Markdown instance.
venv/lib/python3.6/site-packages/markdown/extensions/meta.py
extendMarkdown
AzDan/Sac-Portal
python
def extendMarkdown(self, md, md_globals): ' ' md.preprocessors.add('meta', MetaPreprocessor(md), '>normalize_whitespace')
def run(self, lines): ' Parse Meta-Data and store in Markdown.Meta. ' meta = {} key = None if (lines and BEGIN_RE.match(lines[0])): lines.pop(0) while lines: line = lines.pop(0) m1 = META_RE.match(line) if ((line.strip() == '') or END_RE.match(line)): break if m1: key = m1.group('key').lower().strip() value = m1.group('value').strip() try: meta[key].append(value) except KeyError: meta[key] = [value] else: m2 = META_MORE_RE.match(line) if (m2 and key): meta[key].append(m2.group('value').strip()) else: lines.insert(0, line) break self.markdown.Meta = meta return lines
4,990,314,545,543,673,000
Parse Meta-Data and store in Markdown.Meta.
venv/lib/python3.6/site-packages/markdown/extensions/meta.py
run
AzDan/Sac-Portal
python
def run(self, lines): ' ' meta = {} key = None if (lines and BEGIN_RE.match(lines[0])): lines.pop(0) while lines: line = lines.pop(0) m1 = META_RE.match(line) if ((line.strip() == ) or END_RE.match(line)): break if m1: key = m1.group('key').lower().strip() value = m1.group('value').strip() try: meta[key].append(value) except KeyError: meta[key] = [value] else: m2 = META_MORE_RE.match(line) if (m2 and key): meta[key].append(m2.group('value').strip()) else: lines.insert(0, line) break self.markdown.Meta = meta return lines
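In practice the preprocessor above is driven through the 'meta' extension rather than by calling run() directly; a minimal sketch (assumes the python-markdown package is installed):
    import markdown

    text = "Title: Example page\nAuthor: Jane Doe\n\nBody text here."
    md = markdown.Markdown(extensions=['meta'])
    html = md.convert(text)
    print(md.Meta)   # {'title': ['Example page'], 'author': ['Jane Doe']}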
def thumbIncrementCheck(lmList: list[list[int]]) -> int: 'Checks whether your thumb is up or not.\n No matter what hand you use.\n returns 1 if thumb is up else 0' count = 0 t_x = lmList[4][1] p_x = lmList[17][1] if (t_x > p_x): if (lmList[4][1] >= lmList[2][1]): count += 1 elif (lmList[4][1] <= lmList[2][1]): count += 1 return count
1,383,995,815,912,717,000
Checks whether your thumb is up or not, no matter which hand you use. Returns 1 if the thumb is up, else 0.
forOutput.py
thumbIncrementCheck
laughingclouds/dt-mst-project
python
def thumbIncrementCheck(lmList: list[list[int]]) -> int: 'Checks whether your thumb is up or not.\n No matter what hand you use.\n returns 1 if thumb is up else 0' count = 0 t_x = lmList[4][1] p_x = lmList[17][1] if (t_x > p_x): if (lmList[4][1] >= lmList[2][1]): count += 1 elif (lmList[4][1] <= lmList[2][1]): count += 1 return count
def textOutput(count, cc) -> str: 'Returns an appropriate text output depending on\n `count` and `cc`.' text = 'NOTHING' if ((count, cc) == (2, 2)): text = 'SCISSOR' elif (count == 0): text = 'ROCK' elif (count == 5): text = 'PAPER' else: pass return text
1,949,855,737,496,309,200
Returns an appropriate text output depending on `count` and `cc`.
forOutput.py
textOutput
laughingclouds/dt-mst-project
python
def textOutput(count, cc) -> str: 'Returns an appropriate text output depending on\n `count` and `cc`.' text = 'NOTHING' if ((count, cc) == (2, 2)): text = 'SCISSOR' elif (count == 0): text = 'ROCK' elif (count == 5): text = 'PAPER' else: pass return text
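A tiny illustrative check of the count-to-gesture mapping in textOutput (the finger counts are made up):
    assert textOutput(2, 2) == 'SCISSOR'   # two raised fingers on both checks
    assert textOutput(0, 0) == 'ROCK'      # no fingers raised
    assert textOutput(5, 1) == 'PAPER'     # all five fingers raised
    assert textOutput(3, 0) == 'NOTHING'   # anything else falls through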
def add(self, block): 'Adds block on top of the stack.' self.register_child(block)
4,228,037,508,592,754,700
Adds block on top of the stack.
python/mxnet/gluon/nn/basic_layers.py
add
IIMarch/mxnet
python
def add(self, block): self.register_child(block)
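A typical Gluon usage sketch for the add method above, stacking layers in a Sequential container (assumes mxnet is installed; the layer sizes are illustrative):
    from mxnet.gluon import nn

    net = nn.Sequential()
    net.add(nn.Dense(128, activation='relu'))   # each call registers the block as a child
    net.add(nn.Dense(10))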
def _block_device_mapping_handler(self): '\n Handles requests which are generated by code similar to:\n\n instance.modify_attribute(\'blockDeviceMapping\', {\'/dev/sda1\': True})\n\n The querystring contains information similar to:\n\n BlockDeviceMapping.1.Ebs.DeleteOnTermination : [\'true\']\n BlockDeviceMapping.1.DeviceName : [\'/dev/sda1\']\n\n For now we only support the "BlockDeviceMapping.1.Ebs.DeleteOnTermination"\n configuration, but it should be trivial to add anything else.\n ' mapping_counter = 1 mapping_device_name_fmt = 'BlockDeviceMapping.%s.DeviceName' mapping_del_on_term_fmt = 'BlockDeviceMapping.%s.Ebs.DeleteOnTermination' while True: mapping_device_name = (mapping_device_name_fmt % mapping_counter) if (mapping_device_name not in self.querystring.keys()): break mapping_del_on_term = (mapping_del_on_term_fmt % mapping_counter) del_on_term_value_str = self.querystring[mapping_del_on_term][0] del_on_term_value = (True if ('true' == del_on_term_value_str) else False) device_name_value = self.querystring[mapping_device_name][0] instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] instance = self.ec2_backend.get_instance(instance_id) if self.is_not_dryrun('ModifyInstanceAttribute'): block_device_type = instance.block_device_mapping[device_name_value] block_device_type.delete_on_termination = del_on_term_value mapping_counter += 1 if (mapping_counter > 1): return EC2_MODIFY_INSTANCE_ATTRIBUTE
-8,310,963,183,193,581,000
Handles requests which are generated by code similar to: instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True}) The querystring contains information similar to: BlockDeviceMapping.1.Ebs.DeleteOnTermination : ['true'] BlockDeviceMapping.1.DeviceName : ['/dev/sda1'] For now we only support the "BlockDeviceMapping.1.Ebs.DeleteOnTermination" configuration, but it should be trivial to add anything else.
moto/ec2/responses/instances.py
_block_device_mapping_handler
adtsys-cloud/moto-aws-mock
python
def _block_device_mapping_handler(self): '\n Handles requests which are generated by code similar to:\n\n instance.modify_attribute(\'blockDeviceMapping\', {\'/dev/sda1\': True})\n\n The querystring contains information similar to:\n\n BlockDeviceMapping.1.Ebs.DeleteOnTermination : [\'true\']\n BlockDeviceMapping.1.DeviceName : [\'/dev/sda1\']\n\n For now we only support the "BlockDeviceMapping.1.Ebs.DeleteOnTermination"\n configuration, but it should be trivial to add anything else.\n ' mapping_counter = 1 mapping_device_name_fmt = 'BlockDeviceMapping.%s.DeviceName' mapping_del_on_term_fmt = 'BlockDeviceMapping.%s.Ebs.DeleteOnTermination' while True: mapping_device_name = (mapping_device_name_fmt % mapping_counter) if (mapping_device_name not in self.querystring.keys()): break mapping_del_on_term = (mapping_del_on_term_fmt % mapping_counter) del_on_term_value_str = self.querystring[mapping_del_on_term][0] del_on_term_value = (True if ('true' == del_on_term_value_str) else False) device_name_value = self.querystring[mapping_device_name][0] instance_ids = instance_ids_from_querystring(self.querystring) instance_id = instance_ids[0] instance = self.ec2_backend.get_instance(instance_id) if self.is_not_dryrun('ModifyInstanceAttribute'): block_device_type = instance.block_device_mapping[device_name_value] block_device_type.delete_on_termination = del_on_term_value mapping_counter += 1 if (mapping_counter > 1): return EC2_MODIFY_INSTANCE_ATTRIBUTE
def get_fs(path, opts=None, rtype='instance'): "Helper to infer filesystem correctly.\n\n Gets filesystem options from settings and updates them with given `opts`.\n\n Parameters\n ----------\n path: str\n Path for which we want to infer filesystem.\n opts: dict\n Kwargs that will be passed to inferred filesystem instance.\n rtype: str\n Either 'instance' (default) or 'class'.\n " from drfs.filesystems import FILESYSTEMS try: protocol = path.scheme except AttributeError: protocol = _get_protocol(path) try: cls = FILESYSTEMS[protocol] if (rtype == 'class'): return cls except KeyError: raise KeyError(f'No filesystem for protocol {protocol}. Try installing it. Available protocols are: {set(FILESYSTEMS.keys())}') config_scheme_key = (protocol if protocol else 'file') opts_ = config['fs_opts'][config_scheme_key].get(dict).copy() if (opts is not None): opts_.update(opts) opts_ = _fix_opts_abfs(cls, path, opts_) return cls(**opts_)
-6,416,019,460,632,777,000
Helper to infer filesystem correctly. Gets filesystem options from settings and updates them with given `opts`. Parameters ---------- path: str Path for which we want to infer filesystem. opts: dict Kwargs that will be passed to inferred filesystem instance. rtype: str Either 'instance' (default) or 'class'.
drfs/filesystems/util.py
get_fs
datarevenue-berlin/drfs
python
def get_fs(path, opts=None, rtype='instance'): "Helper to infer filesystem correctly.\n\n Gets filesystem options from settings and updates them with given `opts`.\n\n Parameters\n ----------\n path: str\n Path for which we want to infer filesystem.\n opts: dict\n Kwargs that will be passed to inferred filesystem instance.\n rtype: str\n Either 'instance' (default) or 'class'.\n " from drfs.filesystems import FILESYSTEMS try: protocol = path.scheme except AttributeError: protocol = _get_protocol(path) try: cls = FILESYSTEMS[protocol] if (rtype == 'class'): return cls except KeyError: raise KeyError(f'No filesystem for protocol {protocol}. Try installing it. Available protocols are: {set(FILESYSTEMS.keys())}') config_scheme_key = (protocol if protocol else 'file') opts_ = config['fs_opts'][config_scheme_key].get(dict).copy() if (opts is not None): opts_.update(opts) opts_ = _fix_opts_abfs(cls, path, opts_) return cls(**opts_)
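A hypothetical sketch of get_fs (assumes drfs is installed with its default fs_opts configuration, and that an S3 backend such as s3fs is available for the class lookup; the paths are illustrative):
    from drfs.filesystems.util import get_fs

    local_fs = get_fs('/tmp/data.csv')                     # infers the local filesystem from the path
    s3_cls = get_fs('s3://bucket/key.csv', rtype='class')  # returns the class without instantiating it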
def allow_pathlib(func): 'Allow methods to receive pathlib.Path objects.\n\n Parameters\n ----------\n func: callable\n function to decorate must have the following signature\n self, path, *args, **kwargs\n\n Returns\n -------\n wrapper: callable\n ' @wraps(func) def wrapper(self, path, *args, **kwargs): from drfs.path import asstr p = asstr(path) return func(self, p, *args, **kwargs) return wrapper
8,810,367,554,903,184,000
Allow methods to receive pathlib.Path objects. Parameters ---------- func: callable function to decorate must have the following signature self, path, *args, **kwargs Returns ------- wrapper: callable
drfs/filesystems/util.py
allow_pathlib
datarevenue-berlin/drfs
python
def allow_pathlib(func): 'Allow methods to receive pathlib.Path objects.\n\n Parameters\n ----------\n func: callable\n function to decorate must have the following signature\n self, path, *args, **kwargs\n\n Returns\n -------\n wrapper: callable\n ' @wraps(func) def wrapper(self, path, *args, **kwargs): from drfs.path import asstr p = asstr(path) return func(self, p, *args, **kwargs) return wrapper
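A minimal sketch of allow_pathlib on a hypothetical filesystem method; DemoFS is made up, and the decorator is assumed importable from the path listed above:
    from pathlib import Path
    from drfs.filesystems.util import allow_pathlib

    class DemoFS:
        @allow_pathlib
        def exists(self, path):
            return isinstance(path, str)   # the wrapper has already converted the Path to str

    assert DemoFS().exists(Path('/tmp/file.txt'))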
def return_schemes(func): 'Make sure method returns full path with scheme.' @wraps(func) def wrapper(self, path, *args, **kwargs): res = func(self, path, *args, **kwargs) try: res = list(map(partial(prepend_scheme, self.scheme), res)) except TypeError: res = prepend_scheme(self.scheme, res) return res return wrapper
29,942,895,302,851,876
Make sure method returns full path with scheme.
drfs/filesystems/util.py
return_schemes
datarevenue-berlin/drfs
python
def return_schemes(func): @wraps(func) def wrapper(self, path, *args, **kwargs): res = func(self, path, *args, **kwargs) try: res = list(map(partial(prepend_scheme, self.scheme), res)) except TypeError: res = prepend_scheme(self.scheme, res) return res return wrapper
def maybe_remove_scheme(func): 'Remove scheme from args and kwargs in case underlying fs does not support it.' @wraps(func) def wrapper(self, path, *args, **kwargs): if (not self.supports_scheme): path = remove_scheme(path, raise_=False) args = [remove_scheme(a, raise_=False) for a in args] kwargs = {k: (remove_scheme(v, raise_=False) if isinstance(v, (Path, str)) else v) for (k, v) in kwargs.items()} return func(self, path, *args, **kwargs) return wrapper
6,853,007,073,676,262,000
Remove scheme from args and kwargs in case underlying fs does not support it.
drfs/filesystems/util.py
maybe_remove_scheme
datarevenue-berlin/drfs
python
def maybe_remove_scheme(func): @wraps(func) def wrapper(self, path, *args, **kwargs): if (not self.supports_scheme): path = remove_scheme(path, raise_=False) args = [remove_scheme(a, raise_=False) for a in args] kwargs = {k: (remove_scheme(v, raise_=False) if isinstance(v, (Path, str)) else v) for (k, v) in kwargs.items()} return func(self, path, *args, **kwargs) return wrapper
def _default_stim_chs(info): 'Return default stim channels for SQD files.' return pick_types(info, meg=False, ref_meg=False, misc=True, exclude=[])[:8]
-4,594,179,421,141,542,000
Return default stim channels for SQD files.
venv/lib/python3.8/site-packages/mne/io/kit/kit.py
_default_stim_chs
alexisicte/aviate
python
def _default_stim_chs(info): return pick_types(info, meg=False, ref_meg=False, misc=True, exclude=[])[:8]
def _make_stim_channel(trigger_chs, slope, threshold, stim_code, trigger_values): 'Create synthetic stim channel from multiple trigger channels.' if (slope == '+'): trig_chs_bin = (trigger_chs > threshold) elif (slope == '-'): trig_chs_bin = (trigger_chs < threshold) else: raise ValueError("slope needs to be '+' or '-'") if (stim_code == 'binary'): trigger_values = (2 ** np.arange(len(trigger_chs))) elif (stim_code != 'channel'): raise ValueError(("stim_code must be 'binary' or 'channel', got %s" % repr(stim_code))) trig_chs = (trig_chs_bin * trigger_values[:, np.newaxis]) return np.array(trig_chs.sum(axis=0), ndmin=2)
7,044,268,555,867,526,000
Create synthetic stim channel from multiple trigger channels.
venv/lib/python3.8/site-packages/mne/io/kit/kit.py
_make_stim_channel
alexisicte/aviate
python
def _make_stim_channel(trigger_chs, slope, threshold, stim_code, trigger_values): if (slope == '+'): trig_chs_bin = (trigger_chs > threshold) elif (slope == '-'): trig_chs_bin = (trigger_chs < threshold) else: raise ValueError("slope needs to be '+' or '-'") if (stim_code == 'binary'): trigger_values = (2 ** np.arange(len(trigger_chs))) elif (stim_code != 'channel'): raise ValueError(("stim_code must be 'binary' or 'channel', got %s" % repr(stim_code))) trig_chs = (trig_chs_bin * trigger_values[:, np.newaxis]) return np.array(trig_chs.sum(axis=0), ndmin=2)
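A purely numeric sketch of the private _make_stim_channel helper: two made-up trigger traces are combined into one binary-coded stim channel (threshold 1, positive slope):
    import numpy as np

    trigger_chs = np.array([[0., 2., 2., 0.],    # trigger line 0
                            [0., 0., 2., 2.]])   # trigger line 1
    stim = _make_stim_channel(trigger_chs, slope='+', threshold=1,
                              stim_code='binary', trigger_values=None)
    # stim == array([[0, 1, 3, 2]]): line 0 codes value 1, line 1 codes value 2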
@verbose def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose=None): 'Extract all the information from the sqd/con file.\n\n Parameters\n ----------\n rawfile : str\n KIT file to be read.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(standardize_names)s\n %(verbose)s\n\n Returns\n -------\n info : instance of Info\n An Info for the instance.\n sqd : dict\n A dict containing all the sqd parameter settings.\n ' sqd = dict() sqd['rawfile'] = rawfile unsupported_format = False sqd['dirs'] = dirs = list() with open(rawfile, 'rb', buffering=0) as fid: dirs.append(_read_dir(fid)) dirs.extend((_read_dir(fid) for _ in range((dirs[0]['count'] - 1)))) assert (len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count']) fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]['offset']) (version, revision) = unpack('2i', fid.read((2 * KIT.INT))) if ((version < 2) or ((version == 2) and (revision < 3))): version_string = ('V%iR%03i' % (version, revision)) if allow_unknown_format: unsupported_format = True logger.warning('Force loading KIT format %s', version_string) else: raise UnsupportedKITFormat(version_string, ('SQD file format %s is not officially supported. Set allow_unknown_format=True to load it anyways.' % (version_string,))) sysid = unpack('i', fid.read(KIT.INT))[0] system_name = unpack('128s', fid.read(128))[0].decode() model_name = unpack('128s', fid.read(128))[0].decode() sqd['nchan'] = channel_count = unpack('i', fid.read(KIT.INT))[0] comment = unpack('256s', fid.read(256))[0].decode() (create_time, last_modified_time) = unpack('2i', fid.read((2 * KIT.INT))) fid.seek((KIT.INT * 3), SEEK_CUR) dewar_style = unpack('i', fid.read(KIT.INT))[0] fid.seek((KIT.INT * 3), SEEK_CUR) fll_type = unpack('i', fid.read(KIT.INT))[0] fid.seek((KIT.INT * 3), SEEK_CUR) trigger_type = unpack('i', fid.read(KIT.INT))[0] fid.seek((KIT.INT * 3), SEEK_CUR) adboard_type = unpack('i', fid.read(KIT.INT))[0] fid.seek((KIT.INT * 29), SEEK_CUR) if ((version < 2) or ((version == 2) and (revision <= 3))): adc_range = float(unpack('i', fid.read(KIT.INT))[0]) else: adc_range = unpack('d', fid.read(KIT.DOUBLE))[0] (adc_polarity, adc_allocated, adc_stored) = unpack('3i', fid.read((3 * KIT.INT))) system_name = system_name.replace('\x00', '') system_name = system_name.strip().replace('\n', '/') model_name = model_name.replace('\x00', '') model_name = model_name.strip().replace('\n', '/') full_version = f'V{version:d}R{revision:03d}' logger.debug('SQD file basic information:') logger.debug('Meg160 version = %s', full_version) logger.debug('System ID = %i', sysid) logger.debug('System name = %s', system_name) logger.debug('Model name = %s', model_name) logger.debug('Channel count = %i', channel_count) logger.debug('Comment = %s', comment) logger.debug('Dewar style = %i', dewar_style) logger.debug('FLL type = %i', fll_type) logger.debug('Trigger type = %i', trigger_type) logger.debug('A/D board type = %i', adboard_type) logger.debug('ADC range = +/-%s[V]', (adc_range / 2.0)) logger.debug('ADC allocate = %i[bit]', adc_allocated) logger.debug('ADC bit = %i[bit]', adc_stored) description = f'{system_name} ({sysid}) {full_version} {model_name}' sqd['dtype'] = np.dtype(getattr(np, f'int{adc_allocated}')) if (fll_type not in KIT.FLL_SETTINGS): fll_types = sorted(KIT.FLL_SETTINGS.keys()) use_fll_type = fll_types[(np.searchsorted(fll_types, fll_type) - 1)] warn(('Unknown site filter settings (FLL) for system "%s" 
model "%s" (ID %s), will assume FLL %d->%d, check your data for correctness, including channel scales and filter settings!' % (system_name, model_name, sysid, fll_type, use_fll_type))) fll_type = use_fll_type chan_dir = dirs[KIT.DIR_INDEX_CHANNELS] (chan_offset, chan_size) = (chan_dir['offset'], chan_dir['size']) sqd['channels'] = channels = [] exg_gains = list() for i in range(channel_count): fid.seek((chan_offset + (chan_size * i))) (channel_type,) = unpack('i', fid.read(KIT.INT)) if ((sysid == 52) and (i < 160) and (channel_type == KIT.CHANNEL_NULL)): channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE if (channel_type in KIT.CHANNELS_MEG): if (channel_type not in KIT.CH_TO_FIFF_COIL): raise NotImplementedError(('KIT channel type %i can not be read. Please contact the mne-python developers.' % channel_type)) channels.append({'type': channel_type, 'loc': np.fromfile(fid, dtype='d', count=5)}) if (channel_type in KIT.CHANNEL_NAME_NCHAR): fid.seek(16, SEEK_CUR) channels[(- 1)]['name'] = _read_name(fid, channel_type) elif (channel_type in KIT.CHANNELS_MISC): (channel_no,) = unpack('i', fid.read(KIT.INT)) fid.seek(4, SEEK_CUR) name = _read_name(fid, channel_type) channels.append({'type': channel_type, 'no': channel_no, 'name': name}) if (channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG)): offset = (6 if (channel_type == KIT.CHANNEL_EEG) else 8) fid.seek(offset, SEEK_CUR) exg_gains.append(np.fromfile(fid, 'd', 1)[0]) elif (channel_type == KIT.CHANNEL_NULL): channels.append({'type': channel_type}) else: raise IOError(('Unknown KIT channel type: %i' % channel_type)) exg_gains = np.array(exg_gains) fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]['offset']) sensitivity = np.fromfile(fid, dtype='d', count=(channel_count * 2)) sensitivity.shape = (channel_count, 2) (channel_offset, channel_gain) = sensitivity.T assert (channel_offset == 0).all() fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]['offset']) amp_data = unpack('i', fid.read(KIT.INT))[0] if (fll_type >= 100): gain1 = ((amp_data & 28672) >> 12) gain2 = ((amp_data & 1879048192) >> 28) gain3 = ((amp_data & 117440512) >> 24) amp_gain = ((KIT.GAINS[gain1] * KIT.GAINS[gain2]) * KIT.GAINS[gain3]) hpf = ((amp_data & 1792) >> 8) lpf = ((amp_data & 458752) >> 16) bef = ((amp_data & 3) >> 0) else: input_gain = ((amp_data & 6144) >> 11) output_gain = ((amp_data & 7) >> 0) amp_gain = (KIT.GAINS[input_gain] * KIT.GAINS[output_gain]) hpf = ((amp_data & 7) >> 4) lpf = ((amp_data & 1792) >> 8) bef = ((amp_data & 49152) >> 14) (hpf_options, lpf_options, bef_options) = KIT.FLL_SETTINGS[fll_type] sqd['highpass'] = KIT.HPFS[hpf_options][hpf] sqd['lowpass'] = KIT.LPFS[lpf_options][lpf] sqd['notch'] = KIT.BEFS[bef_options][bef] fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]['offset']) (sqd['acq_type'],) = (acq_type,) = unpack('i', fid.read(KIT.INT)) (sqd['sfreq'],) = unpack('d', fid.read(KIT.DOUBLE)) if (acq_type == KIT.CONTINUOUS): fid.seek(KIT.INT, SEEK_CUR) (sqd['n_samples'],) = unpack('i', fid.read(KIT.INT)) elif ((acq_type == KIT.EVOKED) or (acq_type == KIT.EPOCHS)): (sqd['frame_length'],) = unpack('i', fid.read(KIT.INT)) (sqd['pretrigger_length'],) = unpack('i', fid.read(KIT.INT)) (sqd['average_count'],) = unpack('i', fid.read(KIT.INT)) (sqd['n_epochs'],) = unpack('i', fid.read(KIT.INT)) if (acq_type == KIT.EVOKED): sqd['n_samples'] = sqd['frame_length'] else: sqd['n_samples'] = (sqd['frame_length'] * sqd['n_epochs']) else: raise IOError(('Invalid acquisition type: %i. Your file is neither continuous nor epoched data.' 
% (acq_type,))) dig_dir = dirs[KIT.DIR_INDEX_DIG_POINTS] cor_dir = dirs[KIT.DIR_INDEX_COREG] dig = dict() hsp = list() if ((dig_dir['count'] > 0) and (cor_dir['count'] > 0)): fid.seek(dig_dir['offset']) for _ in range(dig_dir['count']): name = _read_name(fid, n=8).strip() name = name.lower() rr = np.fromfile(fid, 'd', 3) if name: assert (name not in dig) dig[name] = rr else: hsp.append(rr) elp = [dig.pop(key) for key in ('fidnz', 'fidt9', 'fidt10', 'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4')] if (('hpi_5' in dig) and dig['hpi_5'].any()): elp.append(dig.pop('hpi_5')) elp = np.array(elp) hsp = np.array(hsp, float).reshape((- 1), 3) assert (elp.shape in ((7, 3), (8, 3))) fid.seek(cor_dir['offset']) mrk = np.zeros(((elp.shape[0] - 3), 3)) for _ in range(cor_dir['count']): done = np.fromfile(fid, np.int32, 1)[0] fid.seek(((16 * KIT.DOUBLE) + (16 * KIT.DOUBLE)), SEEK_CUR) marker_count = np.fromfile(fid, np.int32, 1)[0] if (not done): continue assert (marker_count >= len(mrk)) for mi in range(len(mrk)): (mri_type, meg_type, mri_done, meg_done) = np.fromfile(fid, np.int32, 4) assert meg_done fid.seek((3 * KIT.DOUBLE), SEEK_CUR) mrk[mi] = np.fromfile(fid, 'd', 3) fid.seek(256, SEEK_CUR) sqd.update(hsp=hsp, elp=elp, mrk=mrk) all_names = set((ch.get('name', '') for ch in channels)) if ((standardize_names is None) and all_names.difference({'', 'EEG'})): standardize_names = True warn('standardize_names defaults to True in 0.21 but will change to False in 0.22', DeprecationWarning) if unsupported_format: if (sysid not in LEGACY_AMP_PARAMS): raise IOError(('Legacy parameters for system ID %i unavailable' % (sysid,))) (adc_range, adc_stored) = LEGACY_AMP_PARAMS[sysid] is_meg = np.array([(ch['type'] in KIT.CHANNELS_MEG) for ch in channels]) ad_to_volt = (adc_range / (2 ** adc_stored)) ad_to_tesla = ((ad_to_volt / amp_gain) * channel_gain) conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt) is_exg = [(ch['type'] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG)) for ch in channels] exg_gains /= (2 ** (adc_stored - 14)) conv_factor[is_exg] = exg_gains sqd['conv_factor'] = conv_factor[:, np.newaxis] info = _empty_info(float(sqd['sfreq'])) info.update(meas_date=_stamp_to_dt((create_time, 0)), lowpass=sqd['lowpass'], highpass=sqd['highpass'], kit_system_id=sysid, description=description) logger.info('Setting channel info structure...') info['chs'] = fiff_channels = [] channel_index = defaultdict((lambda : 0)) sqd['eeg_dig'] = OrderedDict() for (idx, ch) in enumerate(channels, 1): if (ch['type'] in KIT.CHANNELS_MEG): ch_name = ch.get('name', '') if ((ch_name == '') or standardize_names): ch_name = ('MEG %03d' % idx) (theta, phi) = np.radians(ch['loc'][3:]) x = (sin(theta) * cos(phi)) y = (sin(theta) * sin(phi)) z = cos(theta) vec_z = np.array([x, y, z]) vec_z /= linalg.norm(vec_z) vec_x = np.zeros(vec_z.size, dtype=np.float64) if (vec_z[1] < vec_z[2]): if (vec_z[0] < vec_z[1]): vec_x[0] = 1.0 else: vec_x[1] = 1.0 elif (vec_z[0] < vec_z[2]): vec_x[0] = 1.0 else: vec_x[2] = 1.0 vec_x -= (np.sum((vec_x * vec_z)) * vec_z) vec_x /= linalg.norm(vec_x) vec_y = np.cross(vec_z, vec_x) vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z)) vecs = apply_trans(als_ras_trans, vecs) unit = FIFF.FIFF_UNIT_T loc = vecs.ravel() else: ch_type_label = KIT.CH_LABEL[ch['type']] channel_index[ch_type_label] += 1 ch_type_index = channel_index[ch_type_label] ch_name = ch.get('name', '') eeg_name = ch_name.lower() if ((ch_name in ('', 'EEG')) or standardize_names): ch_name = ('%s %03i' % (ch_type_label, ch_type_index)) unit = FIFF.FIFF_UNIT_V loc = 
np.zeros(12) if (eeg_name and (eeg_name in dig)): loc[:3] = sqd['eeg_dig'][eeg_name] = dig[eeg_name] fiff_channels.append(dict(cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE, unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name, coord_frame=FIFF.FIFFV_COORD_DEVICE, coil_type=KIT.CH_TO_FIFF_COIL[ch['type']], kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc)) info._update_redundant() return (info, sqd)
-8,458,605,956,946,132,000
Extract all the information from the sqd/con file. Parameters ---------- rawfile : str KIT file to be read. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Returns ------- info : instance of Info An Info for the instance. sqd : dict A dict containing all the sqd parameter settings.
venv/lib/python3.8/site-packages/mne/io/kit/kit.py
get_kit_info
alexisicte/aviate
python
@verbose def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose=None): 'Extract all the information from the sqd/con file.\n\n Parameters\n ----------\n rawfile : str\n KIT file to be read.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(standardize_names)s\n %(verbose)s\n\n Returns\n -------\n info : instance of Info\n An Info for the instance.\n sqd : dict\n A dict containing all the sqd parameter settings.\n ' sqd = dict() sqd['rawfile'] = rawfile unsupported_format = False sqd['dirs'] = dirs = list() with open(rawfile, 'rb', buffering=0) as fid: dirs.append(_read_dir(fid)) dirs.extend((_read_dir(fid) for _ in range((dirs[0]['count'] - 1)))) assert (len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count']) fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]['offset']) (version, revision) = unpack('2i', fid.read((2 * KIT.INT))) if ((version < 2) or ((version == 2) and (revision < 3))): version_string = ('V%iR%03i' % (version, revision)) if allow_unknown_format: unsupported_format = True logger.warning('Force loading KIT format %s', version_string) else: raise UnsupportedKITFormat(version_string, ('SQD file format %s is not officially supported. Set allow_unknown_format=True to load it anyways.' % (version_string,))) sysid = unpack('i', fid.read(KIT.INT))[0] system_name = unpack('128s', fid.read(128))[0].decode() model_name = unpack('128s', fid.read(128))[0].decode() sqd['nchan'] = channel_count = unpack('i', fid.read(KIT.INT))[0] comment = unpack('256s', fid.read(256))[0].decode() (create_time, last_modified_time) = unpack('2i', fid.read((2 * KIT.INT))) fid.seek((KIT.INT * 3), SEEK_CUR) dewar_style = unpack('i', fid.read(KIT.INT))[0] fid.seek((KIT.INT * 3), SEEK_CUR) fll_type = unpack('i', fid.read(KIT.INT))[0] fid.seek((KIT.INT * 3), SEEK_CUR) trigger_type = unpack('i', fid.read(KIT.INT))[0] fid.seek((KIT.INT * 3), SEEK_CUR) adboard_type = unpack('i', fid.read(KIT.INT))[0] fid.seek((KIT.INT * 29), SEEK_CUR) if ((version < 2) or ((version == 2) and (revision <= 3))): adc_range = float(unpack('i', fid.read(KIT.INT))[0]) else: adc_range = unpack('d', fid.read(KIT.DOUBLE))[0] (adc_polarity, adc_allocated, adc_stored) = unpack('3i', fid.read((3 * KIT.INT))) system_name = system_name.replace('\x00', ) system_name = system_name.strip().replace('\n', '/') model_name = model_name.replace('\x00', ) model_name = model_name.strip().replace('\n', '/') full_version = f'V{version:d}R{revision:03d}' logger.debug('SQD file basic information:') logger.debug('Meg160 version = %s', full_version) logger.debug('System ID = %i', sysid) logger.debug('System name = %s', system_name) logger.debug('Model name = %s', model_name) logger.debug('Channel count = %i', channel_count) logger.debug('Comment = %s', comment) logger.debug('Dewar style = %i', dewar_style) logger.debug('FLL type = %i', fll_type) logger.debug('Trigger type = %i', trigger_type) logger.debug('A/D board type = %i', adboard_type) logger.debug('ADC range = +/-%s[V]', (adc_range / 2.0)) logger.debug('ADC allocate = %i[bit]', adc_allocated) logger.debug('ADC bit = %i[bit]', adc_stored) description = f'{system_name} ({sysid}) {full_version} {model_name}' sqd['dtype'] = np.dtype(getattr(np, f'int{adc_allocated}')) if (fll_type not in KIT.FLL_SETTINGS): fll_types = sorted(KIT.FLL_SETTINGS.keys()) use_fll_type = fll_types[(np.searchsorted(fll_types, fll_type) - 1)] warn(('Unknown site filter settings (FLL) for system "%s" model 
"%s" (ID %s), will assume FLL %d->%d, check your data for correctness, including channel scales and filter settings!' % (system_name, model_name, sysid, fll_type, use_fll_type))) fll_type = use_fll_type chan_dir = dirs[KIT.DIR_INDEX_CHANNELS] (chan_offset, chan_size) = (chan_dir['offset'], chan_dir['size']) sqd['channels'] = channels = [] exg_gains = list() for i in range(channel_count): fid.seek((chan_offset + (chan_size * i))) (channel_type,) = unpack('i', fid.read(KIT.INT)) if ((sysid == 52) and (i < 160) and (channel_type == KIT.CHANNEL_NULL)): channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE if (channel_type in KIT.CHANNELS_MEG): if (channel_type not in KIT.CH_TO_FIFF_COIL): raise NotImplementedError(('KIT channel type %i can not be read. Please contact the mne-python developers.' % channel_type)) channels.append({'type': channel_type, 'loc': np.fromfile(fid, dtype='d', count=5)}) if (channel_type in KIT.CHANNEL_NAME_NCHAR): fid.seek(16, SEEK_CUR) channels[(- 1)]['name'] = _read_name(fid, channel_type) elif (channel_type in KIT.CHANNELS_MISC): (channel_no,) = unpack('i', fid.read(KIT.INT)) fid.seek(4, SEEK_CUR) name = _read_name(fid, channel_type) channels.append({'type': channel_type, 'no': channel_no, 'name': name}) if (channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG)): offset = (6 if (channel_type == KIT.CHANNEL_EEG) else 8) fid.seek(offset, SEEK_CUR) exg_gains.append(np.fromfile(fid, 'd', 1)[0]) elif (channel_type == KIT.CHANNEL_NULL): channels.append({'type': channel_type}) else: raise IOError(('Unknown KIT channel type: %i' % channel_type)) exg_gains = np.array(exg_gains) fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]['offset']) sensitivity = np.fromfile(fid, dtype='d', count=(channel_count * 2)) sensitivity.shape = (channel_count, 2) (channel_offset, channel_gain) = sensitivity.T assert (channel_offset == 0).all() fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]['offset']) amp_data = unpack('i', fid.read(KIT.INT))[0] if (fll_type >= 100): gain1 = ((amp_data & 28672) >> 12) gain2 = ((amp_data & 1879048192) >> 28) gain3 = ((amp_data & 117440512) >> 24) amp_gain = ((KIT.GAINS[gain1] * KIT.GAINS[gain2]) * KIT.GAINS[gain3]) hpf = ((amp_data & 1792) >> 8) lpf = ((amp_data & 458752) >> 16) bef = ((amp_data & 3) >> 0) else: input_gain = ((amp_data & 6144) >> 11) output_gain = ((amp_data & 7) >> 0) amp_gain = (KIT.GAINS[input_gain] * KIT.GAINS[output_gain]) hpf = ((amp_data & 7) >> 4) lpf = ((amp_data & 1792) >> 8) bef = ((amp_data & 49152) >> 14) (hpf_options, lpf_options, bef_options) = KIT.FLL_SETTINGS[fll_type] sqd['highpass'] = KIT.HPFS[hpf_options][hpf] sqd['lowpass'] = KIT.LPFS[lpf_options][lpf] sqd['notch'] = KIT.BEFS[bef_options][bef] fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]['offset']) (sqd['acq_type'],) = (acq_type,) = unpack('i', fid.read(KIT.INT)) (sqd['sfreq'],) = unpack('d', fid.read(KIT.DOUBLE)) if (acq_type == KIT.CONTINUOUS): fid.seek(KIT.INT, SEEK_CUR) (sqd['n_samples'],) = unpack('i', fid.read(KIT.INT)) elif ((acq_type == KIT.EVOKED) or (acq_type == KIT.EPOCHS)): (sqd['frame_length'],) = unpack('i', fid.read(KIT.INT)) (sqd['pretrigger_length'],) = unpack('i', fid.read(KIT.INT)) (sqd['average_count'],) = unpack('i', fid.read(KIT.INT)) (sqd['n_epochs'],) = unpack('i', fid.read(KIT.INT)) if (acq_type == KIT.EVOKED): sqd['n_samples'] = sqd['frame_length'] else: sqd['n_samples'] = (sqd['frame_length'] * sqd['n_epochs']) else: raise IOError(('Invalid acquisition type: %i. Your file is neither continuous nor epoched data.' 
% (acq_type,))) dig_dir = dirs[KIT.DIR_INDEX_DIG_POINTS] cor_dir = dirs[KIT.DIR_INDEX_COREG] dig = dict() hsp = list() if ((dig_dir['count'] > 0) and (cor_dir['count'] > 0)): fid.seek(dig_dir['offset']) for _ in range(dig_dir['count']): name = _read_name(fid, n=8).strip() name = name.lower() rr = np.fromfile(fid, 'd', 3) if name: assert (name not in dig) dig[name] = rr else: hsp.append(rr) elp = [dig.pop(key) for key in ('fidnz', 'fidt9', 'fidt10', 'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4')] if (('hpi_5' in dig) and dig['hpi_5'].any()): elp.append(dig.pop('hpi_5')) elp = np.array(elp) hsp = np.array(hsp, float).reshape((- 1), 3) assert (elp.shape in ((7, 3), (8, 3))) fid.seek(cor_dir['offset']) mrk = np.zeros(((elp.shape[0] - 3), 3)) for _ in range(cor_dir['count']): done = np.fromfile(fid, np.int32, 1)[0] fid.seek(((16 * KIT.DOUBLE) + (16 * KIT.DOUBLE)), SEEK_CUR) marker_count = np.fromfile(fid, np.int32, 1)[0] if (not done): continue assert (marker_count >= len(mrk)) for mi in range(len(mrk)): (mri_type, meg_type, mri_done, meg_done) = np.fromfile(fid, np.int32, 4) assert meg_done fid.seek((3 * KIT.DOUBLE), SEEK_CUR) mrk[mi] = np.fromfile(fid, 'd', 3) fid.seek(256, SEEK_CUR) sqd.update(hsp=hsp, elp=elp, mrk=mrk) all_names = set((ch.get('name', ) for ch in channels)) if ((standardize_names is None) and all_names.difference({, 'EEG'})): standardize_names = True warn('standardize_names defaults to True in 0.21 but will change to False in 0.22', DeprecationWarning) if unsupported_format: if (sysid not in LEGACY_AMP_PARAMS): raise IOError(('Legacy parameters for system ID %i unavailable' % (sysid,))) (adc_range, adc_stored) = LEGACY_AMP_PARAMS[sysid] is_meg = np.array([(ch['type'] in KIT.CHANNELS_MEG) for ch in channels]) ad_to_volt = (adc_range / (2 ** adc_stored)) ad_to_tesla = ((ad_to_volt / amp_gain) * channel_gain) conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt) is_exg = [(ch['type'] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG)) for ch in channels] exg_gains /= (2 ** (adc_stored - 14)) conv_factor[is_exg] = exg_gains sqd['conv_factor'] = conv_factor[:, np.newaxis] info = _empty_info(float(sqd['sfreq'])) info.update(meas_date=_stamp_to_dt((create_time, 0)), lowpass=sqd['lowpass'], highpass=sqd['highpass'], kit_system_id=sysid, description=description) logger.info('Setting channel info structure...') info['chs'] = fiff_channels = [] channel_index = defaultdict((lambda : 0)) sqd['eeg_dig'] = OrderedDict() for (idx, ch) in enumerate(channels, 1): if (ch['type'] in KIT.CHANNELS_MEG): ch_name = ch.get('name', ) if ((ch_name == ) or standardize_names): ch_name = ('MEG %03d' % idx) (theta, phi) = np.radians(ch['loc'][3:]) x = (sin(theta) * cos(phi)) y = (sin(theta) * sin(phi)) z = cos(theta) vec_z = np.array([x, y, z]) vec_z /= linalg.norm(vec_z) vec_x = np.zeros(vec_z.size, dtype=np.float64) if (vec_z[1] < vec_z[2]): if (vec_z[0] < vec_z[1]): vec_x[0] = 1.0 else: vec_x[1] = 1.0 elif (vec_z[0] < vec_z[2]): vec_x[0] = 1.0 else: vec_x[2] = 1.0 vec_x -= (np.sum((vec_x * vec_z)) * vec_z) vec_x /= linalg.norm(vec_x) vec_y = np.cross(vec_z, vec_x) vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z)) vecs = apply_trans(als_ras_trans, vecs) unit = FIFF.FIFF_UNIT_T loc = vecs.ravel() else: ch_type_label = KIT.CH_LABEL[ch['type']] channel_index[ch_type_label] += 1 ch_type_index = channel_index[ch_type_label] ch_name = ch.get('name', ) eeg_name = ch_name.lower() if ((ch_name in (, 'EEG')) or standardize_names): ch_name = ('%s %03i' % (ch_type_label, ch_type_index)) unit = FIFF.FIFF_UNIT_V loc = np.zeros(12) if 
(eeg_name and (eeg_name in dig)): loc[:3] = sqd['eeg_dig'][eeg_name] = dig[eeg_name] fiff_channels.append(dict(cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE, unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name, coord_frame=FIFF.FIFFV_COORD_DEVICE, coil_type=KIT.CH_TO_FIFF_COIL[ch['type']], kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc)) info._update_redundant() return (info, sqd)
@fill_doc def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>', slope='-', stimthresh=1, preload=False, stim_code='binary', allow_unknown_format=False, standardize_names=None, verbose=None): "Reader function for Ricoh/KIT conversion to FIF.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n mrk : None | str | array_like, shape (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10,000 points are in the head shape, they are automatically decimated.\n stim : list of int | '<' | '>'\n Channel-value correspondence when converting KIT trigger channels to a\n Neuromag-style stim channel. For '<', the largest values are assigned\n to the first channel (default). For '>', the largest values are\n assigned to the last channel. Can also be specified as a list of\n trigger channel indexes.\n slope : '+' | '-'\n How to interpret values on KIT trigger channels when synthesizing a\n Neuromag-style stim channel. With '+', a positive slope (low-to-high)\n is interpreted as an event. With '-', a negative slope (high-to-low)\n is interpreted as an event.\n stimthresh : float\n The threshold level for accepting voltage changes in KIT trigger\n channels as a trigger event.\n %(preload)s\n stim_code : 'binary' | 'channel'\n How to decode trigger values from stim channels. 'binary' read stim\n channel events as binary code, 'channel' encodes channel number.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(standardize_names)s\n %(verbose)s\n\n Returns\n -------\n raw : instance of RawKIT\n A Raw object containing KIT data.\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n\n Notes\n -----\n If mrk, hsp or elp are array_like inputs, then the numbers in xyz\n coordinates should be in units of meters.\n " return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp, stim=stim, slope=slope, stimthresh=stimthresh, preload=preload, stim_code=stim_code, allow_unknown_format=allow_unknown_format, standardize_names=standardize_names, verbose=verbose)
-419,446,750,408,291,400
Reader function for Ricoh/KIT conversion to FIF. Parameters ---------- input_fname : str Path to the sqd file. mrk : None | str | array_like, shape (5, 3) | list of str or array_like Marker points representing the location of the marker coils with respect to the MEG Sensors, or path to a marker file. If list, all of the markers will be averaged together. elp : None | str | array_like, shape (8, 3) Digitizer points representing the location of the fiducials and the marker coils with respect to the digitized head shape, or path to a file containing these points. hsp : None | str | array, shape (n_points, 3) Digitizer head shape points, or path to head shape file. If more than 10,000 points are in the head shape, they are automatically decimated. stim : list of int | '<' | '>' Channel-value correspondence when converting KIT trigger channels to a Neuromag-style stim channel. For '<', the largest values are assigned to the first channel (default). For '>', the largest values are assigned to the last channel. Can also be specified as a list of trigger channel indexes. slope : '+' | '-' How to interpret values on KIT trigger channels when synthesizing a Neuromag-style stim channel. With '+', a positive slope (low-to-high) is interpreted as an event. With '-', a negative slope (high-to-low) is interpreted as an event. stimthresh : float The threshold level for accepting voltage changes in KIT trigger channels as a trigger event. %(preload)s stim_code : 'binary' | 'channel' How to decode trigger values from stim channels. 'binary' read stim channel events as binary code, 'channel' encodes channel number. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Returns ------- raw : instance of RawKIT A Raw object containing KIT data. See Also -------- mne.io.Raw : Documentation of attribute and methods. Notes ----- If mrk, hsp or elp are array_like inputs, then the numbers in xyz coordinates should be in units of meters.
venv/lib/python3.8/site-packages/mne/io/kit/kit.py
read_raw_kit
alexisicte/aviate
python
@fill_doc def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>', slope='-', stimthresh=1, preload=False, stim_code='binary', allow_unknown_format=False, standardize_names=None, verbose=None): "Reader function for Ricoh/KIT conversion to FIF.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n mrk : None | str | array_like, shape (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10,000 points are in the head shape, they are automatically decimated.\n stim : list of int | '<' | '>'\n Channel-value correspondence when converting KIT trigger channels to a\n Neuromag-style stim channel. For '<', the largest values are assigned\n to the first channel (default). For '>', the largest values are\n assigned to the last channel. Can also be specified as a list of\n trigger channel indexes.\n slope : '+' | '-'\n How to interpret values on KIT trigger channels when synthesizing a\n Neuromag-style stim channel. With '+', a positive slope (low-to-high)\n is interpreted as an event. With '-', a negative slope (high-to-low)\n is interpreted as an event.\n stimthresh : float\n The threshold level for accepting voltage changes in KIT trigger\n channels as a trigger event.\n %(preload)s\n stim_code : 'binary' | 'channel'\n How to decode trigger values from stim channels. 'binary' read stim\n channel events as binary code, 'channel' encodes channel number.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(standardize_names)s\n %(verbose)s\n\n Returns\n -------\n raw : instance of RawKIT\n A Raw object containing KIT data.\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n\n Notes\n -----\n If mrk, hsp or elp are array_like inputs, then the numbers in xyz\n coordinates should be in units of meters.\n " return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp, stim=stim, slope=slope, stimthresh=stimthresh, preload=preload, stim_code=stim_code, allow_unknown_format=allow_unknown_format, standardize_names=standardize_names, verbose=verbose)
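A minimal usage sketch for read_raw_kit as documented above; the .sqd/.elp/.hsp file names are hypothetical placeholders, and only input_fname is required.

import mne

# Hypothetical KIT/Ricoh recording plus coregistration files (placeholder paths).
raw = mne.io.read_raw_kit(
    'recording.sqd',      # raw data file
    mrk='marker.sqd',     # marker coil measurement (optional)
    elp='points.elp',     # fiducials + marker coils in head coordinates (optional)
    hsp='headshape.hsp',  # digitized head shape (optional)
    stim='>',             # synthesize a Neuromag-style stim channel
    slope='-',
    preload=True,
)
print(raw.info['nchan'], raw.info['sfreq'])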
@fill_doc def read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None, hsp=None, allow_unknown_format=False, standardize_names=None, verbose=None): "Reader function for Ricoh/KIT epochs files.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n events : array, shape (n_events, 3)\n The events typically returned by the read_events function.\n If some events don't match the events of interest as specified\n by event_id, they will be marked as 'IGNORED' in the drop log.\n event_id : int | list of int | dict | None\n The id of the event to consider. If dict,\n the keys can later be used to access associated events. Example:\n dict(auditory=1, visual=3). If int, a dict will be created with\n the id as string. If a list, all events with the IDs specified\n in the list are used. If None, all events will be used with\n and a dict is created with string integer names corresponding\n to the event id integers.\n mrk : None | str | array_like, shape (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10,000 points are in the head shape, they are automatically decimated.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(standardize_names)s\n %(verbose)s\n\n Returns\n -------\n epochs : instance of Epochs\n The epochs.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n " epochs = EpochsKIT(input_fname=input_fname, events=events, event_id=event_id, mrk=mrk, elp=elp, hsp=hsp, allow_unknown_format=allow_unknown_format, standardize_names=standardize_names, verbose=verbose) return epochs
2,132,265,082,483,621,600
Reader function for Ricoh/KIT epochs files. Parameters ---------- input_fname : str Path to the sqd file. events : array, shape (n_events, 3) The events typically returned by the read_events function. If some events don't match the events of interest as specified by event_id, they will be marked as 'IGNORED' in the drop log. event_id : int | list of int | dict | None The id of the event to consider. If dict, the keys can later be used to access associated events. Example: dict(auditory=1, visual=3). If int, a dict will be created with the id as string. If a list, all events with the IDs specified in the list are used. If None, all events will be used with and a dict is created with string integer names corresponding to the event id integers. mrk : None | str | array_like, shape (5, 3) | list of str or array_like Marker points representing the location of the marker coils with respect to the MEG Sensors, or path to a marker file. If list, all of the markers will be averaged together. elp : None | str | array_like, shape (8, 3) Digitizer points representing the location of the fiducials and the marker coils with respect to the digitized head shape, or path to a file containing these points. hsp : None | str | array, shape (n_points, 3) Digitizer head shape points, or path to head shape file. If more than 10,000 points are in the head shape, they are automatically decimated. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Returns ------- epochs : instance of Epochs The epochs. Notes ----- .. versionadded:: 0.9.0
venv/lib/python3.8/site-packages/mne/io/kit/kit.py
read_epochs_kit
alexisicte/aviate
python
@fill_doc def read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None, hsp=None, allow_unknown_format=False, standardize_names=None, verbose=None): "Reader function for Ricoh/KIT epochs files.\n\n Parameters\n ----------\n input_fname : str\n Path to the sqd file.\n events : array, shape (n_events, 3)\n The events typically returned by the read_events function.\n If some events don't match the events of interest as specified\n by event_id, they will be marked as 'IGNORED' in the drop log.\n event_id : int | list of int | dict | None\n The id of the event to consider. If dict,\n the keys can later be used to access associated events. Example:\n dict(auditory=1, visual=3). If int, a dict will be created with\n the id as string. If a list, all events with the IDs specified\n in the list are used. If None, all events will be used with\n and a dict is created with string integer names corresponding\n to the event id integers.\n mrk : None | str | array_like, shape (5, 3) | list of str or array_like\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n elp : None | str | array_like, shape (8, 3)\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10,000 points are in the head shape, they are automatically decimated.\n allow_unknown_format : bool\n Force reading old data that is not officially supported. Alternatively,\n read and re-save the data with the KIT MEG Laboratory application.\n %(standardize_names)s\n %(verbose)s\n\n Returns\n -------\n epochs : instance of Epochs\n The epochs.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n " epochs = EpochsKIT(input_fname=input_fname, events=events, event_id=event_id, mrk=mrk, elp=elp, hsp=hsp, allow_unknown_format=allow_unknown_format, standardize_names=standardize_names, verbose=verbose) return epochs
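A hedged sketch of the epochs reader; the epochs .sqd file is a placeholder and the events array is fabricated for illustration (normally it would come from mne.read_events or a stimulus log).

import numpy as np
import mne

events = np.array([[0, 0, 1],   # [sample, previous value, event id]
                   [1, 0, 2],
                   [2, 0, 1]])
epochs = mne.io.read_epochs_kit('epochs.sqd', events=events,
                                event_id=dict(auditory=1, visual=2))
print(epochs)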
def read_stim_ch(self, buffer_size=100000.0): 'Read events from data.\n\n Parameter\n ---------\n buffer_size : int\n The size of chunk to by which the data are scanned.\n\n Returns\n -------\n events : array, [samples]\n The event vector (1 x samples).\n ' buffer_size = int(buffer_size) start = int(self.first_samp) stop = int((self.last_samp + 1)) pick = pick_types(self.info, meg=False, ref_meg=False, stim=True, exclude=[]) stim_ch = np.empty((1, stop), dtype=np.int64) for b_start in range(start, stop, buffer_size): b_stop = (b_start + buffer_size) x = self[pick, b_start:b_stop][0] stim_ch[:, b_start:(b_start + x.shape[1])] = x return stim_ch
-1,490,638,908,371,606,300
Read events from data. Parameters ---------- buffer_size : int The size of the chunk by which the data are scanned. Returns ------- events : array, [samples] The event vector (1 x samples).
venv/lib/python3.8/site-packages/mne/io/kit/kit.py
read_stim_ch
alexisicte/aviate
python
def read_stim_ch(self, buffer_size=100000.0): 'Read events from data.\n\n Parameter\n ---------\n buffer_size : int\n The size of chunk to by which the data are scanned.\n\n Returns\n -------\n events : array, [samples]\n The event vector (1 x samples).\n ' buffer_size = int(buffer_size) start = int(self.first_samp) stop = int((self.last_samp + 1)) pick = pick_types(self.info, meg=False, ref_meg=False, stim=True, exclude=[]) stim_ch = np.empty((1, stop), dtype=np.int64) for b_start in range(start, stop, buffer_size): b_stop = (b_start + buffer_size) x = self[pick, b_start:b_stop][0] stim_ch[:, b_start:(b_start + x.shape[1])] = x return stim_ch
def _set_stimchannels(self, info, stim, stim_code): "Specify how the trigger channel is synthesized from analog channels.\n\n Has to be done before loading data. For a RawKIT instance that has been\n created with preload=True, this method will raise a\n NotImplementedError.\n\n Parameters\n ----------\n info : instance of MeasInfo\n The measurement info.\n stim : list of int | '<' | '>'\n Can be submitted as list of trigger channels.\n If a list is not specified, the default triggers extracted from\n misc channels will be used with specified directionality.\n '<' means that largest values assigned to the first channel\n in sequence.\n '>' means the largest trigger assigned to the last channel\n in sequence.\n stim_code : 'binary' | 'channel'\n How to decode trigger values from stim channels. 'binary' read stim\n channel events as binary code, 'channel' encodes channel number.\n " if self.preload: raise NotImplementedError("Can't change stim channel after loading data") _check_option('stim_code', stim_code, ['binary', 'channel']) if (stim is not None): if isinstance(stim, str): picks = _default_stim_chs(info) if (stim == '<'): stim = picks[::(- 1)] elif (stim == '>'): stim = picks else: raise ValueError(("stim needs to be list of int, '>' or '<', not %r" % str(stim))) else: stim = np.asarray(stim, int) if (stim.max() >= self._raw_extras[0]['nchan']): raise ValueError(('Got stim=%s, but sqd file only has %i channels' % (stim, self._raw_extras[0]['nchan']))) nchan = (self._raw_extras[0]['nchan'] + 1) info['chs'].append(dict(cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0, unit=FIFF.FIFF_UNIT_NONE, unit_mul=FIFF.FIFF_UNITM_NONE, ch_name='STI 014', coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan), kind=FIFF.FIFFV_STIM_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN)) info._update_redundant() self._raw_extras[0]['stim'] = stim self._raw_extras[0]['stim_code'] = stim_code
-26,248,256,616,927,200
Specify how the trigger channel is synthesized from analog channels. Has to be done before loading data. For a RawKIT instance that has been created with preload=True, this method will raise a NotImplementedError. Parameters ---------- info : instance of MeasInfo The measurement info. stim : list of int | '<' | '>' Can be submitted as list of trigger channels. If a list is not specified, the default triggers extracted from misc channels will be used with specified directionality. '<' means that largest values assigned to the first channel in sequence. '>' means the largest trigger assigned to the last channel in sequence. stim_code : 'binary' | 'channel' How to decode trigger values from stim channels. 'binary' read stim channel events as binary code, 'channel' encodes channel number.
venv/lib/python3.8/site-packages/mne/io/kit/kit.py
_set_stimchannels
alexisicte/aviate
python
def _set_stimchannels(self, info, stim, stim_code): "Specify how the trigger channel is synthesized from analog channels.\n\n Has to be done before loading data. For a RawKIT instance that has been\n created with preload=True, this method will raise a\n NotImplementedError.\n\n Parameters\n ----------\n info : instance of MeasInfo\n The measurement info.\n stim : list of int | '<' | '>'\n Can be submitted as list of trigger channels.\n If a list is not specified, the default triggers extracted from\n misc channels will be used with specified directionality.\n '<' means that largest values assigned to the first channel\n in sequence.\n '>' means the largest trigger assigned to the last channel\n in sequence.\n stim_code : 'binary' | 'channel'\n How to decode trigger values from stim channels. 'binary' read stim\n channel events as binary code, 'channel' encodes channel number.\n " if self.preload: raise NotImplementedError("Can't change stim channel after loading data") _check_option('stim_code', stim_code, ['binary', 'channel']) if (stim is not None): if isinstance(stim, str): picks = _default_stim_chs(info) if (stim == '<'): stim = picks[::(- 1)] elif (stim == '>'): stim = picks else: raise ValueError(("stim needs to be list of int, '>' or '<', not %r" % str(stim))) else: stim = np.asarray(stim, int) if (stim.max() >= self._raw_extras[0]['nchan']): raise ValueError(('Got stim=%s, but sqd file only has %i channels' % (stim, self._raw_extras[0]['nchan']))) nchan = (self._raw_extras[0]['nchan'] + 1) info['chs'].append(dict(cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0, unit=FIFF.FIFF_UNIT_NONE, unit_mul=FIFF.FIFF_UNITM_NONE, ch_name='STI 014', coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan), kind=FIFF.FIFFV_STIM_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN)) info._update_redundant() self._raw_extras[0]['stim'] = stim self._raw_extras[0]['stim_code'] = stim_code
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): 'Read a chunk of raw data.' sqd = self._raw_extras[fi] nchan = sqd['nchan'] data_left = ((stop - start) * nchan) conv_factor = sqd['conv_factor'] n_bytes = sqd['dtype'].itemsize assert (n_bytes in (2, 4)) blk_size = min(data_left, (((100000000 // n_bytes) // nchan) * nchan)) with open(self._filenames[fi], 'rb', buffering=0) as fid: pointer = ((start * nchan) * n_bytes) fid.seek((sqd['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'] + pointer)) stim = sqd['stim'] for blk_start in (np.arange(0, data_left, blk_size) // nchan): blk_size = min(blk_size, (data_left - (blk_start * nchan))) block = np.fromfile(fid, dtype=sqd['dtype'], count=blk_size) block = block.reshape(nchan, (- 1), order='F').astype(float) blk_stop = (blk_start + block.shape[1]) data_view = data[:, blk_start:blk_stop] block *= conv_factor if (stim is not None): stim_ch = _make_stim_channel(block[stim, :], sqd['slope'], sqd['stimthresh'], sqd['stim_code'], stim) block = np.vstack((block, stim_ch)) _mult_cal_one(data_view, block, idx, cals, mult)
-5,310,060,896,194,029,000
Read a chunk of raw data.
venv/lib/python3.8/site-packages/mne/io/kit/kit.py
_read_segment_file
alexisicte/aviate
python
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): sqd = self._raw_extras[fi] nchan = sqd['nchan'] data_left = ((stop - start) * nchan) conv_factor = sqd['conv_factor'] n_bytes = sqd['dtype'].itemsize assert (n_bytes in (2, 4)) blk_size = min(data_left, (((100000000 // n_bytes) // nchan) * nchan)) with open(self._filenames[fi], 'rb', buffering=0) as fid: pointer = ((start * nchan) * n_bytes) fid.seek((sqd['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'] + pointer)) stim = sqd['stim'] for blk_start in (np.arange(0, data_left, blk_size) // nchan): blk_size = min(blk_size, (data_left - (blk_start * nchan))) block = np.fromfile(fid, dtype=sqd['dtype'], count=blk_size) block = block.reshape(nchan, (- 1), order='F').astype(float) blk_stop = (blk_start + block.shape[1]) data_view = data[:, blk_start:blk_stop] block *= conv_factor if (stim is not None): stim_ch = _make_stim_channel(block[stim, :], sqd['slope'], sqd['stimthresh'], sqd['stim_code'], stim) block = np.vstack((block, stim_ch)) _mult_cal_one(data_view, block, idx, cals, mult)
def _read_kit_data(self): 'Read epochs data.\n\n Returns\n -------\n data : array, [channels x samples]\n the data matrix (channels x samples).\n times : array, [samples]\n returns the time values corresponding to the samples.\n ' info = self._raw_extras[0] epoch_length = info['frame_length'] n_epochs = info['n_epochs'] n_samples = info['n_samples'] filename = info['filename'] dtype = info['dtype'] nchan = info['nchan'] with open(filename, 'rb', buffering=0) as fid: fid.seek(info['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset']) count = (n_samples * nchan) data = np.fromfile(fid, dtype=dtype, count=count) data = data.reshape((n_samples, nchan)).T data = (data * info['conv_factor']) data = data.reshape((nchan, n_epochs, epoch_length)) data = data.transpose((1, 0, 2)) return data
6,968,618,934,094,841,000
Read epochs data. Returns ------- data : array, [channels x samples] the data matrix (channels x samples). times : array, [samples] returns the time values corresponding to the samples.
venv/lib/python3.8/site-packages/mne/io/kit/kit.py
_read_kit_data
alexisicte/aviate
python
def _read_kit_data(self): 'Read epochs data.\n\n Returns\n -------\n data : array, [channels x samples]\n the data matrix (channels x samples).\n times : array, [samples]\n returns the time values corresponding to the samples.\n ' info = self._raw_extras[0] epoch_length = info['frame_length'] n_epochs = info['n_epochs'] n_samples = info['n_samples'] filename = info['filename'] dtype = info['dtype'] nchan = info['nchan'] with open(filename, 'rb', buffering=0) as fid: fid.seek(info['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset']) count = (n_samples * nchan) data = np.fromfile(fid, dtype=dtype, count=count) data = data.reshape((n_samples, nchan)).T data = (data * info['conv_factor']) data = data.reshape((nchan, n_epochs, epoch_length)) data = data.transpose((1, 0, 2)) return data
@lru_cache() def get_linux_distribution(): 'Compatibility wrapper for {platform,distro}.linux_distribution().\n ' if hasattr(platform, 'linux_distribution'): with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=DeprecationWarning) result = platform.linux_distribution() else: import distro result = distro.linux_distribution(full_distribution_name=False) return result
2,094,807,504,367,020,000
Compatibility wrapper for {platform,distro}.linux_distribution().
datalad/utils.py
get_linux_distribution
AKSoo/datalad
python
@lru_cache() def get_linux_distribution(): '\n ' if hasattr(platform, 'linux_distribution'): with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=DeprecationWarning) result = platform.linux_distribution() else: import distro result = distro.linux_distribution(full_distribution_name=False) return result
def getargspec(func, *, include_kwonlyargs=False): "Compat shim for getargspec deprecated in python 3.\n\n The main difference from inspect.getargspec (and inspect.getfullargspec\n for that matter) is that by using inspect.signature we are providing\n correct args/defaults for functools.wraps'ed functions.\n\n `include_kwonlyargs` option was added to centralize getting all args,\n even the ones which are kwonly (follow the ``*,``).\n\n For internal use and not advised for use in 3rd party code.\n Please use inspect.signature directly.\n " f_sign = inspect.signature(func) args4 = [[], None, None, {}] kwonlyargs = {} (args, defaults) = (args4[0], args4[3]) P = inspect.Parameter for (p_name, p) in f_sign.parameters.items(): if (p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD)): assert (not kwonlyargs) args.append(p_name) if (p.default is not P.empty): defaults[p_name] = p.default elif (p.kind == P.VAR_POSITIONAL): args4[1] = p_name elif (p.kind == P.VAR_KEYWORD): args4[2] = p_name elif (p.kind == P.KEYWORD_ONLY): assert (p.default is not P.empty) kwonlyargs[p_name] = p.default if kwonlyargs: if (not include_kwonlyargs): raise ValueError('Function has keyword-only parameters or annotations, either use inspect.signature() API which can support them, or provide include_kwonlyargs=True to this function') else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) args4[3] = (None if (not defaults) else tuple(defaults.values())) return ArgSpecFake(*args4)
-6,180,160,135,232,466,000
Compat shim for getargspec deprecated in python 3. The main difference from inspect.getargspec (and inspect.getfullargspec for that matter) is that by using inspect.signature we are providing correct args/defaults for functools.wraps'ed functions. `include_kwonlyargs` option was added to centralize getting all args, even the ones which are kwonly (follow the ``*,``). For internal use and not advised for use in 3rd party code. Please use inspect.signature directly.
datalad/utils.py
getargspec
AKSoo/datalad
python
def getargspec(func, *, include_kwonlyargs=False): "Compat shim for getargspec deprecated in python 3.\n\n The main difference from inspect.getargspec (and inspect.getfullargspec\n for that matter) is that by using inspect.signature we are providing\n correct args/defaults for functools.wraps'ed functions.\n\n `include_kwonlyargs` option was added to centralize getting all args,\n even the ones which are kwonly (follow the ``*,``).\n\n For internal use and not advised for use in 3rd party code.\n Please use inspect.signature directly.\n " f_sign = inspect.signature(func) args4 = [[], None, None, {}] kwonlyargs = {} (args, defaults) = (args4[0], args4[3]) P = inspect.Parameter for (p_name, p) in f_sign.parameters.items(): if (p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD)): assert (not kwonlyargs) args.append(p_name) if (p.default is not P.empty): defaults[p_name] = p.default elif (p.kind == P.VAR_POSITIONAL): args4[1] = p_name elif (p.kind == P.VAR_KEYWORD): args4[2] = p_name elif (p.kind == P.KEYWORD_ONLY): assert (p.default is not P.empty) kwonlyargs[p_name] = p.default if kwonlyargs: if (not include_kwonlyargs): raise ValueError('Function has keyword-only parameters or annotations, either use inspect.signature() API which can support them, or provide include_kwonlyargs=True to this function') else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) args4[3] = (None if (not defaults) else tuple(defaults.values())) return ArgSpecFake(*args4)
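To illustrate the point made in the docstring about functools.wraps'ed functions, a small sketch (the decorated function is made up):

import functools
from datalad.utils import getargspec

def noop_decorator(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

@noop_decorator
def greet(name, punctuation='!'):
    return 'hello ' + name + punctuation

# Because it is based on inspect.signature, the wrapped function's real
# arguments and defaults are reported, not the wrapper's *args/**kwargs.
print(getargspec(greet))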
def any_re_search(regexes, value): 'Return if any of regexes (list or str) searches successfully for value' for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False
4,265,866,037,996,960,000
Return if any of regexes (list or str) searches successfully for value
datalad/utils.py
any_re_search
AKSoo/datalad
python
def any_re_search(regexes, value): for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False
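A quick illustration (patterns and value are arbitrary):

from datalad.utils import any_re_search

print(any_re_search('^data', 'datalad'))             # True  -- single pattern
print(any_re_search(['^git', 'annex$'], 'datalad'))  # False -- none of the patterns match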
def not_supported_on_windows(msg=None): 'A little helper to be invoked to consistently fail whenever functionality is\n not supported (yet) on Windows\n ' if on_windows: raise NotImplementedError(('This functionality is not yet implemented for Windows OS' + ((': %s' % msg) if msg else '')))
-9,206,899,521,312,776,000
A little helper to be invoked to consistently fail whenever functionality is not supported (yet) on Windows
datalad/utils.py
not_supported_on_windows
AKSoo/datalad
python
def not_supported_on_windows(msg=None): 'A little helper to be invoked to consistently fail whenever functionality is\n    not supported (yet) on Windows\n    ' if on_windows: raise NotImplementedError(('This functionality is not yet implemented for Windows OS' + ((': %s' % msg) if msg else '')))
def get_home_envvars(new_home): 'Return dict with env variables to be adjusted for a new HOME\n\n Only variables found in current os.environ are adjusted.\n\n Parameters\n ----------\n new_home: str or Path\n New home path, in native to OS "schema"\n ' new_home = str(new_home) out = {'HOME': new_home} if on_windows: out['USERPROFILE'] = new_home (out['HOMEDRIVE'], out['HOMEPATH']) = splitdrive(new_home) return {v: val for (v, val) in out.items() if (v in os.environ)}
4,516,429,125,876,582,400
Return dict with env variables to be adjusted for a new HOME Only variables found in current os.environ are adjusted. Parameters ---------- new_home: str or Path New home path, in the OS-native format
datalad/utils.py
get_home_envvars
AKSoo/datalad
python
def get_home_envvars(new_home): 'Return dict with env variables to be adjusted for a new HOME\n\n Only variables found in current os.environ are adjusted.\n\n Parameters\n ----------\n new_home: str or Path\n New home path, in native to OS "schema"\n ' new_home = str(new_home) out = {'HOME': new_home} if on_windows: out['USERPROFILE'] = new_home (out['HOMEDRIVE'], out['HOMEPATH']) = splitdrive(new_home) return {v: val for (v, val) in out.items() if (v in os.environ)}
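A short sketch of how the returned overrides might be applied; the target directory is a placeholder:

import os
from datalad.utils import get_home_envvars

overrides = get_home_envvars('/tmp/fake-home')  # e.g. {'HOME': '/tmp/fake-home'} on POSIX
child_env = dict(os.environ, **overrides)       # environment for a subprocess with a relocated HOME
print(overrides)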
def auto_repr(cls): 'Decorator for a class to assign it an automagic quick and dirty __repr__\n\n It uses public class attributes to prepare repr of a class\n\n Original idea: http://stackoverflow.com/a/27799004/1265472\n ' cls.__repr__ = __auto_repr__ return cls
-7,868,090,968,649,534,000
Decorator for a class to assign it an automagic quick and dirty __repr__ It uses public class attributes to prepare repr of a class Original idea: http://stackoverflow.com/a/27799004/1265472
datalad/utils.py
auto_repr
AKSoo/datalad
python
def auto_repr(cls): 'Decorator for a class to assign it an automagic quick and dirty __repr__\n\n It uses public class attributes to prepare repr of a class\n\n Original idea: http://stackoverflow.com/a/27799004/1265472\n ' cls.__repr__ = __auto_repr__ return cls
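A minimal sketch of the decorator in use; the class is made up and the exact repr text depends on __auto_repr__:

from datalad.utils import auto_repr

@auto_repr
class Job:
    def __init__(self, name, retries=3):
        self.name = name
        self.retries = retries

# repr is generated from the public attributes, e.g. Job(name='sync', retries=3)
print(Job('sync'))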
def is_interactive(): 'Return True if all in/outs are open and tty.\n\n Note that in a somewhat abnormal case where e.g. stdin is explicitly\n closed, and any operation on it would raise a\n `ValueError("I/O operation on closed file")` exception, this function\n would just return False, since the session cannot be used interactively.\n ' return all((_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)))
8,509,922,991,414,469,000
Return True if all in/outs are open and tty. Note that in a somewhat abnormal case where e.g. stdin is explicitly closed, and any operation on it would raise a `ValueError("I/O operation on closed file")` exception, this function would just return False, since the session cannot be used interactively.
datalad/utils.py
is_interactive
AKSoo/datalad
python
def is_interactive(): 'Return True if all in/outs are open and tty.\n\n Note that in a somewhat abnormal case where e.g. stdin is explicitly\n closed, and any operation on it would raise a\n `ValueError("I/O operation on closed file")` exception, this function\n would just return False, since the session cannot be used interactively.\n ' return all((_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)))
def get_ipython_shell(): 'Detect if running within IPython and returns its `ip` (shell) object\n\n Returns None if not under ipython (no `get_ipython` function)\n ' try: return get_ipython() except NameError: return None
7,239,483,232,310,486,000
Detect if running within IPython and returns its `ip` (shell) object Returns None if not under ipython (no `get_ipython` function)
datalad/utils.py
get_ipython_shell
AKSoo/datalad
python
def get_ipython_shell(): 'Detect if running within IPython and returns its `ip` (shell) object\n\n Returns None if not under ipython (no `get_ipython` function)\n ' try: return get_ipython() except NameError: return None
def md5sum(filename): 'Compute an MD5 sum for the given file\n ' from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5']
-6,846,947,516,037,588,000
Compute an MD5 sum for the given file
datalad/utils.py
md5sum
AKSoo/datalad
python
def md5sum(filename): '\n ' from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5']
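Usage is a one-liner; the path below is a placeholder for any existing file:

from datalad.utils import md5sum

print(md5sum('/etc/hostname'))  # hex MD5 digest of the file's contents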
def sorted_files(path): 'Return a (sorted) list of files under path\n ' return sorted(sum([[op.join(r, f)[(len(path) + 1):] for f in files] for (r, d, files) in os.walk(path) if (not ('.git' in r))], []))
-8,662,916,188,476,686,000
Return a (sorted) list of files under path
datalad/utils.py
sorted_files
AKSoo/datalad
python
def sorted_files(path): '\n ' return sorted(sum([[op.join(r, f)[(len(path) + 1):] for f in files] for (r, d, files) in os.walk(path) if (not ('.git' in r))], []))
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): 'Generator to find files matching regex\n\n Parameters\n ----------\n regex: basestring\n exclude: basestring, optional\n Matches to exclude\n exclude_vcs:\n If True, excludes commonly known VCS subdirectories. If string, used\n as regex to exclude those files (regex: `%r`)\n exclude_datalad:\n If True, excludes files known to be datalad meta-data files (e.g. under\n .datalad/ subdirectory) (regex: `%r`)\n topdir: basestring, optional\n Directory where to search\n dirs: bool, optional\n Whether to match directories as well as files\n ' for (dirpath, dirnames, filenames) in os.walk(topdir): names = ((dirnames + filenames) if dirs else filenames) paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if (exclude and re.search(exclude, path)): continue if (exclude_vcs and re.search(_VCS_REGEX, path)): continue if (exclude_datalad and re.search(_DATALAD_REGEX, path)): continue (yield path)
-8,840,877,839,042,412,000
Generator to find files matching regex Parameters ---------- regex: basestring exclude: basestring, optional Matches to exclude exclude_vcs: If True, excludes commonly known VCS subdirectories. If string, used as regex to exclude those files (regex: `%r`) exclude_datalad: If True, excludes files known to be datalad meta-data files (e.g. under .datalad/ subdirectory) (regex: `%r`) topdir: basestring, optional Directory where to search dirs: bool, optional Whether to match directories as well as files
datalad/utils.py
find_files
AKSoo/datalad
python
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): 'Generator to find files matching regex\n\n Parameters\n ----------\n regex: basestring\n exclude: basestring, optional\n Matches to exclude\n exclude_vcs:\n If True, excludes commonly known VCS subdirectories. If string, used\n as regex to exclude those files (regex: `%r`)\n exclude_datalad:\n If True, excludes files known to be datalad meta-data files (e.g. under\n .datalad/ subdirectory) (regex: `%r`)\n topdir: basestring, optional\n Directory where to search\n dirs: bool, optional\n Whether to match directories as well as files\n ' for (dirpath, dirnames, filenames) in os.walk(topdir): names = ((dirnames + filenames) if dirs else filenames) paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if (exclude and re.search(exclude, path)): continue if (exclude_vcs and re.search(_VCS_REGEX, path)): continue if (exclude_datalad and re.search(_DATALAD_REGEX, path)): continue (yield path)
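A sketch of the generator in use; the pattern and exclude regex are arbitrary examples:

from datalad.utils import find_files

# Yield paths of Python files under the current directory, skipping VCS
# internals (the default) and anything matching the exclude regex.
for path in find_files(r'.*\.py$', topdir='.', exclude='/build/'):
    print(path)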
def expandpath(path, force_absolute=True): 'Expand all variables and user handles in a path.\n\n By default return an absolute path\n ' path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path
-7,795,529,707,431,371,000
Expand all variables and user handles in a path. By default return an absolute path
datalad/utils.py
expandpath
AKSoo/datalad
python
def expandpath(path, force_absolute=True): 'Expand all variables and user handles in a path.\n\n By default return an absolute path\n ' path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path
def posix_relpath(path, start=None): 'Behave like os.path.relpath, but always return POSIX paths...\n\n on any platform.' return posixpath.join(*split(relpath(path, start=(start if (start is not None) else ''))))
4,046,002,054,909,217,300
Behave like os.path.relpath, but always return POSIX paths... on any platform.
datalad/utils.py
posix_relpath
AKSoo/datalad
python
def posix_relpath(path, start=None): 'Behave like os.path.relpath, but always return POSIX paths...\n\n    on any platform.' return posixpath.join(*split(relpath(path, start=(start if (start is not None) else ''))))
def is_explicit_path(path): "Return whether a path explicitly points to a location\n\n Any absolute path, or relative path starting with either '../' or\n './' is assumed to indicate a location on the filesystem. Any other\n path format is not considered explicit." path = expandpath(path, force_absolute=False) return (isabs(path) or path.startswith((os.curdir + os.sep)) or path.startswith((os.pardir + os.sep)))
1,506,708,320,360,227,800
Return whether a path explicitly points to a location Any absolute path, or relative path starting with either '../' or './' is assumed to indicate a location on the filesystem. Any other path format is not considered explicit.
datalad/utils.py
is_explicit_path
AKSoo/datalad
python
def is_explicit_path(path): "Return whether a path explicitly points to a location\n\n Any absolute path, or relative path starting with either '../' or\n './' is assumed to indicate a location on the filesystem. Any other\n path format is not considered explicit." path = expandpath(path, force_absolute=False) return (isabs(path) or path.startswith((os.curdir + os.sep)) or path.startswith((os.pardir + os.sep)))
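A few illustrative calls (POSIX-style paths assumed):

from datalad.utils import is_explicit_path

print(is_explicit_path('/etc/passwd'))  # True  -- absolute
print(is_explicit_path('./data'))       # True  -- starts with ./
print(is_explicit_path('../data'))      # True  -- starts with ../
print(is_explicit_path('data'))         # False -- plain relative name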
def rotree(path, ro=True, chmod_files=True): 'To make tree read-only or writable\n\n Parameters\n ----------\n path : string\n Path to the tree/directory to chmod\n ro : bool, optional\n Whether to make it R/O (default) or RW\n chmod_files : bool, optional\n Whether to operate also on files (not just directories)\n ' if ro: chmod = (lambda f: os.chmod(f, (os.stat(f).st_mode & (~ stat.S_IWRITE)))) else: chmod = (lambda f: os.chmod(f, ((os.stat(f).st_mode | stat.S_IWRITE) | stat.S_IREAD))) for (root, dirs, files) in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) if exists(fullf): chmod(fullf) chmod(root)
-5,822,748,150,281,047,000
To make tree read-only or writable Parameters ---------- path : string Path to the tree/directory to chmod ro : bool, optional Whether to make it R/O (default) or RW chmod_files : bool, optional Whether to operate also on files (not just directories)
datalad/utils.py
rotree
AKSoo/datalad
python
def rotree(path, ro=True, chmod_files=True): 'To make tree read-only or writable\n\n Parameters\n ----------\n path : string\n Path to the tree/directory to chmod\n ro : bool, optional\n Whether to make it R/O (default) or RW\n chmod_files : bool, optional\n Whether to operate also on files (not just directories)\n ' if ro: chmod = (lambda f: os.chmod(f, (os.stat(f).st_mode & (~ stat.S_IWRITE)))) else: chmod = (lambda f: os.chmod(f, ((os.stat(f).st_mode | stat.S_IWRITE) | stat.S_IREAD))) for (root, dirs, files) in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) if exists(fullf): chmod(fullf) chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): "To remove git-annex .git it is needed to make all files and directories writable again first\n\n Parameters\n ----------\n path: Path or str\n Path to remove\n chmod_files : string or bool, optional\n Whether to make files writable also before removal. Usually it is just\n a matter of directories to have write permissions.\n If 'auto' it would chmod files on windows by default\n children_only : bool, optional\n If set, all files and subdirectories would be removed while the path\n itself (must be a directory) would be preserved\n `*args` :\n `**kwargs` :\n Passed into shutil.rmtree call\n " if (chmod_files == 'auto'): chmod_files = on_windows assert_no_open_files(path) path = str(path) if children_only: if (not isdir(path)): raise ValueError('Can remove children only of directories') for p in os.listdir(path): rmtree(op.join(path, p)) return if (not (islink(path) or (not isdir(path)))): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: path = ('\\\\?\\ '.strip() + path) _rmtree(path, *args, **kwargs) else: unlink(path)
-4,424,460,177,924,723,000
To remove git-annex .git it is needed to make all files and directories writable again first Parameters ---------- path: Path or str Path to remove chmod_files : string or bool, optional Whether to make files writable also before removal. Usually it is just a matter of directories to have write permissions. If 'auto' it would chmod files on windows by default children_only : bool, optional If set, all files and subdirectories would be removed while the path itself (must be a directory) would be preserved `*args` : `**kwargs` : Passed into shutil.rmtree call
datalad/utils.py
rmtree
AKSoo/datalad
python
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): "To remove git-annex .git it is needed to make all files and directories writable again first\n\n Parameters\n ----------\n path: Path or str\n Path to remove\n chmod_files : string or bool, optional\n Whether to make files writable also before removal. Usually it is just\n a matter of directories to have write permissions.\n If 'auto' it would chmod files on windows by default\n children_only : bool, optional\n If set, all files and subdirectories would be removed while the path\n itself (must be a directory) would be preserved\n `*args` :\n `**kwargs` :\n Passed into shutil.rmtree call\n " if (chmod_files == 'auto'): chmod_files = on_windows assert_no_open_files(path) path = str(path) if children_only: if (not isdir(path)): raise ValueError('Can remove children only of directories') for p in os.listdir(path): rmtree(op.join(path, p)) return if (not (islink(path) or (not isdir(path)))): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: path = ('\\\\?\\ '.strip() + path) _rmtree(path, *args, **kwargs) else: unlink(path)
def rmdir(path, *args, **kwargs): 'os.rmdir with our optional checking for open files' assert_no_open_files(path) os.rmdir(path)
-3,323,220,695,162,720,000
os.rmdir with our optional checking for open files
datalad/utils.py
rmdir
AKSoo/datalad
python
def rmdir(path, *args, **kwargs): assert_no_open_files(path) os.rmdir(path)
def get_open_files(path, log_open=False): 'Get open files under a path\n\n Note: This function is very slow on Windows.\n\n Parameters\n ----------\n path : str\n File or directory to check for open files under\n log_open : bool or int\n If set - logger level to use\n\n Returns\n -------\n dict\n path : pid\n\n ' import psutil files = {} path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = ([p.path for p in proc.open_files()] + [proc.cwd()]) for p in open_paths: if path_startswith(p, path): files[p] = proc except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if (files and log_open): lgr.log(log_open, 'Open files under %s: %s', path, files) return files
5,590,518,783,537,523,000
Get open files under a path Note: This function is very slow on Windows. Parameters ---------- path : str File or directory to check for open files under log_open : bool or int If set - logger level to use Returns ------- dict path : pid
datalad/utils.py
get_open_files
AKSoo/datalad
python
def get_open_files(path, log_open=False): 'Get open files under a path\n\n Note: This function is very slow on Windows.\n\n Parameters\n ----------\n path : str\n File or directory to check for open files under\n log_open : bool or int\n If set - logger level to use\n\n Returns\n -------\n dict\n path : pid\n\n ' import psutil files = {} path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = ([p.path for p in proc.open_files()] + [proc.cwd()]) for p in open_paths: if path_startswith(p, path): files[p] = proc except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if (files and log_open): lgr.log(log_open, 'Open files under %s: %s', path, files) return files
def rmtemp(f, *args, **kwargs): 'Wrapper to centralize removing of temp files so we could keep them around\n\n It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP\n environment variable is defined\n ' if (not os.environ.get('DATALAD_TESTS_TEMP_KEEP')): if (not os.path.lexists(f)): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, 'Removing temp file: %s', f) if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info('Keeping temp file: %s', f)
-2,435,413,394,078,092,000
Wrapper to centralize removing of temp files so we could keep them around It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP environment variable is defined
datalad/utils.py
rmtemp
AKSoo/datalad
python
def rmtemp(f, *args, **kwargs): 'Wrapper to centralize removing of temp files so we could keep them around\n\n It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP\n environment variable is defined\n ' if (not os.environ.get('DATALAD_TESTS_TEMP_KEEP')): if (not os.path.lexists(f)): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, 'Removing temp file: %s', f) if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info('Keeping temp file: %s', f)
def file_basename(name, return_ext=False): '\n Strips up to 2 extensions of length up to 4 characters and starting with alpha\n not a digit, so we could get rid of .tar.gz etc\n ' bname = basename(name) fbname = re.sub('(\\.[a-zA-Z_]\\S{1,4}){0,2}$', '', bname) if return_ext: return (fbname, bname[(len(fbname) + 1):]) else: return fbname
-4,242,195,939,967,012,400
Strips up to 2 extensions of length up to 4 characters and starting with alpha not a digit, so we could get rid of .tar.gz etc
datalad/utils.py
file_basename
AKSoo/datalad
python
def file_basename(name, return_ext=False): '\n    Strips up to 2 extensions of length up to 4 characters and starting with alpha\n    not a digit, so we could get rid of .tar.gz etc\n    ' bname = basename(name) fbname = re.sub('(\\.[a-zA-Z_]\\S{1,4}){0,2}$', '', bname) if return_ext: return (fbname, bname[(len(fbname) + 1):]) else: return fbname
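A few illustrative calls showing the double-extension stripping and the return_ext flag:

from datalad.utils import file_basename

print(file_basename('dataset.tar.gz'))                   # 'dataset'
print(file_basename('dataset.tar.gz', return_ext=True))  # ('dataset', 'tar.gz')
print(file_basename('v1.2.3.txt'))                       # 'v1.2.3' -- numeric "extensions" are kept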
def escape_filename(filename): 'Surround filename in "" and escape " in the filename\n ' filename = filename.replace('"', '\\"').replace('`', '\\`') filename = ('"%s"' % filename) return filename
-4,358,157,444,839,086,600
Surround filename in "" and escape " in the filename
datalad/utils.py
escape_filename
AKSoo/datalad
python
def escape_filename(filename): '\n ' filename = filename.replace('"', '\\"').replace('`', '\\`') filename = ('"%s"' % filename) return filename
def encode_filename(filename): 'Encode unicode filename\n ' if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename
-9,183,947,159,578,037,000
Encode unicode filename
datalad/utils.py
encode_filename
AKSoo/datalad
python
def encode_filename(filename): '\n ' if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename
def decode_input(s): 'Given input string/bytes, decode according to stdin codepage (or UTF-8)\n if not defined\n\n If fails -- issue warning and decode allowing for errors\n being replaced\n ' if isinstance(s, str): return s else: encoding = (sys.stdin.encoding or 'UTF-8') try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning('Failed to decode input string using %s encoding. Decoding allowing for errors', encoding) return s.decode(encoding, errors='replace')
-4,714,567,132,680,451,000
Given input string/bytes, decode according to stdin codepage (or UTF-8) if not defined If fails -- issue warning and decode allowing for errors being replaced
datalad/utils.py
decode_input
AKSoo/datalad
python
def decode_input(s): 'Given input string/bytes, decode according to stdin codepage (or UTF-8)\n if not defined\n\n If fails -- issue warning and decode allowing for errors\n being replaced\n ' if isinstance(s, str): return s else: encoding = (sys.stdin.encoding or 'UTF-8') try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning('Failed to decode input string using %s encoding. Decoding allowing for errors', encoding) return s.decode(encoding, errors='replace')
def ensure_tuple_or_list(obj): 'Given an object, wrap into a tuple if not list or tuple\n ' if isinstance(obj, (list, tuple)): return obj return (obj,)
-6,620,389,213,814,764,000
Given an object, wrap into a tuple if not list or tuple
datalad/utils.py
ensure_tuple_or_list
AKSoo/datalad
python
def ensure_tuple_or_list(obj): '\n ' if isinstance(obj, (list, tuple)): return obj return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True): 'Given not a list, would place it into a list. If None - empty list is returned\n\n Parameters\n ----------\n s: list or anything\n cls: class\n Which iterable class to ensure\n copy: bool, optional\n If correct iterable is passed, it would generate its shallow copy\n iterate: bool, optional\n If it is not a list, but something iterable (but not a str)\n iterate over it.\n ' if isinstance(s, cls): return (s if (not copy) else shallow_copy(s)) elif isinstance(s, str): return cls((s,)) elif (iterate and hasattr(s, '__iter__')): return cls(s) elif (s is None): return cls() else: return cls((s,))
-8,192,520,584,669,999,000
Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything cls: class Which iterable class to ensure copy: bool, optional If correct iterable is passed, it would generate its shallow copy iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it.
datalad/utils.py
ensure_iter
AKSoo/datalad
python
def ensure_iter(s, cls, copy=False, iterate=True): 'Given not a list, would place it into a list. If None - empty list is returned\n\n Parameters\n ----------\n s: list or anything\n cls: class\n Which iterable class to ensure\n copy: bool, optional\n If correct iterable is passed, it would generate its shallow copy\n iterate: bool, optional\n If it is not a list, but something iterable (but not a str)\n iterate over it.\n ' if isinstance(s, cls): return (s if (not copy) else shallow_copy(s)) elif isinstance(s, str): return cls((s,)) elif (iterate and hasattr(s, '__iter__')): return cls(s) elif (s is None): return cls() else: return cls((s,))
def ensure_list(s, copy=False, iterate=True): 'Given not a list, would place it into a list. If None - empty list is returned\n\n Parameters\n ----------\n s: list or anything\n copy: bool, optional\n If list is passed, it would generate a shallow copy of the list\n iterate: bool, optional\n If it is not a list, but something iterable (but not a str)\n iterate over it.\n ' return ensure_iter(s, list, copy=copy, iterate=iterate)
-7,572,995,100,661,384,000
Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything copy: bool, optional If list is passed, it would generate a shallow copy of the list iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it.
datalad/utils.py
ensure_list
AKSoo/datalad
python
def ensure_list(s, copy=False, iterate=True): 'Given not a list, would place it into a list. If None - empty list is returned\n\n Parameters\n ----------\n s: list or anything\n copy: bool, optional\n If list is passed, it would generate a shallow copy of the list\n iterate: bool, optional\n If it is not a list, but something iterable (but not a str)\n iterate over it.\n ' return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'): 'Given a multiline string convert it to a list of return None if empty\n\n Parameters\n ----------\n s: str or list\n ' if (not s): return None if isinstance(s, list): return s return s.split(sep)
-3,554,266,283,958,024,000
Given a multiline string, convert it to a list of strings; return None if empty Parameters ---------- s: str or list
datalad/utils.py
ensure_list_from_str
AKSoo/datalad
python
def ensure_list_from_str(s, sep='\n'): 'Given a multiline string convert it to a list of return None if empty\n\n Parameters\n ----------\n s: str or list\n ' if (not s): return None if isinstance(s, list): return s return s.split(sep)
def ensure_dict_from_str(s, **kwargs): 'Given a multiline string with key=value items convert it to a dictionary\n\n Parameters\n ----------\n s: str or dict\n\n Returns None if input s is empty\n ' if (not s): return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if ('=' not in value_str): raise ValueError('{} is not in key=value format'.format(repr(value_str))) (k, v) = value_str.split('=', 1) if (k in out): err = 'key {} was already defined in {}, but new value {} was provided'.format(k, out, v) raise ValueError(err) out[k] = v return out
3,087,126,313,354,959,400
Given a multiline string with key=value items convert it to a dictionary Parameters ---------- s: str or dict Returns None if input s is empty
datalad/utils.py
ensure_dict_from_str
AKSoo/datalad
python
def ensure_dict_from_str(s, **kwargs): 'Given a multiline string with key=value items convert it to a dictionary\n\n Parameters\n ----------\n s: str or dict\n\n Returns None if input s is empty\n ' if (not s): return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if ('=' not in value_str): raise ValueError('{} is not in key=value format'.format(repr(value_str))) (k, v) = value_str.split('=', 1) if (k in out): err = 'key {} was already defined in {}, but new value {} was provided'.format(k, out, v) raise ValueError(err) out[k] = v return out
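A quick illustration of the accepted inputs:

from datalad.utils import ensure_dict_from_str

print(ensure_dict_from_str('a=1\nb=2'))  # {'a': '1', 'b': '2'}
print(ensure_dict_from_str(''))          # None -- empty input
print(ensure_dict_from_str({'a': '1'}))  # an existing dict is passed through unchanged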
def ensure_bytes(s, encoding='utf-8'): 'Convert/encode unicode string to bytes.\n\n If `s` isn\'t a string, return it as is.\n\n Parameters\n ----------\n encoding: str, optional\n Encoding to use. "utf-8" is the default\n ' if (not isinstance(s, str)): return s return s.encode(encoding)
-7,053,226,920,306,413,000
Convert/encode unicode string to bytes. If `s` isn't a string, return it as is. Parameters ---------- encoding: str, optional Encoding to use. "utf-8" is the default
datalad/utils.py
ensure_bytes
AKSoo/datalad
python
def ensure_bytes(s, encoding='utf-8'): 'Convert/encode unicode string to bytes.\n\n If `s` isn\'t a string, return it as is.\n\n Parameters\n ----------\n encoding: str, optional\n Encoding to use. "utf-8" is the default\n ' if (not isinstance(s, str)): return s return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None): 'Convert/decode bytestring to unicode.\n\n If `s` isn\'t a bytestring, return it as is.\n\n Parameters\n ----------\n encoding: str, optional\n Encoding to use. If None, "utf-8" is tried, and then if not a valid\n UTF-8, encoding will be guessed\n confidence: float, optional\n A value between 0 and 1, so if guessing of encoding is of lower than\n specified confidence, ValueError is raised\n '
    if (not isinstance(s, bytes)):
        return s
    if (encoding is None):
        try:
            return s.decode('utf-8')
        except UnicodeDecodeError as exc:
            lgr.debug('Failed to decode a string as utf-8: %s', CapturedException(exc))
        from chardet import detect
        enc = detect(s)
        denc = enc.get('encoding', None)
        if denc:
            denc_confidence = enc.get('confidence', 0)
            if ((confidence is not None) and (denc_confidence < confidence)):
                raise ValueError(('Failed to auto-detect encoding with high enough confidence. Highest confidence was %s for %s' % (denc_confidence, denc)))
            lgr.log(5, 'Auto-detected encoding to be %s', denc)
            return s.decode(denc)
        else:
            raise ValueError(('Could not decode value as utf-8, or to guess its encoding: %s' % repr(s)))
    else:
        return s.decode(encoding)
-1,053,787,349,956,682,400
Convert/decode bytestring to unicode.

If `s` isn't a bytestring, return it as is.

Parameters
----------
encoding: str, optional
  Encoding to use. If None, "utf-8" is tried, and then if not a valid
  UTF-8, encoding will be guessed
confidence: float, optional
  A value between 0 and 1, so if guessing of encoding is of lower than
  specified confidence, ValueError is raised
datalad/utils.py
ensure_unicode
AKSoo/datalad
python
def ensure_unicode(s, encoding=None, confidence=None): 'Convert/decode bytestring to unicode.\n\n If `s` isn\'t a bytestring, return it as is.\n\n Parameters\n ----------\n encoding: str, optional\n Encoding to use. If None, "utf-8" is tried, and then if not a valid\n UTF-8, encoding will be guessed\n confidence: float, optional\n A value between 0 and 1, so if guessing of encoding is of lower than\n specified confidence, ValueError is raised\n '
    if (not isinstance(s, bytes)):
        return s
    if (encoding is None):
        try:
            return s.decode('utf-8')
        except UnicodeDecodeError as exc:
            lgr.debug('Failed to decode a string as utf-8: %s', CapturedException(exc))
        from chardet import detect
        enc = detect(s)
        denc = enc.get('encoding', None)
        if denc:
            denc_confidence = enc.get('confidence', 0)
            if ((confidence is not None) and (denc_confidence < confidence)):
                raise ValueError(('Failed to auto-detect encoding with high enough confidence. Highest confidence was %s for %s' % (denc_confidence, denc)))
            lgr.log(5, 'Auto-detected encoding to be %s', denc)
            return s.decode(denc)
        else:
            raise ValueError(('Could not decode value as utf-8, or to guess its encoding: %s' % repr(s)))
    else:
        return s.decode(encoding)
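# Usage sketch; the chardet-based guessing path relies on the lgr logger and
# CapturedException defined elsewhere in datalad, so only the direct paths are shown.
from datalad.utils import ensure_unicode

assert ensure_unicode('already text') == 'already text'               # non-bytes pass through
assert ensure_unicode('grüß'.encode('utf-8')) == 'grüß'                # valid UTF-8 is decoded
assert ensure_unicode('grüß'.encode('latin-1'), encoding='latin-1') == 'grüß'
# With encoding=None and non-UTF-8 input, the encoding is guessed via chardet;
# a high `confidence` threshold can turn a low-confidence guess into a ValueError.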
def ensure_bool(s): 'Convert value into boolean following convention for strings\n\n to recognize on,True,yes as True, off,False,no as False\n '
    if isinstance(s, str):
        if s.isdigit():
            return bool(int(s))
        sl = s.lower()
        if (sl in {'y', 'yes', 'true', 'on'}):
            return True
        elif (sl in {'n', 'no', 'false', 'off'}):
            return False
        else:
            raise ValueError(('Do not know how to treat %r as a boolean' % s))
    return bool(s)
5,458,731,529,325,434,000
Convert value into boolean following convention for strings

to recognize on,True,yes as True, off,False,no as False
datalad/utils.py
ensure_bool
AKSoo/datalad
python
def ensure_bool(s): 'Convert value into boolean following convention for strings\n\n to recognize on,True,yes as True, off,False,no as False\n '
    if isinstance(s, str):
        if s.isdigit():
            return bool(int(s))
        sl = s.lower()
        if (sl in {'y', 'yes', 'true', 'on'}):
            return True
        elif (sl in {'n', 'no', 'false', 'off'}):
            return False
        else:
            raise ValueError(('Do not know how to treat %r as a boolean' % s))
    return bool(s)
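# Usage sketch, following the code above directly:
from datalad.utils import ensure_bool

assert ensure_bool('yes') is True and ensure_bool('on') is True
assert ensure_bool('False') is False and ensure_bool('no') is False
assert ensure_bool('1') is True       # digit strings go through int()
assert ensure_bool([]) is False       # non-strings fall back to bool()
# ensure_bool('maybe') raises ValueError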
def as_unicode(val, cast_types=object): 'Given an arbitrary value, would try to obtain unicode value of it\n \n For unicode it would return original value, for python2 str or python3\n bytes it would use ensure_unicode, for None - an empty (unicode) string,\n and for any other type (see `cast_types`) - would apply the unicode \n constructor. If value is not an instance of `cast_types`, TypeError\n is thrown\n \n Parameters\n ----------\n cast_types: type\n Which types to cast to unicode by providing to constructor\n '
    if (val is None):
        return u''
    elif isinstance(val, str):
        return val
    elif isinstance(val, unicode_srctypes):
        return ensure_unicode(val)
    elif isinstance(val, cast_types):
        return str(val)
    else:
        raise TypeError(('Value %r is not of any of known or provided %s types' % (val, cast_types)))
-5,259,481,238,356,841,000
Given an arbitrary value, would try to obtain unicode value of it

For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown

Parameters
----------
cast_types: type
  Which types to cast to unicode by providing to constructor
datalad/utils.py
as_unicode
AKSoo/datalad
python
def as_unicode(val, cast_types=object): 'Given an arbitrary value, would try to obtain unicode value of it\n \n For unicode it would return original value, for python2 str or python3\n bytes it would use ensure_unicode, for None - an empty (unicode) string,\n and for any other type (see `cast_types`) - would apply the unicode \n constructor. If value is not an instance of `cast_types`, TypeError\n is thrown\n \n Parameters\n ----------\n cast_types: type\n Which types to cast to unicode by providing to constructor\n '
    if (val is None):
        return u''
    elif isinstance(val, str):
        return val
    elif isinstance(val, unicode_srctypes):
        return ensure_unicode(val)
    elif isinstance(val, cast_types):
        return str(val)
    else:
        raise TypeError(('Value %r is not of any of known or provided %s types' % (val, cast_types)))
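# Usage sketch; unicode_srctypes is defined elsewhere in datalad/utils.py, so the
# bytes example assumes it covers bytes (as the docstring suggests).
from datalad.utils import as_unicode

assert as_unicode(None) == ''            # None maps to an empty string
assert as_unicode('text') == 'text'      # str is returned as is
assert as_unicode(b'bytes') == 'bytes'   # bytes go through ensure_unicode
assert as_unicode(42) == '42'            # other types are str()-ed (cast_types=object)
# as_unicode(42, cast_types=str) raises TypeError instead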
def unique(seq, key=None, reverse=False): 'Given a sequence return a list only with unique elements while maintaining order\n\n This is the fastest solution. See\n https://www.peterbe.com/plog/uniqifiers-benchmark\n and\n http://stackoverflow.com/a/480227/1265472\n for more information.\n Enhancement -- added ability to compare for uniqueness using a key function\n\n Parameters\n ----------\n seq:\n Sequence to analyze\n key: callable, optional\n Function to call on each element so we could decide not on a full\n element, but on its member etc\n reverse: bool, optional\n If True, uniqueness checked in the reverse order, so that the later ones\n will take the order\n '
    seen = set()
    seen_add = seen.add
    trans = (reversed if reverse else (lambda x: x))
    if (not key):
        out = [x for x in trans(seq) if (not ((x in seen) or seen_add(x)))]
    else:
        out = [x for x in trans(seq) if (not ((key(x) in seen) or seen_add(key(x))))]
    return (out[::(- 1)] if reverse else out)
4,005,972,471,792,257,000
Given a sequence return a list only with unique elements while maintaining order

This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function

Parameters
----------
seq:
  Sequence to analyze
key: callable, optional
  Function to call on each element so we could decide not on a full
  element, but on its member etc
reverse: bool, optional
  If True, uniqueness checked in the reverse order, so that the later ones
  will take the order
datalad/utils.py
unique
AKSoo/datalad
python
def unique(seq, key=None, reverse=False): 'Given a sequence return a list only with unique elements while maintaining order\n\n This is the fastest solution. See\n https://www.peterbe.com/plog/uniqifiers-benchmark\n and\n http://stackoverflow.com/a/480227/1265472\n for more information.\n Enhancement -- added ability to compare for uniqueness using a key function\n\n Parameters\n ----------\n seq:\n Sequence to analyze\n key: callable, optional\n Function to call on each element so we could decide not on a full\n element, but on its member etc\n reverse: bool, optional\n If True, uniqueness checked in the reverse order, so that the later ones\n will take the order\n '
    seen = set()
    seen_add = seen.add
    trans = (reversed if reverse else (lambda x: x))
    if (not key):
        out = [x for x in trans(seq) if (not ((x in seen) or seen_add(x)))]
    else:
        out = [x for x in trans(seq) if (not ((key(x) in seen) or seen_add(key(x))))]
    return (out[::(- 1)] if reverse else out)
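# Usage sketch, following the code above directly:
from datalad.utils import unique

assert unique([1, 2, 1, 3, 2]) == [1, 2, 3]                # first occurrences, order kept
assert unique([1, 2, 1, 3, 2], reverse=True) == [1, 3, 2]  # later occurrences take the spot
assert unique(['ab', 'cd', 'ax'], key=lambda s: s[0]) == ['ab', 'cd']  # uniqueness on key(x)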
def all_same(items): 'Quick check if all items are the same.\n\n Identical to a check like len(set(items)) == 1 but\n should be more efficient while working on generators, since would\n return False as soon as any difference detected thus possibly avoiding\n unnecessary evaluations\n '
    first = True
    first_item = None
    for item in items:
        if first:
            first = False
            first_item = item
        elif (item != first_item):
            return False
    return (not first)
3,611,848,074,102,730,000
Quick check if all items are the same.

Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
datalad/utils.py
all_same
AKSoo/datalad
python
def all_same(items): 'Quick check if all items are the same.\n\n Identical to a check like len(set(items)) == 1 but\n should be more efficient while working on generators, since would\n return False as soon as any difference detected thus possibly avoiding\n unnecessary evaluations\n '
    first = True
    first_item = None
    for item in items:
        if first:
            first = False
            first_item = item
        elif (item != first_item):
            return False
    return (not first)
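# Usage sketch, following the code above directly:
from datalad.utils import all_same

assert all_same([7, 7, 7]) is True
assert all_same([7, 7, 8]) is False
assert all_same([]) is False                  # an empty input counts as "not all same"
assert all_same(c for c in 'aaaa') is True    # works lazily on generators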