# pyperf/cmd/daemons.py :: profiler_main (kevinconway/PyPerf)

def profiler_main():
    """Manage a profiler daemon."""
    parser = _common_args()
    parser.add_argument('--action', required=True,
                        choices=('start', 'stop', 'restart'))
    args, _ = parser.parse_known_args()
    cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
    proc = cfg.daemon.process(
        source_transport=cfg.transport.source,
        error_transport=cfg.transport.error,
        results_transport=cfg.transport.result,
        profiler=cfg.daemon.profiler,
        pidfile=cfg.daemon.pidfile,
    )
    if args.action == 'stop':
        proc.stop()
    if args.action == 'start':
        proc.start()
    if args.action == 'restart':
        proc.restart()
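A minimal sketch of driving this entry point programmatically. It assumes that _common_args() registers the --config flag implied by args.config; the config path and program name below are hypothetical:

    import sys
    # Hypothetical invocation; '--config' is inferred from the args.config usage above.
    sys.argv = ['pyperf-daemon', '--config', '/etc/pyperf.conf', '--action', 'start']
    profiler_main()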
# pyperf/cmd/daemons.py :: send_request (kevinconway/PyPerf)

def send_request():
    """Send a profile request to the daemon."""
    parser = _common_args()
    parser.add_argument('--identifier', required=True,
                        help='The unique message identifier.')
    parser.add_argument('--setup', default='pass',
                        help='Any setup code if needed for the profile.')
    parser.add_argument('--code', required=True, help='The code to profile.')
    args, _ = parser.parse_known_args()
    cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
    cfg.transport.source().send(messages.ProfileRequest(
        identifier=args.identifier,
        setup=args.setup,
        code=args.code,
    ))
# pyperf/cmd/daemons.py :: fetch_result (kevinconway/PyPerf)

def fetch_result():
    """Fetch a result from the transport."""
    parser = _common_args()
    args, _ = parser.parse_known_args()
    cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
    transport = cfg.transport.result()
    msg = transport.fetch()
    if msg is not None:
        transport.complete(msg)
        pprint.pprint(msg.json)
# pyperf/cmd/daemons.py :: fetch_error (kevinconway/PyPerf)

def fetch_error():
    """Fetch an error from the transport."""
    parser = _common_args()
    args, _ = parser.parse_known_args()
    cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
    transport = cfg.transport.error()
    msg = transport.fetch()
    if msg is not None:
        transport.complete(msg)
        pprint.pprint(msg.json)
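Taken together, these commands form a round trip through the daemon. A hedged sketch of the flow, assuming all three commands share the same config file; the identifier, code string, and program names are invented for illustration:

    import sys
    # Submit a request, then poll for its result once the daemon has run it.
    sys.argv = ['pyperf-send', '--config', '/etc/pyperf.conf',
                '--identifier', 'req-1', '--code', 'sum(range(1000))']
    send_request()
    sys.argv = ['pyperf-fetch', '--config', '/etc/pyperf.conf']
    fetch_result()  # prints the profile JSON for req-1, if one is ready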
# src/m1_Line.py :: main (jarskijr/10-MoreImplementingClasses)

def main():
    """
    Calls the TEST functions in this module, but ONLY if the method
    to be tested has at least a partial implementation. That is,
    a TEST function will not be called until you begin work
    on the code that it is testing.
    """
    if m1t.is_implemented('__init__'):
        run_test_init()
    if m1t.is_implemented('clone'):
        run_test_clone()
    if m1t.is_implemented('reverse'):
        run_test_reverse()
    if m1t.is_implemented('slope'):
        run_test_slope()
    if m1t.is_implemented('length'):
        run_test_length()
    if m1t.is_implemented('get_number_of_clones'):
        run_test_get_number_of_clones()
    if m1t.is_implemented('line_plus'):
        run_test_line_plus()
    if m1t.is_implemented('line_minus'):
        run_test_line_minus()
    if m1t.is_implemented('midpoint'):
        run_test_midpoint()
    if m1t.is_implemented('is_parallel'):
        run_test_is_parallel()
    if m1t.is_implemented('reset'):
        run_test_reset()
# src/m1_Line.py :: run_test_init (jarskijr/10-MoreImplementingClasses)

def run_test_init():
    """ Tests the __init__ method of the Line class. """
    m1t.run_test_init()
    p1 = Point(30, 17)
    p2 = Point(50, 80)
    line = Line(p1, p2)
    print(line.start)
    print(line.end)
    print(line.start == p1)
    print(line.start is p1)
    print('The above should print:')
    print(' Point(30, 17)')
    print(' Point(50, 80)')
    print(' True')
    print(' False')
# src/m1_Line.py :: run_test_clone (jarskijr/10-MoreImplementingClasses)

def run_test_clone():
    """ Tests the clone method of the Line class. """
    m1t.run_test_clone()
    p1 = Point(30, 17)
    p2 = Point(50, 80)
    line1 = Line(p1, p2)
    line2 = line1.clone()
    print(line1)
    print(line2)
    print(line1 == line2)
    print(line1 is line2)
    print(line1.start is line2.start)
    print(line1.end is line2.end)
    line1.start = Point(11, 12)
    print(line1)
    print(line2)
    print(line1 == line2)
    print('The above should print:')
    print(' Line[(30, 17), (50, 80)]')
    print(' Line[(30, 17), (50, 80)]')
    print(' True')
    print(' False')
    print(' False')
    print(' False')
    print(' Line[(11, 12), (50, 80)]')
    print(' Line[(30, 17), (50, 80)]')
    print(' False')
# src/m1_Line.py :: run_test_reverse (jarskijr/10-MoreImplementingClasses)

def run_test_reverse():
    """ Tests the reverse method of the Line class. """
    m1t.run_test_reverse()
    p1 = Point(30, 17)
    p2 = Point(50, 80)
    line1 = Line(p1, p2)
    line2 = line1.clone()
    print(line1)
    line1.reverse()
    print(line1)
    print(line1 == line2)
    line1.reverse()
    print(line1 == line2)
    print('The above should print:')
    print(' Line[(30, 17), (50, 80)]')
    print(' Line[(50, 80), (30, 17)]')
    print(' False')
    print(' True')
# src/m1_Line.py :: run_test_slope (jarskijr/10-MoreImplementingClasses)

def run_test_slope():
    """ Tests the slope method of the Line class. """
    m1t.run_test_slope()
    p1 = Point(30, 3)
    p2 = Point(50, 8)
    line1 = Line(p1, p2)
    print(line1.slope())
    line2 = Line(Point(10, 10), Point(10, 5))
    print(line2.slope())
    print(line2.slope() == 'inf')
    print('The above should print:')
    print(' 0.25 (approximately)')
    print(' inf')
    print(' False')
# src/m1_Line.py :: run_test_length (jarskijr/10-MoreImplementingClasses)

def run_test_length():
    """ Tests the length method of the Line class. """
    m1t.run_test_length()
    p1 = Point(166, 10)
    p2 = Point(100, 10)
    line1 = Line(p1, p2)
    print(line1.length())
    p3 = Point(0, 0)
    p4 = Point(3, 4)
    line2 = Line(p3, p4)
    print(line2.length())
    print('The above should print:')
    print(' 66.0')
    print(' 5.0 (approximately)')
# src/m1_Line.py :: run_test_get_number_of_clones (jarskijr/10-MoreImplementingClasses)

def run_test_get_number_of_clones():
    """ Tests the get_number_of_clones method of the Line class. """
    m1t.run_test_get_number_of_clones()
    line1 = Line(Point(500, 20), Point(100, 8))
    line2 = line1.clone()
    line3 = line1.clone()
    line4 = line3.clone()
    line5 = line1.clone()
    print(line1.get_number_of_clones())
    print(line2.get_number_of_clones())
    print(line3.get_number_of_clones())
    print(line4.get_number_of_clones())
    print(line5.get_number_of_clones())
    print('The above should print 3, then 0, then 1, then 0, then 0.')
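The expected output (3, 0, 1, 0, 0) implies that each Line counts only how many times it has itself been cloned. One plausible implementation, offered purely as an assumption since the Line class body is not part of this record:

    # Assumed bookkeeping consistent with the expected counts above;
    # __init__ would set self.number_of_clones = 0 on every new Line.
    def clone(self):
        self.number_of_clones += 1  # the original's counter grows
        return Line(self.start.clone(), self.end.clone())  # the copy starts at 0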
# src/m1_Line.py :: run_test_line_plus (jarskijr/10-MoreImplementingClasses)

def run_test_line_plus():
    """ Tests the line_plus method of the Line class. """
    m1t.run_test_line_plus()
    line1 = Line(Point(500, 20), Point(100, 8))
    line2 = Line(Point(100, 13), Point(400, 8))
    line3 = line1.line_plus(line2)
    print(line3)
    print('The above should print: Line[(600, 33), (500, 16)]')
# src/m1_Line.py :: run_test_line_minus (jarskijr/10-MoreImplementingClasses)

def run_test_line_minus():
    """ Tests the line_minus method of the Line class. """
    m1t.run_test_line_minus()
    line1 = Line(Point(500, 20), Point(100, 8))
    line2 = Line(Point(100, 13), Point(400, 8))
    line3 = line1.line_minus(line2)
    print(line3)
    print('The above should print: Line[(400, 7), (-300, 0)]')
# src/m1_Line.py :: run_test_midpoint (jarskijr/10-MoreImplementingClasses)

def run_test_midpoint():
    """ Tests the midpoint method of the Line class. """
    m1t.run_test_midpoint()
    p1 = Point(3, 10)
    p2 = Point(9, 20)
    line1 = Line(p1, p2)
    print(line1.midpoint())
    print('The above should print: Point(6, 15)')
# src/m1_Line.py :: run_test_is_parallel (jarskijr/10-MoreImplementingClasses)

def run_test_is_parallel():
    """ Tests the is_parallel method of the Line class. """
    m1t.run_test_is_parallel()
    line1 = Line(Point(15, 30), Point(17, 50))
    line2 = Line(Point(10, 10), Point(15, 60))
    line3 = Line(Point(10, 10), Point(80, 80))
    line4 = Line(Point(10, 10), Point(10, 20))
    print(line1.is_parallel(line2))
    print(line2.is_parallel(line1))
    print(line1.is_parallel(line3))
    print(line1.is_parallel(line4))
    print(line1.is_parallel(line1))
    print(line4.is_parallel(line4))
    print('The above should print:')
    print(' True, True, False, False, True, True')
# src/m1_Line.py :: run_test_reset (jarskijr/10-MoreImplementingClasses)

def run_test_reset():
    """ Tests the reset method of the Line class. """
    m1t.run_test_reset()
    p1 = Point(-3, -4)
    p2 = Point(3, 4)
    line1 = Line(p1, p2)
    line2 = Line(Point(0, 1), Point(10, 20))
    line1.start = Point(100, 300)
    line2.end = Point(99, 4)
    line1.reverse()
    print(line1)
    print(line2)
    line1.reset()
    line2.reset()
    print(line1)
    print(line2)
    print('The above should print:')
    print(' Line[(3, 4), (100, 300)]')
    print(' Line[(0, 1), (99, 4)]')
    print(' Line[(-3, -4), (3, 4)]')
    print(' Line[(0, 1), (10, 20)]')
# src/m1_Line.py :: Point.__init__ (jarskijr/10-MoreImplementingClasses)

def __init__(self, x, y):
    """ Sets instance variables x and y to the given coordinates. """
    self.x = x
    self.y = y
# src/m1_Line.py :: Point.__repr__ (jarskijr/10-MoreImplementingClasses)

def __repr__(self):
    """
    Returns a string representation of this Point.
    For each coordinate (x and y), the representation:
      - Uses no decimal points if the number is close to an integer,
      - Else it uses 2 decimal places after the decimal point.
    Examples:
       Point(10, 3.14)
       Point(3.01, 2.99)
    """
    decimal_places = 2
    formats = []
    numbers = []
    for coordinate in (self.x, self.y):
        if abs(coordinate - round(coordinate)) < 10 ** -decimal_places:
            formats.append('{}')
            numbers.append(round(coordinate))
        else:
            formats.append('{:.' + str(decimal_places) + 'f}')
            numbers.append(round(coordinate, decimal_places))
    format_string = 'Point(' + formats[0] + ', ' + formats[1] + ')'
    return format_string.format(numbers[0], numbers[1])
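A short demonstration of the two formatting branches (near-integers drop the decimals, everything else keeps two places):

    print(Point(10, 3.14159))      # Point(10, 3.14)
    print(Point(3.0000001, 6.2832))  # Point(3, 6.28)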
# src/m1_Line.py :: Point.__eq__ (jarskijr/10-MoreImplementingClasses)

def __eq__(self, p2):
    """
    Defines == for Points: a == b is equivalent to a.__eq__(b).
    Treats two numbers as "equal" if they are within 6 decimal
    places of each other for both x and y coordinates.
    """
    return (round(self.x, 6) == round(p2.x, 6)
            and round(self.y, 6) == round(p2.y, 6))
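The 6-decimal rounding makes == a tolerance check rather than exact float equality; a quick illustration:

    print(Point(1.0000000001, 2) == Point(1, 2))  # True  (equal to 6 places)
    print(Point(1.001, 2) == Point(1, 2))         # False (differs in the 3rd place)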
# src/m1_Line.py :: Point.clone (jarskijr/10-MoreImplementingClasses)

def clone(self):
    """ Returns a new Point at the same (x, y) as this Point. """
    return Point(self.x, self.y)
# src/m1_Line.py :: Point.distance_from (jarskijr/10-MoreImplementingClasses)

def distance_from(self, p2):
    """ Returns the distance this Point is from the given Point. """
    dx_squared = (self.x - p2.x) ** 2
    dy_squared = (self.y - p2.y) ** 2
    return math.sqrt(dx_squared + dy_squared)
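A quick check with the classic 3-4-5 right triangle:

    print(Point(0, 0).distance_from(Point(3, 4)))  # 5.0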
# src/m1_Line.py :: Point.halfway_to (jarskijr/10-MoreImplementingClasses)

def halfway_to(self, p2):
    """
    Given another Point object p2, returns a new Point
    that is half-way between this Point and the given Point (p2).
    """
    return Point((self.x + p2.x) / 2, (self.y + p2.y) / 2)
# src/m1_Line.py :: Point.plus (jarskijr/10-MoreImplementingClasses)

def plus(self, p2):
    """
    Returns a Point whose coordinates are those of this Point
    PLUS the given Point. For example:
        p1 = Point(500, 20)
        p2 = Point(100, 13)
        p3 = p1.plus(p2)
        print(p3)
    would print: Point(600, 33)
    """
    return Point(self.x + p2.x, self.y + p2.y)
# src/m1_Line.py :: Point.minus (jarskijr/10-MoreImplementingClasses)

def minus(self, p2):
    """
    Returns a Point whose coordinates are those of this Point
    MINUS the given Point. For example:
        p1 = Point(500, 20)
        p2 = Point(100, 13)
        p3 = p1.minus(p2)
        print(p3)
    would print: Point(400, 7)
    """
    return Point(self.x - p2.x, self.y - p2.y)
# src/m1_Line.py :: Line.__repr__ (jarskijr/10-MoreImplementingClasses)

def __repr__(self):
    """
    What comes in:
      -- self
    What goes out: Returns a string representation of this Line,
       in the form:
           Line[(x1, y1), (x2, y2)]
    Side effects: None.
    Note: print(BLAH) causes BLAH's __repr__ to be called.
       BLAH's __repr__ returns a string,
       which the print function then prints.

    Example: Since the print function calls __repr__ on the
    object to be printed:
        p1 = Point(30, 17)
        p2 = Point(50, 80)
        line = Line(p1, p2)  # Causes __init__ to run

        # The following statement causes __repr__ to run,
        # hence should print: Line[(30, 17), (50, 80)]
        print(line)

    Type hints:
      :rtype: str
    """
    start = repr(self.start).replace('Point', '')
    end = repr(self.end).replace('Point', '')
    return 'Line[{}, {}]'.format(start, end)
# src/m1_Line.py :: Line.__eq__ (jarskijr/10-MoreImplementingClasses)

def __eq__(self, line2):
    """
    What comes in:
      -- self
      -- a Line object
    What goes out: Returns True if:
        this Line's start point is equal to line2's start point AND
        this Line's end point is equal to line2's end point.
      Returns False otherwise.
    Side effects: None.
    Note: a == b is equivalent to a.__eq__(b).

    Examples:
        p1 = Point(30, 17)
        p2 = Point(50, 80)

        line1 = Line(p1, p2)
        line2 = Line(p1, p2)
        line3 = Line(p2, p1)

        print(line1 == line1)  # Should print: True
        print(line1 == line2)  # Should print: True
        print(line1 == line3)  # Should print: False

        line1.start = Point(0, 0)
        print(line1 == line2)  # Should now print: False

    Type hints:
      :type line2: Line
      :rtype: bool
    """
    return self.start == line2.start and self.end == line2.end
# src/m1_Line.py :: Line.line_plus (jarskijr/10-MoreImplementingClasses)

def line_plus(self, other_line):
    """
    What comes in:
      -- self
      -- another Line object
    What goes out:
      -- Returns a Line whose:
           -- start is the sum of this Line's start (a Point)
              and the other_line's start (another Point).
           -- end is the sum of this Line's end (a Point)
              and the other_line's end (another Point).
    Side effects: None.

    Example:
        line1 = Line(Point(500, 20), Point(100, 8))
        line2 = Line(Point(100, 13), Point(400, 8))
        line3 = line1.line_plus(line2)
        print(line3)
    would print: Line[(600, 33), (500, 16)]

    Type hints:
      :type other_line: Line
      :rtype: Line
    """
    start = Point(self.start.x + other_line.start.x,
                  self.start.y + other_line.start.y)
    end = Point(self.end.x + other_line.end.x,
                self.end.y + other_line.end.y)
    return Line(start, end)
# src/m1_Line.py :: Line.line_minus (jarskijr/10-MoreImplementingClasses)

def line_minus(self, other_line):
    """
    What comes in:
      -- self
      -- another Line object
    What goes out:
      -- Returns a Line whose:
           -- start is this Line's start (a Point)
              minus the other_line's start (another Point).
           -- end is this Line's end (a Point)
              minus the other_line's end (another Point).
    Side effects: None.

    Example:
        line1 = Line(Point(500, 20), Point(100, 8))
        line2 = Line(Point(100, 13), Point(400, 8))
        line3 = line1.line_minus(line2)
        print(line3)
    would print: Line[(400, 7), (-300, 0)]

    Type hints:
      :type other_line: Line
      :rtype: Line
    """
    start = Point(self.start.x - other_line.start.x,
                  self.start.y - other_line.start.y)
    end = Point(self.end.x - other_line.end.x,
                self.end.y - other_line.end.y)
    return Line(start, end)
# src/m1_Line.py :: Line.midpoint (jarskijr/10-MoreImplementingClasses)

def midpoint(self):
    """
    What comes in:
      -- self
    What goes out: returns a Point at the midpoint of this Line.
    Side effects: None.

    Example:
        p1 = Point(3, 10)
        p2 = Point(9, 20)
        line1 = Line(p1, p2)

        print(line1.midpoint())  # Should print: Point(6, 15)

    Type hints:
      :rtype: Point
    """
    return Point((self.end.x + self.start.x) / 2,
                 (self.end.y + self.start.y) / 2)
# src/m1_Line.py :: Line.is_parallel (jarskijr/10-MoreImplementingClasses)

def is_parallel(self, line2):
    """
    What comes in:
      -- self
      -- another Line object (line2)
    What goes out: Returns True if this Line is parallel to the
       given Line (line2). Returns False otherwise.
       *** SEE THE IMPORTANT NOTE BELOW, re ROUNDING numbers.
    Side effects: None.

    Examples:
        line1 = Line(Point(15, 30), Point(17, 50))  # slope is 10.0
        line2 = Line(Point(10, 10), Point(15, 60))  # slope is 10.0
        line3 = Line(Point(10, 10), Point(80, 80))  # slope is 1.0
        line4 = Line(Point(10, 10), Point(10, 20))  # slope is inf

        print(line1.is_parallel(line2))  # Should print: True
        print(line2.is_parallel(line1))  # Should print: True
        print(line1.is_parallel(line3))  # Should print: False
        print(line1.is_parallel(line4))  # Should print: False
        print(line1.is_parallel(line1))  # Should print: True
        print(line4.is_parallel(line4))  # Should print: True

    Type hints:
      :type line2: Line
      :rtype: bool
    """
    selfslopex = self.end.x - self.start.x
    line2slopex = line2.end.x - line2.start.x
    if line2slopex == 0:
        # line2 is vertical; parallel only if this Line is vertical too.
        return selfslopex == 0
    if selfslopex == 0:
        return False
    selfslope = (self.end.y - self.start.y) / selfslopex
    line2slope = (line2.end.y - line2.start.y) / line2slopex
    return round(line2slope, 10) == round(selfslope, 10)
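A common alternative avoids the vertical-line special case entirely by comparing cross-multiplied slopes. This is a sketch of that standard technique, not the repository's method:

    def is_parallel_cross_form(l1, l2, places=10):
        # (y2 - y1)*(x4 - x3) == (y4 - y3)*(x2 - x1) holds iff the slopes
        # match, and it stays well-defined when either line is vertical.
        lhs = (l1.end.y - l1.start.y) * (l2.end.x - l2.start.x)
        rhs = (l2.end.y - l2.start.y) * (l1.end.x - l1.start.x)
        return round(lhs - rhs, places) == 0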
# antlir/btrfs_diff/extents_to_chunks.py :: _leaf_extent_id_to_clone_ops (SaurabhAgarwala/antlir)

def _leaf_extent_id_to_clone_ops(
    ids_and_extents: Iterable[Tuple[InodeID, Extent]],
):
    """
    To collect the parts of a Chunk that are cloned, we will run a variation
    on the standard interval-overlap algorithm. We first sort the starts &
    ends of each interval, and then do a sequential scan that uses starts to
    add, and ends to remove, a tracking object from a "current intervals"
    structure.

    This function simply prepares the set of interval starts & ends for each
    InodeID, the computation is in `_leaf_ref_to_chunk_clones_from_clone_ops`.
    """
    leaf_extent_id_to_clone_ops = defaultdict(list)
    for ino_id, extent in ids_and_extents:
        file_offset = 0
        for leaf_idx, (offset, length, leaf_extent) in enumerate(
            extent.gen_trimmed_leaves()
        ):
            ref = _CloneExtentRef(
                clone=Clone(inode_id=ino_id, offset=file_offset, length=length),
                extent=leaf_extent,
                offset=offset,
                leaf_idx=leaf_idx,
            )
            leaf_extent_id_to_clone_ops[id(leaf_extent)].extend([
                _CloneOp(pos=offset, action=_CloneOp.PUSH, ref=ref),
                _CloneOp(pos=offset + length, action=_CloneOp.POP, ref=ref),
            ])
            file_offset += length
    return leaf_extent_id_to_clone_ops
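The PUSH/POP scan the docstring describes is the standard sweep over sorted interval endpoints. A self-contained miniature on plain tuples, with invented labels and numbers for illustration:

    def overlapping_pairs(intervals):
        """Report which labeled [start, end) intervals overlap."""
        events = []
        for label, (start, end) in intervals.items():
            events.append((start, 1, label))  # 1 = PUSH; sorts after a POP at the same pos
            events.append((end, 0, label))    # 0 = POP, so touching intervals do not pair
        active, pairs = set(), []
        for _pos, kind, label in sorted(events):
            if kind:  # PUSH: the new interval overlaps every currently active one
                pairs.extend(sorted((label, other)) for other in active)
                active.add(label)
            else:     # POP: this interval is no longer "current"
                active.discard(label)
        return pairs

    print(overlapping_pairs({'a': (0, 10), 'b': (5, 15), 'c': (20, 30)}))
    # [['a', 'b']] -- only a and b overlap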
# antlir/btrfs_diff/extents_to_chunks.py :: _leaf_ref_to_chunk_clones_from_clone_ops (SaurabhAgarwala/antlir)

def _leaf_ref_to_chunk_clones_from_clone_ops(
    extent_id: int, clone_ops: Iterable[_CloneOp]
):
    """As per `_leaf_extent_id_to_clone_ops`, this computes interval overlaps"""
    active_ops: Dict[_CloneExtentRef, _CloneOp] = {}
    leaf_ref_to_chunk_clones = defaultdict(list)
    for op in sorted(clone_ops):
        if op.action is _CloneOp.POP:
            pushed_op = active_ops.pop(op.ref)
            assert pushed_op.ref is op.ref
            assert id(op.ref.extent) == extent_id
            assert pushed_op.pos == op.ref.offset
            assert pushed_op.pos + op.ref.clone.length == op.pos
            for clone_op in active_ops.values():
                assert op.ref.extent is clone_op.ref.extent
                bigger_offset = max(clone_op.ref.offset, op.ref.offset)
                leaf_ref_to_chunk_clones[op.ref].append(ChunkClone(
                    offset=bigger_offset,
                    clone=Clone(
                        inode_id=clone_op.ref.clone.inode_id,
                        offset=clone_op.ref.clone.offset
                        + (bigger_offset - clone_op.ref.offset),
                        length=op.pos - bigger_offset,
                    ),
                ))
                leaf_ref_to_chunk_clones[clone_op.ref].append(ChunkClone(
                    offset=bigger_offset,
                    clone=Clone(
                        inode_id=op.ref.clone.inode_id,
                        offset=op.ref.clone.offset
                        + (bigger_offset - op.ref.offset),
                        length=op.pos - bigger_offset,
                    ),
                ))
        elif op.action == _CloneOp.PUSH:
            assert op.ref not in active_ops
            active_ops[op.ref] = op
        else:
            raise AssertionError(op)
    return leaf_ref_to_chunk_clones
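The POP branch clips each still-active interval against the one that just ended. The offset/length arithmetic on bare numbers, with invented values:

    # Interval A = [0, 10), interval B = [5, 15); A's POP fires first, at pos 10.
    op_pos = 10                   # end of A, where the POP happens
    bigger_offset = max(0, 5)     # the overlap starts at the later of the two starts
    overlap_length = op_pos - bigger_offset
    print(bigger_offset, overlap_length)  # 5 5 -> the shared bytes are [5, 10)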
# antlir/btrfs_diff/extents_to_chunks.py :: _id_to_leaf_idx_to_chunk_clones (SaurabhAgarwala/antlir)

def _id_to_leaf_idx_to_chunk_clones(
    ids_and_extents: Iterable[Tuple[InodeID, Extent]],
):
    'Aggregates newly created ChunkClones per InodeID, and per "trimmed leaf"'
    id_to_leaf_idx_to_chunk_clones = defaultdict(dict)
    for extent_id, clone_ops in _leaf_extent_id_to_clone_ops(
        ids_and_extents
    ).items():
        leaf_ref_to_chunk_clones = _leaf_ref_to_chunk_clones_from_clone_ops(
            extent_id, clone_ops
        )
        for leaf_ref, offsets_clones in leaf_ref_to_chunk_clones.items():
            d = id_to_leaf_idx_to_chunk_clones[leaf_ref.clone.inode_id]
            assert leaf_ref.leaf_idx not in d
            d[leaf_ref.leaf_idx] = offsets_clones
    return id_to_leaf_idx_to_chunk_clones
# antlir/btrfs_diff/extents_to_chunks.py :: extents_to_chunks_with_clones (SaurabhAgarwala/antlir)

def extents_to_chunks_with_clones(
    ids_and_extents: Sequence[Tuple[InodeID, Extent]],
) -> Iterable[Tuple[InodeID, Sequence[Chunk]]]:
    """
    Converts the nested, history-preserving `Extent` structures into flat
    sequences of `Chunk`s, while being careful to annotate cloned parts as
    described in this file's docblock. The `InodeID`s are needed to ensure
    that the `Chunk`s' `Clone` objects refer to the appropriate files.
    """
    id_to_leaf_idx_to_chunk_clones = _id_to_leaf_idx_to_chunk_clones(
        ids_and_extents
    )
    for ino_id, extent in ids_and_extents:
        leaf_to_chunk_clones = id_to_leaf_idx_to_chunk_clones.get(ino_id, {})
        new_chunks = []
        for leaf_idx, (offset, length, extent) in enumerate(
            extent.gen_trimmed_leaves()
        ):
            chunk_clones = leaf_to_chunk_clones.get(leaf_idx, [])
            assert isinstance(extent.content, Extent.Kind)
            if new_chunks and new_chunks[-1].kind == extent.content:
                prev_length = new_chunks[-1].length
                prev_clones = new_chunks[-1].chunk_clones
            else:
                prev_length = 0
                prev_clones = set()
                new_chunks.append(None)
            new_chunks[-1] = Chunk(
                kind=extent.content,
                length=length + prev_length,
                chunk_clones=prev_clones,
            )
            new_chunks[-1].chunk_clones.update(
                ChunkClone(
                    clone=clone,
                    offset=(clone_offset + prev_length) - offset,
                )
                for clone_offset, clone in chunk_clones
            )
        yield ino_id, tuple(
            Chunk(
                kind=c.kind,
                length=c.length,
                chunk_clones=frozenset(c.chunk_clones),
            )
            for c in new_chunks
        )
# sympy/vector/tests/test_coordsysrect.py :: test_coordinate_vars (Anshnrag02/sympy)

def test_coordinate_vars():
    """
    Tests the coordinate variables functionality with respect to
    reorientation of coordinate systems.
    """
    A = CoordSysCartesian('A')
    assert BaseScalar('A.x', 0, A, 'A_x', '\\mathbf{{x}_{A}}') == A.x
    assert BaseScalar('A.y', 1, A, 'A_y', '\\mathbf{{y}_{A}}') == A.y
    assert BaseScalar('A.z', 2, A, 'A_z', '\\mathbf{{z}_{A}}') == A.z
    assert (BaseScalar('A.x', 0, A, 'A_x', '\\mathbf{{x}_{A}}').__hash__() ==
            A.x.__hash__())
    assert (isinstance(A.x, BaseScalar) and isinstance(A.y, BaseScalar) and
            isinstance(A.z, BaseScalar))
    assert A.x * A.y == A.y * A.x
    assert A.scalar_map(A) == {A.x: A.x, A.y: A.y, A.z: A.z}
    assert A.x.system == A
    assert A.x.diff(A.x) == 1
    B = A.orient_new_axis('B', q, A.k)
    assert B.scalar_map(A) == {B.z: A.z,
                               B.y: -A.x * sin(q) + A.y * cos(q),
                               B.x: A.x * cos(q) + A.y * sin(q)}
    assert A.scalar_map(B) == {A.x: B.x * cos(q) - B.y * sin(q),
                               A.y: B.x * sin(q) + B.y * cos(q),
                               A.z: B.z}
    assert express(B.x, A, variables=True) == A.x * cos(q) + A.y * sin(q)
    assert express(B.y, A, variables=True) == -A.x * sin(q) + A.y * cos(q)
    assert express(B.z, A, variables=True) == A.z
    assert expand(express(B.x * B.y * B.z, A, variables=True)) == \
        expand(A.z * (-A.x * sin(q) + A.y * cos(q)) *
               (A.x * cos(q) + A.y * sin(q)))
    assert express(B.x * B.i + B.y * B.j + B.z * B.k, A) == \
        ((B.x * cos(q) - B.y * sin(q)) * A.i +
         (B.x * sin(q) + B.y * cos(q)) * A.j + B.z * A.k)
    assert simplify(express(B.x * B.i + B.y * B.j + B.z * B.k, A,
                            variables=True)) == \
        A.x * A.i + A.y * A.j + A.z * A.k
    assert express(A.x * A.i + A.y * A.j + A.z * A.k, B) == \
        ((A.x * cos(q) + A.y * sin(q)) * B.i +
         (-A.x * sin(q) + A.y * cos(q)) * B.j + A.z * B.k)
    assert simplify(express(A.x * A.i + A.y * A.j + A.z * A.k, B,
                            variables=True)) == \
        B.x * B.i + B.y * B.j + B.z * B.k
    N = B.orient_new_axis('N', -q, B.k)
    assert N.scalar_map(A) == {N.x: A.x, N.z: A.z, N.y: A.y}
    C = A.orient_new_axis('C', q, A.i + A.j + A.k)
    mapping = A.scalar_map(C)
    assert mapping[A.x] == (C.x * (2 * cos(q) + 1) / 3 +
                            C.y * (-2 * sin(q + pi / 6) + 1) / 3 +
                            C.z * (-2 * cos(q + pi / 3) + 1) / 3)
    assert mapping[A.y] == (C.x * (-2 * cos(q + pi / 3) + 1) / 3 +
                            C.y * (2 * cos(q) + 1) / 3 +
                            C.z * (-2 * sin(q + pi / 6) + 1) / 3)
    assert mapping[A.z] == (C.x * (-2 * sin(q + pi / 6) + 1) / 3 +
                            C.y * (-2 * cos(q + pi / 3) + 1) / 3 +
                            C.z * (2 * cos(q) + 1) / 3)
    D = A.locate_new('D', a * A.i + b * A.j + c * A.k)
    assert D.scalar_map(A) == {D.z: A.z - c, D.x: A.x - a, D.y: A.y - b}
    E = A.orient_new_axis('E', a, A.k, a * A.i + b * A.j + c * A.k)
    assert A.scalar_map(E) == {A.z: E.z + c,
                               A.x: E.x * cos(a) - E.y * sin(a) + a,
                               A.y: E.x * sin(a) + E.y * cos(a) + b}
    assert E.scalar_map(A) == {E.x: (A.x - a) * cos(a) + (A.y - b) * sin(a),
                               E.y: (-A.x + a) * sin(a) + (A.y - b) * cos(a),
                               E.z: A.z - c}
    F = A.locate_new('F', Vector.zero)
    assert A.scalar_map(F) == {A.z: F.z, A.x: F.x, A.y: F.y}
# sympy/vector/tests/test_coordsysrect.py :: test_vector (Anshnrag02/sympy)

def test_vector():
    """
    Tests the effects of orientation of coordinate systems on
    basic vector operations.
    """
    N = CoordSysCartesian('N')
    A = N.orient_new_axis('A', q1, N.k)
    B = A.orient_new_axis('B', q2, A.i)
    C = B.orient_new_axis('C', q3, B.j)
    v1 = a * N.i + b * N.j + c * N.k
    assert v1.to_matrix(A) == Matrix([[a * cos(q1) + b * sin(q1)],
                                      [-a * sin(q1) + b * cos(q1)],
                                      [c]])
    assert N.i.dot(A.i) == cos(q1)
    assert N.i.dot(A.j) == -sin(q1)
    assert N.i.dot(A.k) == 0
    assert N.j.dot(A.i) == sin(q1)
    assert N.j.dot(A.j) == cos(q1)
    assert N.j.dot(A.k) == 0
    assert N.k.dot(A.i) == 0
    assert N.k.dot(A.j) == 0
    assert N.k.dot(A.k) == 1
    assert N.i.dot(A.i + A.j) == -sin(q1) + cos(q1) == (A.i + A.j).dot(N.i)
    assert A.i.dot(C.i) == cos(q3)
    assert A.i.dot(C.j) == 0
    assert A.i.dot(C.k) == sin(q3)
    assert A.j.dot(C.i) == sin(q2) * sin(q3)
    assert A.j.dot(C.j) == cos(q2)
    assert A.j.dot(C.k) == -sin(q2) * cos(q3)
    assert A.k.dot(C.i) == -cos(q2) * sin(q3)
    assert A.k.dot(C.j) == sin(q2)
    assert A.k.dot(C.k) == cos(q2) * cos(q3)
    assert N.i.cross(A.i) == sin(q1) * A.k
    assert N.i.cross(A.j) == cos(q1) * A.k
    assert N.i.cross(A.k) == -sin(q1) * A.i - cos(q1) * A.j
    assert N.j.cross(A.i) == -cos(q1) * A.k
    assert N.j.cross(A.j) == sin(q1) * A.k
    assert N.j.cross(A.k) == cos(q1) * A.i - sin(q1) * A.j
    assert N.k.cross(A.i) == A.j
    assert N.k.cross(A.j) == -A.i
    assert N.k.cross(A.k) == Vector.zero
    assert N.i.cross(A.i) == sin(q1) * A.k
    assert N.i.cross(A.j) == cos(q1) * A.k
    assert N.i.cross(A.i + A.j) == sin(q1) * A.k + cos(q1) * A.k
    assert (A.i + A.j).cross(N.i) == (-sin(q1) - cos(q1)) * N.k
    assert A.i.cross(C.i) == sin(q3) * C.j
    assert A.i.cross(C.j) == -sin(q3) * C.i + cos(q3) * C.k
    assert A.i.cross(C.k) == -cos(q3) * C.j
    assert C.i.cross(A.i) == \
        -sin(q3) * cos(q2) * A.j - sin(q2) * sin(q3) * A.k
    assert C.j.cross(A.i) == sin(q2) * A.j - cos(q2) * A.k
    assert express(C.k.cross(A.i), C).trigsimp() == cos(q3) * C.j
def test_locatenew_point(): '\n Tests Point class, and locate_new method in CoordSysCartesian.\n ' A = CoordSysCartesian('A') assert isinstance(A.origin, Point) v = (((a * A.i) + (b * A.j)) + (c * A.k)) C = A.locate_new('C', v) assert (C.origin.position_wrt(A) == C.position_wrt(A) == C.origin.position_wrt(A.origin) == v) assert (A.origin.position_wrt(C) == A.position_wrt(C) == A.origin.position_wrt(C.origin) == (- v)) assert (A.origin.express_coordinates(C) == ((- a), (- b), (- c))) p = A.origin.locate_new('p', (- v)) assert (p.express_coordinates(A) == ((- a), (- b), (- c))) assert (p.position_wrt(C.origin) == p.position_wrt(C) == ((- 2) * v)) p1 = p.locate_new('p1', (2 * v)) assert (p1.position_wrt(C.origin) == Vector.zero) assert (p1.express_coordinates(C) == (0, 0, 0)) p2 = p.locate_new('p2', A.i) assert (p1.position_wrt(p2) == ((2 * v) - A.i)) assert (p2.express_coordinates(C) == ((((- 2) * a) + 1), ((- 2) * b), ((- 2) * c)))
-988,462,583,724,789,000
Tests Point class, and locate_new method in CoordSysCartesian.
sympy/vector/tests/test_coordsysrect.py
test_locatenew_point
Anshnrag02/sympy
python
def test_locatenew_point(): '\n \n ' A = CoordSysCartesian('A') assert isinstance(A.origin, Point) v = (((a * A.i) + (b * A.j)) + (c * A.k)) C = A.locate_new('C', v) assert (C.origin.position_wrt(A) == C.position_wrt(A) == C.origin.position_wrt(A.origin) == v) assert (A.origin.position_wrt(C) == A.position_wrt(C) == A.origin.position_wrt(C.origin) == (- v)) assert (A.origin.express_coordinates(C) == ((- a), (- b), (- c))) p = A.origin.locate_new('p', (- v)) assert (p.express_coordinates(A) == ((- a), (- b), (- c))) assert (p.position_wrt(C.origin) == p.position_wrt(C) == ((- 2) * v)) p1 = p.locate_new('p1', (2 * v)) assert (p1.position_wrt(C.origin) == Vector.zero) assert (p1.express_coordinates(C) == (0, 0, 0)) p2 = p.locate_new('p2', A.i) assert (p1.position_wrt(p2) == ((2 * v) - A.i)) assert (p2.express_coordinates(C) == ((((- 2) * a) + 1), ((- 2) * b), ((- 2) * c)))
def usage(): 'Print helpful, accurate usage statement to stdout.' print('Usage: rotate_molecule.py -f filename') print() print(' Description of command...') print(' [-f] filename') print(' Optional parameters:') print(' [-o] alternative output filename') print(" (default is 'rotated_' +filename)") print(' [-y] rotate around the y axis') print(' (default is rotation around the z axis)') print(' [-x] rotate around the x axis') print(' (default is rotation around the z axis)') print(" [-u] user-defined axis of rotation '1.0,2.0,-6.2'") print(' (default is rotation around the z axis)') print(' [-a] angle for rotation about axis ') print(' (default is rotation around the z axis)') print(' [-v] verbose output')
7,428,389,739,618,546,000
Print helpful, accurate usage statement to stdout.
AutoDockTools/Utilities24/rotate_molecule.py
usage
e-mayo/autodocktools-prepare-py3k
python
def usage(): print('Usage: rotate_molecule.py -f filename') print() print(' Description of command...') print(' [-f] filename') print(' Optional parameters:') print(' [-o] alternative output filename') print(" (default is 'rotated_' +filename)") print(' [-y] rotate around the y axis') print(' (default is rotation around the z axis)') print(' [-x] rotate around the x axis') print(' (default is rotation around the z axis)') print(" [-u] user-defined axis of rotation '1.0,2.0,-6.2'") print(' (default is rotation around the z axis)') print(' [-a] angle for rotation about axis ') print(' (default is rotation around the z axis)') print(' [-v] verbose output')
def __init__(self, port=None, path='redis-server', **extra_args): '\n :param port: port number to start the redis server on. Specify none to automatically generate\n :type port: int|None\n :param extra_args: any extra arguments kwargs will be passed to redis server as --key val\n ' self._port = port self.port = None self.extra_args = list(itertools.chain(*((('--%s' % k), v) for (k, v) in extra_args.items()))) self.path = os.getenv(REDIS_PATH_ENVVAR, path)
-7,137,479,020,550,939,000
:param port: port number to start the redis server on. Specify None to automatically generate one :type port: int|None :param extra_args: any extra arguments kwargs will be passed to redis server as --key val
RAMP/disposableredis/__init__.py
__init__
MPalarya/RAMP
python
def __init__(self, port=None, path='redis-server', **extra_args): '\n        :param port: port number to start the redis server on. Specify None to automatically generate one\n        :type port: int|None\n        :param extra_args: any extra arguments kwargs will be passed to redis server as --key val\n        ' self._port = port self.port = None self.extra_args = list(itertools.chain(*((('--%s' % k), v) for (k, v) in extra_args.items()))) self.path = os.getenv(REDIS_PATH_ENVVAR, path)
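The itertools.chain expression above flattens arbitrary keyword arguments into the flag list handed to redis-server. A small illustration with made-up option names:

import itertools

extra_args = {'maxmemory': '100mb', 'appendonly': 'yes'}  # illustrative options
flat = list(itertools.chain(*((('--%s' % k), v) for (k, v) in extra_args.items())))
print(flat)  # ['--maxmemory', '100mb', '--appendonly', 'yes']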
def client(self): '\n :rtype: redis.StrictRedis\n ' return redis.StrictRedis(port=self.port, decode_responses=True)
6,858,181,793,222,088,000
:rtype: redis.StrictRedis
RAMP/disposableredis/__init__.py
client
MPalarya/RAMP
python
def client(self): '\n \n ' return redis.StrictRedis(port=self.port, decode_responses=True)
def sample(population, k): 'Chooses k unique random elements from a bag.\n\n    Returns a new bag containing elements from the population while\n    leaving the original population unchanged.\n\n    Parameters\n    ----------\n    population: Bag\n        Elements to sample.\n    k: integer\n        Number of elements to sample.\n\n    Examples\n    --------\n    >>> import dask.bag as db # doctest: +SKIP\n    ... from dask.bag import random\n    ...\n    ... b = db.from_sequence(range(5), npartitions=2)\n    ... list(random.sample(b, 3).compute())\n    [1, 3, 4]\n    ' return _sample(population=population, k=k, replace=False)
-23,125,142,183,938,040
Chooses k unique random elements from a bag. Returns a new bag containing elements from the population while leaving the original population unchanged. Parameters ---------- population: Bag Elements to sample. k: integer Number of elements to sample. Examples -------- >>> import dask.bag as db # doctest: +SKIP ... from dask.bag import random ... ... b = db.from_sequence(range(5), npartitions=2) ... list(random.sample(b, 3).compute()) [1, 3, 4]
ServerComponent/venv/Lib/site-packages/dask/bag/random.py
sample
CDU55/FakeNews
python
def sample(population, k): 'Chooses k unique random elements from a bag.\n\n    Returns a new bag containing elements from the population while\n    leaving the original population unchanged.\n\n    Parameters\n    ----------\n    population: Bag\n        Elements to sample.\n    k: integer\n        Number of elements to sample.\n\n    Examples\n    --------\n    >>> import dask.bag as db # doctest: +SKIP\n    ... from dask.bag import random\n    ...\n    ... b = db.from_sequence(range(5), npartitions=2)\n    ... list(random.sample(b, 3).compute())\n    [1, 3, 4]\n    ' return _sample(population=population, k=k, replace=False)
def choices(population, k=1): '\n    Return a k sized list of elements chosen with replacement.\n\n    Parameters\n    ----------\n    population: Bag\n        Elements to sample.\n    k: integer, optional\n        Number of elements to sample.\n\n    Examples\n    --------\n    >>> import dask.bag as db # doctest: +SKIP\n    ... from dask.bag import random\n    ...\n    ... b = db.from_sequence(range(5), npartitions=2)\n    ... list(random.choices(b, 3).compute())\n    [1, 1, 4]\n    ' return _sample(population=population, k=k, replace=True)
-3,130,375,494,300,238,300
Return a k sized list of elements chosen with replacement. Parameters ---------- population: Bag Elements to sample. k: integer, optional Number of elements to sample. Examples -------- >>> import dask.bag as db # doctest: +SKIP ... from dask.bag import random ... ... b = db.from_sequence(range(5), npartitions=2) ... list(random.choices(b, 3).compute()) [1, 1, 4]
ServerComponent/venv/Lib/site-packages/dask/bag/random.py
choices
CDU55/FakeNews
python
def choices(population, k=1): '\n    Return a k sized list of elements chosen with replacement.\n\n    Parameters\n    ----------\n    population: Bag\n        Elements to sample.\n    k: integer, optional\n        Number of elements to sample.\n\n    Examples\n    --------\n    >>> import dask.bag as db # doctest: +SKIP\n    ... from dask.bag import random\n    ...\n    ... b = db.from_sequence(range(5), npartitions=2)\n    ... list(random.choices(b, 3).compute())\n    [1, 1, 4]\n    ' return _sample(population=population, k=k, replace=True)
def _sample_map_partitions(population, k, replace): '\n Map function used on the sample and choices functions.\n Parameters\n ----------\n population : list\n List of elements to sample.\n k : int, optional\n Number of elements to sample. Default is 1.\n\n Returns\n -------\n sample: list\n List of sampled elements from the partition.\n lx: int\n Number of elements on the partition.\n k: int\n Number of elements to sample.\n ' lx = len(population) real_k = (k if (k <= lx) else lx) sample_func = (rnd.choices if replace else rnd.sample) sampled = ([] if (real_k == 0) else sample_func(population=population, k=real_k)) return (sampled, lx)
-4,155,118,698,904,341,000
Map function used on the sample and choices functions. Parameters ---------- population : list List of elements to sample. k : int Number of elements to sample. Returns ------- sample: list List of sampled elements from the partition. lx: int Number of elements on the partition.
ServerComponent/venv/Lib/site-packages/dask/bag/random.py
_sample_map_partitions
CDU55/FakeNews
python
def _sample_map_partitions(population, k, replace): '\n    Map function used on the sample and choices functions.\n    Parameters\n    ----------\n    population : list\n        List of elements to sample.\n    k : int\n        Number of elements to sample.\n\n    Returns\n    -------\n    sample: list\n        List of sampled elements from the partition.\n    lx: int\n        Number of elements on the partition.\n    ' lx = len(population) real_k = (k if (k <= lx) else lx) sample_func = (rnd.choices if replace else rnd.sample) sampled = ([] if (real_k == 0) else sample_func(population=population, k=real_k)) return (sampled, lx)
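A quick sketch of the map stage in isolation, assuming _sample_map_partitions is importable; it returns the per-partition sample alongside the partition length so the reduce stage can reweight:

partition = list(range(10))
(sampled, lx) = _sample_map_partitions(partition, k=3, replace=False)
print(sampled, lx)  # e.g. [7, 1, 4] 10 -- at most k elements plus the partition size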
def _sample_reduce(reduce_iter, k, replace): '\n Reduce function used on the sample and choice functions.\n\n Parameters\n ----------\n reduce_iter : iterable\n Each element is a tuple coming generated by the _sample_map_partitions function.\n\n Returns a sequence of uniformly distributed samples;\n ' ns_ks = [] s = [] n = 0 for i in reduce_iter: (s_i, n_i) = i s.extend(s_i) n += n_i k_i = len(s_i) ns_ks.append((n_i, k_i)) if ((k < 0) or ((k > n) and (not replace))): raise ValueError('Sample larger than population or is negative') p = [] for (n_i, k_i) in ns_ks: if (k_i > 0): p_i = (n_i / (k_i * n)) p += ([p_i] * k_i) sample_func = (rnd.choices if replace else _weighted_sampling_without_replacement) return sample_func(population=s, weights=p, k=k)
2,513,519,958,655,585,000
Reduce function used on the sample and choices functions. Parameters ---------- reduce_iter : iterable Each element is a tuple generated by the _sample_map_partitions function. Returns a sequence of uniformly distributed samples.
ServerComponent/venv/Lib/site-packages/dask/bag/random.py
_sample_reduce
CDU55/FakeNews
python
def _sample_reduce(reduce_iter, k, replace): '\n    Reduce function used on the sample and choices functions.\n\n    Parameters\n    ----------\n    reduce_iter : iterable\n        Each element is a tuple generated by the _sample_map_partitions function.\n\n    Returns a sequence of uniformly distributed samples.\n    ' ns_ks = [] s = [] n = 0 for i in reduce_iter: (s_i, n_i) = i s.extend(s_i) n += n_i k_i = len(s_i) ns_ks.append((n_i, k_i)) if ((k < 0) or ((k > n) and (not replace))): raise ValueError('Sample larger than population or is negative') p = [] for (n_i, k_i) in ns_ks: if (k_i > 0): p_i = (n_i / (k_i * n)) p += ([p_i] * k_i) sample_func = (rnd.choices if replace else _weighted_sampling_without_replacement) return sample_func(population=s, weights=p, k=k)
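The reweighting is the heart of the reduce step: each map-stage sample stands in for n_i / k_i elements of its partition, and dividing by the total size n turns that into the weight p_i = n_i / (k_i * n). A worked sketch with two partitions of sizes 10 and 90, each contributing two samples:

ns_ks = [(10, 2), (90, 2)]          # (partition size, samples taken) per partition
n = sum(n_i for (n_i, _) in ns_ks)  # 100 elements overall
p = []
for (n_i, k_i) in ns_ks:
    p += [n_i / (k_i * n)] * k_i
print(p)  # [0.05, 0.05, 0.45, 0.45] -- samples from the large partition weigh 9x more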
def _weighted_sampling_without_replacement(population, weights, k): '\n Source:\n Weighted random sampling with a reservoir, Pavlos S. Efraimidis, Paul G. Spirakis\n ' elt = [((math.log(rnd.random()) / weights[i]), i) for i in range(len(weights))] return [population[x[1]] for x in heapq.nlargest(k, elt)]
3,281,393,158,215,064,600
Source: Weighted random sampling with a reservoir, Pavlos S. Efraimidis, Paul G. Spirakis
ServerComponent/venv/Lib/site-packages/dask/bag/random.py
_weighted_sampling_without_replacement
CDU55/FakeNews
python
def _weighted_sampling_without_replacement(population, weights, k): '\n Source:\n Weighted random sampling with a reservoir, Pavlos S. Efraimidis, Paul G. Spirakis\n ' elt = [((math.log(rnd.random()) / weights[i]), i) for i in range(len(weights))] return [population[x[1]] for x in heapq.nlargest(k, elt)]
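The cited Efraimidis-Spirakis method assigns each item the key log(u) / w for a uniform draw u and keeps the k largest keys, which samples without replacement with probability proportional to weight. A self-contained sketch of the same trick:

import heapq
import math
import random

population = ['a', 'b', 'c', 'd']
weights = [0.1, 0.2, 0.3, 0.4]
keyed = [((math.log(random.random()) / w), i) for (i, w) in enumerate(weights)]
picked = [population[i] for (_, i) in heapq.nlargest(2, keyed)]
print(picked)  # two distinct items; heavier weights win more often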
def kSimilarity(self, A, B): '\n :type A: str\n :type B: str\n :rtype: int\n ' def neighbors(s): for (i, c) in enumerate(s): if (c != B[i]): break t = list(s) for j in xrange((i + 1), len(s)): if (t[j] == B[i]): (t[i], t[j]) = (t[j], t[i]) (yield ''.join(t)) (t[j], t[i]) = (t[i], t[j]) q = collections.deque([A]) steps = {A: 0} while q: s = q.popleft() if (s == B): return steps[s] for t in neighbors(s): if (t not in steps): steps[t] = (steps[s] + 1) q.append(t)
-6,274,109,689,851,230,000
:type A: str :type B: str :rtype: int
Python/k-similar-strings.py
kSimilarity
RideGreg/LeetCode
python
def kSimilarity(self, A, B): '\n        :type A: str\n        :type B: str\n        :rtype: int\n        ' def neighbors(s): for (i, c) in enumerate(s): if (c != B[i]): break t = list(s) for j in xrange((i + 1), len(s)): if (t[j] == B[i]): (t[i], t[j]) = (t[j], t[i]) (yield ''.join(t)) (t[j], t[i]) = (t[i], t[j]) q = collections.deque([A]) steps = {A: 0} while q: s = q.popleft() if (s == B): return steps[s] for t in neighbors(s): if (t not in steps): steps[t] = (steps[s] + 1) q.append(t)
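A hedged usage sketch of the BFS above, assuming the method sits on the usual LeetCode Solution class (note xrange: the source is Python 2); B must be an anagram of A:

s = Solution()
print(s.kSimilarity('ab', 'ba'))    # 1 -- a single swap suffices
print(s.kSimilarity('abc', 'bca'))  # 2 -- BFS finds the two-swap path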
@classmethod def from_file(cls, filename, hdu_hdu='HDU_INDEX', hdu_obs='OBS_INDEX'): 'Create from a FITS file.\n\n The FITS file must contain both index files.\n\n Parameters\n ----------\n filename : str, Path\n FITS filename\n hdu_hdu : str or int\n FITS HDU name or number for the HDU index table\n hdu_obs : str or int\n FITS HDU name or number for the observation index table\n ' filename = make_path(filename) hdu_table = HDUIndexTable.read(filename, hdu=hdu_hdu, format='fits') obs_table = ObservationTable.read(filename, hdu=hdu_obs, format='fits') return cls(hdu_table=hdu_table, obs_table=obs_table)
3,118,659,519,337,359,400
Create from a FITS file. The FITS file must contain both index files. Parameters ---------- filename : str, Path FITS filename hdu_hdu : str or int FITS HDU name or number for the HDU index table hdu_obs : str or int FITS HDU name or number for the observation index table
gammapy/data/data_store.py
from_file
qpiel/gammapy
python
@classmethod def from_file(cls, filename, hdu_hdu='HDU_INDEX', hdu_obs='OBS_INDEX'): 'Create from a FITS file.\n\n The FITS file must contain both index files.\n\n Parameters\n ----------\n filename : str, Path\n FITS filename\n hdu_hdu : str or int\n FITS HDU name or number for the HDU index table\n hdu_obs : str or int\n FITS HDU name or number for the observation index table\n ' filename = make_path(filename) hdu_table = HDUIndexTable.read(filename, hdu=hdu_hdu, format='fits') obs_table = ObservationTable.read(filename, hdu=hdu_obs, format='fits') return cls(hdu_table=hdu_table, obs_table=obs_table)
@classmethod def from_dir(cls, base_dir, hdu_table_filename=None, obs_table_filename=None): 'Create from a directory.\n\n Parameters\n ----------\n base_dir : str, Path\n Base directory of the data files.\n hdu_table_filename : str, Path\n Filename of the HDU index file. May be specified either relative\n to `base_dir` or as an absolute path. If None, the default filename\n will be looked for.\n obs_table_filename : str, Path\n Filename of the observation index file. May be specified either relative\n to `base_dir` or as an absolute path. If None, the default filename\n will be looked for.\n ' base_dir = make_path(base_dir) if hdu_table_filename: hdu_table_filename = make_path(hdu_table_filename) if (base_dir / hdu_table_filename).exists(): hdu_table_filename = (base_dir / hdu_table_filename) else: hdu_table_filename = (base_dir / cls.DEFAULT_HDU_TABLE) if obs_table_filename: obs_table_filename = make_path(obs_table_filename) if (base_dir / obs_table_filename).exists(): obs_table_filename = (base_dir / obs_table_filename) else: obs_table_filename = (base_dir / cls.DEFAULT_OBS_TABLE) if (not hdu_table_filename.exists()): raise IOError('File not found: {}'.format(hdu_table_filename)) log.debug('Reading {}'.format(hdu_table_filename)) hdu_table = HDUIndexTable.read(str(hdu_table_filename), format='fits') hdu_table.meta['BASE_DIR'] = str(base_dir) if (not obs_table_filename.exists()): raise IOError('File not found: {}'.format(obs_table_filename)) log.debug('Reading {}'.format(str(obs_table_filename))) obs_table = ObservationTable.read(str(obs_table_filename), format='fits') return cls(hdu_table=hdu_table, obs_table=obs_table)
8,706,081,717,159,003,000
Create from a directory. Parameters ---------- base_dir : str, Path Base directory of the data files. hdu_table_filename : str, Path Filename of the HDU index file. May be specified either relative to `base_dir` or as an absolute path. If None, the default filename will be looked for. obs_table_filename : str, Path Filename of the observation index file. May be specified either relative to `base_dir` or as an absolute path. If None, the default filename will be looked for.
gammapy/data/data_store.py
from_dir
qpiel/gammapy
python
@classmethod def from_dir(cls, base_dir, hdu_table_filename=None, obs_table_filename=None): 'Create from a directory.\n\n Parameters\n ----------\n base_dir : str, Path\n Base directory of the data files.\n hdu_table_filename : str, Path\n Filename of the HDU index file. May be specified either relative\n to `base_dir` or as an absolute path. If None, the default filename\n will be looked for.\n obs_table_filename : str, Path\n Filename of the observation index file. May be specified either relative\n to `base_dir` or as an absolute path. If None, the default filename\n will be looked for.\n ' base_dir = make_path(base_dir) if hdu_table_filename: hdu_table_filename = make_path(hdu_table_filename) if (base_dir / hdu_table_filename).exists(): hdu_table_filename = (base_dir / hdu_table_filename) else: hdu_table_filename = (base_dir / cls.DEFAULT_HDU_TABLE) if obs_table_filename: obs_table_filename = make_path(obs_table_filename) if (base_dir / obs_table_filename).exists(): obs_table_filename = (base_dir / obs_table_filename) else: obs_table_filename = (base_dir / cls.DEFAULT_OBS_TABLE) if (not hdu_table_filename.exists()): raise IOError('File not found: {}'.format(hdu_table_filename)) log.debug('Reading {}'.format(hdu_table_filename)) hdu_table = HDUIndexTable.read(str(hdu_table_filename), format='fits') hdu_table.meta['BASE_DIR'] = str(base_dir) if (not obs_table_filename.exists()): raise IOError('File not found: {}'.format(obs_table_filename)) log.debug('Reading {}'.format(str(obs_table_filename))) obs_table = ObservationTable.read(str(obs_table_filename), format='fits') return cls(hdu_table=hdu_table, obs_table=obs_table)
@classmethod def from_config(cls, config): 'Create from a config dict.' base_dir = config['base_dir'] hdu_table_filename = config.get('hduindx', cls.DEFAULT_HDU_TABLE) obs_table_filename = config.get('obsindx', cls.DEFAULT_OBS_TABLE) hdu_table_filename = cls._find_file(hdu_table_filename, base_dir) obs_table_filename = cls._find_file(obs_table_filename, base_dir) return cls.from_files(base_dir=base_dir, hdu_table_filename=hdu_table_filename, obs_table_filename=obs_table_filename)
5,788,775,811,130,244,000
Create from a config dict.
gammapy/data/data_store.py
from_config
qpiel/gammapy
python
@classmethod def from_config(cls, config): base_dir = config['base_dir'] hdu_table_filename = config.get('hduindx', cls.DEFAULT_HDU_TABLE) obs_table_filename = config.get('obsindx', cls.DEFAULT_OBS_TABLE) hdu_table_filename = cls._find_file(hdu_table_filename, base_dir) obs_table_filename = cls._find_file(obs_table_filename, base_dir) return cls.from_files(base_dir=base_dir, hdu_table_filename=hdu_table_filename, obs_table_filename=obs_table_filename)
@staticmethod def _find_file(filename, dir): "Find a file at an absolute or relative location.\n\n - First tries ``Path(filename)``\n - Second tries ``Path(dir) / filename``\n - Raises ``OSError`` if both don't exist.\n " path1 = make_path(filename) path2 = (make_path(dir) / filename) if path1.is_file(): filename = path1 elif path2.is_file(): filename = path2 else: raise OSError('File not found at {} or {}'.format(path1, path2)) return filename
-6,604,903,917,612,430,000
Find a file at an absolute or relative location. - First tries ``Path(filename)`` - Second tries ``Path(dir) / filename`` - Raises ``OSError`` if neither exists.
gammapy/data/data_store.py
_find_file
qpiel/gammapy
python
@staticmethod def _find_file(filename, dir): "Find a file at an absolute or relative location.\n\n        - First tries ``Path(filename)``\n        - Second tries ``Path(dir) / filename``\n        - Raises ``OSError`` if neither exists.\n        " path1 = make_path(filename) path2 = (make_path(dir) / filename) if path1.is_file(): filename = path1 elif path2.is_file(): filename = path2 else: raise OSError('File not found at {} or {}'.format(path1, path2)) return filename
def info(self, show=True): 'Print some info.' s = 'Data store:\n' s += self.hdu_table.summary() s += '\n\n' s += self.obs_table.summary() if show: print(s) else: return s
3,470,897,961,016,105,500
Print some info.
gammapy/data/data_store.py
info
qpiel/gammapy
python
def info(self, show=True): s = 'Data store:\n' s += self.hdu_table.summary() s += '\n\n' s += self.obs_table.summary() if show: print(s) else: return s
def obs(self, obs_id): 'Access a given `~gammapy.data.DataStoreObservation`.\n\n Parameters\n ----------\n obs_id : int\n Observation ID.\n\n Returns\n -------\n observation : `~gammapy.data.DataStoreObservation`\n Observation container\n ' return DataStoreObservation(obs_id=int(obs_id), data_store=self)
9,200,233,249,087,507,000
Access a given `~gammapy.data.DataStoreObservation`. Parameters ---------- obs_id : int Observation ID. Returns ------- observation : `~gammapy.data.DataStoreObservation` Observation container
gammapy/data/data_store.py
obs
qpiel/gammapy
python
def obs(self, obs_id): 'Access a given `~gammapy.data.DataStoreObservation`.\n\n Parameters\n ----------\n obs_id : int\n Observation ID.\n\n Returns\n -------\n observation : `~gammapy.data.DataStoreObservation`\n Observation container\n ' return DataStoreObservation(obs_id=int(obs_id), data_store=self)
def get_observations(self, obs_id, skip_missing=False): 'Generate a `~gammapy.data.Observations`.\n\n Parameters\n ----------\n obs_id : list\n Observation IDs.\n skip_missing : bool, optional\n Skip missing observations, default: False\n\n Returns\n -------\n observations : `~gammapy.data.Observations`\n Container holding a list of `~gammapy.data.DataStoreObservation`\n ' obs_list = [] for _ in obs_id: try: obs = self.obs(_) except ValueError as err: if skip_missing: log.warning('Skipping missing obs_id: {!r}'.format(_)) continue else: raise err else: obs_list.append(obs) return Observations(obs_list)
-351,004,946,236,869,570
Generate a `~gammapy.data.Observations`. Parameters ---------- obs_id : list Observation IDs. skip_missing : bool, optional Skip missing observations, default: False Returns ------- observations : `~gammapy.data.Observations` Container holding a list of `~gammapy.data.DataStoreObservation`
gammapy/data/data_store.py
get_observations
qpiel/gammapy
python
def get_observations(self, obs_id, skip_missing=False): 'Generate a `~gammapy.data.Observations`.\n\n Parameters\n ----------\n obs_id : list\n Observation IDs.\n skip_missing : bool, optional\n Skip missing observations, default: False\n\n Returns\n -------\n observations : `~gammapy.data.Observations`\n Container holding a list of `~gammapy.data.DataStoreObservation`\n ' obs_list = [] for _ in obs_id: try: obs = self.obs(_) except ValueError as err: if skip_missing: log.warning('Skipping missing obs_id: {!r}'.format(_)) continue else: raise err else: obs_list.append(obs) return Observations(obs_list)
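A hedged usage sketch tying from_dir and get_observations together; the directory and observation IDs are illustrative (modeled on the public H.E.S.S. DL3 DR1 layout), not taken from this document:

from gammapy.data import DataStore

data_store = DataStore.from_dir('$GAMMAPY_DATA/hess-dl3-dr1')  # env var expanded via make_path
observations = data_store.get_observations([23523, 23526], skip_missing=True)
print(len(observations))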
def copy_obs(self, obs_id, outdir, hdu_class=None, verbose=False, overwrite=False): 'Create a new `~gammapy.data.DataStore` containing a subset of observations.\n\n Parameters\n ----------\n obs_id : array-like, `~gammapy.data.ObservationTable`\n List of observations to copy\n outdir : str, Path\n Directory for the new store\n hdu_class : list of str\n see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS`\n verbose : bool\n Print copied files\n overwrite : bool\n Overwrite\n ' outdir = make_path(outdir) if isinstance(obs_id, ObservationTable): obs_id = obs_id['OBS_ID'].data hdutable = self.hdu_table hdutable.add_index('OBS_ID') with hdutable.index_mode('discard_on_copy'): subhdutable = hdutable.loc[obs_id] if (hdu_class is not None): subhdutable.add_index('HDU_CLASS') with subhdutable.index_mode('discard_on_copy'): subhdutable = subhdutable.loc[hdu_class] subobstable = self.obs_table.select_obs_id(obs_id) for idx in range(len(subhdutable)): loc = subhdutable.location_info(idx) targetdir = (outdir / loc.file_dir) targetdir.mkdir(exist_ok=True, parents=True) cmd = (['cp', '-v'] if verbose else ['cp']) if (not overwrite): cmd += ['-n'] cmd += [str(loc.path()), str(targetdir)] subprocess.call(cmd) filename = str((outdir / self.DEFAULT_HDU_TABLE)) subhdutable.write(filename, format='fits', overwrite=overwrite) filename = str((outdir / self.DEFAULT_OBS_TABLE)) subobstable.write(filename, format='fits', overwrite=overwrite)
1,393,341,914,337,598,500
Create a new `~gammapy.data.DataStore` containing a subset of observations. Parameters ---------- obs_id : array-like, `~gammapy.data.ObservationTable` List of observations to copy outdir : str, Path Directory for the new store hdu_class : list of str see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS` verbose : bool Print copied files overwrite : bool Overwrite
gammapy/data/data_store.py
copy_obs
qpiel/gammapy
python
def copy_obs(self, obs_id, outdir, hdu_class=None, verbose=False, overwrite=False): 'Create a new `~gammapy.data.DataStore` containing a subset of observations.\n\n Parameters\n ----------\n obs_id : array-like, `~gammapy.data.ObservationTable`\n List of observations to copy\n outdir : str, Path\n Directory for the new store\n hdu_class : list of str\n see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS`\n verbose : bool\n Print copied files\n overwrite : bool\n Overwrite\n ' outdir = make_path(outdir) if isinstance(obs_id, ObservationTable): obs_id = obs_id['OBS_ID'].data hdutable = self.hdu_table hdutable.add_index('OBS_ID') with hdutable.index_mode('discard_on_copy'): subhdutable = hdutable.loc[obs_id] if (hdu_class is not None): subhdutable.add_index('HDU_CLASS') with subhdutable.index_mode('discard_on_copy'): subhdutable = subhdutable.loc[hdu_class] subobstable = self.obs_table.select_obs_id(obs_id) for idx in range(len(subhdutable)): loc = subhdutable.location_info(idx) targetdir = (outdir / loc.file_dir) targetdir.mkdir(exist_ok=True, parents=True) cmd = (['cp', '-v'] if verbose else ['cp']) if (not overwrite): cmd += ['-n'] cmd += [str(loc.path()), str(targetdir)] subprocess.call(cmd) filename = str((outdir / self.DEFAULT_HDU_TABLE)) subhdutable.write(filename, format='fits', overwrite=overwrite) filename = str((outdir / self.DEFAULT_OBS_TABLE)) subobstable.write(filename, format='fits', overwrite=overwrite)
def check(self, checks='all'): 'Check index tables and data files.\n\n This is a generator that yields a list of dicts.\n ' checker = DataStoreChecker(self) return checker.run(checks=checks)
-4,447,423,972,648,946,000
Check index tables and data files. This is a generator that yields a list of dicts.
gammapy/data/data_store.py
check
qpiel/gammapy
python
def check(self, checks='all'): 'Check index tables and data files.\n\n This is a generator that yields a list of dicts.\n ' checker = DataStoreChecker(self) return checker.run(checks=checks)
def check_obs_table(self): 'Checks for the observation index table.' checker = ObservationTableChecker(self.data_store.obs_table) for record in checker.run(): (yield record)
-6,397,188,248,187,160,000
Checks for the observation index table.
gammapy/data/data_store.py
check_obs_table
qpiel/gammapy
python
def check_obs_table(self): checker = ObservationTableChecker(self.data_store.obs_table) for record in checker.run(): (yield record)
def check_hdu_table(self): 'Checks for the HDU index table.' t = self.data_store.hdu_table m = t.meta if (m.get('HDUCLAS1', '') != 'INDEX'): (yield {'level': 'error', 'hdu': 'hdu-index', 'msg': 'Invalid header key. Must have HDUCLAS1=INDEX'}) if (m.get('HDUCLAS2', '') != 'HDU'): (yield {'level': 'error', 'hdu': 'hdu-index', 'msg': 'Invalid header key. Must have HDUCLAS2=HDU'}) for idx in range(len(t)): location_info = t.location_info(idx) try: location_info.get_hdu() except KeyError: (yield {'level': 'error', 'msg': 'HDU not found: {!r}'.format(location_info.__dict__)})
-1,683,391,674,228,314,600
Checks for the HDU index table.
gammapy/data/data_store.py
check_hdu_table
qpiel/gammapy
python
def check_hdu_table(self): t = self.data_store.hdu_table m = t.meta if (m.get('HDUCLAS1', '') != 'INDEX'): (yield {'level': 'error', 'hdu': 'hdu-index', 'msg': 'Invalid header key. Must have HDUCLAS1=INDEX'}) if (m.get('HDUCLAS2', '') != 'HDU'): (yield {'level': 'error', 'hdu': 'hdu-index', 'msg': 'Invalid header key. Must have HDUCLAS2=HDU'}) for idx in range(len(t)): location_info = t.location_info(idx) try: location_info.get_hdu() except KeyError: (yield {'level': 'error', 'msg': 'HDU not found: {!r}'.format(location_info.__dict__)})
def check_consistency(self): 'Consistency checks between multiple HDUs' obs_table_obs_id = set(self.data_store.obs_table['OBS_ID']) hdu_table_obs_id = set(self.data_store.hdu_table['OBS_ID']) if (not (obs_table_obs_id == hdu_table_obs_id)): (yield {'level': 'error', 'msg': 'Inconsistent OBS_ID in obs and HDU index tables'})
-3,145,264,212,553,814,000
Consistency checks between multiple HDUs
gammapy/data/data_store.py
check_consistency
qpiel/gammapy
python
def check_consistency(self): obs_table_obs_id = set(self.data_store.obs_table['OBS_ID']) hdu_table_obs_id = set(self.data_store.hdu_table['OBS_ID']) if (not (obs_table_obs_id == hdu_table_obs_id)): (yield {'level': 'error', 'msg': 'Inconsistent OBS_ID in obs and HDU index tables'})
def check_observations(self): 'Perform some sanity checks for all observations.' for obs_id in self.data_store.obs_table['OBS_ID']: obs = self.data_store.obs(obs_id) for record in ObservationChecker(obs).run(): (yield record)
820,165,116,716,392,800
Perform some sanity checks for all observations.
gammapy/data/data_store.py
check_observations
qpiel/gammapy
python
def check_observations(self): for obs_id in self.data_store.obs_table['OBS_ID']: obs = self.data_store.obs(obs_id) for record in ObservationChecker(obs).run(): (yield record)
def trim5p3p_helper(r, seq_5p, seq_3p_rev): "\n Search for 5' and 3' in the first and last 100 bp window\n " s1 = str(r.seq[:100]) s2 = str(r.reverse_complement().seq[:100]) o1 = parasail.sg_qx_trace(s1, seq_5p, 3, 1, SCOREMAT) o2 = parasail.sg_qe_db_trace(s2, seq_3p_rev, 3, 1, SCOREMAT) lenA = None if (o2.score >= MINSCORE_3P): lenA = trimA(s2[(o2.end_query + 1):]) if (MIN_A_LEN == 0): end3 = ((len(r.seq) - o2.end_query) - 1) return ScoreTuple(score5=o1.score, end5=o1.end_query, score3=o2.score, end3=end3, endA=end3) elif (lenA is not None): end3 = ((len(r.seq) - o2.end_query) - 1) endA = ((end3 - lenA) + 1) return ScoreTuple(score5=o1.score, end5=o1.end_query, score3=o2.score, end3=end3, endA=endA) else: end3 = ((len(r.seq) - o2.end_query) - 1) return ScoreTuple(score5=o1.score, end5=o1.end_query, score3=o2.score, end3=end3, endA=end3)
-4,305,561,221,391,130,000
Search for the 5' and 3' primers in the first and last 100 bp windows
beta/trim_primers.py
trim5p3p_helper
ArthurDondi/cDNA_Cupcake
python
def trim5p3p_helper(r, seq_5p, seq_3p_rev): "\n \n " s1 = str(r.seq[:100]) s2 = str(r.reverse_complement().seq[:100]) o1 = parasail.sg_qx_trace(s1, seq_5p, 3, 1, SCOREMAT) o2 = parasail.sg_qe_db_trace(s2, seq_3p_rev, 3, 1, SCOREMAT) lenA = None if (o2.score >= MINSCORE_3P): lenA = trimA(s2[(o2.end_query + 1):]) if (MIN_A_LEN == 0): end3 = ((len(r.seq) - o2.end_query) - 1) return ScoreTuple(score5=o1.score, end5=o1.end_query, score3=o2.score, end3=end3, endA=end3) elif (lenA is not None): end3 = ((len(r.seq) - o2.end_query) - 1) endA = ((end3 - lenA) + 1) return ScoreTuple(score5=o1.score, end5=o1.end_query, score3=o2.score, end3=end3, endA=endA) else: end3 = ((len(r.seq) - o2.end_query) - 1) return ScoreTuple(score5=o1.score, end5=o1.end_query, score3=o2.score, end3=end3, endA=end3)
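trim5p3p_helper leans on several module-level names (SCOREMAT, MINSCORE_3P, MIN_A_LEN, trimA, ScoreTuple) defined outside this record. A hedged reconstruction of plausible definitions, purely to make those dependencies explicit; the real module may define them differently:

from collections import namedtuple
import parasail

SCOREMAT = parasail.matrix_create('ACGT', 2, -5)  # DNA match/mismatch matrix (scores assumed)
MINSCORE_3P = 20                                  # assumed alignment-score cutoff
MIN_A_LEN = 8                                     # assumed minimum poly-A length
ScoreTuple = namedtuple('ScoreTuple', ('score5', 'end5', 'score3', 'end3', 'endA'))

def trimA(rev_tail):
    # On the reverse complement a poly-A tail reads as leading T's (assumed semantics).
    n = 0
    while (n < len(rev_tail)) and (rev_tail[n] == 'T'):
        n += 1
    return (n if (n >= MIN_A_LEN) else None)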
@raises(ohsome.OhsomeException) def test_handle_multiple_responses_throw_timeouterror(): '\n Tests counting elements within a bounding box for two timestamps\n :return:\n ' bboxes = [8.67066, 49.41423, 8.68177, 49.4204] time = '2010-01-01/2011-01-01/P1Y' keys = ['building'] values = [''] client = ohsome.OhsomeClientParallel() response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values, timeout=2) del client
2,038,496,404,154,034,000
Tests that counting elements within a bounding box raises an OhsomeException on timeout :return:
src/ohsome/tests/test_ohsome_client.py
test_handle_multiple_responses_throw_timeouterror
redfrexx/osm_association_rules
python
@raises(ohsome.OhsomeException) def test_handle_multiple_responses_throw_timeouterror(): '\n    Tests that counting elements within a bounding box raises an OhsomeException on timeout\n    :return:\n    ' bboxes = [8.67066, 49.41423, 8.68177, 49.4204] time = '2010-01-01/2011-01-01/P1Y' keys = ['building'] values = [''] client = ohsome.OhsomeClientParallel() response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values, timeout=2) del client
def test_elements_count(): '\n Tests counting elements within a bounding box for two timestamps\n :return:\n ' bboxes = [8.67066, 49.41423, 8.68177, 49.4204] time = '2010-01-01/2011-01-01/P1Y' keys = ['building'] values = [''] timestamps = ['2010-01-01T00:00:00Z', '2011-01-01T00:00:00Z'] counts = [53.0, 256.0] expected = pd.DataFrame({'timestamp': timestamps, 'value': counts}) client = ohsome.OhsomeClient() response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values) result = response.as_dataframe() del client assert expected.equals(result)
-5,813,278,776,934,570,000
Tests counting elements within a bounding box for two timestamps :return:
src/ohsome/tests/test_ohsome_client.py
test_elements_count
redfrexx/osm_association_rules
python
def test_elements_count(): '\n    Tests counting elements within a bounding box for two timestamps\n    :return:\n    ' bboxes = [8.67066, 49.41423, 8.68177, 49.4204] time = '2010-01-01/2011-01-01/P1Y' keys = ['building'] values = [''] timestamps = ['2010-01-01T00:00:00Z', '2011-01-01T00:00:00Z'] counts = [53.0, 256.0] expected = pd.DataFrame({'timestamp': timestamps, 'value': counts}) client = ohsome.OhsomeClient() response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values) result = response.as_dataframe() del client assert expected.equals(result)
def test_elements_count_group_by_key(): '\n Tests counting elements within a bounding box and grouping them by keys\n :return:\n ' bboxes = '8.67066,49.41423,8.68177,49.4204' time = '2010-01-01/2011-01-01/P1Y' groupByKeys = ['building'] timestamps = ['2010-01-01T00:00:00Z', '2011-01-01T00:00:00Z', '2010-01-01T00:00:00Z', '2011-01-01T00:00:00Z'] counts = [482.0, 628.0, 53.0, 256.0] keys = ['remainder', 'remainder', 'building', 'building'] expected = pd.DataFrame({'key': keys, 'timestamp': timestamps, 'value': counts}) expected.set_index(['key', 'timestamp'], inplace=True) client = ohsome.OhsomeClient() response = client.elements.count.groupBy.key.post(bboxes=bboxes, groupByKeys=groupByKeys, time=time) results = response.as_dataframe() assert expected.equals(results)
4,378,477,051,556,733,000
Tests counting elements within a bounding box and grouping them by keys :return:
src/ohsome/tests/test_ohsome_client.py
test_elements_count_group_by_key
redfrexx/osm_association_rules
python
def test_elements_count_group_by_key(): '\n Tests counting elements within a bounding box and grouping them by keys\n :return:\n ' bboxes = '8.67066,49.41423,8.68177,49.4204' time = '2010-01-01/2011-01-01/P1Y' groupByKeys = ['building'] timestamps = ['2010-01-01T00:00:00Z', '2011-01-01T00:00:00Z', '2010-01-01T00:00:00Z', '2011-01-01T00:00:00Z'] counts = [482.0, 628.0, 53.0, 256.0] keys = ['remainder', 'remainder', 'building', 'building'] expected = pd.DataFrame({'key': keys, 'timestamp': timestamps, 'value': counts}) expected.set_index(['key', 'timestamp'], inplace=True) client = ohsome.OhsomeClient() response = client.elements.count.groupBy.key.post(bboxes=bboxes, groupByKeys=groupByKeys, time=time) results = response.as_dataframe() assert expected.equals(results)
def test_elemets_count_ratio(): '\n Tests count ratio\n :return:\n ' bboxes = '8.67066,49.41423,8.68177,49.4204' time = '2010-01-01' keys = ['building'] keys2 = ['addr:city'] values = [''] values2 = [''] expected = 365.0 client = ohsome.OhsomeClient() response = client.elements.count.ratio.post(bboxes=bboxes, time=time, keys=keys, keys2=keys2, values=values, values2=values2)
-6,645,920,146,652,629,000
Tests count ratio :return:
src/ohsome/tests/test_ohsome_client.py
test_elemets_count_ratio
redfrexx/osm_association_rules
python
def test_elemets_count_ratio(): '\n    Tests count ratio\n    :return:\n    ' bboxes = '8.67066,49.41423,8.68177,49.4204' time = '2010-01-01' keys = ['building'] keys2 = ['addr:city'] values = [''] values2 = [''] expected = 365.0 client = ohsome.OhsomeClient() response = client.elements.count.ratio.post(bboxes=bboxes, time=time, keys=keys, keys2=keys2, values=values, values2=values2)
@raises(AssertionError) def test_elements_count_exception(): '\n Tests whether a TypeError is raised if the result cannot be converted to a geodataframe object\n :return:\n ' bboxes = '8.67066,49.41423,8.68177,49.4204' time = '2010-01-01/2011-01-01/P1Y' keys = ['building'] values = [''] client = ohsome.OhsomeClient() response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values) response.as_geodataframe()
4,637,099,277,073,996,000
Tests whether an AssertionError is raised if the result cannot be converted to a geodataframe object :return:
src/ohsome/tests/test_ohsome_client.py
test_elements_count_exception
redfrexx/osm_association_rules
python
@raises(AssertionError) def test_elements_count_exception(): '\n    Tests whether an AssertionError is raised if the result cannot be converted to a geodataframe object\n    :return:\n    ' bboxes = '8.67066,49.41423,8.68177,49.4204' time = '2010-01-01/2011-01-01/P1Y' keys = ['building'] values = [''] client = ohsome.OhsomeClient() response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values) response.as_geodataframe()
def test_elements_geometry(): '\n Tests whether the result of an elements/geometry query can be converted to a geodataframe\n :return:\n ' bboxes = '8.67066,49.41423,8.68177,49.4204' time = '2010-01-01' keys = ['landuse'] values = ['grass'] client = ohsome.OhsomeClient() response = client.elements.geometry.post(bboxes=bboxes, time=time, keys=keys, values=values) result = response.as_geodataframe() del client assert (len(result.geometry) == 9)
7,416,005,179,180,065,000
Tests whether the result of an elements/geometry query can be converted to a geodataframe :return:
src/ohsome/tests/test_ohsome_client.py
test_elements_geometry
redfrexx/osm_association_rules
python
def test_elements_geometry(): '\n Tests whether the result of an elements/geometry query can be converted to a geodataframe\n :return:\n ' bboxes = '8.67066,49.41423,8.68177,49.4204' time = '2010-01-01' keys = ['landuse'] values = ['grass'] client = ohsome.OhsomeClient() response = client.elements.geometry.post(bboxes=bboxes, time=time, keys=keys, values=values) result = response.as_geodataframe() del client assert (len(result.geometry) == 9)
def test_to_file_assert_filetype(): '\n Asserts whether an error is thrown if the output file is not json or geojson\n :return:\n ' output_file = './out.shp'
-9,188,493,300,800,411,000
Asserts whether an error is thrown if the output file is not json or geojson :return:
src/ohsome/tests/test_ohsome_client.py
test_to_file_assert_filetype
redfrexx/osm_association_rules
python
def test_to_file_assert_filetype(): '\n Asserts whether an error is thrown if the output file is not json or geojson\n :return:\n ' output_file = './out.shp'
def test_format_coordinates(): '\n    Asserts that coordinates of a MultiPolygon are converted correctly\n    :return:\n    ' bpolys = geojson.FeatureCollection([{'type': 'Feature', 'geometry': {'coordinates': [[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]], [[13, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]], 'type': 'MultiPolygon'}}]) time = '2018-01-01' keys = ['landuse'] values = ['grass'] client = ohsome.OhsomeClient() response = client.elements.geometry.post(bpolys=ohsome.format_coordinates(bpolys), time=time, keys=keys, values=values) result = response.as_geodataframe() del client assert (len(result.geometry) == 74)
-4,140,947,888,958,577,000
Asserts that coordinates of a MultiPolygon are converted correctly :return:
src/ohsome/tests/test_ohsome_client.py
test_format_coordinates
redfrexx/osm_association_rules
python
def test_format_coordinates(): '\n    Asserts that coordinates of a MultiPolygon are converted correctly\n    :return:\n    ' bpolys = geojson.FeatureCollection([{'type': 'Feature', 'geometry': {'coordinates': [[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]], [[13, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]], 'type': 'MultiPolygon'}}]) time = '2018-01-01' keys = ['landuse'] values = ['grass'] client = ohsome.OhsomeClient() response = client.elements.geometry.post(bpolys=ohsome.format_coordinates(bpolys), time=time, keys=keys, values=values) result = response.as_geodataframe() del client assert (len(result.geometry) == 74)
def test_promise_return(): '\n Testing that when a workflow is local executed but a local wf execution context already exists, Promise objects\n are returned wrapping Flyte literals instead of the unpacked dict.\n ' @task def t1(a: int) -> typing.NamedTuple('OutputsBC', t1_int_output=int, c=str): a = (a + 2) return (a, ('world-' + str(a))) @workflow def mimic_sub_wf(a: int) -> (str, str): (x, y) = t1(a=a) (u, v) = t1(a=x) return (y, v) ctx = context_manager.FlyteContext.current_context() with ctx.new_execution_context(mode=ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION) as ctx: (a, b) = mimic_sub_wf(a=3) assert isinstance(a, promise.Promise) assert isinstance(b, promise.Promise) assert (a.val.scalar.value.string_value == 'world-5') assert (b.val.scalar.value.string_value == 'world-7')
-1,850,010,718,450,448,600
Testing that when a workflow is locally executed but a local wf execution context already exists, Promise objects are returned wrapping Flyte literals instead of the unpacked dict.
tests/flytekit/unit/core/test_type_hints.py
test_promise_return
ThomVett/flytek
python
def test_promise_return(): '\n    Testing that when a workflow is locally executed but a local wf execution context already exists, Promise objects\n    are returned wrapping Flyte literals instead of the unpacked dict.\n    ' @task def t1(a: int) -> typing.NamedTuple('OutputsBC', t1_int_output=int, c=str): a = (a + 2) return (a, ('world-' + str(a))) @workflow def mimic_sub_wf(a: int) -> (str, str): (x, y) = t1(a=a) (u, v) = t1(a=x) return (y, v) ctx = context_manager.FlyteContext.current_context() with ctx.new_execution_context(mode=ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION) as ctx: (a, b) = mimic_sub_wf(a=3) assert isinstance(a, promise.Promise) assert isinstance(b, promise.Promise) assert (a.val.scalar.value.string_value == 'world-5') assert (b.val.scalar.value.string_value == 'world-7')
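For contrast with the assertions above: when no execution context is active, flytekit's local workflow execution returns plain Python values rather than Promise objects. A hedged sketch, reusing mimic_sub_wf as defined in the test:

(a, b) = mimic_sub_wf(a=3)
print(a, b)  # 'world-5' 'world-7' -- unwrapped strings, no Promise wrapping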
def __init__(__self__, *, storage_class_name: pulumi.Input[str], api_version: Optional[pulumi.Input[str]]=None, capacity: Optional[pulumi.Input[str]]=None, kind: Optional[pulumi.Input[str]]=None, maximum_volume_size: Optional[pulumi.Input[str]]=None, metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]=None, node_topology: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]=None): "\n        The set of arguments for constructing a CSIStorageCapacity resource.\n        :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.\n        :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n        :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n        \n        The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.\n        :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n        :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n        \n        This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.\n        :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n        \n        Objects are namespaced.\n        \n        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n        :param pulumi.Input['_meta.v1.LabelSelectorArgs'] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. \n        \n        This field is immutable.\n        " pulumi.set(__self__, 'storage_class_name', storage_class_name) if (api_version is not None): pulumi.set(__self__, 'api_version', 'storage.k8s.io/v1beta1') if (capacity is not None): pulumi.set(__self__, 'capacity', capacity) if (kind is not None): pulumi.set(__self__, 'kind', 'CSIStorageCapacity') if (maximum_volume_size is not None): pulumi.set(__self__, 'maximum_volume_size', maximum_volume_size) if (metadata is not None): pulumi.set(__self__, 'metadata', metadata) if (node_topology is not None): pulumi.set(__self__, 'node_topology', node_topology)
-1,888,684,912,498,286,300
The set of arguments for constructing a CSIStorageCapacity resource. :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable. :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity. :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim. :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata :param pulumi.Input['_meta.v1.LabelSelectorArgs'] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
__init__
Teshel/pulumi-kubernetes
python
def __init__(__self__, *, storage_class_name: pulumi.Input[str], api_version: Optional[pulumi.Input[str]]=None, capacity: Optional[pulumi.Input[str]]=None, kind: Optional[pulumi.Input[str]]=None, maximum_volume_size: Optional[pulumi.Input[str]]=None, metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]=None, node_topology: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]=None): "\n        The set of arguments for constructing a CSIStorageCapacity resource.\n        :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.\n        :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n        :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n        \n        The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.\n        :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n        :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n        \n        This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.\n        :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n        \n        Objects are namespaced.\n        \n        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n        :param pulumi.Input['_meta.v1.LabelSelectorArgs'] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. \n        \n        This field is immutable.\n        " pulumi.set(__self__, 'storage_class_name', storage_class_name) if (api_version is not None): pulumi.set(__self__, 'api_version', 'storage.k8s.io/v1beta1') if (capacity is not None): pulumi.set(__self__, 'capacity', capacity) if (kind is not None): pulumi.set(__self__, 'kind', 'CSIStorageCapacity') if (maximum_volume_size is not None): pulumi.set(__self__, 'maximum_volume_size', maximum_volume_size) if (metadata is not None): pulumi.set(__self__, 'metadata', metadata) if (node_topology is not None): pulumi.set(__self__, 'node_topology', node_topology)
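A hedged construction sketch for this args class, assuming it is the CSIStorageCapacityArgs exported from the same pulumi_kubernetes module; the field values and zone label are illustrative:

import pulumi_kubernetes.meta.v1 as meta_v1
from pulumi_kubernetes.storage.v1beta1 import CSIStorageCapacityArgs

args = CSIStorageCapacityArgs(
    storage_class_name='fast-ssd',  # illustrative StorageClass name
    capacity='10Gi',
    node_topology=meta_v1.LabelSelectorArgs(
        match_labels={'topology.kubernetes.io/zone': 'us-east-1a'},
    ),
)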
@property @pulumi.getter(name='storageClassName') def storage_class_name(self) -> pulumi.Input[str]: '\n The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.\n ' return pulumi.get(self, 'storage_class_name')
-7,997,320,526,267,109,000
The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
storage_class_name
Teshel/pulumi-kubernetes
python
@property @pulumi.getter(name='storageClassName') def storage_class_name(self) -> pulumi.Input[str]: '\n \n ' return pulumi.get(self, 'storage_class_name')
@property @pulumi.getter(name='apiVersion') def api_version(self) -> Optional[pulumi.Input[str]]: '\n APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n ' return pulumi.get(self, 'api_version')
2,540,031,417,868,839,000
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
api_version
Teshel/pulumi-kubernetes
python
@property @pulumi.getter(name='apiVersion') def api_version(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'api_version')
@property @pulumi.getter def capacity(self) -> Optional[pulumi.Input[str]]: '\n Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\n The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.\n ' return pulumi.get(self, 'capacity')
6,273,461,043,150,981,000
Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
capacity
Teshel/pulumi-kubernetes
python
@property @pulumi.getter def capacity(self) -> Optional[pulumi.Input[str]]: '\n Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\n The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.\n ' return pulumi.get(self, 'capacity')
@property @pulumi.getter def kind(self) -> Optional[pulumi.Input[str]]: '\n Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n ' return pulumi.get(self, 'kind')
-7,224,738,725,622,071,000
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
kind
Teshel/pulumi-kubernetes
python
@property @pulumi.getter def kind(self) -> Optional[pulumi.Input[str]]: '\n \n ' return pulumi.get(self, 'kind')
@property @pulumi.getter(name='maximumVolumeSize') def maximum_volume_size(self) -> Optional[pulumi.Input[str]]: '\n MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\n This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.\n ' return pulumi.get(self, 'maximum_volume_size')
-8,693,613,806,452,367,000
MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
maximum_volume_size
Teshel/pulumi-kubernetes
python
@property @pulumi.getter(name='maximumVolumeSize') def maximum_volume_size(self) -> Optional[pulumi.Input[str]]: '\n MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\n This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.\n ' return pulumi.get(self, 'maximum_volume_size')
@property @pulumi.getter def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]: "\n Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\n Objects are namespaced.\n\n More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n " return pulumi.get(self, 'metadata')
3,211,347,561,544,616,000
Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
metadata
Teshel/pulumi-kubernetes
python
@property @pulumi.getter def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]: "\n Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\n Objects are namespaced.\n\n More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n " return pulumi.get(self, 'metadata')
@property @pulumi.getter(name='nodeTopology') def node_topology(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]: '\n NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.\n ' return pulumi.get(self, 'node_topology')
1,422,756,585,959,106,600
NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
node_topology
Teshel/pulumi-kubernetes
python
@property @pulumi.getter(name='nodeTopology') def node_topology(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]: '\n \n ' return pulumi.get(self, 'node_topology')
@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, api_version: Optional[pulumi.Input[str]]=None, capacity: Optional[pulumi.Input[str]]=None, kind: Optional[pulumi.Input[str]]=None, maximum_volume_size: Optional[pulumi.Input[str]]=None, metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]]=None, node_topology: Optional[pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']]]=None, storage_class_name: Optional[pulumi.Input[str]]=None, __props__=None): '\n CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\n For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"\n\n The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\n The producer of these objects can decide which approach is more suitable.\n\n They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n \n The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.\n :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n \n This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.\n :param pulumi.Input[pulumi.InputType[\'_meta.v1.ObjectMetaArgs\']] metadata: Standard object\'s metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n \n Objects are namespaced.\n \n More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n :param pulumi.Input[pulumi.InputType[\'_meta.v1.LabelSelectorArgs\']] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.\n :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.\n ' ...
2,079,734,043,246,291,500
CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes. For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123" The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero The producer of these objects can decide which approach is more suitable. They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity. :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim. :param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata :param pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable. :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
__init__
Teshel/pulumi-kubernetes
python
@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, api_version: Optional[pulumi.Input[str]]=None, capacity: Optional[pulumi.Input[str]]=None, kind: Optional[pulumi.Input[str]]=None, maximum_volume_size: Optional[pulumi.Input[str]]=None, metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]]=None, node_topology: Optional[pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']]]=None, storage_class_name: Optional[pulumi.Input[str]]=None, __props__=None): '\n CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\n For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"\n\n The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\n The producer of these objects can decide which approach is more suitable.\n\n They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n \n The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.\n :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n \n This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.\n :param pulumi.Input[pulumi.InputType[\'_meta.v1.ObjectMetaArgs\']] metadata: Standard object\'s metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n \n Objects are namespaced.\n \n More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n :param pulumi.Input[pulumi.InputType[\'_meta.v1.LabelSelectorArgs\']] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.\n :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.\n ' ...
@overload def __init__(__self__, resource_name: str, args: CSIStorageCapacityArgs, opts: Optional[pulumi.ResourceOptions]=None): '\n CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\n For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"\n\n The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\n The producer of these objects can decide which approach is more suitable.\n\n They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.\n\n :param str resource_name: The name of the resource.\n :param CSIStorageCapacityArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n ' ...
-1,265,522,706,516,158,700
CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes. For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123" The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero The producer of these objects can decide which approach is more suitable. They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity. :param str resource_name: The name of the resource. :param CSIStorageCapacityArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
__init__
Teshel/pulumi-kubernetes
python
@overload def __init__(__self__, resource_name: str, args: CSIStorageCapacityArgs, opts: Optional[pulumi.ResourceOptions]=None): '\n CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\n For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"\n\n The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\n The producer of these objects can decide which approach is more suitable.\n\n They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.\n\n :param str resource_name: The name of the resource.\n :param CSIStorageCapacityArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n ' ...
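For orientation, a minimal usage sketch of the constructor overloads above follows; the resource name, storage class, quantity string, and topology label are illustrative assumptions, not values taken from these records.

import pulumi_kubernetes as k8s

# Hypothetical values throughout; only storage_class_name is required by the Args class above.
example = k8s.storage.v1beta1.CSIStorageCapacity(
    'example-capacity',                # Pulumi resource name (assumption)
    storage_class_name='standard',     # must name an existing StorageClass
    capacity='1234Gi',                 # quantity string as reported by the CSI driver
    node_topology=k8s.meta.v1.LabelSelectorArgs(
        match_labels={'topology.kubernetes.io/zone': 'us-east1'}))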
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'CSIStorageCapacity': "\n Get an existing CSIStorageCapacity resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = CSIStorageCapacityArgs.__new__(CSIStorageCapacityArgs) __props__.__dict__['api_version'] = None __props__.__dict__['capacity'] = None __props__.__dict__['kind'] = None __props__.__dict__['maximum_volume_size'] = None __props__.__dict__['metadata'] = None __props__.__dict__['node_topology'] = None __props__.__dict__['storage_class_name'] = None return CSIStorageCapacity(resource_name, opts=opts, __props__=__props__)
4,905,509,871,798,751,000
Get an existing CSIStorageCapacity resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
get
Teshel/pulumi-kubernetes
python
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'CSIStorageCapacity': "\n Get an existing CSIStorageCapacity resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = CSIStorageCapacityArgs.__new__(CSIStorageCapacityArgs) __props__.__dict__['api_version'] = None __props__.__dict__['capacity'] = None __props__.__dict__['kind'] = None __props__.__dict__['maximum_volume_size'] = None __props__.__dict__['metadata'] = None __props__.__dict__['node_topology'] = None __props__.__dict__['storage_class_name'] = None return CSIStorageCapacity(resource_name, opts=opts, __props__=__props__)
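A companion sketch for the get method above; the local resource name and provider ID are placeholders for an object that already exists in the cluster.

# Hypothetical lookup; 'default/csisc-abc123' stands in for a real provider ID.
existing = CSIStorageCapacity.get(
    'imported-capacity',          # local resource name (assumption)
    id='default/csisc-abc123')    # provider ID of the existing object (placeholder)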
@property @pulumi.getter(name='apiVersion') def api_version(self) -> pulumi.Output[Optional[str]]: '\n APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n ' return pulumi.get(self, 'api_version')
3,559,738,647,590,309,000
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
api_version
Teshel/pulumi-kubernetes
python
@property @pulumi.getter(name='apiVersion') def api_version(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'api_version')
@property @pulumi.getter def capacity(self) -> pulumi.Output[Optional[str]]: '\n Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\n The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.\n ' return pulumi.get(self, 'capacity')
-2,227,383,833,680,026,600
Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
capacity
Teshel/pulumi-kubernetes
python
@property @pulumi.getter def capacity(self) -> pulumi.Output[Optional[str]]: '\n Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\n The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.\n ' return pulumi.get(self, 'capacity')
@property @pulumi.getter def kind(self) -> pulumi.Output[Optional[str]]: '\n Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n ' return pulumi.get(self, 'kind')
3,342,389,159,407,362,600
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
kind
Teshel/pulumi-kubernetes
python
@property @pulumi.getter def kind(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'kind')
@property @pulumi.getter(name='maximumVolumeSize') def maximum_volume_size(self) -> pulumi.Output[Optional[str]]: '\n MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\n This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.\n ' return pulumi.get(self, 'maximum_volume_size')
-1,424,734,522,101,174,300
MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields. This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
maximum_volume_size
Teshel/pulumi-kubernetes
python
@property @pulumi.getter(name='maximumVolumeSize') def maximum_volume_size(self) -> pulumi.Output[Optional[str]]: '\n MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\n This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.\n ' return pulumi.get(self, 'maximum_volume_size')
@property @pulumi.getter def metadata(self) -> pulumi.Output[Optional['_meta.v1.outputs.ObjectMeta']]: "\n Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\n Objects are namespaced.\n\n More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n " return pulumi.get(self, 'metadata')
-1,247,645,133,006,876,400
Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name. Objects are namespaced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
metadata
Teshel/pulumi-kubernetes
python
@property @pulumi.getter def metadata(self) -> pulumi.Output[Optional['_meta.v1.outputs.ObjectMeta']]: "\n Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\n Objects are namespaced.\n\n More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n " return pulumi.get(self, 'metadata')
@property @pulumi.getter(name='nodeTopology') def node_topology(self) -> pulumi.Output[Optional['_meta.v1.outputs.LabelSelector']]: '\n NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.\n ' return pulumi.get(self, 'node_topology')
551,171,195,398,054,000
NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
node_topology
Teshel/pulumi-kubernetes
python
@property @pulumi.getter(name='nodeTopology') def node_topology(self) -> pulumi.Output[Optional['_meta.v1.outputs.LabelSelector']]: '\n \n ' return pulumi.get(self, 'node_topology')
@property @pulumi.getter(name='storageClassName') def storage_class_name(self) -> pulumi.Output[str]: '\n The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.\n ' return pulumi.get(self, 'storage_class_name')
-8,851,942,988,545,265,000
The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py
storage_class_name
Teshel/pulumi-kubernetes
python
@property @pulumi.getter(name='storageClassName') def storage_class_name(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'storage_class_name')
def is_git_dir(self): '\n Check whether the repository path is a git directory.\n\n @return: bool\n ' d = (self.path + '/.git') if osp.isdir(d): if (osp.isdir(osp.join(d, 'objects')) and osp.isdir(osp.join(d, 'refs'))): headref = osp.join(d, 'HEAD') return (osp.isfile(headref) or (osp.islink(headref) and os.readlink(headref).startswith('refs'))) elif (osp.isfile(osp.join(d, 'gitdir')) and osp.isfile(osp.join(d, 'commondir')) and osp.isfile(osp.join(d, 'gitfile'))): return False return False
1,796,436,861,509,386,200
Check whether the repository path is a git directory. @return: bool
walle/service/git/repo.py
is_git_dir
lgq9220/walle-web
python
def is_git_dir(self): '\n Check whether the repository path is a git directory.\n\n @return: bool\n ' d = (self.path + '/.git') if osp.isdir(d): if (osp.isdir(osp.join(d, 'objects')) and osp.isdir(osp.join(d, 'refs'))): headref = osp.join(d, 'HEAD') return (osp.isfile(headref) or (osp.islink(headref) and os.readlink(headref).startswith('refs'))) elif (osp.isfile(osp.join(d, 'gitdir')) and osp.isfile(osp.join(d, 'commondir')) and osp.isfile(osp.join(d, 'gitfile'))): return False return False
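For reference, the HEAD-file test above can be exercised standalone; this sketch re-implements the core check outside the class (it is not the project's code):

import os
import os.path as osp

def looks_like_git_dir(path):
    # Mirrors the checks above: .git must contain objects/, refs/ and a plausible HEAD.
    d = osp.join(path, '.git')
    if not (osp.isdir(osp.join(d, 'objects')) and osp.isdir(osp.join(d, 'refs'))):
        return False
    headref = osp.join(d, 'HEAD')
    return osp.isfile(headref) or (osp.islink(headref) and os.readlink(headref).startswith('refs'))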
def clone(self, url): '\n Clone the project.\n\n @param url:\n @return:\n ' return PyRepo.clone_from(url, self.path)
-4,446,506,467,057,233,000
Clone the project. @param url: @return:
walle/service/git/repo.py
clone
lgq9220/walle-web
python
def clone(self, url): '\n Clone the project.\n\n @param url:\n @return:\n ' return PyRepo.clone_from(url, self.path)
def pull(self): '\n Pull the latest changes for the project.\n\n @return:\n ' repo = PyRepo(self.path) return repo.remote().pull()
-2,441,616,973,202,321,000
Pull the latest changes for the project. @return:
walle/service/git/repo.py
pull
lgq9220/walle-web
python
def pull(self): '\n Pull the latest changes for the project.\n\n @return:\n ' repo = PyRepo(self.path) return repo.remote().pull()
def checkout_2_commit(self, branch, commit): '\n @todo incomplete\n @param branch:\n @param commit:\n @return:\n ' PyRepo(self.path).git.checkout(branch) PyRepo(self.path).head.set_commit(commit)
-5,856,124,168,358,864,000
@todo incomplete @param branch: @param commit: @return:
walle/service/git/repo.py
checkout_2_commit
lgq9220/walle-web
python
def checkout_2_commit(self, branch, commit): '\n @todo incomplete\n @param branch:\n @param commit:\n @return:\n ' PyRepo(self.path).git.checkout(branch) PyRepo(self.path).head.set_commit(commit)
def branches(self): '\n Get all remote branches.\n\n @return:\n ' branches = PyRepo(self.path).remote().refs return [str(branch).strip().lstrip('origin').lstrip('/') for branch in branches if (not str(branch).strip().startswith('origin/HEAD'))]
2,956,419,816,064,421,400
Get all remote branches. @return:
walle/service/git/repo.py
branches
lgq9220/walle-web
python
def branches(self): '\n Get all remote branches.\n\n @return:\n ' branches = PyRepo(self.path).remote().refs return [str(branch).strip().lstrip('origin').lstrip('/') for branch in branches if (not str(branch).strip().startswith('origin/HEAD'))]
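Note that the lstrip('origin').lstrip('/') chain above strips characters, not a prefix; it only works here because the '/' separator halts the character-wise strip. A more explicit equivalent for refs shaped like 'origin/<branch>' (a sketch, not the project's code):

def strip_remote_prefix(ref_name, remote='origin'):
    # Remove the 'origin/' prefix explicitly instead of stripping a character set.
    prefix = remote + '/'
    return ref_name[len(prefix):] if ref_name.startswith(prefix) else ref_name

assert strip_remote_prefix('origin/release-1.2') == 'release-1.2'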
def tags(self): '\n Get all tags.\n\n @return:\n ' return [str(tag) for tag in PyRepo(self.path).tags]
3,719,169,435,290,927,000
Get all tags. @return:
walle/service/git/repo.py
tags
lgq9220/walle-web
python
def tags(self): '\n Get all tags.\n\n @return:\n ' return [str(tag) for tag in PyRepo(self.path).tags]
def commits(self, branch): '\n Get the commits of a branch.\n\n @param branch:\n @return:\n ' self.checkout_2_branch(branch) commit_log = PyGit.Git(self.path).log('--pretty=%h #@_@# %an #@_@# %s', max_count=50) commit_list = commit_log.split('\n') commits = [] from flask import current_app for commit in commit_list: if (not re.search('^.+ #@_@# .+ #@_@# .*$', commit)): continue commit_dict = commit.split(' #@_@# ') current_app.logger.info(commit_dict) commits.append({'id': commit_dict[0], 'name': commit_dict[1], 'message': commit_dict[2]}) return commits
7,243,843,317,773,868,000
Get the commits of a branch. @param branch: @return:
walle/service/git/repo.py
commits
lgq9220/walle-web
python
def commits(self, branch): '\n Get the commits of a branch.\n\n @param branch:\n @return:\n ' self.checkout_2_branch(branch) commit_log = PyGit.Git(self.path).log('--pretty=%h #@_@# %an #@_@# %s', max_count=50) commit_list = commit_log.split('\n') commits = [] from flask import current_app for commit in commit_list: if (not re.search('^.+ #@_@# .+ #@_@# .*$', commit)): continue commit_dict = commit.split(' #@_@# ') current_app.logger.info(commit_dict) commits.append({'id': commit_dict[0], 'name': commit_dict[1], 'message': commit_dict[2]}) return commits
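For clarity, the --pretty format above emits one '#@_@#'-delimited line per commit; a worked example with a made-up log line:

# Made-up sample of one line from: git log --pretty='%h #@_@# %an #@_@# %s'
sample = 'a1b2c3d #@_@# alice #@_@# fix deploy script'
short_hash, author, subject = sample.split(' #@_@# ')
# Matches the dict shape built by commits():
record = {'id': short_hash, 'name': author, 'message': subject}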
def test_regular(self, posturl, s3conn, pubsub, crash_generator): 'Post a valid crash and verify the contents made it to S3.' (raw_crash, dumps) = crash_generator.generate() crash_payload = mini_poster.assemble_crash_payload_dict(raw_crash, dumps) resp = mini_poster.post_crash(posturl, crash_payload, dumps) time.sleep(SLEEP_TIME) crash_id = content_to_crashid(resp.content) logger.debug('Crash ID is: %s', crash_id) logger.debug('S3conn: %s', s3conn.get_config()) verifier = CrashVerifier() verifier.verify_stored_data(crash_id, raw_crash, dumps, s3conn) verifier.verify_published_data(crash_id, pubsub)
99,417,856,098,789,150
Post a valid crash and verify the contents made it to S3.
tests/systemtest/test_post_crash.py
test_regular
Mozilla-GitHub-Standards/ca053cb8c97310481ca4524f115cd80002b8bbd773c6bdc00eb9955dd3d48e83
python
def test_regular(self, posturl, s3conn, pubsub, crash_generator): (raw_crash, dumps) = crash_generator.generate() crash_payload = mini_poster.assemble_crash_payload_dict(raw_crash, dumps) resp = mini_poster.post_crash(posturl, crash_payload, dumps) time.sleep(SLEEP_TIME) crash_id = content_to_crashid(resp.content) logger.debug('Crash ID is: %s', crash_id) logger.debug('S3conn: %s', s3conn.get_config()) verifier = CrashVerifier() verifier.verify_stored_data(crash_id, raw_crash, dumps, s3conn) verifier.verify_published_data(crash_id, pubsub)
def test_compressed_crash(self, posturl, s3conn, pubsub, crash_generator): 'Post a compressed crash and verify contents made it to S3.' (raw_crash, dumps) = crash_generator.generate() crash_payload = mini_poster.assemble_crash_payload_dict(raw_crash, dumps) resp = mini_poster.post_crash(posturl, crash_payload, compressed=True) time.sleep(SLEEP_TIME) crash_id = content_to_crashid(resp.content) logger.debug('Crash ID is: %s', crash_id) logger.debug('S3conn: %s', s3conn.get_config()) verifier = CrashVerifier() verifier.verify_stored_data(crash_id, raw_crash, dumps, s3conn) verifier.verify_published_data(crash_id, pubsub)
-54,558,160,317,365,016
Post a compressed crash and verify contents made it to S3.
tests/systemtest/test_post_crash.py
test_compressed_crash
Mozilla-GitHub-Standards/ca053cb8c97310481ca4524f115cd80002b8bbd773c6bdc00eb9955dd3d48e83
python
def test_compressed_crash(self, posturl, s3conn, pubsub, crash_generator): (raw_crash, dumps) = crash_generator.generate() crash_payload = mini_poster.assemble_crash_payload_dict(raw_crash, dumps) resp = mini_poster.post_crash(posturl, crash_payload, compressed=True) time.sleep(SLEEP_TIME) crash_id = content_to_crashid(resp.content) logger.debug('Crash ID is: %s', crash_id) logger.debug('S3conn: %s', s3conn.get_config()) verifier = CrashVerifier() verifier.verify_stored_data(crash_id, raw_crash, dumps, s3conn) verifier.verify_published_data(crash_id, pubsub)
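The compressed test path presumably gzips the crash payload before posting; a generic sketch of gzip-compressing a body and flagging it with Content-Encoding (this is not mini_poster's actual implementation):

import gzip
import io

body = b'...multipart/form-data payload...'  # placeholder bytes
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as gz:
    gz.write(body)
compressed = buf.getvalue()
headers = {'Content-Encoding': 'gzip'}  # lets the collector inflate the body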
@app.route('/', methods=['GET']) def server_status(): '\n Get status.\n ---\n describe: get status\n responses:\n 200:\n description: OK\n ' logger.info('GET /') return ('', 200)
-8,569,340,285,813,509,000
Get status. --- describe: get status responses: 200: description: OK
dcs_rest_client.py
server_status
5GEVE/5geve-wp4-dcs-signalling-topic-handler
python
@app.route('/', methods=['GET']) def server_status(): '\n Get status.\n ---\n describe: get status\n responses:\n 200:\n description: OK\n ' logger.info('GET /') return ('', 200)
@app.route('/spec', methods=['GET']) def spec(): '\n Get swagger specification.\n ---\n describe: get swagger specification\n responses:\n swagger:\n description: swagger specification\n ' swag = swagger(app) swag['info']['version'] = '1.0' swag['info']['title'] = 'DCS REST API' return jsonify(swag)
3,071,032,749,776,101,000
Get swagger specification. --- describe: get swagger specification responses: swagger: description: swagger specification
dcs_rest_client.py
spec
5GEVE/5geve-wp4-dcs-signalling-topic-handler
python
@app.route('/spec', methods=['GET']) def spec(): '\n Get swagger specification.\n ---\n describe: get swagger specification\n responses:\n swagger:\n description: swagger specification\n ' swag = swagger(app) swag['info']['version'] = '1.0' swag['info']['title'] = 'DCS REST API' return jsonify(swag)
@app.route('/portal/dcs/start_signalling/', methods=['POST']) def start_dcs(): '\n Start signalling topics.\n ---\n describe: start signalling topics\n responses:\n 201:\n description: accepted request\n 400:\n description: error processing the request\n ' logger.info('Request received - POST /portal/dcs/start_signalling/') try: start_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure)) start_consuming_signalling_topic(json.dumps(signalling_metric_application)) start_consuming_signalling_topic(json.dumps(signalling_kpi)) except Exception as e: logger.error('Error while parsing request') logger.exception(e) return (str(e), 400) return ('', 201)
6,490,691,433,022,938,000
Start signalling topics. --- describe: start signalling topics responses: 201: description: accepted request 400: description: error processing the request
dcs_rest_client.py
start_dcs
5GEVE/5geve-wp4-dcs-signalling-topic-handler
python
@app.route('/portal/dcs/start_signalling/', methods=['POST']) def start_dcs(): '\n Start signalling topics.\n ---\n describe: start signalling topics\n responses:\n 201:\n description: accepted request\n 400:\n description: error processing the request\n ' logger.info('Request received - POST /portal/dcs/start_signalling/') try: start_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure)) start_consuming_signalling_topic(json.dumps(signalling_metric_application)) start_consuming_signalling_topic(json.dumps(signalling_kpi)) except Exception as e: logger.error('Error while parsing request') logger.exception(e) return (str(e), 400) return ('', 201)
@app.route('/portal/dcs/stop_signalling/', methods=['DELETE']) def stop_dcs(): '\n Stop signalling topics.\n ---\n describe: stop signalling topics\n responses:\n 201:\n description: accepted request\n 400:\n description: error processing the request\n ' logger.info('Request received - DELETE /portal/dcs/stop_signalling/') try: stop_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure)) stop_consuming_signalling_topic(json.dumps(signalling_metric_application)) stop_consuming_signalling_topic(json.dumps(signalling_kpi)) except Exception as e: logger.error('Error while parsing request') logger.exception(e) return (str(e), 400) return ('', 201)
-1,923,893,096,943,920,000
Stop signalling topics. --- describe: stop signalling topics responses: 201: description: accepted request 400: description: error processing the request
dcs_rest_client.py
stop_dcs
5GEVE/5geve-wp4-dcs-signalling-topic-handler
python
@app.route('/portal/dcs/stop_signalling/', methods=['DELETE']) def stop_dcs(): '\n Stop signalling topics.\n ---\n describe: stop signalling topics\n responses:\n 201:\n description: accepted request\n 400:\n description: error processing the request\n ' logger.info('Request received - DELETE /portal/dcs/stop_signalling/') try: stop_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure)) stop_consuming_signalling_topic(json.dumps(signalling_metric_application)) stop_consuming_signalling_topic(json.dumps(signalling_kpi)) except Exception as e: logger.error('Error while parsing request') logger.exception(e) return (str(e), 400) return ('', 201)
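A hedged client-side sketch for the two signalling routes above; the host and port are assumptions about the deployment, and the requests package is assumed to be available.

import requests

base = 'http://localhost:8090'  # host/port are assumptions, not from these records
resp = requests.post(base + '/portal/dcs/start_signalling/')
print(resp.status_code)         # 201 on success, 400 if the signalling setup failed
resp = requests.delete(base + '/portal/dcs/stop_signalling/')
print(resp.status_code)         # 201 on success, 400 on failure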