code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
m = float(m)

def f(x):
    if x <= m:
        return 0.0
    return (x - m) / (1.0 - m)

return f
def black(m)
Return a function that maps all values from [0.0,m] to 0, and maps the range [m,1.0] into [0.0, 1.0] linearly.
4.116157
2.574848
1.598602
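A quick behavioural sketch, assuming the black() function above:

f = black(0.5)
assert f(0.25) == 0.0  # everything at or below m maps to 0
assert f(0.75) == 0.5  # [m, 1.0] maps linearly onto [0.0, 1.0]
assert f(1.0) == 1.0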
# Write out PNG signature.
out.write(bytearray([137, 80, 78, 71, 13, 10, 26, 10]))
# Write out PNG header chunk.
header = struct.pack(">2LBBBBB", size[0], size[1], 8, 0, 0, 0, 0)
write_chunk(out, b"IHDR", header)
bs = bytearray()
for row in rows:
    bs.append(0)
    bs.extend(row)
write_chunk(out, b"IDAT", zlib.compress(bs))
write_chunk(out, b"IEND", bytearray())
def rows_to_png(out, rows, size)
Write to the binary file `out` a single channel 8-bit PNG. `rows` should yield each row in turn; `size` should be the tuple of (width, height) in pixels.
2.465785
2.537322
0.971806
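A hedged usage sketch for rows_to_png() above; it assumes the write_chunk helper the function calls is available in the same module:

# Write an 8x8 horizontal greyscale gradient.
rows = [[x * 32 for x in range(8)] for _ in range(8)]
with open("gradient.png", "wb") as out:
    rows_to_png(out, rows, (8, 8))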
i = ord(i)
if i not in font:
    return [(0,)] * 8
return [(ord(row),) for row in font[i].decode('hex')]
def char(i)
Get image data for the character `i` (a one character string). Returned as a list of rows. Each row is a tuple containing the packed pixels.
11.02781
6.969338
1.582332
lines = m.split('\n')
maxlen = max(len(line) for line in lines)
justified = [line.ljust(maxlen) for line in lines]
rasters = [linetoraster(line) for line in justified]
x, = set(r[0] for r in rasters)
y = sum(r[1] for r in rasters)
raster = itertools.chain(*(r[2] for r in rasters))
return x, y, raster
def texttoraster(m)
Convert the string *m* to a raster image. Any newlines in *m* will cause more than one line of output. The resulting raster will be taller. Prior to rendering each line, it is padded on the right with enough spaces to make all lines the same length.
3.233307
3.036874
1.064683
# Assumes monospaced font.
x = 8 * len(m)
y = 8
return x, y, [itertools.chain(*row) for row in zip(*map(char, m))]
def linetoraster(m)
Convert a single line of text *m* to a raster image, by rendering it using the font in *font*. A triple of (*width*, *height*, *pixels*) is returned; *pixels* is in boxed row packed pixel format.
10.283542
9.082083
1.132289
def desc(ascii):
    ascii += '\x00'
    n = len(ascii)
    return struct.pack('>L%ds2LHB67s' % n, n, ascii, 0, 0, 0, 0, '')

def text(ascii):
    return ascii + '\x00'

def curv(f=None, n=256):
    if f is None:
        return struct.pack('>L', 0)
    try:
        if float(f) == f:
            return struct.pack('>LH', 1, int(round(f * 2 ** 8)))
    except (TypeError, ValueError):
        pass
    assert n >= 2
    table = []
    M = float(n - 1)
    for i in range(n):
        x = i / M
        table.append(int(round(f(x) * 65535)))
    return struct.pack('>L%dH' % n, n, *table)

def XYZ(*l):
    return struct.pack('>3l', *map(fs15f16, l))

return locals()
def encodefuns()
Returns a dictionary mapping ICC type signature sig to encoding function. Each function returns a string comprising the content of the encoded value. To form the full value, the type sig and the 4 zero bytes should be prefixed (8 bytes).
4.350791
4.257093
1.02201
fun = encodefuns()
if tsig not in fun:
    # A bare string cannot be raised as an exception; wrap it in a type.
    raise LookupError("No encoder for type %r." % tsig)
v = fun[tsig](*l)
# Pad tsig out with spaces.
tsig = (tsig + '   ')[:4]
return tsig + ('\x00' * 4) + v
def encode(tsig, *l)
Encode a Python value as an ICC type. `tsig` is the type signature (the first 4 bytes of the encoded value; see [ICC 2004] section 10).
8.015568
7.650134
1.047768
n = len(tag)
tablelen = 12 * n
# Build the tag table in two parts.  A list of 12-byte tags, and a
# string of element data.  Offset is the offset from the start of
# the profile to the start of the element data (so the offset for
# the next element is this offset plus the length of the element
# string so far).
offset = 128 + tablelen + 4
# The table.  As a string.
table = ''
# The element data
element = ''
for k, v in tag:
    table += struct.pack('>4s2L', k, offset + len(element), len(v))
    element += v
return struct.pack('>L', n) + table + element
def tagblock(tag)
`tag` should be a list of (*signature*, *element*) pairs, where *signature* (the key) is a length 4 string, and *element* is the content of the tag element (another string). The entire tag block (consisting of first a table and then the element data) is constructed and returned as a string.
5.668315
4.8708
1.163734
r = png.Reader(file=inp)
_, chunk = r.chunk('iCCP')
i = chunk.index(b'\x00')
name = chunk[:i]
compression = chunk[i + 1]
assert compression == 0
profile = zlib.decompress(chunk[i + 2:])
return profile, name
def profileFromPNG(inp)
Extract profile from PNG file. Return (*profile*, *name*) pair.
5.353702
4.755358
1.125825
import time

if t is None:
    t = time.gmtime()
return struct.pack('>6H', *t[:6])
def writeICCdatetime(t=None)
`t` should be a gmtime tuple (as returned from ``time.gmtime()``). If not supplied, the current time will be used. Return an ICC dateTimeNumber in a 12 byte string.
4.543625
3.501036
1.297795
# Note: As long as float has at least 32 bits of mantissa, all
# values are preserved.
n = len(s) // 4
t = struct.unpack('>%dl' % n, s)
return map((2 ** -16).__mul__, t)
def s15f16l(s)
Convert sequence of ICC s15Fixed16 to list of float.
8.135667
6.630032
1.227093
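A worked example for s15f16l() above: an s15Fixed16 value is a big-endian signed 32-bit integer scaled by 2**-16, so 0x00010000 decodes to 1.0 and 0x00008000 to 0.5.

import struct

raw = struct.pack('>2l', 0x00010000, 0x00008000)
assert list(s15f16l(raw)) == [1.0, 0.5]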
sig = s[0:4].strip()
f = dict(text=RDtext,
         XYZ=RDXYZ,
         curv=RDcurv,
         vcgt=RDvcgt,
         sf32=RDsf32,
         )
if sig not in f:
    return None
return (sig, f[sig](s))
def ICCdecode(s)
Take an ICC encoded tag, and dispatch on its type signature (first 4 bytes) to decode it into a Python value. Pair (*sig*, *value*) is returned, where *sig* is a 4 byte string, and *value* is some Python value determined by the content and type.
8.84529
7.502665
1.178953
# See [ICC 2004] 10.13
assert s[0:4] == 'mluc'
n, sz = struct.unpack('>2L', s[8:16])
assert sz == 12
record = []
for i in range(n):
    # Index each 12-byte record by the loop variable (the original
    # indexed by n, which re-read the same record every iteration),
    # and unpack big-endian as elsewhere in this module.
    lc, l, o = struct.unpack('>4s2L', s[16 + 12 * i: 28 + 12 * i])
    record.append((lc, s[o: o + l]))
# How are strings encoded?
return record
def RDmluc(s)
Convert ICC multiLocalizedUnicodeType. This type encodes several strings together with a language/country code for each string. A list of (*lc*, *string*) pairs is returned where *lc* is the 4 byte language/country code, and *string* is the string corresponding to that code. It seems unlikely that the same language/country code will appear more than once with different strings, but the ICC standard does not prohibit it.
6.138122
5.108233
1.201613
# See [ICC 2001] 6.5.3
assert s[0:4] == 'curv'
count, = struct.unpack('>L', s[8:12])
if count == 0:
    return dict(gamma=1)
table = struct.unpack('>%dH' % count, s[12:])
if count == 1:
    return dict(gamma=table[0] * 2 ** -8)
return table
def RDcurv(s)
Convert ICC curveType.
4.904225
4.275649
1.147013
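A small Python 2 sketch of the count == 1 case of RDcurv() above (matching the byte-string semantics of the code): gamma is stored as u8Fixed8, so 2.2 encodes as round(2.2 * 256) = 563.

import struct

tag = 'curv' + '\x00' * 4 + struct.pack('>LH', 1, 563)
print(RDcurv(tag))  # {'gamma': 2.19921875}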
# See
# http://developer.apple.com/documentation/GraphicsImaging/Reference/ColorSync_Manager/Reference/reference.html#//apple_ref/c/tdef/CMVideoCardGammaType
assert s[0:4] == 'vcgt'
tagtype, = struct.unpack('>L', s[8:12])
if tagtype != 0:
    return s[8:]
if tagtype == 0:
    # Table.
    channels, count, size = struct.unpack('>3H', s[12:18])
    if size == 1:
        fmt = 'B'
    elif size == 2:
        fmt = 'H'
    else:
        return s[8:]
    n = len(s[18:]) // size
    t = struct.unpack('>%d%s' % (n, fmt), s[18:])
    t = group(t, count)
    return size, t
return s[8:]
def RDvcgt(s)
Convert Apple CMVideoCardGammaType.
4.411376
3.661646
1.204752
self.d.update(dict(profileclass='scnr',
                   colourspace='GRAY',
                   pcs='XYZ '))
return self
def greyInput(self)
Adjust ``self.d`` dictionary for greyscale input device. ``profileclass`` is 'scnr', ``colourspace`` is 'GRAY', ``pcs`` is 'XYZ '.
79.05407
3.386621
23.343049
for tag, thing in k.items():
    if not isinstance(thing, (tuple, list)):
        thing = (thing,)
    typetag = defaulttagtype[tag]
    self.rawtagdict[tag] = encode(typetag, *thing)
return self
def _addTags(self, **k)
Helper for :meth:`addTags`.
7.087699
6.605115
1.073062
if not self.rawtagtable:
    self.rawtagtable = self.rawtagdict.items()
tags = tagblock(self.rawtagtable)
self.writeHeader(out, 128 + len(tags))
out.write(tags)
out.flush()
return self
def write(self, out)
Write ICC Profile to the file.
6.406216
6.013569
1.065293
def defaultkey(d, key, value):
    if key in d:
        return
    d[key] = value

z = '\x00' * 4
defaults = dict(preferredCMM=z,
                version='02000000',
                profileclass=z,
                colourspace=z,
                pcs='XYZ ',
                created=writeICCdatetime(),
                acsp='acsp',
                platform=z,
                flag=0,
                manufacturer=z,
                model=0,
                deviceattributes=0,
                intent=0,
                pcsilluminant=encodefuns()['XYZ'](*D50()),
                creator=z,
                )
for k, v in defaults.items():
    defaultkey(self.d, k, v)
hl = map(self.d.__getitem__,
         ['preferredCMM', 'version', 'profileclass', 'colourspace',
          'pcs', 'created', 'acsp', 'platform', 'flag', 'manufacturer',
          'model', 'deviceattributes', 'intent', 'pcsilluminant',
          'creator'])
# Convert to struct.pack input
hl[1] = int(hl[1], 16)
out.write(struct.pack('>L4sL4s4s4s12s4s4sL4sLQL12s4s', size, *hl))
out.write('\x00' * 44)
return self
def writeHeader(self, out, size=999)
Add default values to the instance's `d` dictionary, then write a header out onto the file stream. The size of the profile must be specified using the `size` argument.
6.122104
5.851252
1.04629
r = f.read(11)
if r == 'compressed\n':
    png(output, *decompress(f))
else:
    png(output, *glue(f, r))
def convert(f, output=sys.stdout)
Convert Plan 9 file to PNG format. Works with either uncompressed or compressed files.
8.951745
8.101061
1.105009
r = r + f.read(60 - len(r))
# Parse the 60-byte header into the 5-word metadata tuple; the raw
# string alone is not what downstream consumers (see pixmeta) expect.
return (meta(r), f)
def glue(f, r)
Return (metadata, stream) pair where `r` is the initial portion of the metadata that has already been read from the stream `f`.
11.203545
8.859032
1.264647
r = r.split()
# :todo: raise FormatError
assert len(r) == 5
r = [r[0]] + map(int, r[1:])
return r
def meta(r)
Convert the 60-character string `r`, the metadata from an image file. Returns a 5-tuple (*chan*,*minx*,*miny*,*limx*,*limy*). 5-tuples may settle into lists in transit. As per http://plan9.bell-labs.com/magic/man2html/6/image the metadata comprises 5 words separated by blanks. As it happens each word starts at an index that is a multiple of 12, but this routine does not care about that.
8.510145
7.562849
1.125257
maxd = 0
for c in re.findall(r'[a-z]\d*', pixel):
    if c[0] != 'x':
        maxd = max(maxd, int(c[1:]))
return maxd
def bitdepthof(pixel)
Return the bitdepth for a Plan9 pixel format string.
4.600577
3.845007
1.196507
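A quick illustration, assuming bitdepthof() above; 'x' channels are padding and are ignored:

assert bitdepthof('k8') == 8        # 8-bit greyscale
assert bitdepthof('r8g8b8a8') == 8  # max over the channel depths
assert bitdepthof('x4k4') == 4      # the 'x' padding channel is skipped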
chan, minx, miny, limx, limy = metadata
rows = limy - miny
width = limx - minx
nchans = len(re.findall('[a-wyz]', chan))
alpha = 'a' in chan
# Iverson's convention for the win!
ncolour = nchans - alpha
greyscale = ncolour == 1
bitdepth = bitdepthof(chan)
maxval = 2 ** bitdepth - 1
# PNG style metadata
meta = dict(size=(width, rows),
            bitdepth=bitdepthof(chan),
            greyscale=greyscale,
            alpha=alpha,
            planes=nchans)

return itertools.imap(
    lambda x: itertools.chain(*x),
    block(unpack(f, rows, width, chan, maxval), width)), meta
def pixmeta(metadata, f)
Convert (uncompressed) Plan 9 image file to pair of (*metadata*, *pixels*). This is intended for use with PyPNG. *metadata* is the metadata returned in a dictionary, *pixels* is an iterator that yields each row in boxed row flat pixel format. `f`, the input file, should be cued up to the start of the image data.
8.676966
8.007051
1.083666
import png

pixels, meta = pixmeta(metadata, f)
p = png.Writer(**meta)
p.write(out, pixels)
def png(out, metadata, f)
Convert to PNG format. `metadata` should be a Plan9 5-tuple; `f` the input file (see :meth:`pixmeta`).
8.819957
6.230258
1.415665
def mask(w):
    return (1 << w) - 1

def deblock(f, depth, width):
    w = depth // 8
    i = 0
    for block in f:
        for i in range(len(block) // w):
            p = block[w * i: w * (i + 1)]
            i += w
            # Convert p to little-endian integer, x
            x = 0
            s = 1  # scale
            for j in p:
                x += s * ord(j)
                s <<= 8
            yield x

def bitfunge(f, depth, width):
    for block in f:
        col = 0
        for i in block:
            x = ord(i)
            for j in range(8 / depth):
                yield x >> (8 - depth)
                col += 1
                if col == width:
                    # A row-end forces a new byte even if
                    # we haven't consumed all of the current byte.
                    # Effectively rows are bit-padded to make
                    # a whole number of bytes.
                    col = 0
                    break
                x <<= depth

# number of bits in each channel
chan = map(int, re.findall(r'\d+', pixel))
# type of each channel
type = re.findall('[a-z]', pixel)

depth = sum(chan)
# According to the value of depth pick a "packer" that either gathers
# multiple bytes into a single pixel (for depth >= 8) or split bytes
# into several pixels (for depth < 8)
if depth >= 8:
    assert depth % 8 == 0
    packer = deblock
else:
    assert 8 % depth == 0
    packer = bitfunge

for x in packer(f, depth, width):
    # x is the pixel as an unsigned integer
    o = []
    # This is a bit yucky.  Extract each channel from the _most_
    # significant part of x.
    for j in range(len(chan)):
        v = (x >> (depth - chan[j])) & mask(chan[j])
        x <<= chan[j]
        if type[j] != 'x':
            # scale to maxval
            v = v * float(maxval) / mask(chan[j])
            v = int(v + 0.5)
            o.append(v)
    yield o
def unpack(f, rows, width, pixel, maxval)
Unpack `f` into pixels. Assumes the pixel format is such that the depth is either a multiple or a divisor of 8. `f` is assumed to be an iterator that returns blocks of input such that each block contains a whole number of pixels. An iterator is returned that yields each pixel as an n-tuple. `pixel` describes the pixel format using the Plan9 syntax ("k8", "r8g8b8", and so on).
5.017611
4.703687
1.06674
r = meta(f.read(60))
return r, decomprest(f, r[4])
def decompress(f)
Decompress a Plan 9 image file. Assumes f is already cued past the initial 'compressed\n' string.
22.633224
22.882399
0.989111
row = 0
while row < rows:
    row, o = deblock(f)
    yield o
def decomprest(f, rows)
Iterator that decompresses the rest of a file once the metadata have been consumed.
11.754745
9.10462
1.291075
row = int(f.read(12))
size = int(f.read(12))
if not (0 <= size <= 6000):
    raise Error('block has invalid size; not a Plan 9 image file?')

# Since each block is at most 6000 bytes we may as well read it all in
# one go.
d = f.read(size)
i = 0
o = []

while i < size:
    x = ord(d[i])
    i += 1
    if x & 0x80:
        x = (x & 0x7f) + 1
        lit = d[i: i + x]
        i += x
        o.extend(lit)
        continue
    # x's high-order bit is 0
    length = (x >> 2) + 3
    # Offset is made from bottom 2 bits of x and all 8 bits of next
    # byte.  http://plan9.bell-labs.com/magic/man2html/6/image doesn't
    # say whether x's 2 bits are most significant or least significant.
    # But it is clear from inspecting a random file,
    # http://plan9.bell-labs.com/sources/plan9/sys/games/lib/sokoban/images/cargo.bit
    # that x's 2 bits are most significant.
    offset = (x & 3) << 8
    offset |= ord(d[i])
    i += 1
    # Note: complement operator neatly maps (0 to 1023) to (-1 to
    # -1024).  Adding len(o) gives a (non-negative) offset into o from
    # which to start indexing.
    offset = ~offset + len(o)
    if offset < 0:
        raise Error('byte offset indexes off the beginning of '
                    'the output buffer; not a Plan 9 image file?')
    for j in range(length):
        o.append(o[offset + j])
return row, ''.join(o)
def deblock(f)
Decompress a single block from a compressed Plan 9 image file. Each block starts with 2 decimal strings of 12 bytes each. Returns a (row, data) pair where row is the total number of rows processed according to the file format and data is the decompressed data for a set of rows.
6.560934
5.995379
1.094332
logging.debug("Checking local and remote resolver list for update")

# If the local resolver file does not exist, or it has expired
if not os.path.isfile(self.listLocal) or \
        os.path.getmtime(self.listLocal) < \
        time.time() - self.updateListEvery:

    logging.info("Updating resolver list file")

    r = requests.get(
        self.listLocation,
        headers={
            'User-Agent': "dnsyo/{0}".format(
                pkg_resources.get_distribution("dnsyo").version
            )
        }
    )

    if r.status_code != 200:
        # If status code response is not 200 and we don't
        # already have a resolvers file, raise an exception
        # Otherwise keep going with the old file
        if not os.path.isfile(self.listLocal):
            # File does not exist locally, we can't continue
            raise EnvironmentError(
                "List location returned HTTP status {0} and we "
                "don't have a local copy of resolvers to fall "
                "back on. Can't continue".format(
                    r.status_code
                )
            )
    else:
        # Save the file
        with open(self.listLocal, 'w') as lf:
            lf.write(r.text)
def updateList(self)
Check to see if the X{resolver list} needs updating. Get the filemtime on the local list; if it's older than the hosted list, download the new one.
3.794347
3.418884
1.10982
logging.debug("Loading resolver file")

listFileLocation = self.listLocal if not listFile else listFile

# Resolve the user part of the path
listLocal = os.path.expanduser(listFileLocation)

# Check local file location exists and is writable
assert os.path.isdir(os.path.dirname(listLocal)),\
    "{0} is not a directory!".format(os.path.dirname(listLocal))
assert os.access(os.path.dirname(listLocal), os.W_OK),\
    "{0} is not writable!".format(os.path.dirname(listLocal))

# Open and yaml parse the resolver list
with open(listLocal) as ll:
    raw = ll.read()

# Use safe_load, just to be safe.
serverList = yaml.safe_load(raw)

# Remove all but the specified countries from the server list
if self.country is not None:
    logging.debug("Filtering serverList for country {0}"
                  .format(self.country))
    serverList = [d for d in serverList
                  if d['country'] == self.country]
    if len(serverList) == 0:
        raise ValueError("There are no servers available "
                         "with the country code {0}"
                         .format(self.country))

# Get selected number of servers
if self.maxServers == 'ALL' or noSample:
    # Set servers to the number of servers we have
    self.maxServers = len(serverList)
elif self.maxServers > len(serverList):
    # We were asked for more servers than exist in the list
    logging.warning(
        "You asked me to query {0} servers, but I only have "
        "{1} servers in my serverlist".format(
            self.maxServers,
            len(serverList)
        )
    )
    # Fallback to setting it to all
    self.maxServers = len(serverList)

# Get a random selection of the specified number
# of servers from the list
self.serverList = random.sample(serverList, self.maxServers)

return self.serverList
def prepareList(self, listFile=False, noSample=False)
Load and filter the server list for only the servers we care about
3.297091
3.271464
1.007833
# Ignore domain validation, if someone wants to lookup an invalid
# domain let them, just ensure it's a string
assert type(domain) == str, "Domain must be a string"

# Ensure record type is valid, and in our list of allowed records
recordType = recordType.upper()
assert recordType in self.lookupRecordTypes, \
    "Record type is not in valid list of record types {0}". \
    format(', '.join(self.lookupRecordTypes))

self.domain = domain
self.recordType = recordType
self.resultsColated = []
self.results = []

if len(self.serverList) == 0:
    logging.warning("Server list is empty. Attempting "
                    "to populate with prepareList")
    self.prepareList()

logging.debug("Starting query against {0} servers".format(
    len(self.serverList)))

workers = []
startTime = datetime.utcnow()
serverCounter = 0

# Run continuously while waiting for results
while len(self.results) < len(self.serverList):
    # Count the workers still running
    runningWorkers = len([w for w in workers if w.result is None])

    # Get the results of any finished workers
    for i, w in enumerate(workers):
        if w.result:
            # Add the results and get rid of the worker from the
            # worker list
            self.results.append(w.result)
            workers.pop(i)

    # Output progress
    if progress:
        # Output progress on one line that updates if terminal
        # supports it
        sys.stdout.write(
            "\r\x1b[KStatus: Queried {0} of {1} servers, duration: {2}"
            .format(len(self.results), len(self.serverList),
                    (datetime.utcnow() - startTime))
        )
        # Make sure the stdout updates
        sys.stdout.flush()

    # Start more workers if needed
    if runningWorkers < self.maxWorkers:
        logging.debug("Starting {0} workers".format(
            self.maxWorkers - runningWorkers))

        # Start however many workers we need
        # based on max workers - running workers
        for i in range(0, self.maxWorkers - runningWorkers):
            if serverCounter < len(self.serverList):
                # Create a new thread with all the details
                wt = QueryWorker()
                wt.server = self.serverList[serverCounter]
                wt.domain = domain
                wt.recType = recordType
                wt.daemon = True

                # Add it to the worker tracker
                workers.append(wt)

                # Start it
                wt.start()

                serverCounter += 1

    # Pause a little bit
    time.sleep(0.1)

# Now collate the results
# Group by number of servers with the same response
for r in self.results:
    # Result already in collation
    if r['results'] in [rs['results'] for rs in self.resultsColated]:
        cid = [
            i for i, rs in enumerate(self.resultsColated)
            if r['results'] == rs['results']
        ][0]
        self.resultsColated[cid]['servers'].append(r['server'])
    else:
        self.resultsColated.append(
            {
                'servers': [
                    r['server']
                ],
                'results': r['results'],
                'success': r['success']
            }
        )

if progress:
    sys.stdout.write("\n\n")

logging.debug("There are {0} unique results".format(
    len(self.resultsColated)))
def query(self, domain, recordType, progress=True)
Run the query. Spins out multiple thread workers to query each server. @param domain: Domain to query @param recordType: Type of record to query for @param progress: Write progress to stdout @type domain: str @type recordType: str
3.461426
3.430194
1.009105
successfulResponses = len(
    [
        True for rsp in self.results
        if rsp['success']
    ]
)

# NOTE: the exact banner text below is an assumption; the original
# format string was not preserved here.
sys.stdout.write(""" - RESULTS

I asked {num_servers} servers for {rec_type} records related to {domain},
{success_responses} responded with records and {error_responses} gave errors
Here are the results;\n\n\n""".format(
    num_servers=len(self.serverList),
    rec_type=self.recordType,
    domain=self.domain,
    success_responses=successfulResponses,
    error_responses=len(self.serverList) - successfulResponses
))

errors = []

for rsp in self.resultsColated:
    out = []
    if extended:
        out.append("The following servers\n")
        out.append("\n".join([
            " - {0} ({1} - {2})".
            format(s['ip'], s['provider'], s['country'])
            for s in rsp['servers']]))
        out.append("\nresponded with;\n")
    else:
        out.append("{num_servers} servers responded with;\n".format(
            num_servers=len(rsp['servers']))
        )

    out.append(
        "\n".join(rsp['results'])
    )
    out.append("\n\n")

    if rsp['success']:
        sys.stdout.write("".join(out))
    else:
        errors.append("".join(out))

sys.stdout.write("\n\nAnd here are the errors;\n\n\n")
sys.stdout.write("".join(errors))
def outputStandard(self, extended=False)
Standard, multi-line output display
3.822624
3.838137
0.995958
out = []
errors = []

successfulResponses = \
    len([True for rsp in self.results if rsp['success']])

out.append("INFO QUERIED {0}".format(
    len(self.serverList)))
out.append("INFO SUCCESS {0}".format(
    successfulResponses))
out.append("INFO ERROR {0}".format(
    len(self.serverList) - successfulResponses))

for rsp in self.resultsColated:
    if rsp['success']:
        out.append("RESULT {0} {1}".format(
            len(rsp['servers']),
            "|".join(rsp['results'])
        ))
    else:
        errors.append("ERROR {0} {1}".format(
            len(rsp['servers']),
            "|".join(rsp['results'])
        ))

out += errors
sys.stdout.write("\n".join(out))
sys.stdout.write("\n")
def outputSimple(self)
Simple output mode
3.424953
3.387284
1.011121
logging.debug("Querying server {0}".format(self.server['ip']))

try:
    # Create a DNS resolver query
    rsvr = dns.resolver.Resolver()
    rsvr.nameservers = [self.server['ip']]
    rsvr.lifetime = 5
    rsvr.timeout = 5

    qry = rsvr.query(self.domain, self.recType)

    # Get the results, sort for consistency
    results = sorted([r.to_text() for r in qry])
    success = True

# Handle all the various exceptions
except dns.resolver.NXDOMAIN:
    success = False
    results = ['NXDOMAIN']
except dns.resolver.NoNameservers:
    success = False
    results = ['No Nameservers']
except dns.resolver.NoAnswer:
    success = False
    results = ['No Answer']
except dns.resolver.Timeout:
    success = False
    results = ['Server Timeout']

# Save the results
self.result = {
    'server': self.server,
    'results': results,
    'success': success
}
def run(self)
Do a single DNS query against a server
3.118581
2.854744
1.09242
uid_bytes = hashlib.sha256(aws_key_id.encode()).digest()[-2:]
if USING_PYTHON2:
    return 2000 + int(from_bytes(uid_bytes) // 2)
else:
    return 2000 + (int.from_bytes(uid_bytes, byteorder=sys.byteorder) // 2)
def aws_to_unix_id(aws_key_id)
Convert an AWS Key ID into a UID.
3.712059
3.56552
1.041099
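A hedged sketch of the output range of aws_to_unix_id() above (Python 3 branch): the UID comes from the last two bytes of a SHA-256 digest, so it always falls in [2000, 34767]. The key ID below is AWS's documented example value.

uid = aws_to_unix_id("AKIAIOSFODNN7EXAMPLE")
assert 2000 <= uid <= 2000 + (2 ** 16 - 1) // 2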
assert isinstance(cls, GenericMeta)
if hasattr(cls, '_gorg'):
    return cls._gorg
while cls.__origin__ is not None:
    cls = cls.__origin__
return cls
def _gorg(cls)
This function exists for compatibility with old typing versions.
3.552909
3.209303
1.107066
if NEW_TYPING:
    return (tp is Callable or isinstance(tp, _GenericAlias) and
            tp.__origin__ is collections.abc.Callable or
            isinstance(tp, type) and issubclass(tp, Generic) and
            issubclass(tp, collections.abc.Callable))
return type(tp) is CallableMeta
def is_callable_type(tp)
Test if the type is a generic callable type, including subclasses but excluding non-generic types and callables. Examples:: is_callable_type(int) == False is_callable_type(type) == False is_callable_type(Callable) == True is_callable_type(Callable[..., int]) == True is_callable_type(Callable[[int, int], Iterable[str]]) == True class MyClass(Callable[[int], int]): ... is_callable_type(MyClass) == True For more general tests use callable(), for more precise test (excluding subclasses) use:: get_origin(tp) is collections.abc.Callable # Callable prior to Python 3.7
4.350695
5.102857
0.8526
if NEW_TYPING:
    return (tp is Tuple or isinstance(tp, _GenericAlias) and
            tp.__origin__ is tuple or
            isinstance(tp, type) and issubclass(tp, Generic) and
            issubclass(tp, tuple))
return type(tp) is TupleMeta
def is_tuple_type(tp)
Test if the type is a generic tuple type, including subclasses but excluding non-generic classes. Examples:: is_tuple_type(int) == False is_tuple_type(tuple) == False is_tuple_type(Tuple) == True is_tuple_type(Tuple[str, int]) == True class MyClass(Tuple[str, int]): ... is_tuple_type(MyClass) == True For more general tests use issubclass(..., tuple), for more precise test (excluding subclasses) use:: get_origin(tp) is tuple # Tuple prior to Python 3.7
4.635834
5.501913
0.842586
if tp is type(None):
    return True
elif is_union_type(tp):
    return any(is_optional_type(tt) for tt in get_args(tp, evaluate=True))
else:
    return False
def is_optional_type(tp)
Returns `True` if the type is `type(None)`, or is a direct `Union` to `type(None)`, such as `Optional[T]`. NOTE: this method inspects nested `Union` arguments but not `TypeVar` definitions (`bound`/`constraint`). So it will return `False` if - `tp` is a `TypeVar` bound, or constrained to, an optional type - `tp` is a `Union` to a `TypeVar` bound or constrained to an optional type, - `tp` refers to a *nested* `Union` containing an optional type or one of the above. Users wishing to check for optionality in types relying on type variables might wish to use this method in combination with `get_constraints` and `get_bound`
2.882746
2.828287
1.019255
if NEW_TYPING:
    return (tp is Union or
            isinstance(tp, _GenericAlias) and tp.__origin__ is Union)
return type(tp) is _Union
def is_union_type(tp)
Test if the type is a union type. Examples:: is_union_type(int) == False is_union_type(Union) == True is_union_type(Union[int, int]) == False is_union_type(Union[T, int]) == True
4.924994
5.802092
0.848831
if NEW_TYPING:
    raise ValueError('This function is only supported in Python 3.6,'
                     ' use get_origin instead')
sentinel = object()
origin = getattr(tp, '__origin__', sentinel)
if origin is sentinel:
    return None
if origin is None:
    return tp
return origin
def get_last_origin(tp)
Get the last base of (multiply) subscripted type. Supports generic types, Union, Callable, and Tuple. Returns None for unsupported types. Examples:: get_last_origin(int) == None get_last_origin(ClassVar[int]) == None get_last_origin(Generic[T]) == Generic get_last_origin(Union[T, int][str]) == Union[T, int] get_last_origin(List[Tuple[T, T]][int]) == List[Tuple[T, T]] get_last_origin(List) == List
4.708054
5.37197
0.876411
if NEW_TYPING:
    if isinstance(tp, _GenericAlias):
        return tp.__origin__ if tp.__origin__ is not ClassVar else None
    if tp is Generic:
        return Generic
    return None
if isinstance(tp, GenericMeta):
    return _gorg(tp)
if is_union_type(tp):
    return Union
return None
def get_origin(tp)
Get the unsubscripted version of a type. Supports generic types, Union, Callable, and Tuple. Returns None for unsupported types. Examples:: get_origin(int) == None get_origin(ClassVar[int]) == None get_origin(Generic) == Generic get_origin(Generic[T]) == Generic get_origin(Union[T, int]) == Union get_origin(List[Tuple[T, T]][int]) == list # List prior to Python 3.7
4.283907
4.542796
0.943011
if NEW_TYPING:
    if (isinstance(tp, _GenericAlias) or
            isinstance(tp, type) and issubclass(tp, Generic) and
            tp is not Generic):
        return tp.__parameters__
    return ()
if (
        is_generic_type(tp) or is_union_type(tp) or
        is_callable_type(tp) or is_tuple_type(tp)
):
    return tp.__parameters__ if tp.__parameters__ is not None else ()
return ()
def get_parameters(tp)
Return type parameters of a parameterizable type as a tuple in lexicographic order. Parameterizable types are generic types, unions, tuple types and callable types. Examples:: get_parameters(int) == () get_parameters(Generic) == () get_parameters(Union) == () get_parameters(List[int]) == () get_parameters(Generic[T]) == (T,) get_parameters(Tuple[List[T], List[S_co]]) == (T, S_co) get_parameters(Union[S_co, Tuple[T, T]][int, U]) == (U,) get_parameters(Mapping[T, Tuple[S_co, T]]) == (T, S_co)
3.321513
3.113678
1.066749
if NEW_TYPING:
    raise ValueError('This function is only supported in Python 3.6,'
                     ' use get_args instead')
if is_classvar(tp):
    return (tp.__type__,) if tp.__type__ is not None else ()
if (
        is_generic_type(tp) or is_union_type(tp) or
        is_callable_type(tp) or is_tuple_type(tp)
):
    return tp.__args__ if tp.__args__ is not None else ()
return ()
def get_last_args(tp)
Get last arguments of (multiply) subscripted type. Parameters for Callable are flattened. Examples:: get_last_args(int) == () get_last_args(Union) == () get_last_args(ClassVar[int]) == (int,) get_last_args(Union[T, int]) == (T, int) get_last_args(Iterable[Tuple[T, S]][int, T]) == (int, T) get_last_args(Callable[[T], int]) == (T, int) get_last_args(Callable[[], int]) == (int,)
3.559403
3.522926
1.010354
res = []
for arg in args:
    if not isinstance(arg, tuple):
        res.append(arg)
    elif is_callable_type(arg[0]):
        callable_args = _eval_args(arg[1:])
        if len(arg) == 2:
            res.append(Callable[[], callable_args[0]])
        elif arg[1] is Ellipsis:
            res.append(Callable[..., callable_args[1]])
        else:
            res.append(Callable[list(callable_args[:-1]), callable_args[-1]])
    else:
        res.append(type(arg[0]).__getitem__(arg[0], _eval_args(arg[1:])))
return tuple(res)
def _eval_args(args)
Internal helper for get_args.
2.474101
2.40691
1.027916
if NEW_TYPING:
    if evaluate is not None and not evaluate:
        raise ValueError('evaluate can only be True in Python 3.7')
    if isinstance(tp, _GenericAlias):
        res = tp.__args__
        if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
            res = (list(res[:-1]), res[-1])
        return res
    return ()
if is_classvar(tp):
    return (tp.__type__,)
if (
        is_generic_type(tp) or is_union_type(tp) or
        is_callable_type(tp) or is_tuple_type(tp)
):
    tree = tp._subs_tree()
    if isinstance(tree, tuple) and len(tree) > 1:
        if not evaluate:
            return tree[1:]
        res = _eval_args(tree[1:])
        if get_origin(tp) is Callable and res[0] is not Ellipsis:
            res = (list(res[:-1]), res[-1])
        return res
return ()
def get_args(tp, evaluate=None)
Get type arguments with all substitutions performed. For unions, basic simplifications used by Union constructor are performed. On versions prior to 3.7 if `evaluate` is False (default), report result as nested tuple, this matches the internal representation of types. If `evaluate` is True (or if Python version is 3.7 or greater), then all type parameters are applied (this could be time and memory expensive). Examples:: get_args(int) == () get_args(Union[int, Union[T, int], str][int]) == (int, str) get_args(Union[int, Tuple[T, int]][str]) == (int, (Tuple, str, int)) get_args(Union[int, Tuple[T, int]][str], evaluate=True) == \ (int, Tuple[str, int]) get_args(Dict[int, Tuple[T, T]][Optional[int]], evaluate=True) == \ (int, Tuple[Optional[int], Optional[int]]) get_args(Callable[[], T][int], evaluate=True) == ([], int,)
2.955104
2.975454
0.993161
# NOTE: the HTML template below is an assumption; the original string
# literal was not preserved here.
display(HTML("""<div id="%s" class="igv-js"></div>""" % (self.igv_id)))

# DON'T check status before showing browser,
msg = json.dumps({
    "id": self.igv_id,
    "command": "create",
    "options": self.config
})
self.comm.send(msg)
def show(self)
Create an igv.js "Browser" instance on the front end.
12.931044
9.176392
1.409164
# Check for minimal igv.js requirements (the only required field for
# all tracks is url, which must be a string)
if not isinstance(track, dict):
    raise Exception("track parameter must be a dictionary")
return self._send({
    "id": self.igv_id,
    "command": "loadTrack",
    "track": track
})
def load_track(self, track)
Load a track. Corresponds to the igv.js Browser function loadTrack (see https://github.com/igvteam/igv.js/wiki/Browser-Control-2.0#loadtrack). :param track: A dictionary specifying track options. See https://github.com/igvteam/igv.js/wiki/Tracks-2.0. :type dict
7.371088
6.613331
1.11458
self.eventHandlers[eventName] = cb
return self._send({
    "id": self.igv_id,
    "command": "on",
    "eventName": eventName
})
def on(self, eventName, cb)
Subscribe to an igv.js event. :param eventName: Name of the event. Currently only "locuschange" is supported. :type eventName: str :param cb: Callback function taking a single argument. For the locuschange event this argument will contain a dictionary of the form {chr, start, end} :type cb: function
5.393764
5.682732
0.94915
return log_(request, request_logger, logging.INFO, trim=trim_log_values, **kwargs)
def log_request(request: str, trim_log_values: bool = False, **kwargs: Any) -> None
Log a request
9.33029
6.075604
1.535697
return log_(response, response_logger, logging.INFO, trim=trim_log_values, **kwargs)
def log_response(response: str, trim_log_values: bool = False, **kwargs: Any) -> None
Log a response
10.148421
7.142371
1.420876
jsonschema_validate(request, schema)
return request
def validate(request: Union[Dict, List], schema: dict) -> Union[Dict, List]
Wraps jsonschema.validate, returning the same object passed in. Args: request: The deserialized-from-json request. schema: The jsonschema schema to validate against. Raises: jsonschema.ValidationError
6.461756
21.833529
0.295956
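A small usage sketch, assuming the validate() wrapper above; the schema below is illustrative, not the library's bundled JSON-RPC schema:

schema = {"type": "object", "required": ["jsonrpc"]}
request = {"jsonrpc": "2.0", "method": "ping", "id": 1}
assert validate(request, schema) is request  # same object back
# validate({}, schema) would raise jsonschema.ValidationError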
return validate_args(method, *args, **kwargs)(*args, **kwargs)
def call(method: Method, *args: Any, **kwargs: Any) -> Any
Validates arguments and then calls the method. Args: method: The method to call. *args, **kwargs: Arguments to the method. Returns: The "result" part of the JSON-RPC response (the return value from the method). Raises: TypeError: If arguments don't match function signature.
8.37988
11.594443
0.72275
with handle_exceptions(request, debug) as handler:
    result = call(methods.items[request.method], *request.args, **request.kwargs)
    handler.response = SuccessResponse(result=result, id=request.id)
return handler.response
def safe_call(request: Request, methods: Methods, *, debug: bool) -> Response
Call a Request, catching exceptions to ensure we always return a Response. Args: request: The Request object. methods: The list of methods that can be called. debug: Include more information in error responses. Returns: A Response object.
5.085986
6.717084
0.757172
if isinstance(requests, collections.Iterable):
    return BatchResponse(safe_call(r, methods, debug=debug) for r in requests)
return safe_call(requests, methods, debug=debug)
def call_requests( requests: Union[Request, Iterable[Request]], methods: Methods, debug: bool ) -> Response
Takes a request or list of Requests and calls them. Args: requests: Request object, or a collection of them. methods: The list of methods that can be called. debug: Include more information in error responses.
3.683187
4.180691
0.881
if isinstance(requests, list):
    return {
        Request(context=context, convert_camel_case=convert_camel_case, **request)
        for request in requests
    }
return Request(context=context, convert_camel_case=convert_camel_case, **requests)
def create_requests( requests: Union[Dict, List], *, context: Any = NOCONTEXT, convert_camel_case: bool ) -> Union[Request, Set[Request]]
Create a Request object from a dictionary (or list of them). Args: requests: A request dict, or a list of them. context: If specified, will be the first positional argument in all requests. convert_camel_case: Will convert the method name/any named params to snake case. Returns: A Request object, or a collection of them.
1.873921
2.098499
0.892982
try:
    deserialized = validate(deserialize(request), schema)
except JSONDecodeError as exc:
    return InvalidJSONResponse(data=str(exc), debug=debug)
except ValidationError as exc:
    return InvalidJSONRPCResponse(data=None, debug=debug)
return call_requests(
    create_requests(
        deserialized, context=context, convert_camel_case=convert_camel_case
    ),
    methods,
    debug=debug,
)
def dispatch_pure( request: str, methods: Methods, *, context: Any, convert_camel_case: bool, debug: bool, ) -> Response
Pure version of dispatch - no logging, no optional parameters. Does two things: 1. Deserializes and validates the string. 2. Calls each request. Args: request: The incoming request string. methods: Collection of methods that can be called. context: If specified, will be the first positional argument in all requests. convert_camel_case: Will convert the method name/any named params to snake case. debug: Include more information in error responses. Returns: A Response.
3.916517
3.773333
1.037946
logging.info(" * Listening on port %s", port)
httpd = HTTPServer((name, port), RequestHandler)
httpd.serve_forever()
def serve(name: str = "", port: int = 5000) -> None
A basic way to serve the methods. Args: name: Server address. port: Server port.
4.537025
4.1164
1.102183
string = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", string).lower()
def convert_camel_case_string(name: str) -> str
Convert camel case string to snake case
1.770274
1.496715
1.182773
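For instance, assuming convert_camel_case_string() above:

assert convert_camel_case_string("fooBar") == "foo_bar"
assert convert_camel_case_string("HTTPResponseCode") == "http_response_code"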
new_dict = dict()
for key, val in original_dict.items():
    if isinstance(val, dict):
        # Recurse
        new_dict[convert_camel_case_string(key)] = convert_camel_case_keys(val)
    else:
        new_dict[convert_camel_case_string(key)] = val
return new_dict
def convert_camel_case_keys(original_dict: Dict[str, Any]) -> Dict[str, Any]
Converts all keys of a dict from camel case to snake case, recursively
1.819908
1.798229
1.012056
positionals, nameds = [], {}  # type: list, dict
if params is not NOPARAMS:
    assert isinstance(params, (list, dict))
    if isinstance(params, list):
        positionals, nameds = (params, {})
    elif isinstance(params, dict):
        positionals, nameds = ([], params)
# If context data was passed, include it as the first positional argument.
if context is not NOCONTEXT:
    positionals = [context] + positionals
return (positionals, nameds)
def get_arguments( params: Union[List, Dict, object] = NOPARAMS, context: Any = NOCONTEXT ) -> Tuple[List, Dict]
Get the positional and keyword arguments from a request. Takes the 'params' part of a JSON-RPC request and converts it to either positional or named arguments usable in a Python function call. Note that a JSON-RPC request can only have positional _or_ named arguments, but not both. See http://www.jsonrpc.org/specification#parameter_structures Args: params: The 'params' part of the JSON-RPC request (should be a list or dict). The 'params' value can be a JSON array (Python list), object (Python dict), or None. context: Optionally include some context data, which will be included as the first positional arguments passed to the method. Returns: A two-tuple containing the positional (in a list, or None) and named (in a dict, or None) arguments, extracted from the 'params' part of the request.
2.763812
2.453794
1.126342
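A quick illustration, assuming get_arguments() and its NOPARAMS/NOCONTEXT sentinels above:

assert get_arguments([1, 2]) == ([1, 2], {})
assert get_arguments({"name": "foo"}) == ([], {"name": "foo"})
# Context data becomes the first positional argument:
assert get_arguments([1, 2], context="db") == (["db", 1, 2], {})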
if extra is None:
    extra = {}
# Clean up the message for logging: strip newlines and collapse double
# spaces to single (the assumed intent of the original replace call).
if message:
    message = message.replace("\n", "").replace("  ", " ").replace("{ ", "{")
if trim:
    message = _trim_message(message)
# Log.
logger.log(level, message, extra=extra)
def log_( message: str, logger: logging.Logger, level: int = logging.INFO, extra: Optional[Dict] = None, trim: bool = False, ) -> None
Log a request or response Args: message: JSON-RPC request or response string. logger: level: Log level. extra: More details to include in the log entry. trim: Abbreviate log messages.
3.809249
3.562698
1.069204
signature(func).bind(*args, **kwargs)
return func
def validate_args(func: Method, *args: Any, **kwargs: Any) -> Method
Check if the request's arguments match a function's signature. Raises TypeError exception if arguments cannot be passed to a function. Args: func: The function to check. args: Positional arguments. kwargs: Keyword arguments. Raises: TypeError: If the arguments cannot be passed to the function.
7.465437
16.398525
0.455251
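A short sketch, assuming validate_args() above (with inspect's signature imported as signature):

def subtract(minuend, subtrahend):
    return minuend - subtrahend

assert validate_args(subtract, 5, 3) is subtract  # arguments fit
# validate_args(subtract, 5) would raise TypeError (missing argument)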
self.items = {
    **self.items,
    # Methods passed as positional args need a __name__ attribute, raises
    # AttributeError otherwise.
    **{m.__name__: validate(m) for m in args},
    **{k: validate(v) for k, v in kwargs.items()},
}
if len(args):
    return args[0]  # for the decorator to work
return None
def add(self, *args: Any, **kwargs: Any) -> Optional[Callable]
Register a function to the list. Args: *args: Set/Sequence of positional arguments. **kwargs: Mapping of named arguments. Raises: AttributeError: Raised if the method being added has no name. (i.e. it has no `__name__` property, and no `name` argument was given.) Examples: methods = Methods() @methods.add def subtract(minuend, subtrahend): return minuend - subtrahend
6.221488
6.218603
1.000464
base_link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s"
if (sys.version_info[0] < 3):
    to_translate = urllib.quote_plus(to_translate)
    link = base_link % (to_language, from_language, to_translate)
    request = urllib2.Request(link, headers=agent)
    raw_data = urllib2.urlopen(request).read()
else:
    to_translate = urllib.parse.quote(to_translate)
    link = base_link % (to_language, from_language, to_translate)
    request = urllib.request.Request(link, headers=agent)
    raw_data = urllib.request.urlopen(request).read()
data = raw_data.decode("utf-8")
expr = r'class="t0">(.*?)<'
re_result = re.findall(expr, data)
if (len(re_result) == 0):
    result = ""
else:
    result = unescape(re_result[0])
return (result)
def translate(to_translate, to_language="auto", from_language="auto")
Returns the translation using Google Translate. You must use the shortcut of the language you define (French = fr, English = en, Spanish = es, etc.); if not defined it will detect the language or use English by default. Example: print(translate("salut tu vas bien?", "en")) hello you alright?
1.87913
1.985831
0.946269
for i in ['gen-sample']:
    raw_cli_arguments[i.replace('-', '_')] = raw_cli_arguments[i]
    raw_cli_arguments.pop(i)
return raw_cli_arguments
def fix_hyphen_commands(raw_cli_arguments)
Update options to match their module names with underscores.
3.572271
3.166708
1.128071
if os.environ.get('DEBUG'):
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)
    # botocore info is spammy
    logging.getLogger('botocore').setLevel(logging.ERROR)

cli_arguments = fix_hyphen_commands(docopt(__doc__, version=version))

# at least one of these must be enabled, i.e. the value is 'True'... but
# unfortunately `docopts` doesn't give you the hierarchy... so given
# 'gen-sample cfn', there are TWO enabled items in the list,
# 'gen-sample' and 'cfn'
possible_commands = [command for command, enabled in cli_arguments.items()
                     if enabled]

command_class = find_command_class(possible_commands)
if command_class:
    command_class(cli_arguments).execute()
else:
    LOGGER.error("class not found for command '%s'", possible_commands)
def main()
Provide main CLI entrypoint.
5.787526
5.612707
1.031147
file_hash = hashlib.md5()
for fname in sorted(files):
    fileobj = os.path.join(root, fname)
    file_hash.update((fname + "\0").encode())
    with open(fileobj, "rb") as filedes:
        for chunk in iter(lambda: filedes.read(4096), ""):  # noqa pylint: disable=cell-var-from-loop
            if not chunk:
                break
            file_hash.update(chunk)
        file_hash.update("\0".encode())

return file_hash.hexdigest()
def calculate_hash_of_files(files, root)
Return a hash of all of the given files at the given root. Adapted from stacker.hooks.aws_lambda; used according to its license: https://github.com/cloudtools/stacker/blob/1.4.0/LICENSE Args: files (list[str]): file names to include in the hash calculation, relative to ``root``. root (str): base directory to analyze files in. Returns: str: A hash of the hashes of the given files.
2.489759
2.648467
0.940075
if not directories:
    directories = [{'path': './'}]

files_to_hash = []
for i in directories:
    ignorer = get_ignorer(os.path.join(root_path, i['path']),
                          i.get('exclusions'))

    with change_dir(root_path):
        for root, dirs, files in os.walk(i['path'], topdown=True):
            if (root != './') and ignorer.is_ignored(root, True):
                dirs[:] = []
                files[:] = []
            else:
                for filename in files:
                    filepath = os.path.join(root, filename)
                    if not ignorer.is_ignored(filepath):
                        files_to_hash.append(
                            filepath[2:] if filepath.startswith('./') else filepath  # noqa
                        )

return calculate_hash_of_files(files_to_hash, root_path)
def get_hash_of_files(root_path, directories=None)
Generate md5 hash of files.
3.015702
2.924147
1.03131
ignorefile = zgitignore.ZgitIgnore()
gitignore_file = os.path.join(path, '.gitignore')
if os.path.isfile(gitignore_file):
    with open(gitignore_file, 'r') as fileobj:
        ignorefile.add_patterns(fileobj.read().splitlines())
if additional_exclusions is not None:
    ignorefile.add_patterns(additional_exclusions)
return ignorefile
def get_ignorer(path, additional_exclusions=None)
Create ignorer with directory gitignore file.
2.354249
2.155128
1.092394
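A hedged usage sketch for get_ignorer() above (the paths are hypothetical); the returned zgitignore object exposes an is_ignored check, as used in get_hash_of_files:

ignorer = get_ignorer('/path/to/repo', additional_exclusions=['*.pyc'])
print(ignorer.is_ignored('module/cache.pyc'))  # True if a pattern matches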
version_dir = os.path.join(versions_dir, version)

if arch is None:
    arch = (
        os.environ.get('TFENV_ARCH') if os.environ.get('TFENV_ARCH')
        else 'amd64')

if tf_platform:
    tfver_os = tf_platform + '_' + arch
else:
    if platform.system().startswith('Darwin'):
        tfver_os = "darwin_%s" % arch
    elif platform.system().startswith('MINGW64') or (
            platform.system().startswith('MSYS_NT') or (
                platform.system().startswith('CYGWIN_NT'))):
        tfver_os = "windows_%s" % arch
    else:
        tfver_os = "linux_%s" % arch

download_dir = tempfile.mkdtemp()
filename = "terraform_%s_%s.zip" % (version, tfver_os)
shasums_name = "terraform_%s_SHA256SUMS" % version
tf_url = "https://releases.hashicorp.com/terraform/" + version

for i in [filename, shasums_name]:
    urlretrieve(tf_url + '/' + i,
                os.path.join(download_dir, i))
tf_hash = get_hash_for_filename(filename,
                                os.path.join(download_dir, shasums_name))
if tf_hash != sha256sum(os.path.join(download_dir, filename)):
    LOGGER.error("Downloaded Terraform %s does not match sha256 %s",
                 filename, tf_hash)
    sys.exit(1)

tf_zipfile = zipfile.ZipFile(os.path.join(download_dir, filename))
os.mkdir(version_dir)
tf_zipfile.extractall(version_dir)
tf_zipfile.close()
shutil.rmtree(download_dir)
os.chmod(  # ensure it is executable
    os.path.join(version_dir, 'terraform' + command_suffix),
    os.stat(os.path.join(version_dir,
                         'terraform' + command_suffix)).st_mode | 0o0111
)
def download_tf_release(version, versions_dir, command_suffix, tf_platform=None, arch=None)
Download a Terraform release archive, verify its sha256, and extract it into the versions directory.
2.192123
2.15605
1.016731
tf_releases = json.loads(
    requests.get('https://releases.hashicorp.com/index.json').text
)['terraform']
tf_versions = sorted([k  # descending
                      for k, _v in tf_releases['versions'].items()],
                     key=LooseVersion,
                     reverse=True)
if include_prerelease:
    return tf_versions
return [i for i in tf_versions if '-' not in i]
def get_available_tf_versions(include_prerelease=False)
Return available Terraform versions.
3.955046
3.701866
1.068393
found_min_required = ''
for filename in glob.glob(os.path.join(path, '*.tf')):
    with open(filename, 'r') as stream:
        tf_config = hcl.load(stream)
        if tf_config.get('terraform', {}).get('required_version'):
            found_min_required = tf_config.get('terraform',
                                               {}).get('required_version')
            break

if found_min_required:
    if re.match(r'^!=.+', found_min_required):
        LOGGER.error('Min required Terraform version is a negation (%s) '
                     '- unable to determine required version',
                     found_min_required)
        sys.exit(1)
    else:
        found_min_required = re.search(r'[0-9]*\.[0-9]*(?:\.[0-9]*)?',
                                       found_min_required).group(0)
        LOGGER.debug("Detected minimum terraform version is %s",
                     found_min_required)
        return found_min_required
LOGGER.error('Terraform version specified as min-required, but unable to '
             'find a specified version requirement in this module\'s tf '
             'files')
sys.exit(1)
def find_min_required(path)
Inspect terraform files and find minimum version.
3.094336
2.922489
1.058801
tf_version_path = os.path.join(path, TF_VERSION_FILENAME)
if not os.path.isfile(tf_version_path):
    LOGGER.error("Terraform install attempted and no %s file present to "
                 "dictate the version. Please create it (e.g. write "
                 "\"0.11.13\" (without quotes) to the file and try again",
                 TF_VERSION_FILENAME)
    sys.exit(1)
with open(tf_version_path, 'r') as stream:
    ver = stream.read().rstrip()
return ver
def get_version_requested(path)
Return string listing requested Terraform version.
4.583781
4.201797
1.09091
versions_dir = os.path.join(tfenv_path, 'versions')
if not os.path.isdir(tfenv_path):
    os.mkdir(tfenv_path)
if not os.path.isdir(versions_dir):
    os.mkdir(versions_dir)
return versions_dir
def ensure_versions_dir_exists(tfenv_path)
Ensure versions directory is available.
1.611488
1.622193
0.993401
command_suffix = '.exe' if platform.system() == 'Windows' else ''
versions_dir = ensure_versions_dir_exists(self.tfenv_dir)

if not version_requested:
    version_requested = get_version_requested(self.path)

if re.match(r'^min-required$', version_requested):
    LOGGER.debug('tfenv: detecting minimal required version')
    version_requested = find_min_required(self.path)

if re.match(r'^latest:.*$', version_requested):
    regex = re.search(r'latest:(.*)', version_requested).group(1)
    include_prerelease_versions = False
elif re.match(r'^latest$', version_requested):
    regex = r'^[0-9]+\.[0-9]+\.[0-9]+$'
    include_prerelease_versions = False
else:
    regex = "^%s$" % version_requested
    include_prerelease_versions = True
    # Return early (i.e. before reaching out to the internet) if the
    # matching version is already installed
    if os.path.isdir(os.path.join(versions_dir, version_requested)):
        LOGGER.info("Terraform version %s already installed; using "
                    "it...", version_requested)
        return os.path.join(versions_dir,
                            version_requested,
                            'terraform') + command_suffix

try:
    version = next(i
                   for i in get_available_tf_versions(
                       include_prerelease_versions)
                   if re.match(regex, i))
except StopIteration:
    LOGGER.error("Unable to find a Terraform version matching regex: %s",
                 regex)
    sys.exit(1)

# Now that a version has been selected, skip downloading if it's
# already been downloaded
if os.path.isdir(os.path.join(versions_dir, version)):
    LOGGER.info("Terraform version %s already installed; using it...",
                version)
    return os.path.join(versions_dir, version, 'terraform') + command_suffix

LOGGER.info("Downloading and using Terraform version %s ...", version)
download_tf_release(version, versions_dir, command_suffix)
LOGGER.info("Downloaded Terraform %s successfully", version)
return os.path.join(versions_dir, version, 'terraform') + command_suffix
def install(self, version_requested=None)
Ensure terraform is available.
2.876222
2.770054
1.038327
parameter_name = kwargs.get('parameter_name')
if not parameter_name:
    raise ValueError('Must specify `parameter_name` for delete_param '
                     'hook.')

session = get_session(provider.region)
ssm_client = session.client('ssm')

try:
    ssm_client.delete_parameter(Name=parameter_name)
except ssm_client.exceptions.ParameterNotFound:
    LOGGER.info("%s parameter appears to have already been deleted...",
                parameter_name)
return True
def delete_param(context, provider, **kwargs)
Delete SSM parameter.
4.201885
3.623046
1.159766
for i in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
          'AWS_SESSION_TOKEN']:
    if i in self.env_vars:
        self.env_vars['OLD_' + i] = self.env_vars[i]
def save_existing_iam_env_vars(self)
Backup IAM environment variables for later restoration.
2.613357
2.271559
1.150468
for i in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
          'AWS_SESSION_TOKEN']:
    if 'OLD_' + i in self.env_vars:
        self.env_vars[i] = self.env_vars['OLD_' + i]
    elif i in self.env_vars:
        self.env_vars.pop(i)
def restore_existing_iam_env_vars(self)
Restore backed up IAM environment variables.
2.213846
2.077111
1.065829
def hello(event, context):  # pylint: disable=unused-argument
    body = {
        "message": "Go Serverless v1.0! Your function executed successfully!",
        "input": event
    }

    response = {
        "statusCode": 200,
        "body": json.dumps(body)
    }

    return response
Return Serverless Hello World.
null
null
null
if 'DEPLOY_ENVIRONMENT' in os.environ:
    return os.environ['DEPLOY_ENVIRONMENT']

if ignore_git_branch:
    LOGGER.info('Skipping environment lookup from current git branch '
                '("ignore_git_branch" is set to true in the runway '
                'config)')
else:
    # These are not located with the top imports because they throw an
    # error if git isn't installed
    from git import Repo as GitRepo
    from git.exc import InvalidGitRepositoryError

    try:
        b_name = GitRepo(
            path,
            search_parent_directories=True
        ).active_branch.name
        LOGGER.info('Deriving environment name from git branch %s...',
                    b_name)
        return get_env_from_branch(b_name)
    except InvalidGitRepositoryError:
        pass
LOGGER.info('Deriving environment name from directory %s...', path)
return get_env_from_directory(os.path.basename(path))
def get_env(path, ignore_git_branch=False)
Determine environment name.
3.779131
3.668294
1.030215
repo_dirs = next(os.walk(self.env_root))[1]
if '.git' in repo_dirs:
    repo_dirs.remove('.git')  # not relevant for any repo operations
return repo_dirs
def get_env_dirs(self)
Return list of directories in env_root.
5.389962
4.466693
1.206701
yaml_files = glob.glob(
    os.path.join(self.env_root, '*.yaml')
)
yml_files = glob.glob(
    os.path.join(self.env_root, '*.yml')
)
return yaml_files + yml_files
def get_yaml_files_at_env_root(self)
Return list of yaml files in env_root.
2.049602
1.934541
1.059477
if base_dir is None:
    base_dir = self.env_root

cookbook_dirs = []
dirs_to_skip = set(['.git'])
for root, dirs, files in os.walk(base_dir):  # pylint: disable=W0612
    dirs[:] = [d for d in dirs if d not in dirs_to_skip]
    for name in files:
        if name == 'metadata.rb':
            if 'cookbook' in os.path.basename(os.path.dirname(root)):
                cookbook_dirs.append(root)
return cookbook_dirs
def get_cookbook_dirs(self, base_dir=None)
Find cookbook directories.
2.345938
2.281861
1.028081
pathlistdir = os.listdir(path)
if pathlistdir == []:
    return True
if any(os.path.isfile(os.path.join(path, i)) for i in pathlistdir):
    return False
return all(self.path_only_contains_dirs(os.path.join(path, i))
           for i in pathlistdir)
def path_only_contains_dirs(self, path)
Return boolean on whether a path only contains directories.
2.105368
2.006988
1.049019
empty_dirs = []
for i in os.listdir(path):
    child_path = os.path.join(path, i)
    if i == '.git' or os.path.isfile(child_path) or os.path.islink(child_path):  # noqa
        continue
    if self.path_only_contains_dirs(child_path):
        empty_dirs.append(i)
return empty_dirs
def get_empty_dirs(self, path)
Return a list of empty directories in path.
2.48313
2.3685
1.048398
if not os.path.isfile(self.runway_config_path):
    LOGGER.error("Runway config file was not found (looking for "
                 "%s)",
                 self.runway_config_path)
    sys.exit(1)
with open(self.runway_config_path) as data_file:
    return yaml.safe_load(data_file)
def parse_runway_config(self)
Read and parse runway.yml.
2.715339
2.507068
1.083074
if not self._runway_config:
    self._runway_config = self.parse_runway_config()
return self._runway_config
def runway_config(self)
Return parsed runway.yml.
2.788491
2.280694
1.22265
session = get_session(provider.region)

if kwargs.get('bucket_name'):
    bucket_name = kwargs['bucket_name']
else:
    if kwargs.get('bucket_output_lookup'):
        value = kwargs['bucket_output_lookup']
        handler = OutputLookup.handle
    elif kwargs.get('bucket_rxref_lookup'):
        value = kwargs['bucket_rxref_lookup']
        handler = RxrefLookup.handle
    elif kwargs.get('bucket_xref_lookup'):
        value = kwargs['bucket_xref_lookup']
        handler = XrefLookup.handle
    else:
        LOGGER.fatal('No bucket name/source provided.')
        return False

    try:  # Exit early if the bucket's stack is already deleted
        session.client('cloudformation').describe_stacks(
            StackName=context.get_fqn(value.split('::')[0])
        )
    except ClientError as exc:
        if 'does not exist' in exc.response['Error']['Message']:
            LOGGER.info('S3 bucket stack appears to have already been '
                        'deleted...')
            return True
        raise

    bucket_name = handler(
        value,
        provider=provider,
        context=context
    )

s3_resource = session.resource('s3')
try:
    s3_resource.meta.client.head_bucket(Bucket=bucket_name)
except ClientError as exc:
    if exc.response['Error']['Code'] == '404':
        LOGGER.info("%s S3 bucket appears to have already been deleted...",
                    bucket_name)
        return True
    raise

bucket = s3_resource.Bucket(bucket_name)
bucket.object_versions.delete()
return True
def purge_bucket(context, provider, **kwargs)
Delete objects in bucket.
2.683076
2.697295
0.994728
# Disable other runway logging so the only response is the env name
logging.getLogger('runway').setLevel(logging.ERROR)

# This may be invoked from a module directory in an environment;
# account for that here if necessary
if not os.path.isfile('runway.yml'):
    self.env_root = os.path.dirname(os.getcwd())
    self.runway_config_path = os.path.join(self.env_root, 'runway.yml')

print(get_env(
    self.env_root,
    self.runway_config.get('ignore_git_branch', False)
))
def execute(self)
Output environment name.
6.458304
5.862652
1.101601
template = self.template
variables = self.get_variables()
self.template.add_version('2010-09-09')
self.template.add_description('Terraform State Resources')

# Conditions
for i in ['BucketName', 'TableName']:
    template.add_condition(
        "%sOmitted" % i,
        Or(Equals(variables[i].ref, ''),
           Equals(variables[i].ref, 'undefined'))
    )

# Resources
terraformlocktable = template.add_resource(
    dynamodb.Table(
        'TerraformStateTable',
        AttributeDefinitions=[
            dynamodb.AttributeDefinition(
                AttributeName='LockID',
                AttributeType='S'
            )
        ],
        KeySchema=[
            dynamodb.KeySchema(
                AttributeName='LockID',
                KeyType='HASH'
            )
        ],
        ProvisionedThroughput=dynamodb.ProvisionedThroughput(
            ReadCapacityUnits=2,
            WriteCapacityUnits=2
        ),
        TableName=If(
            'TableNameOmitted',
            NoValue,
            variables['TableName'].ref
        )
    )
)
template.add_output(Output(
    '%sName' % terraformlocktable.title,
    Description='Name of DynamoDB table for Terraform state',
    Value=terraformlocktable.ref()
))

terraformstatebucket = template.add_resource(
    s3.Bucket(
        'TerraformStateBucket',
        AccessControl=s3.Private,
        BucketName=If(
            'BucketNameOmitted',
            NoValue,
            variables['BucketName'].ref
        ),
        LifecycleConfiguration=s3.LifecycleConfiguration(
            Rules=[
                s3.LifecycleRule(
                    NoncurrentVersionExpirationInDays=90,
                    Status='Enabled'
                )
            ]
        ),
        VersioningConfiguration=s3.VersioningConfiguration(
            Status='Enabled'
        )
    )
)
template.add_output(Output(
    '%sName' % terraformstatebucket.title,
    Description='Name of bucket storing Terraform state',
    Value=terraformstatebucket.ref()
))
template.add_output(Output(
    '%sArn' % terraformstatebucket.title,
    Description='Arn of bucket storing Terraform state',
    Value=terraformstatebucket.get_att('Arn')
))

managementpolicy = template.add_resource(
    iam.ManagedPolicy(
        'ManagementPolicy',
        Description='Managed policy for Terraform state management.',
        Path='/',
        PolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
                Statement(
                    Action=[awacs.s3.ListBucket],
                    Effect=Allow,
                    Resource=[terraformstatebucket.get_att('Arn')]
                ),
                Statement(
                    Action=[awacs.s3.GetObject,
                            awacs.s3.PutObject],
                    Effect=Allow,
                    Resource=[
                        Join('', [terraformstatebucket.get_att('Arn'),
                                  '/*'])
                    ]
                ),
                Statement(
                    Action=[awacs.dynamodb.GetItem,
                            awacs.dynamodb.PutItem,
                            awacs.dynamodb.DeleteItem],
                    Effect=Allow,
                    Resource=[terraformlocktable.get_att('Arn')]
                )
            ]
        )
    )
)
template.add_output(
    Output(
        'PolicyArn',
        Description='Managed policy Arn',
        Value=managementpolicy.ref()
    )
)
def create_template(self)
Create template (main function called by Stacker).
1.972764
1.86692
1.056695
environment = subprocess.check_output(
    ['pipenv', 'run', 'runway', 'whichenv']
).decode().strip()
environment_file = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'src',
    'environments',
    'environment.prod.ts' if environment == 'prod' else 'environment.ts'
)
cloudformation = boto3.resource('cloudformation')
stack = cloudformation.Stack(STACK_PREFIX + environment)
endpoint = [i['OutputValue']
            for i in stack.outputs
            if i['OutputKey'] == 'ServiceEndpoint'][0]

with open(environment_file, 'r') as stream:
    content = stream.read()
content = re.sub(r'api_url: \'.*\'$',
                 "api_url: '%s/api'" % endpoint,
                 content,
                 flags=re.M)
with open(environment_file, 'w') as stream:
    stream.write(content)
def update_api_endpoint()
Update app environment file with backend endpoint.
2.979496
2.901725
1.026802
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
    yield
finally:
    os.chdir(prevdir)
def change_dir(newdir)
Change directory. Adapted from http://stackoverflow.com/a/24176022
2.029672
2.005239
1.012185
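A brief usage sketch for the change_dir() context manager above (the directory is hypothetical):

import os

with change_dir('~/projects/demo'):
    print(os.getcwd())  # inside ~/projects/demo
# back in the original working directory here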
if platform.system() != 'Windows' and (
        not stat.S_IXUSR & os.stat(path)[stat.ST_MODE]):
    print("Error: File %s is not executable" % path)
    sys.exit(1)
def ensure_file_is_executable(path)
Exit if file is not executable.
3.153475
3.001214
1.050733
if deep_merge:
    if isinstance(dict1, list) and isinstance(dict2, list):
        return dict1 + dict2

    if not isinstance(dict1, dict) or not isinstance(dict2, dict):
        return dict2

    for key in dict2:
        dict1[key] = merge_dicts(dict1[key], dict2[key]) if key in dict1 else dict2[key]  # noqa pylint: disable=line-too-long
    return dict1
dict3 = dict1.copy()
dict3.update(dict2)
return dict3
def merge_dicts(dict1, dict2, deep_merge=True)
Merge dict2 into dict1.
2.00666
1.946797
1.030749
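A quick illustration of merge_dicts() above: with the default deep_merge=True, nested dicts are merged key-by-key and lists are concatenated.

d1 = {'a': {'x': 1}, 'tags': ['one']}
d2 = {'a': {'y': 2}, 'tags': ['two']}
assert merge_dicts(d1, d2) == {'a': {'x': 1, 'y': 2},
                               'tags': ['one', 'two']}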
boto_args = {}
for i in ['aws_access_key_id', 'aws_secret_access_key',
          'aws_session_token']:
    if env_vars.get(i.upper()):
        boto_args[i] = env_vars[i.upper()]
return boto_args
def extract_boto_args_from_env(env_vars)
Return boto3 client args dict with environment creds.
1.98433
1.88939
1.050249
for (key, val) in env_dict.items():
    # Lists are presumed to be path components and will be turned back
    # to strings
    if isinstance(val, list):
        env_dict[key] = os.path.join(env_root, os.path.join(*val)) if (env_root and not os.path.isabs(os.path.join(*val))) else os.path.join(*val)  # noqa pylint: disable=line-too-long
return env_dict
def flatten_path_lists(env_dict, env_root=None)
Join paths in environment dict down to strings.
3.221972
3.068078
1.05016
# If the provided dictionary is just a single "level" (no nested
# environments), it applies to all environments
if all(isinstance(val, (six.string_types, list))
       for (_key, val) in env_dicts.items()):
    return flatten_path_lists(env_dicts, env_root)

if env_name is None:
    if env_dicts.get('*'):
        return flatten_path_lists(env_dicts.get('*'), env_root)
    raise AttributeError("Provided config key:val pairs %s aren't usable with no environment provided" % env_dicts)  # noqa pylint: disable=line-too-long

if not env_dicts.get('*') and not env_dicts.get(env_name):
    raise AttributeError("Provided config key:val pairs %s aren't usable with environment %s" % (env_dicts, env_name))  # noqa pylint: disable=line-too-long

combined_dicts = merge_dicts(env_dicts.get('*', {}),
                             env_dicts.get(env_name, {}))
return flatten_path_lists(combined_dicts, env_root)
def merge_nested_environment_dicts(env_dicts, env_name=None, env_root=None)
Return single-level dictionary from dictionary of dictionaries.
3.311139
3.242866
1.021053
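A brief sketch, assuming merge_nested_environment_dicts() and its helpers above: values under the '*' key apply to every environment and are overridden by the per-environment block.

config = {
    '*': {'region': 'us-east-1'},
    'prod': {'region': 'us-west-2', 'profile': 'prod'},
}
assert merge_nested_environment_dicts(config, env_name='prod') == {
    'region': 'us-west-2', 'profile': 'prod'}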
return os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    'embedded'
)
def get_embedded_lib_path()
Return path of embedded libraries.
2.86657
2.637702
1.086768