| column | dtype | range |
|---|---|---|
| Unnamed: 0 | int64 | 0 – 10k |
| repository_name | string | lengths 7 – 54 |
| func_path_in_repository | string | lengths 5 – 223 |
| func_name | string | lengths 1 – 134 |
| whole_func_string | string | lengths 100 – 30.3k |
| language | string | 1 class (`python`) |
| func_code_string | string | lengths 100 – 30.3k |
| func_code_tokens | string | lengths 138 – 33.2k |
| func_documentation_string | string | lengths 1 – 15k |
| func_documentation_tokens | string | lengths 5 – 5.14k |
| split_name | string | 1 class (`train`) |
| func_code_url | string | lengths 91 – 315 |

6,800 | UniversalDevicesInc/polyglot-v2-python-interface | polyinterface/polyinterface.py | Interface.send | python | train | https://github.com/UniversalDevicesInc/polyglot-v2-python-interface/blob/fe613135b762731a41a081222e43d2a8ae4fc53f/polyinterface/polyinterface.py#L349-L361

    def send(self, message):
        """
        Formatted Message to send to Polyglot. Connection messages are sent automatically from this module
        so this method is used to send commands to/from Polyglot and formats it for consumption
        """
        if not isinstance(message, dict) and self.connected:
            warnings.warn('payload not a dictionary')
            return False
        try:
            message['node'] = self.profileNum
            self._mqttc.publish(self.topicInput, json.dumps(message), retain=False)
        except TypeError as err:
            LOGGER.error('MQTT Send Error: {}'.format(err), exc_info=True)

6,801 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/__init__.py | _build_trigram_indices | python | train | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/__init__.py#L48-L55

    def _build_trigram_indices(trigram_index):
        """Build a dictionary of trigrams and their indices from a csv"""
        result = {}
        trigram_count = 0
        for key, val in csv.reader(open(trigram_index)):
            result[key] = int(val)
            trigram_count += 1
        return result, trigram_count

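The loading pattern in the row above is easy to exercise outside the package. The following standalone sketch builds the same kind of trigram-to-index dict from a two-column CSV; the trigram strings and in-memory "file" are made up for illustration and are not part of the dataset record.

    import csv
    import io

    # Two-column CSV of "trigram,index" pairs, the on-disk layout the function above expects.
    fake_csv = io.StringIO("the quick brown,0\nquick brown fox,1\nbrown fox jumps,2\n")

    trigram_to_index = {}
    count = 0
    for key, val in csv.reader(fake_csv):
        trigram_to_index[key] = int(val)
        count += 1

    print(trigram_to_index)  # {'the quick brown': 0, 'quick brown fox': 1, 'brown fox jumps': 2}
    print(count)             # 3
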
6,802 | maweigert/gputools | gputools/denoise/tv2.py | _tv2 | python | train | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/denoise/tv2.py#L16-L77

    def _tv2(data,weight,Niter=50):
        """
        chambolles tv regularized denoising

        weight should be around 2+1.5*noise_sigma
        """
        if dev is None:
            dev = imgtools.__DEFAULT_OPENCL_DEVICE__

        if dev is None:
            raise ValueError("no OpenCLDevice found...")

        proc = OCLProcessor(dev,utils.absPath("kernels/tv_chambolle.cl"))

        if Ncut ==1:
            inImg = dev.createImage(data.shape[::-1],dtype = np.float32)

            pImgs = [ dev.createImage(data.shape[::-1],
                                      mem_flags = cl.mem_flags.READ_WRITE,
                                      dtype= np.float32,
                                      channel_order = cl.channel_order.RGBA)
                      for i in range(2)]

            outImg = dev.createImage(data.shape[::-1],
                                     dtype = np.float32,
                                     mem_flags = cl.mem_flags.READ_WRITE)

            dev.writeImage(inImg,data.astype(np.float32));
            dev.writeImage(pImgs[0],np.zeros((4,)+data.shape,dtype=np.float32));
            dev.writeImage(pImgs[1],np.zeros((4,)+data.shape,dtype=np.float32));

            for i in range(Niter):
                proc.runKernel("div_step",inImg.shape,None,
                               inImg,pImgs[i%2],outImg)
                proc.runKernel("grad_step",inImg.shape,None,
                               outImg,pImgs[i%2],pImgs[1-i%2],
                               np.float32(weight))

            return dev.readImage(outImg,dtype=np.float32)

        else:
            res = np.empty_like(data,dtype=np.float32)
            Nz,Ny,Nx = data.shape
            # a heuristic guess: Npad = Niter means perfect
            Npad = 1+Niter/2

            for i0,(i,j,k) in enumerate(product(list(range(Ncut)),repeat=3)):
                logger.info("calculating box %i/%i"%(i0+1,Ncut**3))
                sx = slice(i*Nx/Ncut,(i+1)*Nx/Ncut)
                sy = slice(j*Ny/Ncut,(j+1)*Ny/Ncut)
                sz = slice(k*Nz/Ncut,(k+1)*Nz/Ncut)
                sx1,sx2 = utils._extended_slice(sx,Nx,Npad)
                sy1,sy2 = utils._extended_slice(sy,Ny,Npad)
                sz1,sz2 = utils._extended_slice(sz,Nz,Npad)

                data_sliced = data[sz1,sy1,sx1].copy()
                _res = tv3_gpu(dev,data_sliced,weight,Niter,Ncut = 1)
                res[sz,sy,sx] = _res[sz2,sy2,sx2]

            return res

6,803 | buguroo/pyknow | pyknow/fact.py | Fact.copy | python | train | https://github.com/buguroo/pyknow/blob/48818336f2e9a126f1964f2d8dc22d37ff800fe8/pyknow/fact.py#L106-L116

    def copy(self):
        """Return a copy of this `Fact`."""
        content = [(k, v) for k, v in self.items()]
        intidx = [(k, v) for k, v in content if isinstance(k, int)]
        args = [v for k, v in sorted(intidx)]
        kwargs = {k: v
                  for k, v in content
                  if not isinstance(k, int) and not self.is_special(k)}
        return self.__class__(*args, **kwargs)

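The copy logic in the row above is essentially a recipe for splitting a mapping with mixed integer and string keys back into positional and keyword arguments. A standalone sketch with a plain dict (no pyknow `Fact` involved, and with made-up keys and values) illustrates the idea:

    content = {0: 'red', 1: 'small', 'owner': 'alice'}   # int keys ~ positional, str keys ~ keyword

    intidx = [(k, v) for k, v in content.items() if isinstance(k, int)]
    args = [v for _, v in sorted(intidx)]
    kwargs = {k: v for k, v in content.items() if not isinstance(k, int)}

    print(args)    # ['red', 'small']
    print(kwargs)  # {'owner': 'alice'}
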
6,804 | hyperledger/indy-plenum | plenum/server/replica.py | Replica.nonFinalisedReqs | python | train | https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica.py#L1428-L1433

    def nonFinalisedReqs(self, reqKeys: List[Tuple[str, int]]):
        """
        Check if there are any requests which are not finalised, i.e for
        which there are not enough PROPAGATEs
        """
        return {key for key in reqKeys if not self.requests.is_finalised(key)}

6,805 | shmir/PyIxExplorer | ixexplorer/ixe_port.py | IxePort.clear_port_stats | python | train | https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L319-L328

    def clear_port_stats(self):
        """ Clear only port stats (leave stream and packet group stats).

        Do not use - still working with Ixia to resolve.
        """
        stat = IxeStat(self)
        stat.ix_set_default()
        stat.enableValidStats = True
        stat.ix_set()
        stat.write()

6,806 | tobgu/pyrsistent | pyrsistent/_plist.py | _PListBase.split | python | train | https://github.com/tobgu/pyrsistent/blob/c84dab0daaa44973cbe83830d14888827b307632/pyrsistent/_plist.py#L109-L129

    def split(self, index):
        """
        Spilt the list at position specified by index. Returns a tuple containing the
        list up until index and the list after the index. Runs in O(index).

        >>> plist([1, 2, 3, 4]).split(2)
        (plist([1, 2]), plist([3, 4]))
        """
        lb = _PListBuilder()
        right_list = self
        i = 0
        while right_list and i < index:
            lb.append_elem(right_list.first)
            right_list = right_list.rest
            i += 1

        if not right_list:
            # Just a small optimization in the cases where no split occurred
            return self, _EMPTY_PLIST

        return lb.build(), right_list

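Assuming the pyrsistent package is installed, the method in the row above can be exercised through the public `plist` constructor; this is a minimal usage sketch, not part of the dataset record.

    from pyrsistent import plist

    lst = plist([1, 2, 3, 4])
    left, right = lst.split(2)

    print(left)   # plist([1, 2])
    print(right)  # plist([3, 4])
    print(lst)    # plist([1, 2, 3, 4]) -- the original persistent list is unchanged
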
6,807 | nion-software/nionswift-io | nionswift_plugin/TIFF_IO/tifffile.py | natural_sorted | python | train | https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L10244-L10257

    def natural_sorted(iterable):
        """Return human sorted list of strings.

        E.g. for sorting file names.

        >>> natural_sorted(['f1', 'f2', 'f10'])
        ['f1', 'f2', 'f10']

        """
        def sortkey(x):
            return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]

        numbers = re.compile(r'(\d+)')
        return sorted(iterable, key=sortkey)

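Because the helper above depends only on `re`, the same idea runs standalone; this sketch repeats the pattern on a list of hypothetical file names (the names are illustrative, not from the dataset).

    import re

    def natural_sorted(iterable):
        # Split each string into digit and non-digit runs so '10' sorts after '2'.
        numbers = re.compile(r'(\d+)')

        def sortkey(x):
            return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]

        return sorted(iterable, key=sortkey)

    print(natural_sorted(['frame10.tif', 'frame2.tif', 'frame1.tif']))
    # ['frame1.tif', 'frame2.tif', 'frame10.tif']
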
6,808 | UUDigitalHumanitieslab/tei_reader | tei_reader/models/element.py | Element.attributes | python | train | https://github.com/UUDigitalHumanitieslab/tei_reader/blob/7b19c34a9d7cc941a36ecdcf6f361e26c6488697/tei_reader/models/element.py#L14-L24

    def attributes(self):
        if 'id' in self.node.attrib:
            yield PlaceholderAttribute('id', self.node.attrib['id'])

        if 'tei-tag' in self.node.attrib:
            yield PlaceholderAttribute('tei-tag', self.node.attrib['tei-tag'])

        """Contain attributes applicable to this element"""
        for attributes in self.node.iterchildren('attributes'):
            for attribute in self.__iter_attributes__(attributes):
                yield attribute

6,809 | klavinslab/coral | coral/seqio/_dna.py | read_dna | python | train | https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/seqio/_dna.py#L22-L69

    def read_dna(path):
        '''Read DNA from file. Uses BioPython and coerces to coral format.

        :param path: Full path to input file.
        :type path: str
        :returns: DNA sequence.
        :rtype: coral.DNA

        '''
        filename, ext = os.path.splitext(os.path.split(path)[-1])

        genbank_exts = ['.gb', '.ape']
        fasta_exts = ['.fasta', '.fa', '.fsa', '.seq']
        abi_exts = ['.abi', '.ab1']

        if any([ext == extension for extension in genbank_exts]):
            file_format = 'genbank'
        elif any([ext == extension for extension in fasta_exts]):
            file_format = 'fasta'
        elif any([ext == extension for extension in abi_exts]):
            file_format = 'abi'
        else:
            raise ValueError('File format not recognized.')

        seq = SeqIO.read(path, file_format)
        dna = coral.DNA(str(seq.seq))
        if seq.name == '.':
            dna.name = filename
        else:
            dna.name = seq.name

        # Features
        for feature in seq.features:
            try:
                dna.features.append(_seqfeature_to_coral(feature))
            except FeatureNameError:
                pass
        dna.features = sorted(dna.features, key=lambda feature: feature.start)

        # Used to use data_file_division, but it's inconsistent (not always the
        # molecule type)
        dna.circular = False
        with open(path) as f:
            first_line = f.read().split()
            for word in first_line:
                if word == 'circular':
                    dna.circular = True

        return dna

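The row above wraps Biopython's `SeqIO.read`. Assuming Biopython is installed, the underlying call can be tried on an in-memory FASTA record; the sequence here is a made-up demo, and the coral-specific coercion (`coral.DNA`, feature conversion) is not exercised.

    from io import StringIO
    from Bio import SeqIO

    # A tiny in-memory FASTA record standing in for a real .fasta file on disk.
    handle = StringIO(">demo\nATGCATGC\n")
    seq = SeqIO.read(handle, "fasta")

    print(seq.id)        # demo
    print(str(seq.seq))  # ATGCATGC
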
6,810 | odlgroup/odl | odl/space/pspace.py | ProductSpaceArrayWeighting.inner | python | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1596-L1622

    def inner(self, x1, x2):
        """Calculate the array-weighted inner product of two elements.

        Parameters
        ----------
        x1, x2 : `ProductSpaceElement`
            Elements whose inner product is calculated.

        Returns
        -------
        inner : float or complex
            The inner product of the two provided elements.
        """
        if self.exponent != 2.0:
            raise NotImplementedError('no inner product defined for '
                                      'exponent != 2 (got {})'
                                      ''.format(self.exponent))

        inners = np.fromiter(
            (x1i.inner(x2i) for x1i, x2i in zip(x1, x2)),
            dtype=x1[0].space.dtype, count=len(x1))

        inner = np.dot(inners, self.array)
        if is_real_dtype(x1[0].dtype):
            return float(inner)
        else:
            return complex(inner)

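The weighting step in the row above is a dot product of componentwise inner products against the weight array. A minimal NumPy sketch of just that step, with made-up component vectors and weights rather than ODL objects:

    import numpy as np

    # Two "product space" elements, each made of two component vectors.
    x1 = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]
    x2 = [np.array([5.0, 6.0]), np.array([7.0, 8.0])]
    weights = np.array([0.5, 2.0])

    # Componentwise inner products, then the weighted combination.
    inners = np.array([np.dot(a, b) for a, b in zip(x1, x2)])
    weighted_inner = np.dot(inners, weights)

    print(inners)          # [17. 53.]
    print(weighted_inner)  # 0.5*17 + 2.0*53 = 114.5
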
6,811 | FNNDSC/med2image | med2image/message.py | Message.str_syslog | python | train | https://github.com/FNNDSC/med2image/blob/638d5d230de47608af20f9764acf8e382c2bf2ff/med2image/message.py#L109-L121

    def str_syslog(self, *args):
        '''
        get/set the str_syslog, i.e. the current value of the
        syslog prepend string.

        str_syslog():       returns the current syslog string
        str_syslog(<astr>): sets the syslog string to <astr>

        '''
        if len(args):
            self._str_syslog = args[0]
        else:
            return self._str_syslog

6,812 | cmap/cmapPy | cmapPy/pandasGEXpress/parse_gctx.py | parse_data_df | python | train | https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse_gctx.py#L320-L345

    def parse_data_df(data_dset, ridx, cidx, row_meta, col_meta):
        """
        Parses in data_df from hdf5, subsetting if specified.

        Input:
            -data_dset (h5py dset): HDF5 dataset from which to read data_df
            -ridx (list): list of indexes to subset from data_df
                (may be all of them if no subsetting)
            -cidx (list): list of indexes to subset from data_df
                (may be all of them if no subsetting)
            -row_meta (pandas DataFrame): the parsed in row metadata
            -col_meta (pandas DataFrame): the parsed in col metadata
        """
        if len(ridx) == len(row_meta.index) and len(cidx) == len(col_meta.index):  # no subset
            data_array = np.empty(data_dset.shape, dtype=np.float32)
            data_dset.read_direct(data_array)
            data_array = data_array.transpose()
        elif len(ridx) <= len(cidx):
            first_subset = data_dset[:, ridx].astype(np.float32)
            data_array = first_subset[cidx, :].transpose()
        elif len(cidx) < len(ridx):
            first_subset = data_dset[cidx, :].astype(np.float32)
            data_array = first_subset[:, ridx].transpose()

        # make DataFrame instance
        data_df = pd.DataFrame(data_array, index=row_meta.index[ridx], columns=col_meta.index[cidx])

        return data_df

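The branch logic in the row above just orders the two subsetting steps so that the smaller index list is applied first. A small NumPy stand-in (a plain array instead of an h5py dataset, with arbitrary illustrative indices) shows that either order yields the same result:

    import numpy as np

    data = np.arange(20, dtype=np.float32).reshape(4, 5)  # stand-in for the HDF5 dataset
    ridx = [0, 2]   # "row" indices of data_df (second axis of the dataset)
    cidx = [1, 3]   # "column" indices of data_df (first axis of the dataset)

    # Subset one axis, then the other, then transpose -- order does not change the result.
    a = data[:, ridx][cidx, :].transpose()
    b = data[cidx, :][:, ridx].transpose()

    print(np.array_equal(a, b))  # True
    print(a.shape)               # (2, 2): len(ridx) x len(cidx)
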
6,813 | Diaoul/subliminal | subliminal/subtitle.py | Subtitle.guess_encoding | python | train | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/subtitle.py#L96-L146

    def guess_encoding(self):
        """Guess encoding using the language, falling back on chardet.

        :return: the guessed encoding.
        :rtype: str

        """
        logger.info('Guessing encoding for language %s', self.language)

        # always try utf-8 first
        encodings = ['utf-8']

        # add language-specific encodings
        if self.language.alpha3 == 'zho':
            encodings.extend(['gb18030', 'big5'])
        elif self.language.alpha3 == 'jpn':
            encodings.append('shift-jis')
        elif self.language.alpha3 == 'ara':
            encodings.append('windows-1256')
        elif self.language.alpha3 == 'heb':
            encodings.append('windows-1255')
        elif self.language.alpha3 == 'tur':
            encodings.extend(['iso-8859-9', 'windows-1254'])
        elif self.language.alpha3 == 'pol':
            # Eastern European Group 1
            encodings.extend(['windows-1250'])
        elif self.language.alpha3 == 'bul':
            # Eastern European Group 2
            encodings.extend(['windows-1251'])
        else:
            # Western European (windows-1252)
            encodings.append('latin-1')

        # try to decode
        logger.debug('Trying encodings %r', encodings)
        for encoding in encodings:
            try:
                self.content.decode(encoding)
            except UnicodeDecodeError:
                pass
            else:
                logger.info('Guessed encoding %s', encoding)
                return encoding

        logger.warning('Could not guess encoding from language')

        # fallback on chardet
        encoding = chardet.detect(self.content)['encoding']
        logger.info('Chardet found encoding %s', encoding)

        return encoding

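The fallback strategy in the row above (try preferred codecs, then let chardet decide) can be reduced to a small standalone helper. This is a sketch of the pattern only, assuming chardet is installed; the candidate list and test string are made up.

    import chardet

    def guess_bytes_encoding(content, candidates=('utf-8', 'latin-1')):
        # Try the preferred encodings first, in order.
        for encoding in candidates:
            try:
                content.decode(encoding)
            except UnicodeDecodeError:
                continue
            return encoding
        # Fall back on chardet's statistical detection.
        return chardet.detect(content)['encoding']

    print(guess_bytes_encoding('héllo'.encode('utf-8')))  # utf-8
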
6,814 | pvlib/pvlib-python | pvlib/modelchain.py | ModelChain.complete_irradiance | python | train | https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/modelchain.py#L714-L788

    def complete_irradiance(self, times=None, weather=None):
        """
        Determine the missing irradiation columns. Only two of the
        following data columns (dni, ghi, dhi) are needed to calculate
        the missing data.

        This function is not safe at the moment. Results can be too high
        or negative. Please contribute and help to improve this function
        on https://github.com/pvlib/pvlib-python

        Parameters
        ----------
        times : None or DatetimeIndex, default None
            Times at which to evaluate the model. Can be None if
            attribute `times` is already set.
        weather : None or pandas.DataFrame, default None
            Table with at least two columns containing one of the
            following data sets: dni, dhi, ghi. Can be None if attribute
            `weather` is already set.

        Returns
        -------
        self

        Assigns attributes: times, weather

        Examples
        --------
        This example does not work until the parameters `my_system`,
        `my_location`, `my_datetime` and `my_weather` are not defined
        properly but shows the basic idea how this method can be used.

        >>> from pvlib.modelchain import ModelChain

        >>> # my_weather containing 'dhi' and 'ghi'.
        >>> mc = ModelChain(my_system, my_location)  # doctest: +SKIP
        >>> mc.complete_irradiance(my_datetime, my_weather)  # doctest: +SKIP
        >>> mc.run_model()  # doctest: +SKIP

        >>> # my_weather containing 'dhi', 'ghi' and 'dni'.
        >>> mc = ModelChain(my_system, my_location)  # doctest: +SKIP
        >>> mc.run_model(my_datetime, my_weather)  # doctest: +SKIP
        """
        if weather is not None:
            self.weather = weather
        if times is not None:
            self.times = times
        self.solar_position = self.location.get_solarposition(
            self.times, method=self.solar_position_method)
        icolumns = set(self.weather.columns)
        wrn_txt = ("This function is not safe at the moment.\n" +
                   "Results can be too high or negative.\n" +
                   "Help to improve this function on github:\n" +
                   "https://github.com/pvlib/pvlib-python \n")

        if {'ghi', 'dhi'} <= icolumns and 'dni' not in icolumns:
            clearsky = self.location.get_clearsky(
                times, solar_position=self.solar_position)
            self.weather.loc[:, 'dni'] = pvlib.irradiance.dni(
                self.weather.loc[:, 'ghi'], self.weather.loc[:, 'dhi'],
                self.solar_position.zenith,
                clearsky_dni=clearsky['dni'],
                clearsky_tolerance=1.1)
        elif {'dni', 'dhi'} <= icolumns and 'ghi' not in icolumns:
            warnings.warn(wrn_txt, UserWarning)
            self.weather.loc[:, 'ghi'] = (
                self.weather.dni * tools.cosd(self.solar_position.zenith) +
                self.weather.dhi)
        elif {'dni', 'ghi'} <= icolumns and 'dhi' not in icolumns:
            warnings.warn(wrn_txt, UserWarning)
            self.weather.loc[:, 'dhi'] = (
                self.weather.ghi - self.weather.dni *
                tools.cosd(self.solar_position.zenith))

        return self

6,815 | ewels/MultiQC | multiqc/modules/tophat/tophat.py | MultiqcModule.tophat_alignment_plot | python | train | https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/tophat/tophat.py#L122-L140

    def tophat_alignment_plot (self):
        """ Make the HighCharts HTML to plot the alignment rates """

        # Specify the order of the different possible categories
        keys = OrderedDict()
        keys['aligned_not_multimapped_discordant'] = { 'color': '#437bb1', 'name': 'Aligned' }
        keys['aligned_multimap'] = { 'color': '#f7a35c', 'name': 'Multimapped' }
        keys['aligned_discordant'] = { 'color': '#e63491', 'name': 'Discordant mappings' }
        keys['unaligned_total'] = { 'color': '#7f0000', 'name': 'Not aligned' }

        # Config for the plot
        config = {
            'id': 'tophat_alignment',
            'title': 'Tophat: Alignment Scores',
            'ylab': '# Reads',
            'cpswitch_counts_label': 'Number of Reads'
        }

        self.add_section( plot = bargraph.plot(self.tophat_data, keys, config) )

6,816 | ranaroussi/ezibpy | ezibpy/ezibpy.py | ezIBpy.getConId | python | train | https://github.com/ranaroussi/ezibpy/blob/1a9d4bf52018abd2a01af7c991d7cf00cda53e0c/ezibpy/ezibpy.py#L1974-L1979

    def getConId(self, contract_identifier):
        """ Get contracts conId """
        details = self.contractDetails(contract_identifier)
        if len(details["contracts"]) > 1:
            return details["m_underConId"]
        return details["m_summary"]["m_conId"]

6,817 | ciena/afkak | afkak/producer.py | Producer.send_messages | def send_messages(self, topic, key=None, msgs=()):
"""
Given a topic, and optional key (for partitioning) and a list of
messages, send them to Kafka, either immediately, or when a batch is
ready, depending on the Producer's batch settings.
:param str topic: Kafka topic to send the messages to
:param str key:
Message key used to determine the topic partition to which the
messages will be written. Either `bytes` or `None`.
`None` means that there is no key, but note that that:
- Kafka does not permit producing unkeyed messages to a compacted topic.
- The *partitioner_class* may require a non-`None` key
(`HashedPartitioner` does so).
:param list msgs:
A non-empty sequence of message bytestrings to send. `None`
indicates a ``null`` message (i.e. a tombstone on a compacted
topic).
:returns:
A :class:`~twisted.internet.defer.Deferred` that fires when the
messages have been received by the Kafka cluster.
It will fail with `TypeError` when:
- *topic* is not text (`str` on Python 3, `str` or `unicode` on Python 2)
- *key* is not `bytes` or `None`
- *msgs* is not a sequence of `bytes` or `None`
It will fail with `ValueError` when *msgs* is empty.
"""
try:
topic = _coerce_topic(topic)
if key is not None and not isinstance(key, bytes):
raise TypeError('key={!r} must be bytes or None'.format(key))
if not msgs:
raise ValueError("msgs must be a non-empty sequence")
msg_cnt = len(msgs)
byte_cnt = 0
for index, m in enumerate(msgs):
if m is None:
continue
if not isinstance(m, bytes):
raise TypeError('Message {} to topic {} ({!r:.100}) has type {}, but must have type {}'.format(
index, topic, m, type(m).__name__, type(bytes).__name__))
byte_cnt += len(m)
except Exception:
return fail()
d = Deferred(self._cancel_send_messages)
self._batch_reqs.append(SendRequest(topic, key, msgs, d))
self._waitingMsgCount += msg_cnt
self._waitingByteCount += byte_cnt
# Add request to list of outstanding reqs' callback to remove
self._outstanding.append(d)
d.addBoth(self._remove_from_outstanding, d)
# See if we have enough messages in the batch to do a send.
self._check_send_batch()
return d | python | def send_messages(self, topic, key=None, msgs=()):
"""
Given a topic, and optional key (for partitioning) and a list of
messages, send them to Kafka, either immediately, or when a batch is
ready, depending on the Producer's batch settings.
:param str topic: Kafka topic to send the messages to
:param str key:
Message key used to determine the topic partition to which the
messages will be written. Either `bytes` or `None`.
`None` means that there is no key, but note that that:
- Kafka does not permit producing unkeyed messages to a compacted topic.
- The *partitioner_class* may require a non-`None` key
(`HashedPartitioner` does so).
:param list msgs:
A non-empty sequence of message bytestrings to send. `None`
indicates a ``null`` message (i.e. a tombstone on a compacted
topic).
:returns:
A :class:`~twisted.internet.defer.Deferred` that fires when the
messages have been received by the Kafka cluster.
It will fail with `TypeError` when:
- *topic* is not text (`str` on Python 3, `str` or `unicode` on Python 2)
- *key* is not `bytes` or `None`
- *msgs* is not a sequence of `bytes` or `None`
It will fail with `ValueError` when *msgs* is empty.
"""
try:
topic = _coerce_topic(topic)
if key is not None and not isinstance(key, bytes):
raise TypeError('key={!r} must be bytes or None'.format(key))
if not msgs:
raise ValueError("msgs must be a non-empty sequence")
msg_cnt = len(msgs)
byte_cnt = 0
for index, m in enumerate(msgs):
if m is None:
continue
if not isinstance(m, bytes):
raise TypeError('Message {} to topic {} ({!r:.100}) has type {}, but must have type {}'.format(
index, topic, m, type(m).__name__, type(bytes).__name__))
byte_cnt += len(m)
except Exception:
return fail()
d = Deferred(self._cancel_send_messages)
self._batch_reqs.append(SendRequest(topic, key, msgs, d))
self._waitingMsgCount += msg_cnt
self._waitingByteCount += byte_cnt
# Add request to list of outstanding reqs' callback to remove
self._outstanding.append(d)
d.addBoth(self._remove_from_outstanding, d)
# See if we have enough messages in the batch to do a send.
self._check_send_batch()
return d | ['def', 'send_messages', '(', 'self', ',', 'topic', ',', 'key', '=', 'None', ',', 'msgs', '=', '(', ')', ')', ':', 'try', ':', 'topic', '=', '_coerce_topic', '(', 'topic', ')', 'if', 'key', 'is', 'not', 'None', 'and', 'not', 'isinstance', '(', 'key', ',', 'bytes', ')', ':', 'raise', 'TypeError', '(', "'key={!r} must be bytes or None'", '.', 'format', '(', 'key', ')', ')', 'if', 'not', 'msgs', ':', 'raise', 'ValueError', '(', '"msgs must be a non-empty sequence"', ')', 'msg_cnt', '=', 'len', '(', 'msgs', ')', 'byte_cnt', '=', '0', 'for', 'index', ',', 'm', 'in', 'enumerate', '(', 'msgs', ')', ':', 'if', 'm', 'is', 'None', ':', 'continue', 'if', 'not', 'isinstance', '(', 'm', ',', 'bytes', ')', ':', 'raise', 'TypeError', '(', "'Message {} to topic {} ({!r:.100}) has type {}, but must have type {}'", '.', 'format', '(', 'index', ',', 'topic', ',', 'm', ',', 'type', '(', 'm', ')', '.', '__name__', ',', 'type', '(', 'bytes', ')', '.', '__name__', ')', ')', 'byte_cnt', '+=', 'len', '(', 'm', ')', 'except', 'Exception', ':', 'return', 'fail', '(', ')', 'd', '=', 'Deferred', '(', 'self', '.', '_cancel_send_messages', ')', 'self', '.', '_batch_reqs', '.', 'append', '(', 'SendRequest', '(', 'topic', ',', 'key', ',', 'msgs', ',', 'd', ')', ')', 'self', '.', '_waitingMsgCount', '+=', 'msg_cnt', 'self', '.', '_waitingByteCount', '+=', 'byte_cnt', "# Add request to list of outstanding reqs' callback to remove", 'self', '.', '_outstanding', '.', 'append', '(', 'd', ')', 'd', '.', 'addBoth', '(', 'self', '.', '_remove_from_outstanding', ',', 'd', ')', '# See if we have enough messages in the batch to do a send.', 'self', '.', '_check_send_batch', '(', ')', 'return', 'd'] | Given a topic, and optional key (for partitioning) and a list of
messages, send them to Kafka, either immediately, or when a batch is
ready, depending on the Producer's batch settings.
:param str topic: Kafka topic to send the messages to
:param str key:
Message key used to determine the topic partition to which the
messages will be written. Either `bytes` or `None`.
`None` means that there is no key, but note that:
- Kafka does not permit producing unkeyed messages to a compacted topic.
- The *partitioner_class* may require a non-`None` key
(`HashedPartitioner` does so).
:param list msgs:
A non-empty sequence of message bytestrings to send. `None`
indicates a ``null`` message (i.e. a tombstone on a compacted
topic).
:returns:
A :class:`~twisted.internet.defer.Deferred` that fires when the
messages have been received by the Kafka cluster.
It will fail with `TypeError` when:
- *topic* is not text (`str` on Python 3, `str` or `unicode` on Python 2)
- *key* is not `bytes` or `None`
- *msgs* is not a sequence of `bytes` or `None`
It will fail with `ValueError` when *msgs* is empty. | ['Given', 'a', 'topic', 'and', 'optional', 'key', '(', 'for', 'partitioning', ')', 'and', 'a', 'list', 'of', 'messages', 'send', 'them', 'to', 'Kafka', 'either', 'immediately', 'or', 'when', 'a', 'batch', 'is', 'ready', 'depending', 'on', 'the', 'Producer', 's', 'batch', 'settings', '.'] | train | https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L166-L233 |
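A minimal usage sketch of the send_messages call documented in this record; the already-configured afkak Producer instance and the topic/key/message values are assumptions, only the send_messages signature comes from the record.
from twisted.internet import defer

@defer.inlineCallbacks
def produce_batch(producer):
    # 'producer' is assumed to be an already-configured afkak Producer.
    # Topic is text, key is bytes or None, each message is bytes or None (a tombstone).
    yield producer.send_messages('events', key=b'user-42',
                                 msgs=[b'created', b'updated', None])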
6,818 | caseyjlaw/rtpipe | rtpipe/calpipe.py | pipe.genms | def genms(self, scans=[]):
""" Generate an MS that contains all calibrator scans with 1 s integration time.
"""
if len(scans):
scanstr = string.join([str(ss) for ss in sorted(scans)], ',')
else:
scanstr = self.allstr
print 'Splitting out all cal scans (%s) with 1s int time' % scanstr
newname = ps.sdm2ms(self.sdmfile, self.sdmfile.rstrip('/')+'.ms', scanstr, inttime='1') # integrate down to 1s during split
return newname | python | def genms(self, scans=[]):
""" Generate an MS that contains all calibrator scans with 1 s integration time.
"""
if len(scans):
scanstr = string.join([str(ss) for ss in sorted(scans)], ',')
else:
scanstr = self.allstr
print 'Splitting out all cal scans (%s) with 1s int time' % scanstr
newname = ps.sdm2ms(self.sdmfile, self.sdmfile.rstrip('/')+'.ms', scanstr, inttime='1') # integrate down to 1s during split
return newname | ['def', 'genms', '(', 'self', ',', 'scans', '=', '[', ']', ')', ':', 'if', 'len', '(', 'scans', ')', ':', 'scanstr', '=', 'string', '.', 'join', '(', '[', 'str', '(', 'ss', ')', 'for', 'ss', 'in', 'sorted', '(', 'scans', ')', ']', ',', "','", ')', 'else', ':', 'scanstr', '=', 'self', '.', 'allstr', 'print', "'Splitting out all cal scans (%s) with 1s int time'", '%', 'scanstr', 'newname', '=', 'ps', '.', 'sdm2ms', '(', 'self', '.', 'sdmfile', ',', 'self', '.', 'sdmfile', '.', 'rstrip', '(', "'/'", ')', '+', "'.ms'", ',', 'scanstr', ',', 'inttime', '=', "'1'", ')', '# integrate down to 1s during split', 'return', 'newname'] | Generate an MS that contains all calibrator scans with 1 s integration time. | ['Generate', 'an', 'MS', 'that', 'contains', 'all', 'calibrator', 'scans', 'with', '1', 's', 'integration', 'time', '.'] | train | https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/calpipe.py#L64-L76 |
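A short usage sketch for genms; the pipe constructor arguments and the SDM file name are assumptions, only the genms signature and behavior are taken from the record.
from rtpipe.calpipe import pipe   # import path inferred from the record's file path

p = pipe('16A-001.sb12345.eb67890.sdm')   # hypothetical SDM file; constructor args are an assumption
msname = p.genms(scans=[2, 4])            # split calibrator scans 2 and 4 at 1 s integration
print(msname)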
6,819 | jingw/pyhdfs | pyhdfs.py | HdfsClient.rename | def rename(self, path, destination, **kwargs):
"""Renames Path src to Path dst.
:returns: true if rename is successful
:rtype: bool
"""
return _json(self._put(path, 'RENAME', destination=destination, **kwargs))['boolean'] | python | def rename(self, path, destination, **kwargs):
"""Renames Path src to Path dst.
:returns: true if rename is successful
:rtype: bool
"""
return _json(self._put(path, 'RENAME', destination=destination, **kwargs))['boolean'] | ['def', 'rename', '(', 'self', ',', 'path', ',', 'destination', ',', '*', '*', 'kwargs', ')', ':', 'return', '_json', '(', 'self', '.', '_put', '(', 'path', ',', "'RENAME'", ',', 'destination', '=', 'destination', ',', '*', '*', 'kwargs', ')', ')', '[', "'boolean'", ']'] | Renames Path src to Path dst.
:returns: true if rename is successful
:rtype: bool | ['Renames', 'Path', 'src', 'to', 'Path', 'dst', '.'] | train | https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L509-L515 |
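A minimal sketch of the rename call documented above; the NameNode address and the HDFS paths are placeholders.
from pyhdfs import HdfsClient

client = HdfsClient(hosts='namenode.example.com:50070')   # placeholder host
if client.rename('/data/staging/report.csv', '/data/published/report.csv'):
    print('rename committed')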
6,820 | UCL-INGI/INGInious | inginious/frontend/submission_manager.py | WebAppSubmissionManager.get_submission | def get_submission(self, submissionid, user_check=True):
""" Get a submission from the database """
sub = self._database.submissions.find_one({'_id': ObjectId(submissionid)})
if user_check and not self.user_is_submission_owner(sub):
return None
return sub | python | def get_submission(self, submissionid, user_check=True):
""" Get a submission from the database """
sub = self._database.submissions.find_one({'_id': ObjectId(submissionid)})
if user_check and not self.user_is_submission_owner(sub):
return None
return sub | ['def', 'get_submission', '(', 'self', ',', 'submissionid', ',', 'user_check', '=', 'True', ')', ':', 'sub', '=', 'self', '.', '_database', '.', 'submissions', '.', 'find_one', '(', '{', "'_id'", ':', 'ObjectId', '(', 'submissionid', ')', '}', ')', 'if', 'user_check', 'and', 'not', 'self', '.', 'user_is_submission_owner', '(', 'sub', ')', ':', 'return', 'None', 'return', 'sub'] | Get a submission from the database | ['Get', 'a', 'submission', 'from', 'the', 'database'] | train | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/submission_manager.py#L208-L213 |
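A small helper illustrating the ownership-checked lookup above; the submission manager instance is assumed to be provided by the surrounding application.
def fetch_owned_submission(submission_manager, submissionid):
    # submission_manager is assumed to be a configured WebAppSubmissionManager.
    sub = submission_manager.get_submission(submissionid, user_check=True)
    if sub is None:
        raise LookupError('submission missing or not owned by the current user')
    return sub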
6,821 | mitsei/dlkit | dlkit/json_/grading/objects.py | Grade.get_grade_system_id | def get_grade_system_id(self):
"""Gets the ``GradeSystem Id`` in which this grade belongs.
return: (osid.id.Id) - the grade system ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective_id
if not bool(self._my_map['gradeSystemId']):
raise errors.IllegalState('grade_system empty')
return Id(self._my_map['gradeSystemId']) | python | def get_grade_system_id(self):
"""Gets the ``GradeSystem Id`` in which this grade belongs.
return: (osid.id.Id) - the grade system ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective_id
if not bool(self._my_map['gradeSystemId']):
raise errors.IllegalState('grade_system empty')
return Id(self._my_map['gradeSystemId']) | ['def', 'get_grade_system_id', '(', 'self', ')', ':', '# Implemented from template for osid.learning.Activity.get_objective_id', 'if', 'not', 'bool', '(', 'self', '.', '_my_map', '[', "'gradeSystemId'", ']', ')', ':', 'raise', 'errors', '.', 'IllegalState', '(', "'grade_system empty'", ')', 'return', 'Id', '(', 'self', '.', '_my_map', '[', "'gradeSystemId'", ']', ')'] | Gets the ``GradeSystem Id`` in which this grade belongs.
return: (osid.id.Id) - the grade system ``Id``
*compliance: mandatory -- This method must be implemented.* | ['Gets', 'the', 'GradeSystem', 'Id', 'in', 'which', 'this', 'grade', 'belongs', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/objects.py#L51-L61 |
6,822 | angr/angr | angr/analyses/decompiler/structurer.py | Structurer._extract_jump_targets | def _extract_jump_targets(stmt):
"""
Extract goto targets from a Jump or a ConditionalJump statement.
:param stmt: The statement to analyze.
:return: A list of known concrete jump targets.
:rtype: list
"""
targets = [ ]
# FIXME: We are assuming all jump targets are concrete targets. They may not be.
if isinstance(stmt, ailment.Stmt.Jump):
targets.append(stmt.target.value)
elif isinstance(stmt, ailment.Stmt.ConditionalJump):
targets.append(stmt.true_target.value)
targets.append(stmt.false_target.value)
return targets | python | def _extract_jump_targets(stmt):
"""
Extract goto targets from a Jump or a ConditionalJump statement.
:param stmt: The statement to analyze.
:return: A list of known concrete jump targets.
:rtype: list
"""
targets = [ ]
# FIXME: We are assuming all jump targets are concrete targets. They may not be.
if isinstance(stmt, ailment.Stmt.Jump):
targets.append(stmt.target.value)
elif isinstance(stmt, ailment.Stmt.ConditionalJump):
targets.append(stmt.true_target.value)
targets.append(stmt.false_target.value)
return targets | ['def', '_extract_jump_targets', '(', 'stmt', ')', ':', 'targets', '=', '[', ']', '# FIXME: We are assuming all jump targets are concrete targets. They may not be.', 'if', 'isinstance', '(', 'stmt', ',', 'ailment', '.', 'Stmt', '.', 'Jump', ')', ':', 'targets', '.', 'append', '(', 'stmt', '.', 'target', '.', 'value', ')', 'elif', 'isinstance', '(', 'stmt', ',', 'ailment', '.', 'Stmt', '.', 'ConditionalJump', ')', ':', 'targets', '.', 'append', '(', 'stmt', '.', 'true_target', '.', 'value', ')', 'targets', '.', 'append', '(', 'stmt', '.', 'false_target', '.', 'value', ')', 'return', 'targets'] | Extract goto targets from a Jump or a ConditionalJump statement.
:param stmt: The statement to analyze.
:return: A list of known concrete jump targets.
:rtype: list | ['Extract', 'goto', 'targets', 'from', 'a', 'Jump', 'or', 'a', 'ConditionalJump', 'statement', '.'] | train | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/decompiler/structurer.py#L838-L857 |
6,823 | nkavaldj/myhdl_lib | myhdl_lib/mem.py | convert_ram_sp_rf | def convert_ram_sp_rf(ADDR_WIDTH=8, DATA_WIDTH=8):
''' Convert RAM: Single-Port, Read-First '''
clk = Signal(bool(0))
we = Signal(bool(0))
addr = Signal(intbv(0)[ADDR_WIDTH:])
di = Signal(intbv(0)[DATA_WIDTH:])
do = Signal(intbv(0)[DATA_WIDTH:])
toVerilog(ram_sp_rf, clk, we, addr, di, do) | python | def convert_ram_sp_rf(ADDR_WIDTH=8, DATA_WIDTH=8):
''' Convert RAM: Single-Port, Read-First '''
clk = Signal(bool(0))
we = Signal(bool(0))
addr = Signal(intbv(0)[ADDR_WIDTH:])
di = Signal(intbv(0)[DATA_WIDTH:])
do = Signal(intbv(0)[DATA_WIDTH:])
toVerilog(ram_sp_rf, clk, we, addr, di, do) | ['def', 'convert_ram_sp_rf', '(', 'ADDR_WIDTH', '=', '8', ',', 'DATA_WIDTH', '=', '8', ')', ':', 'clk', '=', 'Signal', '(', 'bool', '(', '0', ')', ')', 'we', '=', 'Signal', '(', 'bool', '(', '0', ')', ')', 'addr', '=', 'Signal', '(', 'intbv', '(', '0', ')', '[', 'ADDR_WIDTH', ':', ']', ')', 'di', '=', 'Signal', '(', 'intbv', '(', '0', ')', '[', 'DATA_WIDTH', ':', ']', ')', 'do', '=', 'Signal', '(', 'intbv', '(', '0', ')', '[', 'DATA_WIDTH', ':', ']', ')', 'toVerilog', '(', 'ram_sp_rf', ',', 'clk', ',', 'we', ',', 'addr', ',', 'di', ',', 'do', ')'] | Convert RAM: Single-Port, Read-First | ['Convert', 'RAM', ':', 'Single', '-', 'Port', 'Read', '-', 'First'] | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/mem.py#L193-L200 |
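A one-line usage sketch; the import path is taken from the record's file path and the chosen widths are illustrative.
from myhdl_lib.mem import convert_ram_sp_rf

# Emit Verilog for a 256 x 16-bit single-port, read-first RAM.
convert_ram_sp_rf(ADDR_WIDTH=8, DATA_WIDTH=16)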
6,824 | andrewgross/pyrelic | pyrelic/client.py | Client.get_metric_data | def get_metric_data(self, applications, metrics, field, begin, end, summary=False):
"""
Requires: account ID,
list of application IDs,
list of metrics,
metric fields,
begin,
end
Method: Get
Endpoint: api.newrelic.com
Restrictions: Rate limit to 1x per minute
Errors: 403 Invalid API key, 422 Invalid Parameters
Returns: A list of metric objects, each will have information about its
start/end time, application, metric name and any associated
values
"""
# TODO: it may be nice to have some helper methods that make it easier
# to query by common time frames based off the time period folding
# of the metrics returned by the New Relic API.
# Make sure we aren't going to hit an API timeout
self._api_rate_limit_exceeded(self.get_metric_data)
# Just in case the API needs parameters to be in order
parameters = {}
# Figure out what we were passed and set our parameter correctly
# TODO: allow querying by something other than an application name/id,
# such as server id or agent id
try:
int(applications[0])
except ValueError:
app_string = "app"
else:
app_string = "app_id"
if len(applications) > 1:
app_string = app_string + "[]"
# Set our parameters
parameters[app_string] = applications
parameters['metrics[]'] = metrics
parameters['field'] = field
parameters['begin'] = begin
parameters['end'] = end
parameters['summary'] = int(summary)
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/accounts/{account_id}/metrics/data.xml"\
.format(endpoint=endpoint, account_id=self.account_id)
# A longer timeout is needed due to the
# amount of data that can be returned
response = self._make_get_request(uri, parameters=parameters, timeout=max(self.timeout, 5.0))
# Parsing our response into lightweight objects and creating a list.
# The dividing factor is the time period covered by the metric,
# there should be no overlaps in time.
metrics = []
for metric in response.findall('.//metric'):
metrics.append(Metric(metric))
return metrics | python | def get_metric_data(self, applications, metrics, field, begin, end, summary=False):
"""
Requires: account ID,
list of application IDs,
list of metrics,
metric fields,
begin,
end
Method: Get
Endpoint: api.newrelic.com
Restrictions: Rate limit to 1x per minute
Errors: 403 Invalid API key, 422 Invalid Parameters
Returns: A list of metric objects, each will have information about its
start/end time, application, metric name and any associated
values
"""
# TODO: it may be nice to have some helper methods that make it easier
# to query by common time frames based off the time period folding
# of the metrics returned by the New Relic API.
# Make sure we aren't going to hit an API timeout
self._api_rate_limit_exceeded(self.get_metric_data)
# Just in case the API needs parameters to be in order
parameters = {}
# Figure out what we were passed and set our parameter correctly
# TODO: allow querying by something other than an application name/id,
# such as server id or agent id
try:
int(applications[0])
except ValueError:
app_string = "app"
else:
app_string = "app_id"
if len(applications) > 1:
app_string = app_string + "[]"
# Set our parameters
parameters[app_string] = applications
parameters['metrics[]'] = metrics
parameters['field'] = field
parameters['begin'] = begin
parameters['end'] = end
parameters['summary'] = int(summary)
endpoint = "https://api.newrelic.com"
uri = "{endpoint}/api/v1/accounts/{account_id}/metrics/data.xml"\
.format(endpoint=endpoint, account_id=self.account_id)
# A longer timeout is needed due to the
# amount of data that can be returned
response = self._make_get_request(uri, parameters=parameters, timeout=max(self.timeout, 5.0))
# Parsing our response into lightweight objects and creating a list.
# The dividing factor is the time period covered by the metric,
# there should be no overlaps in time.
metrics = []
for metric in response.findall('.//metric'):
metrics.append(Metric(metric))
return metrics | ['def', 'get_metric_data', '(', 'self', ',', 'applications', ',', 'metrics', ',', 'field', ',', 'begin', ',', 'end', ',', 'summary', '=', 'False', ')', ':', '# TODO: it may be nice to have some helper methods that make it easier', '# to query by common time frames based off the time period folding', '# of the metrics returned by the New Relic API.', "# Make sure we aren't going to hit an API timeout", 'self', '.', '_api_rate_limit_exceeded', '(', 'self', '.', 'get_metric_data', ')', '# Just in case the API needs parameters to be in order', 'parameters', '=', '{', '}', '# Figure out what we were passed and set our parameter correctly', '# TODO: allow querying by something other than an application name/id,', '# such as server id or agent id', 'try', ':', 'int', '(', 'applications', '[', '0', ']', ')', 'except', 'ValueError', ':', 'app_string', '=', '"app"', 'else', ':', 'app_string', '=', '"app_id"', 'if', 'len', '(', 'applications', ')', '>', '1', ':', 'app_string', '=', 'app_string', '+', '"[]"', '# Set our parameters', 'parameters', '[', 'app_string', ']', '=', 'applications', 'parameters', '[', "'metrics[]'", ']', '=', 'metrics', 'parameters', '[', "'field'", ']', '=', 'field', 'parameters', '[', "'begin'", ']', '=', 'begin', 'parameters', '[', "'end'", ']', '=', 'end', 'parameters', '[', "'summary'", ']', '=', 'int', '(', 'summary', ')', 'endpoint', '=', '"https://api.newrelic.com"', 'uri', '=', '"{endpoint}/api/v1/accounts/{account_id}/metrics/data.xml"', '.', 'format', '(', 'endpoint', '=', 'endpoint', ',', 'account_id', '=', 'self', '.', 'account_id', ')', '# A longer timeout is needed due to the', '# amount of data that can be returned', 'response', '=', 'self', '.', '_make_get_request', '(', 'uri', ',', 'parameters', '=', 'parameters', ',', 'timeout', '=', 'max', '(', 'self', '.', 'timeout', ',', '5.0', ')', ')', '# Parsing our response into lightweight objects and creating a list.', '# The dividing factor is the time period covered by the metric,', '# there should be no overlaps in time.', 'metrics', '=', '[', ']', 'for', 'metric', 'in', 'response', '.', 'findall', '(', "'.//metric'", ')', ':', 'metrics', '.', 'append', '(', 'Metric', '(', 'metric', ')', ')', 'return', 'metrics'] | Requires: account ID,
list of application IDs,
list of metrics,
metric fields,
begin,
end
Method: Get
Endpoint: api.newrelic.com
Restrictions: Rate limit to 1x per minute
Errors: 403 Invalid API key, 422 Invalid Parameters
Returns: A list of metric objects, each will have information about its
start/end time, application, metric name and any associated
values | ['Requires', ':', 'account', 'ID', 'list', 'of', 'application', 'IDs', 'list', 'of', 'metrics', 'metric', 'fields', 'begin', 'end', 'Method', ':', 'Get', 'Endpoint', ':', 'api', '.', 'newrelic', '.', 'com', 'Restrictions', ':', 'Rate', 'limit', 'to', '1x', 'per', 'minute', 'Errors', ':', '403', 'Invalid', 'API', 'key', '422', 'Invalid', 'Parameters', 'Returns', ':', 'A', 'list', 'of', 'metric', 'objects', 'each', 'will', 'have', 'information', 'about', 'its', 'start', '/', 'end', 'time', 'application', 'metric', 'name', 'and', 'any', 'associated', 'values'] | train | https://github.com/andrewgross/pyrelic/blob/641abe7bfa56bf850281f2d9c90cebe7ea2dfd1e/pyrelic/client.py#L250-L310 |
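A usage sketch for the metric query above; the Client constructor arguments, application and metric names, and time stamps are assumptions.
from pyrelic import Client   # assumed top-level export

client = Client(account_id='12345', api_key='NEW_RELIC_API_KEY')   # placeholder credentials
metrics = client.get_metric_data(
    applications=['My Web App'],
    metrics=['WebTransaction'],
    field='average_response_time',
    begin='2014-01-01T00:00:00Z',
    end='2014-01-02T00:00:00Z',
    summary=True,
)
for metric in metrics:
    print(metric)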
6,825 | ClimateImpactLab/DataFS | datafs/core/data_api.py | DataAPI.get_archive | def get_archive(self, archive_name, default_version=None):
'''
Retrieve a data archive
Parameters
----------
archive_name: str
Name of the archive to retrieve
default_version: version
str or :py:class:`~distutils.StrictVersion` giving the default
version number to be used on read operations
Returns
-------
archive: object
New :py:class:`~datafs.core.data_archive.DataArchive` object
Raises
------
KeyError:
A KeyError is raised when the ``archive_name`` is not found
'''
auth, archive_name = self._normalize_archive_name(archive_name)
res = self.manager.get_archive(archive_name)
if default_version is None:
default_version = self._default_versions.get(archive_name, None)
if (auth is not None) and (auth != res['authority_name']):
raise ValueError(
'Archive "{}" not found on {}.'.format(archive_name, auth) +
' Did you mean "{}://{}"?'.format(
res['authority_name'], archive_name))
return self._ArchiveConstructor(
api=self,
default_version=default_version,
**res) | python | def get_archive(self, archive_name, default_version=None):
'''
Retrieve a data archive
Parameters
----------
archive_name: str
Name of the archive to retrieve
default_version: version
str or :py:class:`~distutils.StrictVersion` giving the default
version number to be used on read operations
Returns
-------
archive: object
New :py:class:`~datafs.core.data_archive.DataArchive` object
Raises
------
KeyError:
A KeyError is raised when the ``archive_name`` is not found
'''
auth, archive_name = self._normalize_archive_name(archive_name)
res = self.manager.get_archive(archive_name)
if default_version is None:
default_version = self._default_versions.get(archive_name, None)
if (auth is not None) and (auth != res['authority_name']):
raise ValueError(
'Archive "{}" not found on {}.'.format(archive_name, auth) +
' Did you mean "{}://{}"?'.format(
res['authority_name'], archive_name))
return self._ArchiveConstructor(
api=self,
default_version=default_version,
**res) | ['def', 'get_archive', '(', 'self', ',', 'archive_name', ',', 'default_version', '=', 'None', ')', ':', 'auth', ',', 'archive_name', '=', 'self', '.', '_normalize_archive_name', '(', 'archive_name', ')', 'res', '=', 'self', '.', 'manager', '.', 'get_archive', '(', 'archive_name', ')', 'if', 'default_version', 'is', 'None', ':', 'default_version', '=', 'self', '.', '_default_versions', '.', 'get', '(', 'archive_name', ',', 'None', ')', 'if', '(', 'auth', 'is', 'not', 'None', ')', 'and', '(', 'auth', '!=', 'res', '[', "'authority_name'", ']', ')', ':', 'raise', 'ValueError', '(', '\'Archive "{}" not found on {}.\'', '.', 'format', '(', 'archive_name', ',', 'auth', ')', '+', '\' Did you mean "{}://{}"?\'', '.', 'format', '(', 'res', '[', "'authority_name'", ']', ',', 'archive_name', ')', ')', 'return', 'self', '.', '_ArchiveConstructor', '(', 'api', '=', 'self', ',', 'default_version', '=', 'default_version', ',', '*', '*', 'res', ')'] | Retrieve a data archive
Parameters
----------
archive_name: str
Name of the archive to retrieve
default_version: version
str or :py:class:`~distutils.StrictVersion` giving the default
version number to be used on read operations
Returns
-------
archive: object
New :py:class:`~datafs.core.data_archive.DataArchive` object
Raises
------
KeyError:
A KeyError is raised when the ``archive_name`` is not found | ['Retrieve', 'a', 'data', 'archive'] | train | https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_api.py#L190-L233 |
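A short sketch of retrieving an archive; the get_api factory, authority prefix, archive name, and version string assume a configured DataFS profile and are illustrative only.
from datafs import get_api   # assumed factory requiring a DataFS configuration file

api = get_api()
archive = api.get_archive('my_team://observations/temperature.nc', default_version='1.0')
print(archive)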
6,826 | hobson/pug-invest | pug/invest/plot.py | thin_string_list | def thin_string_list(list_of_strings, max_nonempty_strings=50, blank=''):
"""Designed for composing lists of strings suitable for pyplot axis labels
Often the xtick spacing doesn't allow room for 100's of text labels, so this
eliminates every other one, then every other one of those, until they fit.
>>> thin_string_list(['x']*20, 5) # doctest: +NORMALIZE_WHITESPACE
['x', '', '', '', 'x', '', '', '', 'x', '', '', '', 'x', '', '', '', 'x', '', '', '']
"""
# blank some labels to make sure they don't overlap
list_of_strings = list(list_of_strings)
istep = 2
while sum(bool(s) for s in list_of_strings) > max_nonempty_strings:
list_of_strings = [blank if i % istep else s for i, s in enumerate(list_of_strings)]
istep += 2
return list_of_strings | python | def thin_string_list(list_of_strings, max_nonempty_strings=50, blank=''):
"""Designed for composing lists of strings suitable for pyplot axis labels
Often the xtick spacing doesn't allow room for 100's of text labels, so this
eliminates every other one, then every other one of those, until they fit.
>>> thin_string_list(['x']*20, 5) # doctest: +NORMALIZE_WHITESPACE
['x', '', '', '', 'x', '', '', '', 'x', '', '', '', 'x', '', '', '', 'x', '', '', '']
"""
# blank some labels to make sure they don't overlap
list_of_strings = list(list_of_strings)
istep = 2
while sum(bool(s) for s in list_of_strings) > max_nonempty_strings:
list_of_strings = [blank if i % istep else s for i, s in enumerate(list_of_strings)]
istep += 2
return list_of_strings | ['def', 'thin_string_list', '(', 'list_of_strings', ',', 'max_nonempty_strings', '=', '50', ',', 'blank', '=', "''", ')', ':', "# blank some labels to make sure they don't overlap", 'list_of_strings', '=', 'list', '(', 'list_of_strings', ')', 'istep', '=', '2', 'while', 'sum', '(', 'bool', '(', 's', ')', 'for', 's', 'in', 'list_of_strings', ')', '>', 'max_nonempty_strings', ':', 'list_of_strings', '=', '[', 'blank', 'if', 'i', '%', 'istep', 'else', 's', 'for', 'i', ',', 's', 'in', 'enumerate', '(', 'list_of_strings', ')', ']', 'istep', '+=', '2', 'return', 'list_of_strings'] | Designed for composing lists of strings suitable for pyplot axis labels
Often the xtick spacing doesn't allow room for 100's of text labels, so this
eliminates every other one, then every other one of those, until they fit.
>>> thin_string_list(['x']*20, 5) # doctest: +NORMALIZE_WHITESPACE
['x', '', '', '', 'x', '', '', '', 'x', '', '', '', 'x', '', '', '', 'x', '', '', ''] | ['Designed', 'for', 'composing', 'lists', 'of', 'strings', 'suitable', 'for', 'pyplot', 'axis', 'labels'] | train | https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/plot.py#L259-L274 |
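A quick usage example mirroring the doctest above; the month labels are illustrative.
from pug.invest.plot import thin_string_list   # import path taken from the record

labels = ['2014-{:02d}'.format(month) for month in range(1, 13)]
# Keep at most 4 non-empty tick labels; the rest are blanked out.
print(thin_string_list(labels, max_nonempty_strings=4))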
6,827 | RiotGames/cloud-inquisitor | backend/cloud_inquisitor/plugins/types/issues.py | BaseIssue.search | def search(cls, *, limit=100, page=1, properties=None, return_query=False):
"""Search for issues based on the provided filters
Args:
limit (`int`): Number of results to return. Default: 100
page (`int`): Pagination offset for results. Default: 1
properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list
of strings, in which case a boolean OR search is performed on the values
return_query (`bool`): Returns the query object prior to adding the limit and offset functions. Allows for
sub-classes to amend the search feature with extra conditions. The calling function must handle pagination
on its own
Returns:
`list` of `Issue`, `sqlalchemy.orm.Query`
"""
qry = db.Issue.order_by(Issue.issue_id).filter(
Issue.issue_type_id == IssueType.get(cls.issue_type).issue_type_id
)
if properties:
for prop_name, value in properties.items():
alias = aliased(IssueProperty)
qry = qry.join(alias, Issue.issue_id == alias.issue_id)
if type(value) == list:
where_clause = []
for item in value:
where_clause.append(alias.value == item)
qry = qry.filter(
and_(
alias.name == prop_name,
or_(*where_clause)
).self_group()
)
else:
qry = qry.filter(
and_(
alias.name == prop_name,
alias.value == value
).self_group()
)
if return_query:
return qry
total = qry.count()
qry = qry.limit(limit)
qry = qry.offset((page - 1) * limit if page > 1 else 0)
return total, [cls(x) for x in qry.all()] | python | def search(cls, *, limit=100, page=1, properties=None, return_query=False):
"""Search for issues based on the provided filters
Args:
limit (`int`): Number of results to return. Default: 100
page (`int`): Pagination offset for results. Default: 1
properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list
of strings, in which case a boolean OR search is performed on the values
return_query (`bool`): Returns the query object prior to adding the limit and offset functions. Allows for
sub-classes to amend the search feature with extra conditions. The calling function must handle pagination
on its own
Returns:
`list` of `Issue`, `sqlalchemy.orm.Query`
"""
qry = db.Issue.order_by(Issue.issue_id).filter(
Issue.issue_type_id == IssueType.get(cls.issue_type).issue_type_id
)
if properties:
for prop_name, value in properties.items():
alias = aliased(IssueProperty)
qry = qry.join(alias, Issue.issue_id == alias.issue_id)
if type(value) == list:
where_clause = []
for item in value:
where_clause.append(alias.value == item)
qry = qry.filter(
and_(
alias.name == prop_name,
or_(*where_clause)
).self_group()
)
else:
qry = qry.filter(
and_(
alias.name == prop_name,
alias.value == value
).self_group()
)
if return_query:
return qry
total = qry.count()
qry = qry.limit(limit)
qry = qry.offset((page - 1) * limit if page > 1 else 0)
return total, [cls(x) for x in qry.all()] | ['def', 'search', '(', 'cls', ',', '*', ',', 'limit', '=', '100', ',', 'page', '=', '1', ',', 'properties', '=', 'None', ',', 'return_query', '=', 'False', ')', ':', 'qry', '=', 'db', '.', 'Issue', '.', 'order_by', '(', 'Issue', '.', 'issue_id', ')', '.', 'filter', '(', 'Issue', '.', 'issue_type_id', '==', 'IssueType', '.', 'get', '(', 'cls', '.', 'issue_type', ')', '.', 'issue_type_id', ')', 'if', 'properties', ':', 'for', 'prop_name', ',', 'value', 'in', 'properties', '.', 'items', '(', ')', ':', 'alias', '=', 'aliased', '(', 'IssueProperty', ')', 'qry', '=', 'qry', '.', 'join', '(', 'alias', ',', 'Issue', '.', 'issue_id', '==', 'alias', '.', 'issue_id', ')', 'if', 'type', '(', 'value', ')', '==', 'list', ':', 'where_clause', '=', '[', ']', 'for', 'item', 'in', 'value', ':', 'where_clause', '.', 'append', '(', 'alias', '.', 'value', '==', 'item', ')', 'qry', '=', 'qry', '.', 'filter', '(', 'and_', '(', 'alias', '.', 'name', '==', 'prop_name', ',', 'or_', '(', '*', 'where_clause', ')', ')', '.', 'self_group', '(', ')', ')', 'else', ':', 'qry', '=', 'qry', '.', 'filter', '(', 'and_', '(', 'alias', '.', 'name', '==', 'prop_name', ',', 'alias', '.', 'value', '==', 'value', ')', '.', 'self_group', '(', ')', ')', 'if', 'return_query', ':', 'return', 'qry', 'total', '=', 'qry', '.', 'count', '(', ')', 'qry', '=', 'qry', '.', 'limit', '(', 'limit', ')', 'qry', '=', 'qry', '.', 'offset', '(', '(', 'page', '-', '1', ')', '*', 'limit', 'if', 'page', '>', '1', 'else', '0', ')', 'return', 'total', ',', '[', 'cls', '(', 'x', ')', 'for', 'x', 'in', 'qry', '.', 'all', '(', ')', ']'] | Search for issues based on the provided filters
Args:
limit (`int`): Number of results to return. Default: 100
page (`int`): Pagination offset for results. Default: 1
properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list
of strings, in which case a boolean OR search is performed on the values
return_query (`bool`): Returns the query object prior to adding the limit and offset functions. Allows for
sub-classes to amend the search feature with extra conditions. The calling function must handle pagination
on its own
Returns:
`list` of `Issue`, `sqlalchemy.orm.Query` | ['Search', 'for', 'issues', 'based', 'on', 'the', 'provided', 'filters'] | train | https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/types/issues.py#L182-L231 |
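A sketch of calling the classmethod; issue_cls stands in for a concrete BaseIssue subclass and the property names and values are illustrative.
def find_open_issues(issue_cls):
    # issue_cls is assumed to be a concrete subclass of BaseIssue registered with an issue type.
    total, issues = issue_cls.search(
        limit=25,
        page=1,
        properties={'state': ['new', 'in_progress']},
    )
    return total, issues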
6,828 | openpaperwork/paperwork-backend | paperwork_backend/img/page.py | ImgPage.__get_boxes | def __get_boxes(self):
"""
Get all the word boxes of this page.
"""
boxfile = self.__box_path
try:
box_builder = pyocr.builders.LineBoxBuilder()
with self.fs.open(boxfile, 'r') as file_desc:
boxes = box_builder.read_file(file_desc)
if boxes != []:
return boxes
# fallback: old format: word boxes
# shouldn't be used anymore ...
box_builder = pyocr.builders.WordBoxBuilder()
with self.fs.open(boxfile, 'r') as file_desc:
boxes = box_builder.read_file(file_desc)
if len(boxes) <= 0:
return []
logger.warning("WARNING: Doc %s uses old box format" %
(str(self.doc)))
return [pyocr.builders.LineBox(boxes, boxes[0].position)]
except IOError as exc:
logger.error("Unable to get boxes for '%s': %s"
% (self.doc.docid, exc))
return [] | python | def __get_boxes(self):
"""
Get all the word boxes of this page.
"""
boxfile = self.__box_path
try:
box_builder = pyocr.builders.LineBoxBuilder()
with self.fs.open(boxfile, 'r') as file_desc:
boxes = box_builder.read_file(file_desc)
if boxes != []:
return boxes
# fallback: old format: word boxes
# shouldn't be used anymore ...
box_builder = pyocr.builders.WordBoxBuilder()
with self.fs.open(boxfile, 'r') as file_desc:
boxes = box_builder.read_file(file_desc)
if len(boxes) <= 0:
return []
logger.warning("WARNING: Doc %s uses old box format" %
(str(self.doc)))
return [pyocr.builders.LineBox(boxes, boxes[0].position)]
except IOError as exc:
logger.error("Unable to get boxes for '%s': %s"
% (self.doc.docid, exc))
return [] | ['def', '__get_boxes', '(', 'self', ')', ':', 'boxfile', '=', 'self', '.', '__box_path', 'try', ':', 'box_builder', '=', 'pyocr', '.', 'builders', '.', 'LineBoxBuilder', '(', ')', 'with', 'self', '.', 'fs', '.', 'open', '(', 'boxfile', ',', "'r'", ')', 'as', 'file_desc', ':', 'boxes', '=', 'box_builder', '.', 'read_file', '(', 'file_desc', ')', 'if', 'boxes', '!=', '[', ']', ':', 'return', 'boxes', '# fallback: old format: word boxes', "# shouldn't be used anymore ...", 'box_builder', '=', 'pyocr', '.', 'builders', '.', 'WordBoxBuilder', '(', ')', 'with', 'self', '.', 'fs', '.', 'open', '(', 'boxfile', ',', "'r'", ')', 'as', 'file_desc', ':', 'boxes', '=', 'box_builder', '.', 'read_file', '(', 'file_desc', ')', 'if', 'len', '(', 'boxes', ')', '<=', '0', ':', 'return', '[', ']', 'logger', '.', 'warning', '(', '"WARNING: Doc %s uses old box format"', '%', '(', 'str', '(', 'self', '.', 'doc', ')', ')', ')', 'return', '[', 'pyocr', '.', 'builders', '.', 'LineBox', '(', 'boxes', ',', 'boxes', '[', '0', ']', '.', 'position', ')', ']', 'except', 'IOError', 'as', 'exc', ':', 'logger', '.', 'error', '(', '"Unable to get boxes for \'%s\': %s"', '%', '(', 'self', '.', 'doc', '.', 'docid', ',', 'exc', ')', ')', 'return', '[', ']'] | Get all the word boxes of this page. | ['Get', 'all', 'the', 'word', 'boxes', 'of', 'this', 'page', '.'] | train | https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/img/page.py#L95-L120 |
6,829 | saltstack/salt | salt/modules/aptpkg.py | get_repo_keys | def get_repo_keys():
'''
.. versionadded:: 2017.7.0
List known repo key details.
:return: A dictionary containing the repo keys.
:rtype: dict
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo_keys
'''
ret = dict()
repo_keys = list()
# The double usage of '--with-fingerprint' is necessary in order to
# retrieve the fingerprint of the subkey.
cmd = ['apt-key', 'adv', '--batch', '--list-public-keys', '--with-fingerprint',
'--with-fingerprint', '--with-colons', '--fixed-list-mode']
cmd_ret = _call_apt(cmd, scope=False)
if cmd_ret['retcode'] != 0:
log.error(cmd_ret['stderr'])
return ret
lines = [line for line in cmd_ret['stdout'].splitlines() if line.strip()]
# Reference for the meaning of each item in the colon-separated
# record can be found here: https://goo.gl/KIZbvp
for line in lines:
items = [_convert_if_int(item.strip()) if item.strip() else None for item in line.split(':')]
key_props = dict()
if len(items) < 2:
log.debug('Skipping line: %s', line)
continue
if items[0] in ('pub', 'sub'):
key_props.update({
'algorithm': items[3],
'bits': items[2],
'capability': items[11],
'date_creation': items[5],
'date_expiration': items[6],
'keyid': items[4],
'validity': items[1]
})
if items[0] == 'pub':
repo_keys.append(key_props)
else:
repo_keys[-1]['subkey'] = key_props
elif items[0] == 'fpr':
if repo_keys[-1].get('subkey', False):
repo_keys[-1]['subkey'].update({'fingerprint': items[9]})
else:
repo_keys[-1].update({'fingerprint': items[9]})
elif items[0] == 'uid':
repo_keys[-1].update({
'uid': items[9],
'uid_hash': items[7]
})
for repo_key in repo_keys:
ret[repo_key['keyid']] = repo_key
return ret | python | def get_repo_keys():
'''
.. versionadded:: 2017.7.0
List known repo key details.
:return: A dictionary containing the repo keys.
:rtype: dict
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo_keys
'''
ret = dict()
repo_keys = list()
# The double usage of '--with-fingerprint' is necessary in order to
# retrieve the fingerprint of the subkey.
cmd = ['apt-key', 'adv', '--batch', '--list-public-keys', '--with-fingerprint',
'--with-fingerprint', '--with-colons', '--fixed-list-mode']
cmd_ret = _call_apt(cmd, scope=False)
if cmd_ret['retcode'] != 0:
log.error(cmd_ret['stderr'])
return ret
lines = [line for line in cmd_ret['stdout'].splitlines() if line.strip()]
# Reference for the meaning of each item in the colon-separated
# record can be found here: https://goo.gl/KIZbvp
for line in lines:
items = [_convert_if_int(item.strip()) if item.strip() else None for item in line.split(':')]
key_props = dict()
if len(items) < 2:
log.debug('Skipping line: %s', line)
continue
if items[0] in ('pub', 'sub'):
key_props.update({
'algorithm': items[3],
'bits': items[2],
'capability': items[11],
'date_creation': items[5],
'date_expiration': items[6],
'keyid': items[4],
'validity': items[1]
})
if items[0] == 'pub':
repo_keys.append(key_props)
else:
repo_keys[-1]['subkey'] = key_props
elif items[0] == 'fpr':
if repo_keys[-1].get('subkey', False):
repo_keys[-1]['subkey'].update({'fingerprint': items[9]})
else:
repo_keys[-1].update({'fingerprint': items[9]})
elif items[0] == 'uid':
repo_keys[-1].update({
'uid': items[9],
'uid_hash': items[7]
})
for repo_key in repo_keys:
ret[repo_key['keyid']] = repo_key
return ret | ['def', 'get_repo_keys', '(', ')', ':', 'ret', '=', 'dict', '(', ')', 'repo_keys', '=', 'list', '(', ')', "# The double usage of '--with-fingerprint' is necessary in order to", '# retrieve the fingerprint of the subkey.', 'cmd', '=', '[', "'apt-key'", ',', "'adv'", ',', "'--batch'", ',', "'--list-public-keys'", ',', "'--with-fingerprint'", ',', "'--with-fingerprint'", ',', "'--with-colons'", ',', "'--fixed-list-mode'", ']', 'cmd_ret', '=', '_call_apt', '(', 'cmd', ',', 'scope', '=', 'False', ')', 'if', 'cmd_ret', '[', "'retcode'", ']', '!=', '0', ':', 'log', '.', 'error', '(', 'cmd_ret', '[', "'stderr'", ']', ')', 'return', 'ret', 'lines', '=', '[', 'line', 'for', 'line', 'in', 'cmd_ret', '[', "'stdout'", ']', '.', 'splitlines', '(', ')', 'if', 'line', '.', 'strip', '(', ')', ']', '# Reference for the meaning of each item in the colon-separated', '# record can be found here: https://goo.gl/KIZbvp', 'for', 'line', 'in', 'lines', ':', 'items', '=', '[', '_convert_if_int', '(', 'item', '.', 'strip', '(', ')', ')', 'if', 'item', '.', 'strip', '(', ')', 'else', 'None', 'for', 'item', 'in', 'line', '.', 'split', '(', "':'", ')', ']', 'key_props', '=', 'dict', '(', ')', 'if', 'len', '(', 'items', ')', '<', '2', ':', 'log', '.', 'debug', '(', "'Skipping line: %s'", ',', 'line', ')', 'continue', 'if', 'items', '[', '0', ']', 'in', '(', "'pub'", ',', "'sub'", ')', ':', 'key_props', '.', 'update', '(', '{', "'algorithm'", ':', 'items', '[', '3', ']', ',', "'bits'", ':', 'items', '[', '2', ']', ',', "'capability'", ':', 'items', '[', '11', ']', ',', "'date_creation'", ':', 'items', '[', '5', ']', ',', "'date_expiration'", ':', 'items', '[', '6', ']', ',', "'keyid'", ':', 'items', '[', '4', ']', ',', "'validity'", ':', 'items', '[', '1', ']', '}', ')', 'if', 'items', '[', '0', ']', '==', "'pub'", ':', 'repo_keys', '.', 'append', '(', 'key_props', ')', 'else', ':', 'repo_keys', '[', '-', '1', ']', '[', "'subkey'", ']', '=', 'key_props', 'elif', 'items', '[', '0', ']', '==', "'fpr'", ':', 'if', 'repo_keys', '[', '-', '1', ']', '.', 'get', '(', "'subkey'", ',', 'False', ')', ':', 'repo_keys', '[', '-', '1', ']', '[', "'subkey'", ']', '.', 'update', '(', '{', "'fingerprint'", ':', 'items', '[', '9', ']', '}', ')', 'else', ':', 'repo_keys', '[', '-', '1', ']', '.', 'update', '(', '{', "'fingerprint'", ':', 'items', '[', '9', ']', '}', ')', 'elif', 'items', '[', '0', ']', '==', "'uid'", ':', 'repo_keys', '[', '-', '1', ']', '.', 'update', '(', '{', "'uid'", ':', 'items', '[', '9', ']', ',', "'uid_hash'", ':', 'items', '[', '7', ']', '}', ')', 'for', 'repo_key', 'in', 'repo_keys', ':', 'ret', '[', 'repo_key', '[', "'keyid'", ']', ']', '=', 'repo_key', 'return', 'ret'] | .. versionadded:: 2017.7.0
List known repo key details.
:return: A dictionary containing the repo keys.
:rtype: dict
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo_keys | ['..', 'versionadded', '::', '2017', '.', '7', '.', '0'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aptpkg.py#L1835-L1904 |
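A stand-alone sketch of the colon-record parsing idea the function applies to 'pub' lines; the sample line is illustrative and the integer conversion done by _convert_if_int is omitted.
sample = 'pub:-:4096:1:ABCDEF0123456789:1507176008:::-:::scSC::::::23::0:'

def parse_pub_record(line):
    items = [item.strip() or None for item in line.split(':')]
    return {
        'validity': items[1],
        'bits': items[2],
        'algorithm': items[3],
        'keyid': items[4],
        'date_creation': items[5],
        'date_expiration': items[6],
    }

print(parse_pub_record(sample))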
6,830 | dropseed/configyaml | configyaml/config/base.py | AbstractNode._validate | def _validate(self): # type: () -> None
"""Run validation, save errors to object in self._errors"""
# class can specify its own empty obj -- a list would have an empty value of []
self._errors = []
self._validate_type()
if self.is_valid():
self._validate_value() | python | def _validate(self): # type: () -> None
"""Run validation, save errors to object in self._errors"""
# class can specify its own empty obj -- a list would have an empty value of []
self._errors = []
self._validate_type()
if self.is_valid():
self._validate_value() | ['def', '_validate', '(', 'self', ')', ':', '# type: () -> None', "# class can specify it's empty obj -- list would have empty of []", 'self', '.', '_errors', '=', '[', ']', 'self', '.', '_validate_type', '(', ')', 'if', 'self', '.', 'is_valid', '(', ')', ':', 'self', '.', '_validate_value', '(', ')'] | Run validation, save errors to object in self._errors | ['Run', 'validation', 'save', 'errors', 'to', 'object', 'in', 'self', '.', '_errors'] | train | https://github.com/dropseed/configyaml/blob/d008f251530d054c2d1fb3e8ac1a9030436134c8/configyaml/config/base.py#L163-L171 |
6,831 | gem/oq-engine | openquake/hazardlib/geo/surface/base.py | BaseSurface.get_top_edge_depth | def get_top_edge_depth(self):
"""
Return minimum depth of surface's top edge.
:returns:
Float value, the vertical distance between the earth surface
and the shallowest point in surface's top edge in km.
"""
top_edge = self.mesh[0:1]
if top_edge.depths is None:
return 0
else:
return numpy.min(top_edge.depths) | python | def get_top_edge_depth(self):
"""
Return minimum depth of surface's top edge.
:returns:
Float value, the vertical distance between the earth surface
and the shallowest point in surface's top edge in km.
"""
top_edge = self.mesh[0:1]
if top_edge.depths is None:
return 0
else:
return numpy.min(top_edge.depths) | ['def', 'get_top_edge_depth', '(', 'self', ')', ':', 'top_edge', '=', 'self', '.', 'mesh', '[', '0', ':', '1', ']', 'if', 'top_edge', '.', 'depths', 'is', 'None', ':', 'return', '0', 'else', ':', 'return', 'numpy', '.', 'min', '(', 'top_edge', '.', 'depths', ')'] | Return minimum depth of surface's top edge.
:returns:
Float value, the vertical distance between the earth surface
and the shallowest point in surface's top edge in km. | ['Return', 'minimum', 'depth', 'of', 'surface', 's', 'top', 'edge', '.'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/surface/base.py#L268-L280 |
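A tiny helper showing how the depth accessor above might be used; the surface instance is assumed to come from one of the concrete BaseSurface subclasses built elsewhere.
def is_shallow_rupture_surface(surface, max_depth_km=10.0):
    # 'surface' is assumed to be a concrete BaseSurface subclass instance.
    return surface.get_top_edge_depth() <= max_depth_km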
6,832 | unt-libraries/edtf-validate | edtf_validate/valid_edtf.py | replace_u | def replace_u(matchobj):
"""Break the interval into parts, and replace 'u's.
pieces - [pos/neg, start_year, start_month, start_day,
pos/neg, end_year, end_month, end_day]
"""
pieces = list(matchobj.groups(''))
# Replace "u"s in start and end years.
if 'u' in pieces[1]:
pieces[1] = pieces[1].replace('u', '0')
if 'u' in pieces[5]:
pieces[5] = pieces[5].replace('u', '9')
# Replace "u"s in start month.
if 'u' in pieces[2]:
pieces[2] = '-' + replace_u_start_month(pieces[2])
# Replace "u"s in end month.
if 'u' in pieces[6]:
pieces[6] = '-' + replace_u_end_month(pieces[6])
# Replace "u"s in start day.
if 'u' in pieces[3]:
pieces[3] = '-' + replace_u_start_day(pieces[3])
# Replace "u"s in end day.
if 'u' in pieces[7]:
pieces[7] = '-' + replace_u_end_day(pieces[7], year=pieces[5],
month=pieces[6])
return ''.join((''.join(pieces[:4]), '/', ''.join(pieces[4:]))) | python | def replace_u(matchobj):
"""Break the interval into parts, and replace 'u's.
pieces - [pos/neg, start_year, start_month, start_day,
pos/neg, end_year, end_month, end_day]
"""
pieces = list(matchobj.groups(''))
# Replace "u"s in start and end years.
if 'u' in pieces[1]:
pieces[1] = pieces[1].replace('u', '0')
if 'u' in pieces[5]:
pieces[5] = pieces[5].replace('u', '9')
# Replace "u"s in start month.
if 'u' in pieces[2]:
pieces[2] = '-' + replace_u_start_month(pieces[2])
# Replace "u"s in end month.
if 'u' in pieces[6]:
pieces[6] = '-' + replace_u_end_month(pieces[6])
# Replace "u"s in start day.
if 'u' in pieces[3]:
pieces[3] = '-' + replace_u_start_day(pieces[3])
# Replace "u"s in end day.
if 'u' in pieces[7]:
pieces[7] = '-' + replace_u_end_day(pieces[7], year=pieces[5],
month=pieces[6])
return ''.join((''.join(pieces[:4]), '/', ''.join(pieces[4:]))) | ['def', 'replace_u', '(', 'matchobj', ')', ':', 'pieces', '=', 'list', '(', 'matchobj', '.', 'groups', '(', "''", ')', ')', '# Replace "u"s in start and end years.', 'if', "'u'", 'in', 'pieces', '[', '1', ']', ':', 'pieces', '[', '1', ']', '=', 'pieces', '[', '1', ']', '.', 'replace', '(', "'u'", ',', "'0'", ')', 'if', "'u'", 'in', 'pieces', '[', '5', ']', ':', 'pieces', '[', '5', ']', '=', 'pieces', '[', '5', ']', '.', 'replace', '(', "'u'", ',', "'9'", ')', '# Replace "u"s in start month.', 'if', "'u'", 'in', 'pieces', '[', '2', ']', ':', 'pieces', '[', '2', ']', '=', "'-'", '+', 'replace_u_start_month', '(', 'pieces', '[', '2', ']', ')', '# Replace "u"s in end month.', 'if', "'u'", 'in', 'pieces', '[', '6', ']', ':', 'pieces', '[', '6', ']', '=', "'-'", '+', 'replace_u_end_month', '(', 'pieces', '[', '6', ']', ')', '# Replace "u"s in start day.', 'if', "'u'", 'in', 'pieces', '[', '3', ']', ':', 'pieces', '[', '3', ']', '=', "'-'", '+', 'replace_u_start_day', '(', 'pieces', '[', '3', ']', ')', '# Replace "u"s in end day.', 'if', "'u'", 'in', 'pieces', '[', '7', ']', ':', 'pieces', '[', '7', ']', '=', "'-'", '+', 'replace_u_end_day', '(', 'pieces', '[', '7', ']', ',', 'year', '=', 'pieces', '[', '5', ']', ',', 'month', '=', 'pieces', '[', '6', ']', ')', 'return', "''", '.', 'join', '(', '(', "''", '.', 'join', '(', 'pieces', '[', ':', '4', ']', ')', ',', "'/'", ',', "''", '.', 'join', '(', 'pieces', '[', '4', ':', ']', ')', ')', ')'] | Break the interval into parts, and replace 'u's.
pieces - [pos/neg, start_year, start_month, start_day,
pos/neg, end_year, end_month, end_day] | ['Break', 'the', 'interval', 'into', 'parts', 'and', 'replace', 'u', 's', '.'] | train | https://github.com/unt-libraries/edtf-validate/blob/d6d63141919a66aea4ff1c31fa0cb8ff744ef9d9/edtf_validate/valid_edtf.py#L326-L351 |
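A simplified, standalone illustration of the year handling only: unspecified 'u' digits widen the interval toward the earliest possible start and the latest possible end; month and day handling in the real module goes through its replace_u_*_month/day helpers.
start_year, end_year = '19uu', '20uu'
print(start_year.replace('u', '0') + '/' + end_year.replace('u', '9'))   # 1900/2099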
6,833 | KelSolaar/Foundations | foundations/rotating_backup.py | RotatingBackup.destination | def destination(self, value):
"""
Setter for **self.__destination** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"destination", value)
self.__destination = value | python | def destination(self, value):
"""
Setter for **self.__destination** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"destination", value)
self.__destination = value | ['def', 'destination', '(', 'self', ',', 'value', ')', ':', 'if', 'value', 'is', 'not', 'None', ':', 'assert', 'type', '(', 'value', ')', 'is', 'unicode', ',', '"\'{0}\' attribute: \'{1}\' type is not \'unicode\'!"', '.', 'format', '(', '"destination"', ',', 'value', ')', 'self', '.', '__destination', '=', 'value'] | Setter for **self.__destination** attribute.
:param value: Attribute value.
:type value: unicode | ['Setter', 'for', '**', 'self', '.', '__destination', '**', 'attribute', '.'] | train | https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/rotating_backup.py#L136-L147 |
6,834 | linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.updateEmailConfig | def updateEmailConfig(self, Corpnum, EmailType, SendYN, UserID=None):
""" 알림메일 전송설정 수정
args
CorpNum : 팝빌회원 사업자번호
EmailType: 메일전송유형
SendYN: 전송여부 (True-전송, False-미전송)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if EmailType == None or EmailType == '':
raise PopbillException(-99999999, "메일전송 타입이 입력되지 않았습니다.")
if SendYN == None or SendYN == '':
raise PopbillException(-99999999, "메일전송 여부 항목이 입력되지 않았습니다.")
uri = "/Cashbill/EmailSendConfig?EmailType=" + EmailType + "&SendYN=" + str(SendYN)
return self._httppost(uri, "", Corpnum, UserID) | python | def updateEmailConfig(self, Corpnum, EmailType, SendYN, UserID=None):
""" 알림메일 전송설정 수정
args
CorpNum : 팝빌회원 사업자번호
EmailType: 메일전송유형
SendYN: 전송여부 (True-전송, False-미전송)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if EmailType == None or EmailType == '':
raise PopbillException(-99999999, "메일전송 타입이 입력되지 않았습니다.")
if SendYN == None or SendYN == '':
raise PopbillException(-99999999, "메일전송 여부 항목이 입력되지 않았습니다.")
uri = "/Cashbill/EmailSendConfig?EmailType=" + EmailType + "&SendYN=" + str(SendYN)
return self._httppost(uri, "", Corpnum, UserID) | ['def', 'updateEmailConfig', '(', 'self', ',', 'Corpnum', ',', 'EmailType', ',', 'SendYN', ',', 'UserID', '=', 'None', ')', ':', 'if', 'EmailType', '==', 'None', 'or', 'EmailType', '==', "''", ':', 'raise', 'PopbillException', '(', '-', '99999999', ',', '"메일전송 타입이 입력되지 않았습니다.")\r', '', 'if', 'SendYN', '==', 'None', 'or', 'SendYN', '==', "''", ':', 'raise', 'PopbillException', '(', '-', '99999999', ',', '"메일전송 여부 항목이 입력되지 않았습니다.")\r', '', 'uri', '=', '"/Cashbill/EmailSendConfig?EmailType="', '+', 'EmailType', '+', '"&SendYN="', '+', 'str', '(', 'SendYN', ')', 'return', 'self', '.', '_httppost', '(', 'uri', ',', '""', ',', 'Corpnum', ',', 'UserID', ')'] | 알림메일 전송설정 수정
args
CorpNum : Popbill member business registration number
EmailType: e-mail sending type
SendYN: whether to send (True - send, False - do not send)
UserID : Popbill member user ID
return
Processing result. Consists of code and message
raise
PopbillException | ['알림메일', '전송설정', '수정', 'args', 'CorpNum', ':', '팝빌회원', '사업자번호', 'EmailType', ':', '메일전송유형', 'SendYN', ':', '전송여부', '(', 'True', '-', '전송', 'False', '-', '미전송', ')', 'UserID', ':', '팝빌회원', '아이디', 'return', '처리결과', '.', 'consist', 'of', 'code', 'and', 'message', 'raise', 'PopbillException'] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L596-L615 |
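A usage sketch; the CashbillService constructor arguments, corporate number, EmailType code, user ID, and the code/message attributes on the response are placeholders or assumptions.
from popbill import CashbillService   # assumed top-level export

cashbillService = CashbillService('LinkID', 'SecretKey')    # placeholder credentials
# Disable one notification mail type; 'CSH_ISSUE' is an illustrative EmailType code.
response = cashbillService.updateEmailConfig('1234567890', 'CSH_ISSUE', False, 'testkorea')
print(response.code, response.message)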
6,835 | urbn/Caesium | caesium/handler.py | BaseHandler.write_hyper_response | def write_hyper_response(self, links=[], meta={}, entity_name=None, entity=None, notifications=[], actions=[]):
"""Writes a hyper media response object
:param list links: A list of links to the resources
:param dict meta: The meta data for this response
:param str entity_name: The entity name
:param object entity: The Entity itself
:param list notifications: List of notifications
:param list actions: List of actions
"""
assert entity_name is not None
assert entity is not None
meta.update({
"status": self.get_status()
})
self.write({
"links": links,
"meta": meta,
entity_name: entity,
"notifications": notifications,
"actions": actions
}) | python | def write_hyper_response(self, links=[], meta={}, entity_name=None, entity=None, notifications=[], actions=[]):
"""Writes a hyper media response object
:param list links: A list of links to the resources
:param dict meta: The meta data for this response
:param str entity_name: The entity name
:param object entity: The Entity itself
:param list notifications: List of notifications
:param list actions: List of actions
"""
assert entity_name is not None
assert entity is not None
meta.update({
"status": self.get_status()
})
self.write({
"links": links,
"meta": meta,
entity_name: entity,
"notifications": notifications,
"actions": actions
}) | ['def', 'write_hyper_response', '(', 'self', ',', 'links', '=', '[', ']', ',', 'meta', '=', '{', '}', ',', 'entity_name', '=', 'None', ',', 'entity', '=', 'None', ',', 'notifications', '=', '[', ']', ',', 'actions', '=', '[', ']', ')', ':', 'assert', 'entity_name', 'is', 'not', 'None', 'assert', 'entity', 'is', 'not', 'None', 'meta', '.', 'update', '(', '{', '"status"', ':', 'self', '.', 'get_status', '(', ')', '}', ')', 'self', '.', 'write', '(', '{', '"links"', ':', 'links', ',', '"meta"', ':', 'meta', ',', 'entity_name', ':', 'entity', ',', '"notifications"', ':', 'notifications', ',', '"actions"', ':', 'actions', '}', ')'] | Writes a hyper media response object
:param list links: A list of links to the resources
:param dict meta: The meta data for this response
:param str entity_name: The entity name
:param object entity: The Entity itself
:param list notifications: List of notifications
:param list actions: List of actions | ['Writes', 'a', 'hyper', 'media', 'response', 'object'] | train | https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L252-L275 |
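A sketch of calling the helper from a request handler; the import path is inferred from the record and the widget payload and link URIs are illustrative only.
from caesium.handler import BaseHandler   # import path inferred from the record

class WidgetHandler(BaseHandler):
    def get(self, widget_id):
        self.write_hyper_response(
            links=[{'rel': 'self', 'href': '/widgets/%s' % widget_id}],
            meta={'page': 1},
            entity_name='widget',
            entity={'id': widget_id, 'name': 'sprocket'},
        )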
6,836 | spacetelescope/drizzlepac | drizzlepac/tweakback.py | determine_orig_wcsname | def determine_orig_wcsname(header, wnames, wkeys):
"""
Determine the name of the original, unmodified WCS solution
"""
orig_wcsname = None
orig_key = None
if orig_wcsname is None:
for k,w in wnames.items():
if w[:4] == 'IDC_':
orig_wcsname = w
orig_key = k
break
if orig_wcsname is None:
# No IDC_ wcsname found... revert to second to last if available
if len(wnames) > 1:
orig_key = wkeys[-2]
orig_wcsname = wnames[orig_key]
return orig_wcsname,orig_key | python | def determine_orig_wcsname(header, wnames, wkeys):
"""
Determine the name of the original, unmodified WCS solution
"""
orig_wcsname = None
orig_key = None
if orig_wcsname is None:
for k,w in wnames.items():
if w[:4] == 'IDC_':
orig_wcsname = w
orig_key = k
break
if orig_wcsname is None:
# No IDC_ wcsname found... revert to second to last if available
if len(wnames) > 1:
orig_key = wkeys[-2]
orig_wcsname = wnames[orig_key]
return orig_wcsname,orig_key | ['def', 'determine_orig_wcsname', '(', 'header', ',', 'wnames', ',', 'wkeys', ')', ':', 'orig_wcsname', '=', 'None', 'orig_key', '=', 'None', 'if', 'orig_wcsname', 'is', 'None', ':', 'for', 'k', ',', 'w', 'in', 'wnames', '.', 'items', '(', ')', ':', 'if', 'w', '[', ':', '4', ']', '==', "'IDC_'", ':', 'orig_wcsname', '=', 'w', 'orig_key', '=', 'k', 'break', 'if', 'orig_wcsname', 'is', 'None', ':', '# No IDC_ wcsname found... revert to second to last if available', 'if', 'len', '(', 'wnames', ')', '>', '1', ':', 'orig_key', '=', 'wkeys', '[', '-', '2', ']', 'orig_wcsname', '=', 'wnames', '[', 'orig_key', ']', 'return', 'orig_wcsname', ',', 'orig_key'] | Determine the name of the original, unmodified WCS solution | ['Determine', 'the', 'name', 'of', 'the', 'original', 'unmodified', 'WCS', 'solution'] | train | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/tweakback.py#L379-L396 |
6,837 | pacificclimate/cfmeta | cfmeta/cmip5file.py | get_cmor_fp_meta | def get_cmor_fp_meta(fp):
"""Processes a CMOR style file path.
Section 3.1 of the `Data Reference Syntax`_ details:
The standard CMIP5 output tool CMOR optionally writes output files
to a directory structure mapping DRS components to directory names as:
<activity>/<product>/<institute>/<model>/<experiment>/<frequency>/
<modeling_realm>/<variable_name>/<ensemble_member>/<CMOR filename>.nc
Arguments:
fp (str): A file path conforming to DRS spec.
Returns:
dict: Metadata as extracted from the file path.
.. _Data Reference Syntax:
http://cmip-pcmdi.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf
"""
# Copy metadata list then reverse to start at end of path
directory_meta = list(CMIP5_FP_ATTS)
# Prefer meta extracted from filename
meta = get_dir_meta(fp, directory_meta)
meta.update(get_cmor_fname_meta(fp))
return meta | python | def get_cmor_fp_meta(fp):
"""Processes a CMOR style file path.
Section 3.1 of the `Data Reference Syntax`_ details:
The standard CMIP5 output tool CMOR optionally writes output files
to a directory structure mapping DRS components to directory names as:
<activity>/<product>/<institute>/<model>/<experiment>/<frequency>/
<modeling_realm>/<variable_name>/<ensemble_member>/<CMOR filename>.nc
Arguments:
fp (str): A file path conforming to DRS spec.
Returns:
dict: Metadata as extracted from the file path.
.. _Data Reference Syntax:
http://cmip-pcmdi.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf
"""
# Copy metadata list then reverse to start at end of path
directory_meta = list(CMIP5_FP_ATTS)
# Prefer meta extracted from filename
meta = get_dir_meta(fp, directory_meta)
meta.update(get_cmor_fname_meta(fp))
return meta | ['def', 'get_cmor_fp_meta', '(', 'fp', ')', ':', '# Copy metadata list then reverse to start at end of path', 'directory_meta', '=', 'list', '(', 'CMIP5_FP_ATTS', ')', '# Prefer meta extracted from filename', 'meta', '=', 'get_dir_meta', '(', 'fp', ',', 'directory_meta', ')', 'meta', '.', 'update', '(', 'get_cmor_fname_meta', '(', 'fp', ')', ')', 'return', 'meta'] | Processes a CMOR style file path.
Section 3.1 of the `Data Reference Syntax`_ details:
The standard CMIP5 output tool CMOR optionally writes output files
to a directory structure mapping DRS components to directory names as:
<activity>/<product>/<institute>/<model>/<experiment>/<frequency>/
<modeling_realm>/<variable_name>/<ensemble_member>/<CMOR filename>.nc
Arguments:
fp (str): A file path conforming to DRS spec.
Returns:
dict: Metadata as extracted from the file path.
.. _Data Reference Syntax:
http://cmip-pcmdi.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf | ['Processes', 'a', 'CMOR', 'style', 'file', 'path', '.'] | train | https://github.com/pacificclimate/cfmeta/blob/a6eef78d0bce523bb44920ba96233f034b60316a/cfmeta/cmip5file.py#L124-L152 |
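A usage sketch with an illustrative CMOR-style directory path; the import path is taken from the record and the model/experiment values are examples only.
from cfmeta.cmip5file import get_cmor_fp_meta

fp = ('cmip5/output1/MOHC/HadCM3/historical/mon/atmos/tas/r1i1p1/'
      'tas_Amon_HadCM3_historical_r1i1p1_185912-200512.nc')
meta = get_cmor_fp_meta(fp)
print(meta)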
6,838 | jopohl/urh | src/urh/controller/CompareFrameController.py | CompareFrameController.protocols | def protocols(self):
"""
:rtype: dict[int, list of ProtocolAnalyzer]
"""
if self.__protocols is None:
self.__protocols = self.proto_tree_model.protocols
return self.__protocols | python | def protocols(self):
"""
:rtype: dict[int, list of ProtocolAnalyzer]
"""
if self.__protocols is None:
self.__protocols = self.proto_tree_model.protocols
return self.__protocols | ['def', 'protocols', '(', 'self', ')', ':', 'if', 'self', '.', '__protocols', 'is', 'None', ':', 'self', '.', '__protocols', '=', 'self', '.', 'proto_tree_model', '.', 'protocols', 'return', 'self', '.', '__protocols'] | :rtype: dict[int, list of ProtocolAnalyzer] | [':', 'rtype', ':', 'dict', '[', 'int', 'list', 'of', 'ProtocolAnalyzer', ']'] | train | https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/controller/CompareFrameController.py#L201-L207 |
6,839 | softlayer/softlayer-python | SoftLayer/CLI/virt/create_options.py | cli | def cli(env):
"""Virtual server order options."""
vsi = SoftLayer.VSManager(env.client)
result = vsi.get_create_options()
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
# Datacenters
datacenters = [dc['template']['datacenter']['name']
for dc in result['datacenters']]
datacenters = sorted(datacenters)
table.add_row(['datacenter',
formatting.listing(datacenters, separator='\n')])
def _add_flavor_rows(flavor_key, flavor_label, flavor_options):
flavors = []
for flavor_option in flavor_options:
flavor_key_name = utils.lookup(flavor_option, 'flavor', 'keyName')
if not flavor_key_name.startswith(flavor_key):
continue
flavors.append(flavor_key_name)
if len(flavors) > 0:
table.add_row(['flavors (%s)' % flavor_label,
formatting.listing(flavors, separator='\n')])
if result.get('flavors', None):
_add_flavor_rows('B1', 'balanced', result['flavors'])
_add_flavor_rows('BL1', 'balanced local - hdd', result['flavors'])
_add_flavor_rows('BL2', 'balanced local - ssd', result['flavors'])
_add_flavor_rows('C1', 'compute', result['flavors'])
_add_flavor_rows('M1', 'memory', result['flavors'])
_add_flavor_rows('AC', 'GPU', result['flavors'])
# CPUs
standard_cpus = [int(x['template']['startCpus']) for x in result['processors']
if not x['template'].get('dedicatedAccountHostOnlyFlag',
False)
and not x['template'].get('dedicatedHost', None)]
ded_cpus = [int(x['template']['startCpus']) for x in result['processors']
if x['template'].get('dedicatedAccountHostOnlyFlag', False)]
ded_host_cpus = [int(x['template']['startCpus']) for x in result['processors']
if x['template'].get('dedicatedHost', None)]
standard_cpus = sorted(standard_cpus)
table.add_row(['cpus (standard)', formatting.listing(standard_cpus, separator=',')])
ded_cpus = sorted(ded_cpus)
table.add_row(['cpus (dedicated)', formatting.listing(ded_cpus, separator=',')])
ded_host_cpus = sorted(ded_host_cpus)
table.add_row(['cpus (dedicated host)', formatting.listing(ded_host_cpus, separator=',')])
# Memory
memory = [int(m['template']['maxMemory']) for m in result['memory']
if not m['itemPrice'].get('dedicatedHostInstanceFlag', False)]
ded_host_memory = [int(m['template']['maxMemory']) for m in result['memory']
if m['itemPrice'].get('dedicatedHostInstanceFlag', False)]
memory = sorted(memory)
table.add_row(['memory',
formatting.listing(memory, separator=',')])
ded_host_memory = sorted(ded_host_memory)
table.add_row(['memory (dedicated host)',
formatting.listing(ded_host_memory, separator=',')])
# Operating Systems
op_sys = [o['template']['operatingSystemReferenceCode'] for o in
result['operatingSystems']]
op_sys = sorted(op_sys)
os_summary = set()
for operating_system in op_sys:
os_summary.add(operating_system[0:operating_system.find('_')])
for summary in sorted(os_summary):
table.add_row([
'os (%s)' % summary,
os.linesep.join(sorted([x for x in op_sys
if x[0:len(summary)] == summary]))
])
# Disk
local_disks = [x for x in result['blockDevices']
if x['template'].get('localDiskFlag', False)
and not x['itemPrice'].get('dedicatedHostInstanceFlag',
False)]
ded_host_local_disks = [x for x in result['blockDevices']
if x['template'].get('localDiskFlag', False)
and x['itemPrice'].get('dedicatedHostInstanceFlag',
False)]
san_disks = [x for x in result['blockDevices']
if not x['template'].get('localDiskFlag', False)]
def add_block_rows(disks, name):
"""Add block rows to the table."""
simple = {}
for disk in disks:
block = disk['template']['blockDevices'][0]
bid = block['device']
if bid not in simple:
simple[bid] = []
simple[bid].append(str(block['diskImage']['capacity']))
for label in sorted(simple):
table.add_row(['%s disk(%s)' % (name, label),
formatting.listing(simple[label],
separator=',')])
add_block_rows(san_disks, 'san')
add_block_rows(local_disks, 'local')
add_block_rows(ded_host_local_disks, 'local (dedicated host)')
# Network
speeds = []
ded_host_speeds = []
for option in result['networkComponents']:
template = option.get('template', None)
price = option.get('itemPrice', None)
if not template or not price \
or not template.get('networkComponents', None):
continue
if not template['networkComponents'][0] \
or not template['networkComponents'][0].get('maxSpeed', None):
continue
max_speed = str(template['networkComponents'][0]['maxSpeed'])
if price.get('dedicatedHostInstanceFlag', False) \
and max_speed not in ded_host_speeds:
ded_host_speeds.append(max_speed)
elif max_speed not in speeds:
speeds.append(max_speed)
speeds = sorted(speeds)
table.add_row(['nic', formatting.listing(speeds, separator=',')])
ded_host_speeds = sorted(ded_host_speeds)
table.add_row(['nic (dedicated host)',
formatting.listing(ded_host_speeds, separator=',')])
env.fout(table) | python | def cli(env):
"""Virtual server order options."""
vsi = SoftLayer.VSManager(env.client)
result = vsi.get_create_options()
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
# Datacenters
datacenters = [dc['template']['datacenter']['name']
for dc in result['datacenters']]
datacenters = sorted(datacenters)
table.add_row(['datacenter',
formatting.listing(datacenters, separator='\n')])
def _add_flavor_rows(flavor_key, flavor_label, flavor_options):
flavors = []
for flavor_option in flavor_options:
flavor_key_name = utils.lookup(flavor_option, 'flavor', 'keyName')
if not flavor_key_name.startswith(flavor_key):
continue
flavors.append(flavor_key_name)
if len(flavors) > 0:
table.add_row(['flavors (%s)' % flavor_label,
formatting.listing(flavors, separator='\n')])
if result.get('flavors', None):
_add_flavor_rows('B1', 'balanced', result['flavors'])
_add_flavor_rows('BL1', 'balanced local - hdd', result['flavors'])
_add_flavor_rows('BL2', 'balanced local - ssd', result['flavors'])
_add_flavor_rows('C1', 'compute', result['flavors'])
_add_flavor_rows('M1', 'memory', result['flavors'])
_add_flavor_rows('AC', 'GPU', result['flavors'])
# CPUs
standard_cpus = [int(x['template']['startCpus']) for x in result['processors']
if not x['template'].get('dedicatedAccountHostOnlyFlag',
False)
and not x['template'].get('dedicatedHost', None)]
ded_cpus = [int(x['template']['startCpus']) for x in result['processors']
if x['template'].get('dedicatedAccountHostOnlyFlag', False)]
ded_host_cpus = [int(x['template']['startCpus']) for x in result['processors']
if x['template'].get('dedicatedHost', None)]
standard_cpus = sorted(standard_cpus)
table.add_row(['cpus (standard)', formatting.listing(standard_cpus, separator=',')])
ded_cpus = sorted(ded_cpus)
table.add_row(['cpus (dedicated)', formatting.listing(ded_cpus, separator=',')])
ded_host_cpus = sorted(ded_host_cpus)
table.add_row(['cpus (dedicated host)', formatting.listing(ded_host_cpus, separator=',')])
# Memory
memory = [int(m['template']['maxMemory']) for m in result['memory']
if not m['itemPrice'].get('dedicatedHostInstanceFlag', False)]
ded_host_memory = [int(m['template']['maxMemory']) for m in result['memory']
if m['itemPrice'].get('dedicatedHostInstanceFlag', False)]
memory = sorted(memory)
table.add_row(['memory',
formatting.listing(memory, separator=',')])
ded_host_memory = sorted(ded_host_memory)
table.add_row(['memory (dedicated host)',
formatting.listing(ded_host_memory, separator=',')])
# Operating Systems
op_sys = [o['template']['operatingSystemReferenceCode'] for o in
result['operatingSystems']]
op_sys = sorted(op_sys)
os_summary = set()
for operating_system in op_sys:
os_summary.add(operating_system[0:operating_system.find('_')])
for summary in sorted(os_summary):
table.add_row([
'os (%s)' % summary,
os.linesep.join(sorted([x for x in op_sys
if x[0:len(summary)] == summary]))
])
# Disk
local_disks = [x for x in result['blockDevices']
if x['template'].get('localDiskFlag', False)
and not x['itemPrice'].get('dedicatedHostInstanceFlag',
False)]
ded_host_local_disks = [x for x in result['blockDevices']
if x['template'].get('localDiskFlag', False)
and x['itemPrice'].get('dedicatedHostInstanceFlag',
False)]
san_disks = [x for x in result['blockDevices']
if not x['template'].get('localDiskFlag', False)]
def add_block_rows(disks, name):
"""Add block rows to the table."""
simple = {}
for disk in disks:
block = disk['template']['blockDevices'][0]
bid = block['device']
if bid not in simple:
simple[bid] = []
simple[bid].append(str(block['diskImage']['capacity']))
for label in sorted(simple):
table.add_row(['%s disk(%s)' % (name, label),
formatting.listing(simple[label],
separator=',')])
add_block_rows(san_disks, 'san')
add_block_rows(local_disks, 'local')
add_block_rows(ded_host_local_disks, 'local (dedicated host)')
# Network
speeds = []
ded_host_speeds = []
for option in result['networkComponents']:
template = option.get('template', None)
price = option.get('itemPrice', None)
if not template or not price \
or not template.get('networkComponents', None):
continue
if not template['networkComponents'][0] \
or not template['networkComponents'][0].get('maxSpeed', None):
continue
max_speed = str(template['networkComponents'][0]['maxSpeed'])
if price.get('dedicatedHostInstanceFlag', False) \
and max_speed not in ded_host_speeds:
ded_host_speeds.append(max_speed)
elif max_speed not in speeds:
speeds.append(max_speed)
speeds = sorted(speeds)
table.add_row(['nic', formatting.listing(speeds, separator=',')])
ded_host_speeds = sorted(ded_host_speeds)
table.add_row(['nic (dedicated host)',
formatting.listing(ded_host_speeds, separator=',')])
env.fout(table) | ['def', 'cli', '(', 'env', ')', ':', 'vsi', '=', 'SoftLayer', '.', 'VSManager', '(', 'env', '.', 'client', ')', 'result', '=', 'vsi', '.', 'get_create_options', '(', ')', 'table', '=', 'formatting', '.', 'KeyValueTable', '(', '[', "'name'", ',', "'value'", ']', ')', 'table', '.', 'align', '[', "'name'", ']', '=', "'r'", 'table', '.', 'align', '[', "'value'", ']', '=', "'l'", '# Datacenters', 'datacenters', '=', '[', 'dc', '[', "'template'", ']', '[', "'datacenter'", ']', '[', "'name'", ']', 'for', 'dc', 'in', 'result', '[', "'datacenters'", ']', ']', 'datacenters', '=', 'sorted', '(', 'datacenters', ')', 'table', '.', 'add_row', '(', '[', "'datacenter'", ',', 'formatting', '.', 'listing', '(', 'datacenters', ',', 'separator', '=', "'\\n'", ')', ']', ')', 'def', '_add_flavor_rows', '(', 'flavor_key', ',', 'flavor_label', ',', 'flavor_options', ')', ':', 'flavors', '=', '[', ']', 'for', 'flavor_option', 'in', 'flavor_options', ':', 'flavor_key_name', '=', 'utils', '.', 'lookup', '(', 'flavor_option', ',', "'flavor'", ',', "'keyName'", ')', 'if', 'not', 'flavor_key_name', '.', 'startswith', '(', 'flavor_key', ')', ':', 'continue', 'flavors', '.', 'append', '(', 'flavor_key_name', ')', 'if', 'len', '(', 'flavors', ')', '>', '0', ':', 'table', '.', 'add_row', '(', '[', "'flavors (%s)'", '%', 'flavor_label', ',', 'formatting', '.', 'listing', '(', 'flavors', ',', 'separator', '=', "'\\n'", ')', ']', ')', 'if', 'result', '.', 'get', '(', "'flavors'", ',', 'None', ')', ':', '_add_flavor_rows', '(', "'B1'", ',', "'balanced'", ',', 'result', '[', "'flavors'", ']', ')', '_add_flavor_rows', '(', "'BL1'", ',', "'balanced local - hdd'", ',', 'result', '[', "'flavors'", ']', ')', '_add_flavor_rows', '(', "'BL2'", ',', "'balanced local - ssd'", ',', 'result', '[', "'flavors'", ']', ')', '_add_flavor_rows', '(', "'C1'", ',', "'compute'", ',', 'result', '[', "'flavors'", ']', ')', '_add_flavor_rows', '(', "'M1'", ',', "'memory'", ',', 'result', '[', "'flavors'", ']', ')', '_add_flavor_rows', '(', "'AC'", ',', "'GPU'", ',', 'result', '[', "'flavors'", ']', ')', '# CPUs', 'standard_cpus', '=', '[', 'int', '(', 'x', '[', "'template'", ']', '[', "'startCpus'", ']', ')', 'for', 'x', 'in', 'result', '[', "'processors'", ']', 'if', 'not', 'x', '[', "'template'", ']', '.', 'get', '(', "'dedicatedAccountHostOnlyFlag'", ',', 'False', ')', 'and', 'not', 'x', '[', "'template'", ']', '.', 'get', '(', "'dedicatedHost'", ',', 'None', ')', ']', 'ded_cpus', '=', '[', 'int', '(', 'x', '[', "'template'", ']', '[', "'startCpus'", ']', ')', 'for', 'x', 'in', 'result', '[', "'processors'", ']', 'if', 'x', '[', "'template'", ']', '.', 'get', '(', "'dedicatedAccountHostOnlyFlag'", ',', 'False', ')', ']', 'ded_host_cpus', '=', '[', 'int', '(', 'x', '[', "'template'", ']', '[', "'startCpus'", ']', ')', 'for', 'x', 'in', 'result', '[', "'processors'", ']', 'if', 'x', '[', "'template'", ']', '.', 'get', '(', "'dedicatedHost'", ',', 'None', ')', ']', 'standard_cpus', '=', 'sorted', '(', 'standard_cpus', ')', 'table', '.', 'add_row', '(', '[', "'cpus (standard)'", ',', 'formatting', '.', 'listing', '(', 'standard_cpus', ',', 'separator', '=', "','", ')', ']', ')', 'ded_cpus', '=', 'sorted', '(', 'ded_cpus', ')', 'table', '.', 'add_row', '(', '[', "'cpus (dedicated)'", ',', 'formatting', '.', 'listing', '(', 'ded_cpus', ',', 'separator', '=', "','", ')', ']', ')', 'ded_host_cpus', '=', 'sorted', '(', 'ded_host_cpus', ')', 'table', '.', 'add_row', '(', '[', "'cpus (dedicated host)'", ',', 'formatting', '.', 'listing', 
'(', 'ded_host_cpus', ',', 'separator', '=', "','", ')', ']', ')', '# Memory', 'memory', '=', '[', 'int', '(', 'm', '[', "'template'", ']', '[', "'maxMemory'", ']', ')', 'for', 'm', 'in', 'result', '[', "'memory'", ']', 'if', 'not', 'm', '[', "'itemPrice'", ']', '.', 'get', '(', "'dedicatedHostInstanceFlag'", ',', 'False', ')', ']', 'ded_host_memory', '=', '[', 'int', '(', 'm', '[', "'template'", ']', '[', "'maxMemory'", ']', ')', 'for', 'm', 'in', 'result', '[', "'memory'", ']', 'if', 'm', '[', "'itemPrice'", ']', '.', 'get', '(', "'dedicatedHostInstanceFlag'", ',', 'False', ')', ']', 'memory', '=', 'sorted', '(', 'memory', ')', 'table', '.', 'add_row', '(', '[', "'memory'", ',', 'formatting', '.', 'listing', '(', 'memory', ',', 'separator', '=', "','", ')', ']', ')', 'ded_host_memory', '=', 'sorted', '(', 'ded_host_memory', ')', 'table', '.', 'add_row', '(', '[', "'memory (dedicated host)'", ',', 'formatting', '.', 'listing', '(', 'ded_host_memory', ',', 'separator', '=', "','", ')', ']', ')', '# Operating Systems', 'op_sys', '=', '[', 'o', '[', "'template'", ']', '[', "'operatingSystemReferenceCode'", ']', 'for', 'o', 'in', 'result', '[', "'operatingSystems'", ']', ']', 'op_sys', '=', 'sorted', '(', 'op_sys', ')', 'os_summary', '=', 'set', '(', ')', 'for', 'operating_system', 'in', 'op_sys', ':', 'os_summary', '.', 'add', '(', 'operating_system', '[', '0', ':', 'operating_system', '.', 'find', '(', "'_'", ')', ']', ')', 'for', 'summary', 'in', 'sorted', '(', 'os_summary', ')', ':', 'table', '.', 'add_row', '(', '[', "'os (%s)'", '%', 'summary', ',', 'os', '.', 'linesep', '.', 'join', '(', 'sorted', '(', '[', 'x', 'for', 'x', 'in', 'op_sys', 'if', 'x', '[', '0', ':', 'len', '(', 'summary', ')', ']', '==', 'summary', ']', ')', ')', ']', ')', '# Disk', 'local_disks', '=', '[', 'x', 'for', 'x', 'in', 'result', '[', "'blockDevices'", ']', 'if', 'x', '[', "'template'", ']', '.', 'get', '(', "'localDiskFlag'", ',', 'False', ')', 'and', 'not', 'x', '[', "'itemPrice'", ']', '.', 'get', '(', "'dedicatedHostInstanceFlag'", ',', 'False', ')', ']', 'ded_host_local_disks', '=', '[', 'x', 'for', 'x', 'in', 'result', '[', "'blockDevices'", ']', 'if', 'x', '[', "'template'", ']', '.', 'get', '(', "'localDiskFlag'", ',', 'False', ')', 'and', 'x', '[', "'itemPrice'", ']', '.', 'get', '(', "'dedicatedHostInstanceFlag'", ',', 'False', ')', ']', 'san_disks', '=', '[', 'x', 'for', 'x', 'in', 'result', '[', "'blockDevices'", ']', 'if', 'not', 'x', '[', "'template'", ']', '.', 'get', '(', "'localDiskFlag'", ',', 'False', ')', ']', 'def', 'add_block_rows', '(', 'disks', ',', 'name', ')', ':', '"""Add block rows to the table."""', 'simple', '=', '{', '}', 'for', 'disk', 'in', 'disks', ':', 'block', '=', 'disk', '[', "'template'", ']', '[', "'blockDevices'", ']', '[', '0', ']', 'bid', '=', 'block', '[', "'device'", ']', 'if', 'bid', 'not', 'in', 'simple', ':', 'simple', '[', 'bid', ']', '=', '[', ']', 'simple', '[', 'bid', ']', '.', 'append', '(', 'str', '(', 'block', '[', "'diskImage'", ']', '[', "'capacity'", ']', ')', ')', 'for', 'label', 'in', 'sorted', '(', 'simple', ')', ':', 'table', '.', 'add_row', '(', '[', "'%s disk(%s)'", '%', '(', 'name', ',', 'label', ')', ',', 'formatting', '.', 'listing', '(', 'simple', '[', 'label', ']', ',', 'separator', '=', "','", ')', ']', ')', 'add_block_rows', '(', 'san_disks', ',', "'san'", ')', 'add_block_rows', '(', 'local_disks', ',', "'local'", ')', 'add_block_rows', '(', 'ded_host_local_disks', ',', "'local (dedicated host)'", ')', '# Network', 'speeds', '=', '[', ']', 
'ded_host_speeds', '=', '[', ']', 'for', 'option', 'in', 'result', '[', "'networkComponents'", ']', ':', 'template', '=', 'option', '.', 'get', '(', "'template'", ',', 'None', ')', 'price', '=', 'option', '.', 'get', '(', "'itemPrice'", ',', 'None', ')', 'if', 'not', 'template', 'or', 'not', 'price', 'or', 'not', 'template', '.', 'get', '(', "'networkComponents'", ',', 'None', ')', ':', 'continue', 'if', 'not', 'template', '[', "'networkComponents'", ']', '[', '0', ']', 'or', 'not', 'template', '[', "'networkComponents'", ']', '[', '0', ']', '.', 'get', '(', "'maxSpeed'", ',', 'None', ')', ':', 'continue', 'max_speed', '=', 'str', '(', 'template', '[', "'networkComponents'", ']', '[', '0', ']', '[', "'maxSpeed'", ']', ')', 'if', 'price', '.', 'get', '(', "'dedicatedHostInstanceFlag'", ',', 'False', ')', 'and', 'max_speed', 'not', 'in', 'ded_host_speeds', ':', 'ded_host_speeds', '.', 'append', '(', 'max_speed', ')', 'elif', 'max_speed', 'not', 'in', 'speeds', ':', 'speeds', '.', 'append', '(', 'max_speed', ')', 'speeds', '=', 'sorted', '(', 'speeds', ')', 'table', '.', 'add_row', '(', '[', "'nic'", ',', 'formatting', '.', 'listing', '(', 'speeds', ',', 'separator', '=', "','", ')', ']', ')', 'ded_host_speeds', '=', 'sorted', '(', 'ded_host_speeds', ')', 'table', '.', 'add_row', '(', '[', "'nic (dedicated host)'", ',', 'formatting', '.', 'listing', '(', 'ded_host_speeds', ',', 'separator', '=', "','", ')', ']', ')', 'env', '.', 'fout', '(', 'table', ')'] | Virtual server order options. | ['Virtual', 'server', 'order', 'options', '.'] | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/virt/create_options.py#L17-L169 |
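The disk listing in the record above hinges on grouping capacities by device id inside add_block_rows; a stand-alone sketch of that bucketing step, using made-up sample data rather than SoftLayer API output, looks like this.

# Sample data only; real values come from the SoftLayer ordering API.
disks = [{"device": 0, "capacity": 25}, {"device": 0, "capacity": 100},
         {"device": 2, "capacity": 300}]
by_device = {}
for disk in disks:
    by_device.setdefault(disk["device"], []).append(str(disk["capacity"]))
for device in sorted(by_device):
    print("disk(%s): %s" % (device, ",".join(by_device[device])))
# disk(0): 25,100
# disk(2): 300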
6,840 | shaypal5/strct | strct/sortedlists/sortedlist.py | find_point_in_section_list | def find_point_in_section_list(point, section_list):
"""Returns the start of the section the given point belongs to.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31], so the points -32, 4.5,
32 and 100 all match no section, while 5 and 7.5 match [5-8) and so for
them the function returns 5, and 30, 30.7 and 31 all match [30-31].
Parameters
---------
point : float
The point for which to match a section.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
float
The start of the section the given point belongs to. None if no match
was found.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_point_in_section_list(4, seclist)
>>> find_point_in_section_list(5, seclist)
5
>>> find_point_in_section_list(27, seclist)
8
>>> find_point_in_section_list(31, seclist)
30
"""
if point < section_list[0] or point > section_list[-1]:
return None
if point in section_list:
if point == section_list[-1]:
return section_list[-2]
ind = section_list.bisect(point)-1
if ind == 0:
return section_list[0]
return section_list[ind]
try:
ind = section_list.bisect(point)
return section_list[ind-1]
except IndexError:
return None | python | def find_point_in_section_list(point, section_list):
"""Returns the start of the section the given point belongs to.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31], so the points -32, 4.5,
32 and 100 all match no section, while 5 and 7.5 match [5-8) and so for
them the function returns 5, and 30, 30.7 and 31 all match [30-31].
Parameters
---------
point : float
The point for which to match a section.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
float
The start of the section the given point belongs to. None if no match
was found.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_point_in_section_list(4, seclist)
>>> find_point_in_section_list(5, seclist)
5
>>> find_point_in_section_list(27, seclist)
8
>>> find_point_in_section_list(31, seclist)
30
"""
if point < section_list[0] or point > section_list[-1]:
return None
if point in section_list:
if point == section_list[-1]:
return section_list[-2]
ind = section_list.bisect(point)-1
if ind == 0:
return section_list[0]
return section_list[ind]
try:
ind = section_list.bisect(point)
return section_list[ind-1]
except IndexError:
return None | ['def', 'find_point_in_section_list', '(', 'point', ',', 'section_list', ')', ':', 'if', 'point', '<', 'section_list', '[', '0', ']', 'or', 'point', '>', 'section_list', '[', '-', '1', ']', ':', 'return', 'None', 'if', 'point', 'in', 'section_list', ':', 'if', 'point', '==', 'section_list', '[', '-', '1', ']', ':', 'return', 'section_list', '[', '-', '2', ']', 'ind', '=', 'section_list', '.', 'bisect', '(', 'point', ')', '-', '1', 'if', 'ind', '==', '0', ':', 'return', 'section_list', '[', '0', ']', 'return', 'section_list', '[', 'ind', ']', 'try', ':', 'ind', '=', 'section_list', '.', 'bisect', '(', 'point', ')', 'return', 'section_list', '[', 'ind', '-', '1', ']', 'except', 'IndexError', ':', 'return', 'None'] | Returns the start of the section the given point belongs to.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31], so the points -32, 4.5,
32 and 100 all match no section, while 5 and 7.5 match [5-8) and so for
them the function returns 5, and 30, 30.7 and 31 all match [30-31].
Parameters
---------
point : float
The point for which to match a section.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
float
The start of the section the given point belongs to. None if no match
was found.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_point_in_section_list(4, seclist)
>>> find_point_in_section_list(5, seclist)
5
>>> find_point_in_section_list(27, seclist)
8
>>> find_point_in_section_list(31, seclist)
30 | ['Returns', 'the', 'start', 'of', 'the', 'section', 'the', 'given', 'point', 'belongs', 'to', '.'] | train | https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/sortedlists/sortedlist.py#L4-L53 |
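The same lookup can be reproduced with the standard-library bisect module; this sketch mirrors the behaviour documented above but does not depend on sortedcontainers.

import bisect

def section_start(point, starts):
    # starts is a sorted list of section start points; the last item closes the final section.
    if point < starts[0] or point > starts[-1]:
        return None
    if point == starts[-1]:
        return starts[-2]
    return starts[bisect.bisect_right(starts, point) - 1]

assert section_start(4, [5, 8, 30, 31]) is None
assert section_start(27, [5, 8, 30, 31]) == 8
assert section_start(31, [5, 8, 30, 31]) == 30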
6,841 | dmlc/gluon-nlp | scripts/bert/staticbert/static_finetune_squad.py | evaluate | def evaluate():
"""Evaluate the model on validation dataset.
"""
log.info('Loader dev data...')
if version_2:
dev_data = SQuAD('dev', version='2.0')
else:
dev_data = SQuAD('dev', version='1.1')
log.info('Number of records in Train data:{}'.format(len(dev_data)))
dev_dataset = dev_data.transform(
SQuADTransform(
berttoken,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_pad=True,
is_training=False)._transform)
dev_data_transform, _ = preprocess_dataset(
dev_data, SQuADTransform(
berttoken,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_pad=True,
is_training=False))
log.info('The number of examples after preprocessing:{}'.format(
len(dev_data_transform)))
dev_dataloader = mx.gluon.data.DataLoader(
dev_data_transform,
batchify_fn=batchify_fn,
num_workers=4, batch_size=test_batch_size, shuffle=False, last_batch='keep')
log.info('Start predict')
_Result = collections.namedtuple(
'_Result', ['example_id', 'start_logits', 'end_logits'])
all_results = {}
epoch_tic = time.time()
total_num = 0
for data in dev_dataloader:
example_ids, inputs, token_types, valid_length, _, _ = data
total_num += len(inputs)
out = net(inputs.astype('float32').as_in_context(ctx),
token_types.astype('float32').as_in_context(ctx),
valid_length.astype('float32').as_in_context(ctx))
output = nd.split(out, axis=2, num_outputs=2)
start_logits = output[0].reshape((0, -3)).asnumpy()
end_logits = output[1].reshape((0, -3)).asnumpy()
for example_id, start, end in zip(example_ids, start_logits, end_logits):
example_id = example_id.asscalar()
if example_id not in all_results:
all_results[example_id] = []
all_results[example_id].append(
_Result(example_id, start.tolist(), end.tolist()))
epoch_toc = time.time()
log.info('Inference time cost={:.2f} s, Thoughput={:.2f} samples/s'
.format(epoch_toc - epoch_tic,
len(dev_dataloader) / (epoch_toc - epoch_tic)))
log.info('Get prediction results...')
all_predictions, all_nbest_json, scores_diff_json = predictions(
dev_dataset=dev_dataset,
all_results=all_results,
tokenizer=nlp.data.BERTBasicTokenizer(lower=lower),
max_answer_length=max_answer_length,
null_score_diff_threshold=null_score_diff_threshold,
n_best_size=n_best_size,
version_2=version_2)
with open(os.path.join(output_dir, 'predictions.json'),
'w', encoding='utf-8') as all_predictions_write:
all_predictions_write.write(json.dumps(all_predictions))
with open(os.path.join(output_dir, 'nbest_predictions.json'),
'w', encoding='utf-8') as all_predictions_write:
all_predictions_write.write(json.dumps(all_nbest_json))
if version_2:
with open(os.path.join(output_dir, 'null_odds.json'),
'w', encoding='utf-8') as all_predictions_write:
all_predictions_write.write(json.dumps(scores_diff_json))
else:
log.info(get_F1_EM(dev_data, all_predictions)) | python | def evaluate():
"""Evaluate the model on validation dataset.
"""
log.info('Loader dev data...')
if version_2:
dev_data = SQuAD('dev', version='2.0')
else:
dev_data = SQuAD('dev', version='1.1')
log.info('Number of records in Train data:{}'.format(len(dev_data)))
dev_dataset = dev_data.transform(
SQuADTransform(
berttoken,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_pad=True,
is_training=False)._transform)
dev_data_transform, _ = preprocess_dataset(
dev_data, SQuADTransform(
berttoken,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_pad=True,
is_training=False))
log.info('The number of examples after preprocessing:{}'.format(
len(dev_data_transform)))
dev_dataloader = mx.gluon.data.DataLoader(
dev_data_transform,
batchify_fn=batchify_fn,
num_workers=4, batch_size=test_batch_size, shuffle=False, last_batch='keep')
log.info('Start predict')
_Result = collections.namedtuple(
'_Result', ['example_id', 'start_logits', 'end_logits'])
all_results = {}
epoch_tic = time.time()
total_num = 0
for data in dev_dataloader:
example_ids, inputs, token_types, valid_length, _, _ = data
total_num += len(inputs)
out = net(inputs.astype('float32').as_in_context(ctx),
token_types.astype('float32').as_in_context(ctx),
valid_length.astype('float32').as_in_context(ctx))
output = nd.split(out, axis=2, num_outputs=2)
start_logits = output[0].reshape((0, -3)).asnumpy()
end_logits = output[1].reshape((0, -3)).asnumpy()
for example_id, start, end in zip(example_ids, start_logits, end_logits):
example_id = example_id.asscalar()
if example_id not in all_results:
all_results[example_id] = []
all_results[example_id].append(
_Result(example_id, start.tolist(), end.tolist()))
epoch_toc = time.time()
log.info('Inference time cost={:.2f} s, Thoughput={:.2f} samples/s'
.format(epoch_toc - epoch_tic,
len(dev_dataloader) / (epoch_toc - epoch_tic)))
log.info('Get prediction results...')
all_predictions, all_nbest_json, scores_diff_json = predictions(
dev_dataset=dev_dataset,
all_results=all_results,
tokenizer=nlp.data.BERTBasicTokenizer(lower=lower),
max_answer_length=max_answer_length,
null_score_diff_threshold=null_score_diff_threshold,
n_best_size=n_best_size,
version_2=version_2)
with open(os.path.join(output_dir, 'predictions.json'),
'w', encoding='utf-8') as all_predictions_write:
all_predictions_write.write(json.dumps(all_predictions))
with open(os.path.join(output_dir, 'nbest_predictions.json'),
'w', encoding='utf-8') as all_predictions_write:
all_predictions_write.write(json.dumps(all_nbest_json))
if version_2:
with open(os.path.join(output_dir, 'null_odds.json'),
'w', encoding='utf-8') as all_predictions_write:
all_predictions_write.write(json.dumps(scores_diff_json))
else:
log.info(get_F1_EM(dev_data, all_predictions)) | ['def', 'evaluate', '(', ')', ':', 'log', '.', 'info', '(', "'Loader dev data...'", ')', 'if', 'version_2', ':', 'dev_data', '=', 'SQuAD', '(', "'dev'", ',', 'version', '=', "'2.0'", ')', 'else', ':', 'dev_data', '=', 'SQuAD', '(', "'dev'", ',', 'version', '=', "'1.1'", ')', 'log', '.', 'info', '(', "'Number of records in Train data:{}'", '.', 'format', '(', 'len', '(', 'dev_data', ')', ')', ')', 'dev_dataset', '=', 'dev_data', '.', 'transform', '(', 'SQuADTransform', '(', 'berttoken', ',', 'max_seq_length', '=', 'max_seq_length', ',', 'doc_stride', '=', 'doc_stride', ',', 'max_query_length', '=', 'max_query_length', ',', 'is_pad', '=', 'True', ',', 'is_training', '=', 'False', ')', '.', '_transform', ')', 'dev_data_transform', ',', '_', '=', 'preprocess_dataset', '(', 'dev_data', ',', 'SQuADTransform', '(', 'berttoken', ',', 'max_seq_length', '=', 'max_seq_length', ',', 'doc_stride', '=', 'doc_stride', ',', 'max_query_length', '=', 'max_query_length', ',', 'is_pad', '=', 'True', ',', 'is_training', '=', 'False', ')', ')', 'log', '.', 'info', '(', "'The number of examples after preprocessing:{}'", '.', 'format', '(', 'len', '(', 'dev_data_transform', ')', ')', ')', 'dev_dataloader', '=', 'mx', '.', 'gluon', '.', 'data', '.', 'DataLoader', '(', 'dev_data_transform', ',', 'batchify_fn', '=', 'batchify_fn', ',', 'num_workers', '=', '4', ',', 'batch_size', '=', 'test_batch_size', ',', 'shuffle', '=', 'False', ',', 'last_batch', '=', "'keep'", ')', 'log', '.', 'info', '(', "'Start predict'", ')', '_Result', '=', 'collections', '.', 'namedtuple', '(', "'_Result'", ',', '[', "'example_id'", ',', "'start_logits'", ',', "'end_logits'", ']', ')', 'all_results', '=', '{', '}', 'epoch_tic', '=', 'time', '.', 'time', '(', ')', 'total_num', '=', '0', 'for', 'data', 'in', 'dev_dataloader', ':', 'example_ids', ',', 'inputs', ',', 'token_types', ',', 'valid_length', ',', '_', ',', '_', '=', 'data', 'total_num', '+=', 'len', '(', 'inputs', ')', 'out', '=', 'net', '(', 'inputs', '.', 'astype', '(', "'float32'", ')', '.', 'as_in_context', '(', 'ctx', ')', ',', 'token_types', '.', 'astype', '(', "'float32'", ')', '.', 'as_in_context', '(', 'ctx', ')', ',', 'valid_length', '.', 'astype', '(', "'float32'", ')', '.', 'as_in_context', '(', 'ctx', ')', ')', 'output', '=', 'nd', '.', 'split', '(', 'out', ',', 'axis', '=', '2', ',', 'num_outputs', '=', '2', ')', 'start_logits', '=', 'output', '[', '0', ']', '.', 'reshape', '(', '(', '0', ',', '-', '3', ')', ')', '.', 'asnumpy', '(', ')', 'end_logits', '=', 'output', '[', '1', ']', '.', 'reshape', '(', '(', '0', ',', '-', '3', ')', ')', '.', 'asnumpy', '(', ')', 'for', 'example_id', ',', 'start', ',', 'end', 'in', 'zip', '(', 'example_ids', ',', 'start_logits', ',', 'end_logits', ')', ':', 'example_id', '=', 'example_id', '.', 'asscalar', '(', ')', 'if', 'example_id', 'not', 'in', 'all_results', ':', 'all_results', '[', 'example_id', ']', '=', '[', ']', 'all_results', '[', 'example_id', ']', '.', 'append', '(', '_Result', '(', 'example_id', ',', 'start', '.', 'tolist', '(', ')', ',', 'end', '.', 'tolist', '(', ')', ')', ')', 'epoch_toc', '=', 'time', '.', 'time', '(', ')', 'log', '.', 'info', '(', "'Inference time cost={:.2f} s, Thoughput={:.2f} samples/s'", '.', 'format', '(', 'epoch_toc', '-', 'epoch_tic', ',', 'len', '(', 'dev_dataloader', ')', '/', '(', 'epoch_toc', '-', 'epoch_tic', ')', ')', ')', 'log', '.', 'info', '(', "'Get prediction results...'", ')', 'all_predictions', ',', 'all_nbest_json', ',', 
'scores_diff_json', '=', 'predictions', '(', 'dev_dataset', '=', 'dev_dataset', ',', 'all_results', '=', 'all_results', ',', 'tokenizer', '=', 'nlp', '.', 'data', '.', 'BERTBasicTokenizer', '(', 'lower', '=', 'lower', ')', ',', 'max_answer_length', '=', 'max_answer_length', ',', 'null_score_diff_threshold', '=', 'null_score_diff_threshold', ',', 'n_best_size', '=', 'n_best_size', ',', 'version_2', '=', 'version_2', ')', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'output_dir', ',', "'predictions.json'", ')', ',', "'w'", ',', 'encoding', '=', "'utf-8'", ')', 'as', 'all_predictions_write', ':', 'all_predictions_write', '.', 'write', '(', 'json', '.', 'dumps', '(', 'all_predictions', ')', ')', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'output_dir', ',', "'nbest_predictions.json'", ')', ',', "'w'", ',', 'encoding', '=', "'utf-8'", ')', 'as', 'all_predictions_write', ':', 'all_predictions_write', '.', 'write', '(', 'json', '.', 'dumps', '(', 'all_nbest_json', ')', ')', 'if', 'version_2', ':', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'output_dir', ',', "'null_odds.json'", ')', ',', "'w'", ',', 'encoding', '=', "'utf-8'", ')', 'as', 'all_predictions_write', ':', 'all_predictions_write', '.', 'write', '(', 'json', '.', 'dumps', '(', 'scores_diff_json', ')', ')', 'else', ':', 'log', '.', 'info', '(', 'get_F1_EM', '(', 'dev_data', ',', 'all_predictions', ')', ')'] | Evaluate the model on validation dataset. | ['Evaluate', 'the', 'model', 'on', 'validation', 'dataset', '.'] | train | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/staticbert/static_finetune_squad.py#L429-L517 |
6,842 | hydraplatform/hydra-base | hydra_base/lib/users.py | get_usernames_like | def get_usernames_like(username,**kwargs):
"""
Return a list of usernames like the given string.
"""
checkname = "%%%s%%"%username
rs = db.DBSession.query(User.username).filter(User.username.like(checkname)).all()
return [r.username for r in rs] | python | def get_usernames_like(username,**kwargs):
"""
Return a list of usernames like the given string.
"""
checkname = "%%%s%%"%username
rs = db.DBSession.query(User.username).filter(User.username.like(checkname)).all()
return [r.username for r in rs] | ['def', 'get_usernames_like', '(', 'username', ',', '*', '*', 'kwargs', ')', ':', 'checkname', '=', '"%%%s%%"', '%', 'username', 'rs', '=', 'db', '.', 'DBSession', '.', 'query', '(', 'User', '.', 'username', ')', '.', 'filter', '(', 'User', '.', 'username', '.', 'like', '(', 'checkname', ')', ')', '.', 'all', '(', ')', 'return', '[', 'r', '.', 'username', 'for', 'r', 'in', 'rs', ']'] | Return a list of usernames like the given string. | ['Return', 'a', 'list', 'of', 'usernames', 'like', 'the', 'given', 'string', '.'] | train | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/users.py#L75-L81 |
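The doubled percent signs in the query above are only string-formatting escapes; the resulting SQL LIKE pattern simply wraps the name in wildcards.

username = "bob"
checkname = "%%%s%%" % username
print(checkname)   # -> %bob%  (matches any username containing "bob")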
6,843 | chimera0/accel-brain-code | Automatic-Summarization/pysummarization/nlpbase/autoabstractor/n_gram_auto_abstractor.py | NgramAutoAbstractor.set_n | def set_n(self, value):
''' setter '''
if isinstance(value, int):
self.__n = value
else:
raise TypeError("The type of n must be int.") | python | def set_n(self, value):
''' setter '''
if isinstance(value, int):
self.__n = value
else:
raise TypeError("The type of n must be int.") | ['def', 'set_n', '(', 'self', ',', 'value', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'int', ')', ':', 'self', '.', '__n', '=', 'value', 'else', ':', 'raise', 'TypeError', '(', '"The type of n must be int."', ')'] | setter | ['setter'] | train | https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Automatic-Summarization/pysummarization/nlpbase/autoabstractor/n_gram_auto_abstractor.py#L41-L46 |
6,844 | digidotcom/python-devicecloud | devicecloud/__init__.py | DeviceCloudConnection.hostname | def hostname(self):
"""Get the hostname that this connection is associated with"""
from six.moves.urllib.parse import urlparse
return urlparse(self._base_url).netloc.split(':', 1)[0] | python | def hostname(self):
"""Get the hostname that this connection is associated with"""
from six.moves.urllib.parse import urlparse
return urlparse(self._base_url).netloc.split(':', 1)[0] | ['def', 'hostname', '(', 'self', ')', ':', 'from', 'six', '.', 'moves', '.', 'urllib', '.', 'parse', 'import', 'urlparse', 'return', 'urlparse', '(', 'self', '.', '_base_url', ')', '.', 'netloc', '.', 'split', '(', "':'", ',', '1', ')', '[', '0', ']'] | Get the hostname that this connection is associated with | ['Get', 'the', 'hostname', 'that', 'this', 'connection', 'is', 'associated', 'with'] | train | https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/__init__.py#L131-L134 |
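A quick stand-alone illustration of the netloc handling above (the URL is an example, not a real Device Cloud endpoint): the port, if present, is stripped off.

from urllib.parse import urlparse   # six.moves maps to this module on Python 3

base_url = "https://devicecloud.example.com:443/ws"
print(urlparse(base_url).netloc.split(":", 1)[0])   # -> devicecloud.example.com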
6,845 | bspaans/python-mingus | mingus/containers/note_container.py | NoteContainer.remove_note | def remove_note(self, note, octave=-1):
"""Remove note from container.
The note can either be a Note object or a string representing the
note's name. If no specific octave is given, the note gets removed
in every octave.
"""
res = []
for x in self.notes:
if type(note) == str:
if x.name != note:
res.append(x)
else:
if x.octave != octave and octave != -1:
res.append(x)
else:
if x != note:
res.append(x)
self.notes = res
return res | python | def remove_note(self, note, octave=-1):
"""Remove note from container.
The note can either be a Note object or a string representing the
note's name. If no specific octave is given, the note gets removed
in every octave.
"""
res = []
for x in self.notes:
if type(note) == str:
if x.name != note:
res.append(x)
else:
if x.octave != octave and octave != -1:
res.append(x)
else:
if x != note:
res.append(x)
self.notes = res
return res | ['def', 'remove_note', '(', 'self', ',', 'note', ',', 'octave', '=', '-', '1', ')', ':', 'res', '=', '[', ']', 'for', 'x', 'in', 'self', '.', 'notes', ':', 'if', 'type', '(', 'note', ')', '==', 'str', ':', 'if', 'x', '.', 'name', '!=', 'note', ':', 'res', '.', 'append', '(', 'x', ')', 'else', ':', 'if', 'x', '.', 'octave', '!=', 'octave', 'and', 'octave', '!=', '-', '1', ':', 'res', '.', 'append', '(', 'x', ')', 'else', ':', 'if', 'x', '!=', 'note', ':', 'res', '.', 'append', '(', 'x', ')', 'self', '.', 'notes', '=', 'res', 'return', 'res'] | Remove note from container.
The note can either be a Note object or a string representing the
note's name. If no specific octave is given, the note gets removed
in every octave. | ['Remove', 'note', 'from', 'container', '.'] | train | https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/note_container.py#L213-L232 |
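A plain-data sketch of the same filtering rule (the real method operates on mingus Note objects): octave=-1 removes the name in every octave, otherwise only the matching octave is dropped.

def remove_note_sketch(notes, name, octave=-1):
    # notes is a list of (name, octave) tuples standing in for Note objects.
    return [n for n in notes
            if not (n[0] == name and (octave == -1 or n[1] == octave))]

notes = [("C", 4), ("C", 5), ("E", 4)]
print(remove_note_sketch(notes, "C"))      # [('E', 4)]
print(remove_note_sketch(notes, "C", 5))   # [('C', 4), ('E', 4)]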
6,846 | rsalmaso/django-fluo | fluo/views/decorators.py | login_required | def login_required(function=None, required=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that, if required, checks that the user is logged in and redirect
to the log-in page if necessary.
"""
if required:
if django.VERSION < (1, 11):
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
redirect_field_name=redirect_field_name
)
else:
actual_decorator = user_passes_test(
lambda u: u.is_authenticated,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
# login not required
def decorator(view_func):
def _wrapper(request, *args, **kwargs):
return function(request, *args, **kwargs)
return wraps(function)(_wrapper)
return method_decorator(decorator) | python | def login_required(function=None, required=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that, if required, checks that the user is logged in and redirect
to the log-in page if necessary.
"""
if required:
if django.VERSION < (1, 11):
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
redirect_field_name=redirect_field_name
)
else:
actual_decorator = user_passes_test(
lambda u: u.is_authenticated,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
# login not required
def decorator(view_func):
def _wrapper(request, *args, **kwargs):
return function(request, *args, **kwargs)
return wraps(function)(_wrapper)
return method_decorator(decorator) | ['def', 'login_required', '(', 'function', '=', 'None', ',', 'required', '=', 'False', ',', 'redirect_field_name', '=', 'REDIRECT_FIELD_NAME', ')', ':', 'if', 'required', ':', 'if', 'django', '.', 'VERSION', '<', '(', '1', ',', '11', ')', ':', 'actual_decorator', '=', 'user_passes_test', '(', 'lambda', 'u', ':', 'u', '.', 'is_authenticated', '(', ')', ',', 'redirect_field_name', '=', 'redirect_field_name', ')', 'else', ':', 'actual_decorator', '=', 'user_passes_test', '(', 'lambda', 'u', ':', 'u', '.', 'is_authenticated', ',', 'redirect_field_name', '=', 'redirect_field_name', ')', 'if', 'function', ':', 'return', 'actual_decorator', '(', 'function', ')', 'return', 'actual_decorator', '# login not required', 'def', 'decorator', '(', 'view_func', ')', ':', 'def', '_wrapper', '(', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'function', '(', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'wraps', '(', 'function', ')', '(', '_wrapper', ')', 'return', 'method_decorator', '(', 'decorator', ')'] | Decorator for views that, if required, checks that the user is logged in and redirect
to the log-in page if necessary. | ['Decorator', 'for', 'views', 'that', 'if', 'required', 'checks', 'that', 'the', 'user', 'is', 'logged', 'in', 'and', 'redirect', 'to', 'the', 'log', '-', 'in', 'page', 'if', 'necessary', '.'] | train | https://github.com/rsalmaso/django-fluo/blob/1321c1e7d6a912108f79be02a9e7f2108c57f89f/fluo/views/decorators.py#L54-L78 |
6,847 | The-Politico/politico-civic-almanac | almanac/utils/auth.py | secure | def secure(view):
"""
Authentication decorator for views.
If DEBUG is on, we serve the view without authenticating.
Default is 'django.contrib.auth.decorators.login_required'.
Can also be 'django.contrib.admin.views.decorators.staff_member_required'
or a custom decorator.
"""
auth_decorator = import_class(settings.AUTH_DECORATOR)
return (
view if project_settings.DEBUG
else method_decorator(auth_decorator, name='dispatch')(view)
) | python | def secure(view):
"""
Authentication decorator for views.
If DEBUG is on, we serve the view without authenticating.
Default is 'django.contrib.auth.decorators.login_required'.
Can also be 'django.contrib.admin.views.decorators.staff_member_required'
or a custom decorator.
"""
auth_decorator = import_class(settings.AUTH_DECORATOR)
return (
view if project_settings.DEBUG
else method_decorator(auth_decorator, name='dispatch')(view)
) | ['def', 'secure', '(', 'view', ')', ':', 'auth_decorator', '=', 'import_class', '(', 'settings', '.', 'AUTH_DECORATOR', ')', 'return', '(', 'view', 'if', 'project_settings', '.', 'DEBUG', 'else', 'method_decorator', '(', 'auth_decorator', ',', 'name', '=', "'dispatch'", ')', '(', 'view', ')', ')'] | Authentication decorator for views.
If DEBUG is on, we serve the view without authenticating.
Default is 'django.contrib.auth.decorators.login_required'.
Can also be 'django.contrib.admin.views.decorators.staff_member_required'
or a custom decorator. | ['Authentication', 'decorator', 'for', 'views', '.'] | train | https://github.com/The-Politico/politico-civic-almanac/blob/f97521fabd445c8a0fa97a435f6d39f517ef3892/almanac/utils/auth.py#L8-L21 |
6,848 | bfontaine/p7magma | magma/session.py | Session.get_url | def get_url(self, url):
"""
Get an absolute URL from a given one.
"""
if url.startswith('/'):
url = '%s%s' % (self.base_url, url)
return url | python | def get_url(self, url):
"""
Get an absolute URL from a given one.
"""
if url.startswith('/'):
url = '%s%s' % (self.base_url, url)
return url | ['def', 'get_url', '(', 'self', ',', 'url', ')', ':', 'if', 'url', '.', 'startswith', '(', "'/'", ')', ':', 'url', '=', "'%s%s'", '%', '(', 'self', '.', 'base_url', ',', 'url', ')', 'return', 'url'] | Get an absolute URL from a given one. | ['Get', 'an', 'absolute', 'URL', 'from', 'a', 'given', 'one', '.'] | train | https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/session.py#L57-L63 |
6,849 | honeynet/beeswarm | beeswarm/shared/asciify.py | _asciify_list | def _asciify_list(data):
""" Ascii-fies list values """
ret = []
for item in data:
if isinstance(item, unicode):
item = _remove_accents(item)
item = item.encode('utf-8')
elif isinstance(item, list):
item = _asciify_list(item)
elif isinstance(item, dict):
item = _asciify_dict(item)
ret.append(item)
return ret | python | def _asciify_list(data):
""" Ascii-fies list values """
ret = []
for item in data:
if isinstance(item, unicode):
item = _remove_accents(item)
item = item.encode('utf-8')
elif isinstance(item, list):
item = _asciify_list(item)
elif isinstance(item, dict):
item = _asciify_dict(item)
ret.append(item)
return ret | ['def', '_asciify_list', '(', 'data', ')', ':', 'ret', '=', '[', ']', 'for', 'item', 'in', 'data', ':', 'if', 'isinstance', '(', 'item', ',', 'unicode', ')', ':', 'item', '=', '_remove_accents', '(', 'item', ')', 'item', '=', 'item', '.', 'encode', '(', "'utf-8'", ')', 'elif', 'isinstance', '(', 'item', ',', 'list', ')', ':', 'item', '=', '_asciify_list', '(', 'item', ')', 'elif', 'isinstance', '(', 'item', ',', 'dict', ')', ':', 'item', '=', '_asciify_dict', '(', 'item', ')', 'ret', '.', 'append', '(', 'item', ')', 'return', 'ret'] | Ascii-fies list values | ['Ascii', '-', 'fies', 'list', 'values'] | train | https://github.com/honeynet/beeswarm/blob/db51ea0bc29f631c3e3b5312b479ac9d5e31079a/beeswarm/shared/asciify.py#L15-L27 |
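_remove_accents and _asciify_dict live elsewhere in that module; a Python 3 sketch of the underlying accent-stripping idea, using only the standard library, is shown below.

import unicodedata

def strip_accents(text):
    # Decompose accented characters, then drop the combining marks.
    decomposed = unicodedata.normalize("NFKD", text)
    return "".join(ch for ch in decomposed if not unicodedata.combining(ch))

print(strip_accents("Crème brûlée"))   # -> Creme brulee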
6,850 | datascopeanalytics/scrubadub | scrubadub/scrubbers.py | Scrubber.iter_filth | def iter_filth(self, text):
"""Iterate over the different types of filth that can exist.
"""
# currently doing this by aggregating all_filths and then sorting
# inline instead of with a Filth.__cmp__ method, which is apparently
# much slower http://stackoverflow.com/a/988728/564709
#
# NOTE: we could probably do this in a more efficient way by iterating
# over all detectors simultaneously. just trying to get something
# working right now and we can worry about efficiency later
all_filths = []
for detector in self._detectors.values():
for filth in detector.iter_filth(text):
if not isinstance(filth, Filth):
raise TypeError('iter_filth must always yield Filth')
all_filths.append(filth)
# Sort by start position. If two filths start in the same place then
# return the longer one first
all_filths.sort(key=lambda f: (f.beg, -f.end))
# this is where the Scrubber does its hard work and merges any
# overlapping filths.
if not all_filths:
raise StopIteration
filth = all_filths[0]
for next_filth in all_filths[1:]:
if filth.end < next_filth.beg:
yield filth
filth = next_filth
else:
filth = filth.merge(next_filth)
yield filth | python | def iter_filth(self, text):
"""Iterate over the different types of filth that can exist.
"""
# currently doing this by aggregating all_filths and then sorting
# inline instead of with a Filth.__cmp__ method, which is apparently
# much slower http://stackoverflow.com/a/988728/564709
#
# NOTE: we could probably do this in a more efficient way by iterating
# over all detectors simultaneously. just trying to get something
# working right now and we can worry about efficiency later
all_filths = []
for detector in self._detectors.values():
for filth in detector.iter_filth(text):
if not isinstance(filth, Filth):
raise TypeError('iter_filth must always yield Filth')
all_filths.append(filth)
# Sort by start position. If two filths start in the same place then
# return the longer one first
all_filths.sort(key=lambda f: (f.beg, -f.end))
# this is where the Scrubber does its hard work and merges any
# overlapping filths.
if not all_filths:
raise StopIteration
filth = all_filths[0]
for next_filth in all_filths[1:]:
if filth.end < next_filth.beg:
yield filth
filth = next_filth
else:
filth = filth.merge(next_filth)
yield filth | ['def', 'iter_filth', '(', 'self', ',', 'text', ')', ':', '# currently doing this by aggregating all_filths and then sorting', '# inline instead of with a Filth.__cmp__ method, which is apparently', '# much slower http://stackoverflow.com/a/988728/564709', '#', '# NOTE: we could probably do this in a more efficient way by iterating', '# over all detectors simultaneously. just trying to get something', '# working right now and we can worry about efficiency later', 'all_filths', '=', '[', ']', 'for', 'detector', 'in', 'self', '.', '_detectors', '.', 'values', '(', ')', ':', 'for', 'filth', 'in', 'detector', '.', 'iter_filth', '(', 'text', ')', ':', 'if', 'not', 'isinstance', '(', 'filth', ',', 'Filth', ')', ':', 'raise', 'TypeError', '(', "'iter_filth must always yield Filth'", ')', 'all_filths', '.', 'append', '(', 'filth', ')', '# Sort by start position. If two filths start in the same place then', '# return the longer one first', 'all_filths', '.', 'sort', '(', 'key', '=', 'lambda', 'f', ':', '(', 'f', '.', 'beg', ',', '-', 'f', '.', 'end', ')', ')', '# this is where the Scrubber does its hard work and merges any', '# overlapping filths.', 'if', 'not', 'all_filths', ':', 'raise', 'StopIteration', 'filth', '=', 'all_filths', '[', '0', ']', 'for', 'next_filth', 'in', 'all_filths', '[', '1', ':', ']', ':', 'if', 'filth', '.', 'end', '<', 'next_filth', '.', 'beg', ':', 'yield', 'filth', 'filth', '=', 'next_filth', 'else', ':', 'filth', '=', 'filth', '.', 'merge', '(', 'next_filth', ')', 'yield', 'filth'] | Iterate over the different types of filth that can exist. | ['Iterate', 'over', 'the', 'different', 'types', 'of', 'filth', 'that', 'can', 'exist', '.'] | train | https://github.com/datascopeanalytics/scrubadub/blob/914bda49a16130b44af43df6a2f84755477c407c/scrubadub/scrubbers.py#L64-L96 |
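The merge step above is the classic overlapping-interval fold; here is a self-contained sketch on plain (beg, end) tuples (Filth.merge() does the real work in scrubadub).

def merge_spans(spans):
    # Sort by start, longest first on ties, then fold overlapping neighbours together.
    merged = []
    for beg, end in sorted(spans, key=lambda s: (s[0], -s[1])):
        if merged and beg <= merged[-1][1]:
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((beg, end))
    return merged

print(merge_spans([(10, 20), (15, 25), (40, 45)]))   # [(10, 25), (40, 45)]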
6,851 | pkgw/pwkit | pwkit/io.py | Path.read_json | def read_json (self, mode='rt', **kwargs):
"""Use the :mod:`json` module to read in this file as a JSON-formatted data
structure. Keyword arguments are passed to :func:`json.load`. Returns the
read-in data structure.
"""
import json
with self.open (mode=mode) as f:
return json.load (f, **kwargs) | python | def read_json (self, mode='rt', **kwargs):
"""Use the :mod:`json` module to read in this file as a JSON-formatted data
structure. Keyword arguments are passed to :func:`json.load`. Returns the
read-in data structure.
"""
import json
with self.open (mode=mode) as f:
return json.load (f, **kwargs) | ['def', 'read_json', '(', 'self', ',', 'mode', '=', "'rt'", ',', '*', '*', 'kwargs', ')', ':', 'import', 'json', 'with', 'self', '.', 'open', '(', 'mode', '=', 'mode', ')', 'as', 'f', ':', 'return', 'json', '.', 'load', '(', 'f', ',', '*', '*', 'kwargs', ')'] | Use the :mod:`json` module to read in this file as a JSON-formatted data
structure. Keyword arguments are passed to :func:`json.load`. Returns the
read-in data structure. | ['Use', 'the', ':', 'mod', ':', 'json', 'module', 'to', 'read', 'in', 'this', 'file', 'as', 'a', 'JSON', '-', 'formatted', 'data', 'structure', '.', 'Keyword', 'arguments', 'are', 'passed', 'to', ':', 'func', ':', 'json', '.', 'load', '.', 'Returns', 'the', 'read', '-', 'in', 'data', 'structure', '.'] | train | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/io.py#L791-L800 |
6,852 | frmdstryr/enamlx | enamlx/qt/qt_key_event.py | QtKeyEvent.init_widget | def init_widget(self):
""" The KeyEvent uses the parent_widget as it's widget """
super(QtKeyEvent, self).init_widget()
d = self.declaration
widget = self.widget
self._keyPressEvent = widget.keyPressEvent
self._keyReleaseEvent = widget.keyReleaseEvent
self.set_enabled(d.enabled)
self.set_keys(d.keys) | python | def init_widget(self):
""" The KeyEvent uses the parent_widget as it's widget """
super(QtKeyEvent, self).init_widget()
d = self.declaration
widget = self.widget
self._keyPressEvent = widget.keyPressEvent
self._keyReleaseEvent = widget.keyReleaseEvent
self.set_enabled(d.enabled)
self.set_keys(d.keys) | ['def', 'init_widget', '(', 'self', ')', ':', 'super', '(', 'QtKeyEvent', ',', 'self', ')', '.', 'init_widget', '(', ')', 'd', '=', 'self', '.', 'declaration', 'widget', '=', 'self', '.', 'widget', 'self', '.', '_keyPressEvent', '=', 'widget', '.', 'keyPressEvent', 'self', '.', '_keyReleaseEvent', '=', 'widget', '.', 'keyReleaseEvent', 'self', '.', 'set_enabled', '(', 'd', '.', 'enabled', ')', 'self', '.', 'set_keys', '(', 'd', '.', 'keys', ')'] | The KeyEvent uses the parent_widget as it's widget | ['The', 'KeyEvent', 'uses', 'the', 'parent_widget', 'as', 'it', 's', 'widget'] | train | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_key_event.py#L47-L55 |
6,853 | bjodah/pyodesys | pyodesys/symbolic.py | SymbolicSys.get_dfdx | def get_dfdx(self):
""" Calculates 2nd derivatives of ``self.exprs`` """
if self._dfdx is True:
if self.indep is None:
zero = 0*self.be.Dummy()**0
self._dfdx = self.be.Matrix(1, self.ny, [zero]*self.ny)
else:
self._dfdx = self.be.Matrix(1, self.ny, [expr.diff(self.indep) for expr in self.exprs])
elif self._dfdx is False:
return False
return self._dfdx | python | def get_dfdx(self):
""" Calculates 2nd derivatives of ``self.exprs`` """
if self._dfdx is True:
if self.indep is None:
zero = 0*self.be.Dummy()**0
self._dfdx = self.be.Matrix(1, self.ny, [zero]*self.ny)
else:
self._dfdx = self.be.Matrix(1, self.ny, [expr.diff(self.indep) for expr in self.exprs])
elif self._dfdx is False:
return False
return self._dfdx | ['def', 'get_dfdx', '(', 'self', ')', ':', 'if', 'self', '.', '_dfdx', 'is', 'True', ':', 'if', 'self', '.', 'indep', 'is', 'None', ':', 'zero', '=', '0', '*', 'self', '.', 'be', '.', 'Dummy', '(', ')', '**', '0', 'self', '.', '_dfdx', '=', 'self', '.', 'be', '.', 'Matrix', '(', '1', ',', 'self', '.', 'ny', ',', '[', 'zero', ']', '*', 'self', '.', 'ny', ')', 'else', ':', 'self', '.', '_dfdx', '=', 'self', '.', 'be', '.', 'Matrix', '(', '1', ',', 'self', '.', 'ny', ',', '[', 'expr', '.', 'diff', '(', 'self', '.', 'indep', ')', 'for', 'expr', 'in', 'self', '.', 'exprs', ']', ')', 'elif', 'self', '.', '_dfdx', 'is', 'False', ':', 'return', 'False', 'return', 'self', '.', '_dfdx'] | Calculates 2nd derivatives of ``self.exprs`` | ['Calculates', '2nd', 'derivatives', 'of', 'self', '.', 'exprs'] | train | https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L674-L684 |
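What the method computes is the derivative of each right-hand-side expression with respect to the independent variable; a tiny SymPy sketch of that operation (illustrative only, not pyodesys itself):

import sympy as sp

t, y0, y1 = sp.symbols("t y0 y1")
exprs = [sp.sin(t) * y0, y1 - t**2]
dfdx = sp.Matrix(1, len(exprs), [expr.diff(t) for expr in exprs])
print(dfdx)   # Matrix([[y0*cos(t), -2*t]])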
6,854 | jessevdk/cldoc | cldoc/clang/cindex.py | Diagnostic.format | def format(self, options=None):
"""
Format this diagnostic for display. The options argument takes
Diagnostic.Display* flags, which can be combined using bitwise OR. If
the options argument is not provided, the default display options will
be used.
"""
if options is None:
options = conf.lib.clang_defaultDiagnosticDisplayOptions()
if options & ~Diagnostic._FormatOptionsMask:
raise ValueError('Invalid format options')
return conf.lib.clang_formatDiagnostic(self, options) | python | def format(self, options=None):
"""
Format this diagnostic for display. The options argument takes
Diagnostic.Display* flags, which can be combined using bitwise OR. If
the options argument is not provided, the default display options will
be used.
"""
if options is None:
options = conf.lib.clang_defaultDiagnosticDisplayOptions()
if options & ~Diagnostic._FormatOptionsMask:
raise ValueError('Invalid format options')
return conf.lib.clang_formatDiagnostic(self, options) | ['def', 'format', '(', 'self', ',', 'options', '=', 'None', ')', ':', 'if', 'options', 'is', 'None', ':', 'options', '=', 'conf', '.', 'lib', '.', 'clang_defaultDiagnosticDisplayOptions', '(', ')', 'if', 'options', '&', '~', 'Diagnostic', '.', '_FormatOptionsMask', ':', 'raise', 'ValueError', '(', "'Invalid format options'", ')', 'return', 'conf', '.', 'lib', '.', 'clang_formatDiagnostic', '(', 'self', ',', 'options', ')'] | Format this diagnostic for display. The options argument takes
Diagnostic.Display* flags, which can be combined using bitwise OR. If
the options argument is not provided, the default display options will
be used. | ['Format', 'this', 'diagnostic', 'for', 'display', '.', 'The', 'options', 'argument', 'takes', 'Diagnostic', '.', 'Display', '*', 'flags', 'which', 'can', 'be', 'combined', 'using', 'bitwise', 'OR', '.', 'If', 'the', 'options', 'argument', 'is', 'not', 'provided', 'the', 'default', 'display', 'options', 'will', 'be', 'used', '.'] | train | https://github.com/jessevdk/cldoc/blob/fc7f59405c4a891b8367c80a700f5aa3c5c9230c/cldoc/clang/cindex.py#L486-L497 |
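A hedged usage sketch for the clang.cindex bindings shown above, assuming the upstream clang.cindex package and a working libclang install; 'sample.c' is a placeholder.
    from clang.cindex import Index
    index = Index.create()
    tu = index.parse('sample.c')            # placeholder translation unit
    for diag in tu.diagnostics:
        print(diag.format())                # default display options, per the docstring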
6,855 | bwohlberg/sporco | sporco/admm/admm.py | ADMM.ustep | def ustep(self):
"""Dual variable update."""
self.U += self.rsdl_r(self.AX, self.Y) | python | def ustep(self):
"""Dual variable update."""
self.U += self.rsdl_r(self.AX, self.Y) | ['def', 'ustep', '(', 'self', ')', ':', 'self', '.', 'U', '+=', 'self', '.', 'rsdl_r', '(', 'self', '.', 'AX', ',', 'self', '.', 'Y', ')'] | Dual variable update. | ['Dual', 'variable', 'update', '.'] | train | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/admm.py#L433-L436 |
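ADMM.ustep above is the textbook scaled-form dual ascent step, accumulating the primal residual into U; a small NumPy sketch with invented A, B, c, x, z, u follows.
    import numpy as np
    A, B, c = np.eye(3), -np.eye(3), np.zeros(3)
    x, z, u = np.ones(3), np.zeros(3), np.zeros(3)
    r = A @ x + B @ z - c        # primal residual, the role of rsdl_r(AX, Y)
    u = u + r                    # dual update, the role of self.U += ...
    print(u)                     # [1. 1. 1.]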
6,856 | aacanakin/glim | glim/app.py | Glim.flatten_urls | def flatten_urls(self, urls):
"""
Function flatten urls for route grouping feature of glim.
Args
----
urls (dict): a dict of url definitions.
current_key (unknown type): a dict or a string marking the
current key that is used for recursive calls.
ruleset (dict): the ruleset that is eventually returned to
dispatcher.
Returns
-------
ruleset (list): a list of ruleset dict with endpoint, url and method functions
"""
available_methods = ['POST', 'PUT', 'OPTIONS', 'GET', 'DELETE', 'TRACE', 'COPY']
ruleset = []
for route, endpoint in urls.items():
route_pieces = route.split(' ')
try:
methods = url = None
if len(route_pieces) > 1:
methods = [route_pieces[0]]
url = route_pieces[1]
else:
methods = available_methods
url = route_pieces[0]
endpoint_pieces = endpoint.split('.')
if len(endpoint_pieces) > 1:
rule = {'url': url, 'endpoint': endpoint, 'methods': methods}
ruleset.append(rule)
else:
for method in available_methods:
rule = {
'url': url,
'endpoint': '%s.%s' % (endpoint, method.lower()),
'methods': [method]
}
ruleset.append(rule)
except Exception as e:
raise InvalidRouteDefinitionError()
return ruleset | python | def flatten_urls(self, urls):
"""
Function flatten urls for route grouping feature of glim.
Args
----
urls (dict): a dict of url definitions.
current_key (unknown type): a dict or a string marking the
current key that is used for recursive calls.
ruleset (dict): the ruleset that is eventually returned to
dispatcher.
Returns
-------
ruleset (list): a list of ruleset dict with endpoint, url and method functions
"""
available_methods = ['POST', 'PUT', 'OPTIONS', 'GET', 'DELETE', 'TRACE', 'COPY']
ruleset = []
for route, endpoint in urls.items():
route_pieces = route.split(' ')
try:
methods = url = None
if len(route_pieces) > 1:
methods = [route_pieces[0]]
url = route_pieces[1]
else:
methods = available_methods
url = route_pieces[0]
endpoint_pieces = endpoint.split('.')
if len(endpoint_pieces) > 1:
rule = {'url': url, 'endpoint': endpoint, 'methods': methods}
ruleset.append(rule)
else:
for method in available_methods:
rule = {
'url': url,
'endpoint': '%s.%s' % (endpoint, method.lower()),
'methods': [method]
}
ruleset.append(rule)
except Exception as e:
raise InvalidRouteDefinitionError()
return ruleset | ['def', 'flatten_urls', '(', 'self', ',', 'urls', ')', ':', 'available_methods', '=', '[', "'POST'", ',', "'PUT'", ',', "'OPTIONS'", ',', "'GET'", ',', "'DELETE'", ',', "'TRACE'", ',', "'COPY'", ']', 'ruleset', '=', '[', ']', 'for', 'route', ',', 'endpoint', 'in', 'urls', '.', 'items', '(', ')', ':', 'route_pieces', '=', 'route', '.', 'split', '(', "' '", ')', 'try', ':', 'methods', '=', 'url', '=', 'None', 'if', 'len', '(', 'route_pieces', ')', '>', '1', ':', 'methods', '=', '[', 'route_pieces', '[', '0', ']', ']', 'url', '=', 'route_pieces', '[', '1', ']', 'else', ':', 'methods', '=', 'available_methods', 'url', '=', 'route_pieces', '[', '0', ']', 'endpoint_pieces', '=', 'endpoint', '.', 'split', '(', "'.'", ')', 'if', 'len', '(', 'endpoint_pieces', ')', '>', '1', ':', 'rule', '=', '{', "'url'", ':', 'url', ',', "'endpoint'", ':', 'endpoint', ',', "'methods'", ':', 'methods', '}', 'ruleset', '.', 'append', '(', 'rule', ')', 'else', ':', 'for', 'method', 'in', 'available_methods', ':', 'rule', '=', '{', "'url'", ':', 'url', ',', "'endpoint'", ':', "'%s.%s'", '%', '(', 'endpoint', ',', 'method', '.', 'lower', '(', ')', ')', ',', "'methods'", ':', '[', 'method', ']', '}', 'ruleset', '.', 'append', '(', 'rule', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'InvalidRouteDefinitionError', '(', ')', 'return', 'ruleset'] | Function flatten urls for route grouping feature of glim.
Args
----
urls (dict): a dict of url definitions.
current_key (unknown type): a dict or a string marking the
current key that is used for recursive calls.
ruleset (dict): the ruleset that is eventually returned to
dispatcher.
Returns
-------
ruleset (list): a list of ruleset dict with endpoint, url and method functions | ['Function', 'flatten', 'urls', 'for', 'route', 'grouping', 'feature', 'of', 'glim', '.'] | train | https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/app.py#L165-L209 |
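The expected input/output shape of flatten_urls, read directly off the code above; the controller names are made up.
    urls = {
        'POST /api/users': 'UserController.create',   # explicit method, dotted endpoint
        '/posts': 'PostController',                   # no method, bare controller name
    }
    # flatten_urls(urls) should yield one rule for the first entry:
    #   {'url': '/api/users', 'endpoint': 'UserController.create', 'methods': ['POST']}
    # and one rule per available method for the second, e.g.:
    #   {'url': '/posts', 'endpoint': 'PostController.get',  'methods': ['GET']}
    #   {'url': '/posts', 'endpoint': 'PostController.post', 'methods': ['POST']}
    #   ...and likewise for PUT, OPTIONS, DELETE, TRACE and COPY.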
6,857 | RudolfCardinal/pythonlib | cardinal_pythonlib/winservice.py | ProcessManager._taskkill | def _taskkill(self, force: bool = False) -> int:
"""
Executes a Windows ``TASKKILL /pid PROCESS_ID /t`` command
(``/t`` for "tree kill" = "kill all children").
Args:
force: also add ``/f`` (forcefully)
Returns:
return code from ``TASKKILL``
**Test code:**
Firstly we need a program that won't let itself be killed. Save this as
``nokill.py``:
.. code-block:: python
#!/usr/bin/env python
import logging
import time
import os
from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger
from cardinal_pythonlib.signalfunc import trap_ctrl_c_ctrl_break
main_only_quicksetup_rootlogger(level=logging.DEBUG)
trap_ctrl_c_ctrl_break()
while True:
print("Process ID is {}; time is {} s".format(os.getpid(), time.clock()))
time.sleep(1)
Now run that with ``python nokill.py``. It should resist CTRL-C and
CTRL-BREAK. Start another command prompt in which to play with
``TASKKILL``.
.. code-block:: bat
REM Firstly, avoid this single-ampersand syntax:
REM taskkill /im notepad.exe & echo %errorlevel%
REM ... as it prints the WRONG (previous?) errorlevel.
notepad.exe
taskkill /im notepad.exe
echo %errorlevel%
REM ... 0 for success (Windows 10), e.g.
REM 'SUCCESS: Sent termination signal to the process "notepad.exe" with PID 6988.'
taskkill /im notepad.exe
echo %errorlevel%
REM ... 128 for "not found" (Windows 10), e.g.
REM 'ERROR: The process "notepad.exe" not found.'
REM Now run notepad.exe as Administrator
taskkill /im notepad.exe & echo %errorlevel%
REM ... 1 for "access denied" (Windows 10)
REM Now kill the nokill.py process by its PID (e.g. 11892 here):
taskkill /pid 11892
echo %errorlevel%
REM ... 1 for "not allowed" (Windows 10), e.g.
REM 'ERROR: The process with PID 11892 could not be terminated.'
REM 'Reason: This process can only be terminated forcefully (with /F option).'
REM Now forcefully:
taskkill /pid 11892 /f
echo %errorlevel%
REM ... 0 for success (Windows 10), e.g.
REM 'SUCCESS: The process with PID 11892 has been terminated.'
""" # noqa
args = [
"taskkill", # built in to Windows XP and higher
"/pid", str(self.process.pid),
"/t", # tree kill: kill all children
]
if force:
args.append("/f") # forcefully
callname = " ".join(args)
retcode = subprocess.call(args)
# http://stackoverflow.com/questions/18682681/what-are-exit-codes-from-the-taskkill-utility # noqa
if retcode == winerror.ERROR_SUCCESS: # 0
self.info("Killed with " + repr(callname))
elif retcode == winerror.ERROR_INVALID_FUNCTION: # 1
self.warning(
repr(callname) +
" failed (error code 1 = ERROR_INVALID_FUNCTION; "
"can mean 'Access denied', or 'This process can only be "
"terminated forcefully (with /F option)').")
elif retcode == winerror.ERROR_WAIT_NO_CHILDREN: # 128
self.warning(
repr(callname) +
" failed (error code 128 = ERROR_WAIT_NO_CHILDREN "
"= 'There are no child processes to wait for', but also "
"occurs when the process doesn't exist, and when processes "
"require a forceful [/F] termination)")
elif retcode == winerror.ERROR_EA_LIST_INCONSISTENT: # 255
self.warning(
repr(callname) +
" failed (error code 255 = ERROR_EA_LIST_INCONSISTENT "
"= 'The extended attributes are inconsistent.')")
else:
self.warning(callname + " failed: error code {}".format(retcode))
return retcode | python | def _taskkill(self, force: bool = False) -> int:
"""
Executes a Windows ``TASKKILL /pid PROCESS_ID /t`` command
(``/t`` for "tree kill" = "kill all children").
Args:
force: also add ``/f`` (forcefully)
Returns:
return code from ``TASKKILL``
**Test code:**
Firstly we need a program that won't let itself be killed. Save this as
``nokill.py``:
.. code-block:: python
#!/usr/bin/env python
import logging
import time
import os
from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger
from cardinal_pythonlib.signalfunc import trap_ctrl_c_ctrl_break
main_only_quicksetup_rootlogger(level=logging.DEBUG)
trap_ctrl_c_ctrl_break()
while True:
print("Process ID is {}; time is {} s".format(os.getpid(), time.clock()))
time.sleep(1)
Now run that with ``python nokill.py``. It should resist CTRL-C and
CTRL-BREAK. Start another command prompt in which to play with
``TASKKILL``.
.. code-block:: bat
REM Firstly, avoid this single-ampersand syntax:
REM taskkill /im notepad.exe & echo %errorlevel%
REM ... as it prints the WRONG (previous?) errorlevel.
notepad.exe
taskkill /im notepad.exe
echo %errorlevel%
REM ... 0 for success (Windows 10), e.g.
REM 'SUCCESS: Sent termination signal to the process "notepad.exe" with PID 6988.'
taskkill /im notepad.exe
echo %errorlevel%
REM ... 128 for "not found" (Windows 10), e.g.
REM 'ERROR: The process "notepad.exe" not found.'
REM Now run notepad.exe as Administrator
taskkill /im notepad.exe & echo %errorlevel%
REM ... 1 for "access denied" (Windows 10)
REM Now kill the nokill.py process by its PID (e.g. 11892 here):
taskkill /pid 11892
echo %errorlevel%
REM ... 1 for "not allowed" (Windows 10), e.g.
REM 'ERROR: The process with PID 11892 could not be terminated.'
REM 'Reason: This process can only be terminated forcefully (with /F option).'
REM Now forcefully:
taskkill /pid 11892 /f
echo %errorlevel%
REM ... 0 for success (Windows 10), e.g.
REM 'SUCCESS: The process with PID 11892 has been terminated.'
""" # noqa
args = [
"taskkill", # built in to Windows XP and higher
"/pid", str(self.process.pid),
"/t", # tree kill: kill all children
]
if force:
args.append("/f") # forcefully
callname = " ".join(args)
retcode = subprocess.call(args)
# http://stackoverflow.com/questions/18682681/what-are-exit-codes-from-the-taskkill-utility # noqa
if retcode == winerror.ERROR_SUCCESS: # 0
self.info("Killed with " + repr(callname))
elif retcode == winerror.ERROR_INVALID_FUNCTION: # 1
self.warning(
repr(callname) +
" failed (error code 1 = ERROR_INVALID_FUNCTION; "
"can mean 'Access denied', or 'This process can only be "
"terminated forcefully (with /F option)').")
elif retcode == winerror.ERROR_WAIT_NO_CHILDREN: # 128
self.warning(
repr(callname) +
" failed (error code 128 = ERROR_WAIT_NO_CHILDREN "
"= 'There are no child processes to wait for', but also "
"occurs when the process doesn't exist, and when processes "
"require a forceful [/F] termination)")
elif retcode == winerror.ERROR_EA_LIST_INCONSISTENT: # 255
self.warning(
repr(callname) +
" failed (error code 255 = ERROR_EA_LIST_INCONSISTENT "
"= 'The extended attributes are inconsistent.')")
else:
self.warning(callname + " failed: error code {}".format(retcode))
return retcode | ['def', '_taskkill', '(', 'self', ',', 'force', ':', 'bool', '=', 'False', ')', '->', 'int', ':', '# noqa', 'args', '=', '[', '"taskkill"', ',', '# built in to Windows XP and higher', '"/pid"', ',', 'str', '(', 'self', '.', 'process', '.', 'pid', ')', ',', '"/t"', ',', '# tree kill: kill all children', ']', 'if', 'force', ':', 'args', '.', 'append', '(', '"/f"', ')', '# forcefully', 'callname', '=', '" "', '.', 'join', '(', 'args', ')', 'retcode', '=', 'subprocess', '.', 'call', '(', 'args', ')', '# http://stackoverflow.com/questions/18682681/what-are-exit-codes-from-the-taskkill-utility # noqa', 'if', 'retcode', '==', 'winerror', '.', 'ERROR_SUCCESS', ':', '# 0', 'self', '.', 'info', '(', '"Killed with "', '+', 'repr', '(', 'callname', ')', ')', 'elif', 'retcode', '==', 'winerror', '.', 'ERROR_INVALID_FUNCTION', ':', '# 1', 'self', '.', 'warning', '(', 'repr', '(', 'callname', ')', '+', '" failed (error code 1 = ERROR_INVALID_FUNCTION; "', '"can mean \'Access denied\', or \'This process can only be "', '"terminated forcefully (with /F option)\')."', ')', 'elif', 'retcode', '==', 'winerror', '.', 'ERROR_WAIT_NO_CHILDREN', ':', '# 128', 'self', '.', 'warning', '(', 'repr', '(', 'callname', ')', '+', '" failed (error code 128 = ERROR_WAIT_NO_CHILDREN "', '"= \'There are no child processes to wait for\', but also "', '"occurs when the process doesn\'t exist, and when processes "', '"require a forceful [/F] termination)"', ')', 'elif', 'retcode', '==', 'winerror', '.', 'ERROR_EA_LIST_INCONSISTENT', ':', '# 255', 'self', '.', 'warning', '(', 'repr', '(', 'callname', ')', '+', '" failed (error code 255 = ERROR_EA_LIST_INCONSISTENT "', '"= \'The extended attributes are inconsistent.\')"', ')', 'else', ':', 'self', '.', 'warning', '(', 'callname', '+', '" failed: error code {}"', '.', 'format', '(', 'retcode', ')', ')', 'return', 'retcode'] | Executes a Windows ``TASKKILL /pid PROCESS_ID /t`` command
(``/t`` for "tree kill" = "kill all children").
Args:
force: also add ``/f`` (forcefully)
Returns:
return code from ``TASKKILL``
**Test code:**
Firstly we need a program that won't let itself be killed. Save this as
``nokill.py``:
.. code-block:: python
#!/usr/bin/env python
import logging
import time
import os
from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger
from cardinal_pythonlib.signalfunc import trap_ctrl_c_ctrl_break
main_only_quicksetup_rootlogger(level=logging.DEBUG)
trap_ctrl_c_ctrl_break()
while True:
print("Process ID is {}; time is {} s".format(os.getpid(), time.clock()))
time.sleep(1)
Now run that with ``python nokill.py``. It should resist CTRL-C and
CTRL-BREAK. Start another command prompt in which to play with
``TASKKILL``.
.. code-block:: bat
REM Firstly, avoid this single-ampersand syntax:
REM taskkill /im notepad.exe & echo %errorlevel%
REM ... as it prints the WRONG (previous?) errorlevel.
notepad.exe
taskkill /im notepad.exe
echo %errorlevel%
REM ... 0 for success (Windows 10), e.g.
REM 'SUCCESS: Sent termination signal to the process "notepad.exe" with PID 6988.'
taskkill /im notepad.exe
echo %errorlevel%
REM ... 128 for "not found" (Windows 10), e.g.
REM 'ERROR: The process "notepad.exe" not found.'
REM Now run notepad.exe as Administrator
taskkill /im notepad.exe & echo %errorlevel%
REM ... 1 for "access denied" (Windows 10)
REM Now kill the nokill.py process by its PID (e.g. 11892 here):
taskkill /pid 11892
echo %errorlevel%
REM ... 1 for "not allowed" (Windows 10), e.g.
REM 'ERROR: The process with PID 11892 could not be terminated.'
REM 'Reason: This process can only be terminated forcefully (with /F option).'
REM Now forcefully:
taskkill /pid 11892 /f
echo %errorlevel%
REM ... 0 for success (Windows 10), e.g.
REM 'SUCCESS: The process with PID 11892 has been terminated.' | ['Executes', 'a', 'Windows', 'TASKKILL', '/', 'pid', 'PROCESS_ID', '/', 't', 'command', '(', '/', 't', 'for', 'tree', 'kill', '=', 'kill', 'all', 'children', ')', '.'] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/winservice.py#L571-L675 |
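Outside the class, the same TASKKILL invocation reduces to a single subprocess call; the PID below is hypothetical.
    import subprocess
    retcode = subprocess.call(["taskkill", "/pid", "11892", "/t", "/f"])
    print(retcode)   # 0 on success; 128 when the PID does not exist (see the notes above)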
6,858 | IceflowRE/unidown | unidown/plugin/link_item.py | LinkItem.to_protobuf | def to_protobuf(self) -> LinkItemProto:
"""
Create protobuf item.
:return: protobuf structure
:rtype: ~unidown.plugin.protobuf.link_item_pb2.LinkItemProto
"""
result = LinkItemProto()
result.name = self._name
result.time.CopyFrom(datetime_to_timestamp(self._time))
return result | python | def to_protobuf(self) -> LinkItemProto:
"""
Create protobuf item.
:return: protobuf structure
:rtype: ~unidown.plugin.protobuf.link_item_pb2.LinkItemProto
"""
result = LinkItemProto()
result.name = self._name
result.time.CopyFrom(datetime_to_timestamp(self._time))
return result | ['def', 'to_protobuf', '(', 'self', ')', '->', 'LinkItemProto', ':', 'result', '=', 'LinkItemProto', '(', ')', 'result', '.', 'name', '=', 'self', '.', '_name', 'result', '.', 'time', '.', 'CopyFrom', '(', 'datetime_to_timestamp', '(', 'self', '.', '_time', ')', ')', 'return', 'result'] | Create protobuf item.
:return: protobuf structure
:rtype: ~unidown.plugin.protobuf.link_item_pb2.LinkItemProto | ['Create', 'protobuf', 'item', '.'] | train | https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/plugin/link_item.py#L70-L80 |
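datetime_to_timestamp is presumably a thin wrapper over the stock protobuf helper; a generic sketch of the conversion using google.protobuf directly, with an arbitrary example date.
    from datetime import datetime
    from google.protobuf.timestamp_pb2 import Timestamp
    ts = Timestamp()
    ts.FromDatetime(datetime(2019, 1, 1))   # naive datetimes are treated as UTC
    print(ts.seconds)                       # 1546300800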
6,859 | ambitioninc/django-manager-utils | manager_utils/upsert2.py | _fetch | def _fetch(
queryset, model_objs, unique_fields, update_fields, returning, sync,
ignore_duplicate_updates=True, return_untouched=False
):
"""
Perfom the upsert and do an optional sync operation
"""
model = queryset.model
if (return_untouched or sync) and returning is not True:
returning = set(returning) if returning else set()
returning.add(model._meta.pk.name)
upserted = []
deleted = []
# We must return untouched rows when doing a sync operation
return_untouched = True if sync else return_untouched
if model_objs:
sql, sql_args = _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
with connection.cursor() as cursor:
cursor.execute(sql, sql_args)
if cursor.description:
nt_result = namedtuple('Result', [col[0] for col in cursor.description])
upserted = [nt_result(*row) for row in cursor.fetchall()]
pk_field = model._meta.pk.name
if sync:
orig_ids = queryset.values_list(pk_field, flat=True)
deleted = set(orig_ids) - {getattr(r, pk_field) for r in upserted}
model.objects.filter(pk__in=deleted).delete()
nt_deleted_result = namedtuple('DeletedResult', [model._meta.pk.name, 'status_'])
return UpsertResult(
upserted + [nt_deleted_result(**{pk_field: d, 'status_': 'd'}) for d in deleted]
) | python | def _fetch(
queryset, model_objs, unique_fields, update_fields, returning, sync,
ignore_duplicate_updates=True, return_untouched=False
):
"""
Perfom the upsert and do an optional sync operation
"""
model = queryset.model
if (return_untouched or sync) and returning is not True:
returning = set(returning) if returning else set()
returning.add(model._meta.pk.name)
upserted = []
deleted = []
# We must return untouched rows when doing a sync operation
return_untouched = True if sync else return_untouched
if model_objs:
sql, sql_args = _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
with connection.cursor() as cursor:
cursor.execute(sql, sql_args)
if cursor.description:
nt_result = namedtuple('Result', [col[0] for col in cursor.description])
upserted = [nt_result(*row) for row in cursor.fetchall()]
pk_field = model._meta.pk.name
if sync:
orig_ids = queryset.values_list(pk_field, flat=True)
deleted = set(orig_ids) - {getattr(r, pk_field) for r in upserted}
model.objects.filter(pk__in=deleted).delete()
nt_deleted_result = namedtuple('DeletedResult', [model._meta.pk.name, 'status_'])
return UpsertResult(
upserted + [nt_deleted_result(**{pk_field: d, 'status_': 'd'}) for d in deleted]
) | ['def', '_fetch', '(', 'queryset', ',', 'model_objs', ',', 'unique_fields', ',', 'update_fields', ',', 'returning', ',', 'sync', ',', 'ignore_duplicate_updates', '=', 'True', ',', 'return_untouched', '=', 'False', ')', ':', 'model', '=', 'queryset', '.', 'model', 'if', '(', 'return_untouched', 'or', 'sync', ')', 'and', 'returning', 'is', 'not', 'True', ':', 'returning', '=', 'set', '(', 'returning', ')', 'if', 'returning', 'else', 'set', '(', ')', 'returning', '.', 'add', '(', 'model', '.', '_meta', '.', 'pk', '.', 'name', ')', 'upserted', '=', '[', ']', 'deleted', '=', '[', ']', '# We must return untouched rows when doing a sync operation', 'return_untouched', '=', 'True', 'if', 'sync', 'else', 'return_untouched', 'if', 'model_objs', ':', 'sql', ',', 'sql_args', '=', '_get_upsert_sql', '(', 'queryset', ',', 'model_objs', ',', 'unique_fields', ',', 'update_fields', ',', 'returning', ',', 'ignore_duplicate_updates', '=', 'ignore_duplicate_updates', ',', 'return_untouched', '=', 'return_untouched', ')', 'with', 'connection', '.', 'cursor', '(', ')', 'as', 'cursor', ':', 'cursor', '.', 'execute', '(', 'sql', ',', 'sql_args', ')', 'if', 'cursor', '.', 'description', ':', 'nt_result', '=', 'namedtuple', '(', "'Result'", ',', '[', 'col', '[', '0', ']', 'for', 'col', 'in', 'cursor', '.', 'description', ']', ')', 'upserted', '=', '[', 'nt_result', '(', '*', 'row', ')', 'for', 'row', 'in', 'cursor', '.', 'fetchall', '(', ')', ']', 'pk_field', '=', 'model', '.', '_meta', '.', 'pk', '.', 'name', 'if', 'sync', ':', 'orig_ids', '=', 'queryset', '.', 'values_list', '(', 'pk_field', ',', 'flat', '=', 'True', ')', 'deleted', '=', 'set', '(', 'orig_ids', ')', '-', '{', 'getattr', '(', 'r', ',', 'pk_field', ')', 'for', 'r', 'in', 'upserted', '}', 'model', '.', 'objects', '.', 'filter', '(', 'pk__in', '=', 'deleted', ')', '.', 'delete', '(', ')', 'nt_deleted_result', '=', 'namedtuple', '(', "'DeletedResult'", ',', '[', 'model', '.', '_meta', '.', 'pk', '.', 'name', ',', "'status_'", ']', ')', 'return', 'UpsertResult', '(', 'upserted', '+', '[', 'nt_deleted_result', '(', '*', '*', '{', 'pk_field', ':', 'd', ',', "'status_'", ':', "'d'", '}', ')', 'for', 'd', 'in', 'deleted', ']', ')'] | Perfom the upsert and do an optional sync operation | ['Perfom', 'the', 'upsert', 'and', 'do', 'an', 'optional', 'sync', 'operation'] | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/upsert2.py#L252-L288 |
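A hedged sketch of the kind of PostgreSQL statement _get_upsert_sql builds and _fetch then executes through the raw cursor; the table and column names are invented.
    upsert_sql = """
        INSERT INTO app_account (email, name)
        VALUES (%s, %s)
        ON CONFLICT (email)
        DO UPDATE SET name = EXCLUDED.name
        RETURNING id, email, name
    """
    # with connection.cursor() as cursor:          # django.db.connection, as in _fetch
    #     cursor.execute(upsert_sql, ['a@example.com', 'Alice'])
    #     rows = cursor.fetchall()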
6,860 | facebook/watchman | build/fbcode_builder/shell_quoting.py | shell_comment | def shell_comment(c):
'Do not shell-escape raw strings in comments, but do handle line breaks.'
return ShellQuoted('# {c}').format(c=ShellQuoted(
(raw_shell(c) if isinstance(c, ShellQuoted) else c)
.replace('\n', '\n# ')
)) | python | def shell_comment(c):
'Do not shell-escape raw strings in comments, but do handle line breaks.'
return ShellQuoted('# {c}').format(c=ShellQuoted(
(raw_shell(c) if isinstance(c, ShellQuoted) else c)
.replace('\n', '\n# ')
)) | ['def', 'shell_comment', '(', 'c', ')', ':', 'return', 'ShellQuoted', '(', "'# {c}'", ')', '.', 'format', '(', 'c', '=', 'ShellQuoted', '(', '(', 'raw_shell', '(', 'c', ')', 'if', 'isinstance', '(', 'c', ',', 'ShellQuoted', ')', 'else', 'c', ')', '.', 'replace', '(', "'\\n'", ',', "'\\n# '", ')', ')', ')'] | Do not shell-escape raw strings in comments, but do handle line breaks. | ['Do', 'not', 'shell', '-', 'escape', 'raw', 'strings', 'in', 'comments', 'but', 'do', 'handle', 'line', 'breaks', '.'] | train | https://github.com/facebook/watchman/blob/d416c249dd8f463dc69fc2691d0f890598c045a9/build/fbcode_builder/shell_quoting.py#L94-L99 |
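Expected behaviour of shell_comment, following the replace('\n', '\n# ') logic above; raw_shell comes from the same module.
    comment = shell_comment('first line\nsecond line')
    # raw_shell(comment) == '# first line\n# second line'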
6,861 | orbingol/NURBS-Python | geomdl/compatibility.py | flip_ctrlpts2d_file | def flip_ctrlpts2d_file(file_in='', file_out='ctrlpts_flip.txt'):
""" Flips u and v directions of a 2D control points file and saves flipped coordinates to a file.
:param file_in: name of the input file (to be read)
:type file_in: str
:param file_out: name of the output file (to be saved)
:type file_out: str
:raises IOError: an error occurred reading or writing the file
"""
# Read control points
ctrlpts2d, size_u, size_v = _read_ctrltps2d_file(file_in)
# Flip control points array
new_ctrlpts2d = flip_ctrlpts2d(ctrlpts2d, size_u, size_v)
# Save new control points
_save_ctrlpts2d_file(new_ctrlpts2d, size_u, size_v, file_out) | python | def flip_ctrlpts2d_file(file_in='', file_out='ctrlpts_flip.txt'):
""" Flips u and v directions of a 2D control points file and saves flipped coordinates to a file.
:param file_in: name of the input file (to be read)
:type file_in: str
:param file_out: name of the output file (to be saved)
:type file_out: str
:raises IOError: an error occurred reading or writing the file
"""
# Read control points
ctrlpts2d, size_u, size_v = _read_ctrltps2d_file(file_in)
# Flip control points array
new_ctrlpts2d = flip_ctrlpts2d(ctrlpts2d, size_u, size_v)
# Save new control points
_save_ctrlpts2d_file(new_ctrlpts2d, size_u, size_v, file_out) | ['def', 'flip_ctrlpts2d_file', '(', 'file_in', '=', "''", ',', 'file_out', '=', "'ctrlpts_flip.txt'", ')', ':', '# Read control points', 'ctrlpts2d', ',', 'size_u', ',', 'size_v', '=', '_read_ctrltps2d_file', '(', 'file_in', ')', '# Flip control points array', 'new_ctrlpts2d', '=', 'flip_ctrlpts2d', '(', 'ctrlpts2d', ',', 'size_u', ',', 'size_v', ')', '# Save new control points', '_save_ctrlpts2d_file', '(', 'new_ctrlpts2d', ',', 'size_u', ',', 'size_v', ',', 'file_out', ')'] | Flips u and v directions of a 2D control points file and saves flipped coordinates to a file.
:param file_in: name of the input file (to be read)
:type file_in: str
:param file_out: name of the output file (to be saved)
:type file_out: str
:raises IOError: an error occurred reading or writing the file | ['Flips', 'u', 'and', 'v', 'directions', 'of', 'a', '2D', 'control', 'points', 'file', 'and', 'saves', 'flipped', 'coordinates', 'to', 'a', 'file', '.'] | train | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/compatibility.py#L238-L254 |
6,862 | nicolargo/glances | glances/processes.py | GlancesProcesses.pid_max | def pid_max(self):
"""
Get the maximum PID value.
On Linux, the value is read from the `/proc/sys/kernel/pid_max` file.
From `man 5 proc`:
The default value for this file, 32768, results in the same range of
PIDs as on earlier kernels. On 32-bit platfroms, 32768 is the maximum
value for pid_max. On 64-bit systems, pid_max can be set to any value
up to 2^22 (PID_MAX_LIMIT, approximately 4 million).
If the file is unreadable or not available for whatever reason,
returns None.
Some other OSes:
- On FreeBSD and macOS the maximum is 99999.
- On OpenBSD >= 6.0 the maximum is 99999 (was 32766).
- On NetBSD the maximum is 30000.
:returns: int or None
"""
if LINUX:
# XXX: waiting for https://github.com/giampaolo/psutil/issues/720
try:
with open('/proc/sys/kernel/pid_max', 'rb') as f:
return int(f.read())
except (OSError, IOError):
return None
else:
return None | python | def pid_max(self):
"""
Get the maximum PID value.
On Linux, the value is read from the `/proc/sys/kernel/pid_max` file.
From `man 5 proc`:
The default value for this file, 32768, results in the same range of
PIDs as on earlier kernels. On 32-bit platfroms, 32768 is the maximum
value for pid_max. On 64-bit systems, pid_max can be set to any value
up to 2^22 (PID_MAX_LIMIT, approximately 4 million).
If the file is unreadable or not available for whatever reason,
returns None.
Some other OSes:
- On FreeBSD and macOS the maximum is 99999.
- On OpenBSD >= 6.0 the maximum is 99999 (was 32766).
- On NetBSD the maximum is 30000.
:returns: int or None
"""
if LINUX:
# XXX: waiting for https://github.com/giampaolo/psutil/issues/720
try:
with open('/proc/sys/kernel/pid_max', 'rb') as f:
return int(f.read())
except (OSError, IOError):
return None
else:
return None | ['def', 'pid_max', '(', 'self', ')', ':', 'if', 'LINUX', ':', '# XXX: waiting for https://github.com/giampaolo/psutil/issues/720', 'try', ':', 'with', 'open', '(', "'/proc/sys/kernel/pid_max'", ',', "'rb'", ')', 'as', 'f', ':', 'return', 'int', '(', 'f', '.', 'read', '(', ')', ')', 'except', '(', 'OSError', ',', 'IOError', ')', ':', 'return', 'None', 'else', ':', 'return', 'None'] | Get the maximum PID value.
On Linux, the value is read from the `/proc/sys/kernel/pid_max` file.
From `man 5 proc`:
The default value for this file, 32768, results in the same range of
PIDs as on earlier kernels. On 32-bit platfroms, 32768 is the maximum
value for pid_max. On 64-bit systems, pid_max can be set to any value
up to 2^22 (PID_MAX_LIMIT, approximately 4 million).
If the file is unreadable or not available for whatever reason,
returns None.
Some other OSes:
- On FreeBSD and macOS the maximum is 99999.
- On OpenBSD >= 6.0 the maximum is 99999 (was 32766).
- On NetBSD the maximum is 30000.
:returns: int or None | ['Get', 'the', 'maximum', 'PID', 'value', '.'] | train | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/processes.py#L123-L153 |
6,863 | trevisanj/a99 | a99/gui/a_WBase.py | WBase.add_log_error | def add_log_error(self, x, flag_also_show=False, E=None):
"""Delegates to parent form"""
self.parent_form.add_log_error(x, flag_also_show, E) | python | def add_log_error(self, x, flag_also_show=False, E=None):
"""Delegates to parent form"""
self.parent_form.add_log_error(x, flag_also_show, E) | ['def', 'add_log_error', '(', 'self', ',', 'x', ',', 'flag_also_show', '=', 'False', ',', 'E', '=', 'None', ')', ':', 'self', '.', 'parent_form', '.', 'add_log_error', '(', 'x', ',', 'flag_also_show', ',', 'E', ')'] | Delegates to parent form | ['Delegates', 'to', 'parent', 'form'] | train | https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/gui/a_WBase.py#L38-L40 |
6,864 | Esri/ArcREST | src/arcresthelper/featureservicetools.py | featureservicetools.RemoveAndAddFeatures | def RemoveAndAddFeatures(self, url, pathToFeatureClass, id_field, chunksize=1000):
"""Deletes all features in a feature service and uploads features from a feature class on disk.
Args:
url (str): The URL of the feature service.
pathToFeatureClass (str): The path of the feature class on disk.
id_field (str): The name of the field in the feature class to use for chunking.
chunksize (int): The maximum amount of features to upload at a time. Defaults to 1000.
Raises:
ArcRestHelperError: if ``arcpy`` can't be found.
"""
fl = None
try:
if arcpyFound == False:
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror": "ArcPy required for this function"
})
arcpy.env.overwriteOutput = True
tempaddlayer= 'ewtdwedfew'
if not arcpy.Exists(pathToFeatureClass):
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror": "%s does not exist" % pathToFeatureClass
}
)
fields = arcpy.ListFields(pathToFeatureClass,wild_card=id_field)
if len(fields) == 0:
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror": "%s field does not exist" % id_field
})
strFld = True
if fields[0].type != 'String':
strFld = False
fl = FeatureLayer(
url=url,
securityHandler=self._securityHandler)
id_field_local = arcpy.AddFieldDelimiters(pathToFeatureClass, id_field)
idlist = []
print( arcpy.GetCount_management(in_rows=pathToFeatureClass).getOutput(0) + " features in the layer")
with arcpy.da.SearchCursor(pathToFeatureClass, (id_field)) as cursor:
allidlist = []
for row in cursor:
if (strFld):
idlist.append("'" + row[0] +"'")
else:
idlist.append(row[0])
if len(idlist) >= chunksize:
allidlist.append(idlist)
idlist = []
if len(idlist) > 0:
allidlist.append(idlist)
for idlist in allidlist:
idstring = ' in (' + ','.join(map(str,idlist)) + ')'
sql = id_field + idstring
sqlLocalFC = id_field_local + idstring
results = fl.deleteFeatures(where=sql,
rollbackOnFailure=True)
if 'error' in results:
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror":results['error']
})
elif 'deleteResults' in results:
print ("%s features deleted" % len(results['deleteResults']))
for itm in results['deleteResults']:
if itm['success'] != True:
print (itm)
else:
print (results)
arcpy.MakeFeatureLayer_management(pathToFeatureClass,tempaddlayer,sqlLocalFC)
results = fl.addFeatures(fc=tempaddlayer)
if 'error' in results:
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror":results['error']
})
elif 'addResults' in results:
print ("%s features added" % len(results['addResults']))
for itm in results['addResults']:
if itm['success'] != True:
print (itm)
else:
print (results)
idlist = []
if 'error' in results:
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror":results['error']
})
else:
print (results)
except arcpy.ExecuteError:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "create_report_layers_using_config",
"line": line,
"filename": filename,
"synerror": synerror,
"arcpyError": arcpy.GetMessages(2),
}
)
except:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "AddFeaturesToFeatureLayer",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
gc.collect() | python | def RemoveAndAddFeatures(self, url, pathToFeatureClass, id_field, chunksize=1000):
"""Deletes all features in a feature service and uploads features from a feature class on disk.
Args:
url (str): The URL of the feature service.
pathToFeatureClass (str): The path of the feature class on disk.
id_field (str): The name of the field in the feature class to use for chunking.
chunksize (int): The maximum amount of features to upload at a time. Defaults to 1000.
Raises:
ArcRestHelperError: if ``arcpy`` can't be found.
"""
fl = None
try:
if arcpyFound == False:
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror": "ArcPy required for this function"
})
arcpy.env.overwriteOutput = True
tempaddlayer= 'ewtdwedfew'
if not arcpy.Exists(pathToFeatureClass):
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror": "%s does not exist" % pathToFeatureClass
}
)
fields = arcpy.ListFields(pathToFeatureClass,wild_card=id_field)
if len(fields) == 0:
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror": "%s field does not exist" % id_field
})
strFld = True
if fields[0].type != 'String':
strFld = False
fl = FeatureLayer(
url=url,
securityHandler=self._securityHandler)
id_field_local = arcpy.AddFieldDelimiters(pathToFeatureClass, id_field)
idlist = []
print( arcpy.GetCount_management(in_rows=pathToFeatureClass).getOutput(0) + " features in the layer")
with arcpy.da.SearchCursor(pathToFeatureClass, (id_field)) as cursor:
allidlist = []
for row in cursor:
if (strFld):
idlist.append("'" + row[0] +"'")
else:
idlist.append(row[0])
if len(idlist) >= chunksize:
allidlist.append(idlist)
idlist = []
if len(idlist) > 0:
allidlist.append(idlist)
for idlist in allidlist:
idstring = ' in (' + ','.join(map(str,idlist)) + ')'
sql = id_field + idstring
sqlLocalFC = id_field_local + idstring
results = fl.deleteFeatures(where=sql,
rollbackOnFailure=True)
if 'error' in results:
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror":results['error']
})
elif 'deleteResults' in results:
print ("%s features deleted" % len(results['deleteResults']))
for itm in results['deleteResults']:
if itm['success'] != True:
print (itm)
else:
print (results)
arcpy.MakeFeatureLayer_management(pathToFeatureClass,tempaddlayer,sqlLocalFC)
results = fl.addFeatures(fc=tempaddlayer)
if 'error' in results:
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror":results['error']
})
elif 'addResults' in results:
print ("%s features added" % len(results['addResults']))
for itm in results['addResults']:
if itm['success'] != True:
print (itm)
else:
print (results)
idlist = []
if 'error' in results:
raise common.ArcRestHelperError({
"function": "RemoveAndAddFeatures",
"line": inspect.currentframe().f_back.f_lineno,
"filename": 'featureservicetools',
"synerror":results['error']
})
else:
print (results)
except arcpy.ExecuteError:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "create_report_layers_using_config",
"line": line,
"filename": filename,
"synerror": synerror,
"arcpyError": arcpy.GetMessages(2),
}
)
except:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "AddFeaturesToFeatureLayer",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
gc.collect() | ['def', 'RemoveAndAddFeatures', '(', 'self', ',', 'url', ',', 'pathToFeatureClass', ',', 'id_field', ',', 'chunksize', '=', '1000', ')', ':', 'fl', '=', 'None', 'try', ':', 'if', 'arcpyFound', '==', 'False', ':', 'raise', 'common', '.', 'ArcRestHelperError', '(', '{', '"function"', ':', '"RemoveAndAddFeatures"', ',', '"line"', ':', 'inspect', '.', 'currentframe', '(', ')', '.', 'f_back', '.', 'f_lineno', ',', '"filename"', ':', "'featureservicetools'", ',', '"synerror"', ':', '"ArcPy required for this function"', '}', ')', 'arcpy', '.', 'env', '.', 'overwriteOutput', '=', 'True', 'tempaddlayer', '=', "'ewtdwedfew'", 'if', 'not', 'arcpy', '.', 'Exists', '(', 'pathToFeatureClass', ')', ':', 'raise', 'common', '.', 'ArcRestHelperError', '(', '{', '"function"', ':', '"RemoveAndAddFeatures"', ',', '"line"', ':', 'inspect', '.', 'currentframe', '(', ')', '.', 'f_back', '.', 'f_lineno', ',', '"filename"', ':', "'featureservicetools'", ',', '"synerror"', ':', '"%s does not exist"', '%', 'pathToFeatureClass', '}', ')', 'fields', '=', 'arcpy', '.', 'ListFields', '(', 'pathToFeatureClass', ',', 'wild_card', '=', 'id_field', ')', 'if', 'len', '(', 'fields', ')', '==', '0', ':', 'raise', 'common', '.', 'ArcRestHelperError', '(', '{', '"function"', ':', '"RemoveAndAddFeatures"', ',', '"line"', ':', 'inspect', '.', 'currentframe', '(', ')', '.', 'f_back', '.', 'f_lineno', ',', '"filename"', ':', "'featureservicetools'", ',', '"synerror"', ':', '"%s field does not exist"', '%', 'id_field', '}', ')', 'strFld', '=', 'True', 'if', 'fields', '[', '0', ']', '.', 'type', '!=', "'String'", ':', 'strFld', '=', 'False', 'fl', '=', 'FeatureLayer', '(', 'url', '=', 'url', ',', 'securityHandler', '=', 'self', '.', '_securityHandler', ')', 'id_field_local', '=', 'arcpy', '.', 'AddFieldDelimiters', '(', 'pathToFeatureClass', ',', 'id_field', ')', 'idlist', '=', '[', ']', 'print', '(', 'arcpy', '.', 'GetCount_management', '(', 'in_rows', '=', 'pathToFeatureClass', ')', '.', 'getOutput', '(', '0', ')', '+', '" features in the layer"', ')', 'with', 'arcpy', '.', 'da', '.', 'SearchCursor', '(', 'pathToFeatureClass', ',', '(', 'id_field', ')', ')', 'as', 'cursor', ':', 'allidlist', '=', '[', ']', 'for', 'row', 'in', 'cursor', ':', 'if', '(', 'strFld', ')', ':', 'idlist', '.', 'append', '(', '"\'"', '+', 'row', '[', '0', ']', '+', '"\'"', ')', 'else', ':', 'idlist', '.', 'append', '(', 'row', '[', '0', ']', ')', 'if', 'len', '(', 'idlist', ')', '>=', 'chunksize', ':', 'allidlist', '.', 'append', '(', 'idlist', ')', 'idlist', '=', '[', ']', 'if', 'len', '(', 'idlist', ')', '>', '0', ':', 'allidlist', '.', 'append', '(', 'idlist', ')', 'for', 'idlist', 'in', 'allidlist', ':', 'idstring', '=', "' in ('", '+', "','", '.', 'join', '(', 'map', '(', 'str', ',', 'idlist', ')', ')', '+', "')'", 'sql', '=', 'id_field', '+', 'idstring', 'sqlLocalFC', '=', 'id_field_local', '+', 'idstring', 'results', '=', 'fl', '.', 'deleteFeatures', '(', 'where', '=', 'sql', ',', 'rollbackOnFailure', '=', 'True', ')', 'if', "'error'", 'in', 'results', ':', 'raise', 'common', '.', 'ArcRestHelperError', '(', '{', '"function"', ':', '"RemoveAndAddFeatures"', ',', '"line"', ':', 'inspect', '.', 'currentframe', '(', ')', '.', 'f_back', '.', 'f_lineno', ',', '"filename"', ':', "'featureservicetools'", ',', '"synerror"', ':', 'results', '[', "'error'", ']', '}', ')', 'elif', "'deleteResults'", 'in', 'results', ':', 'print', '(', '"%s features deleted"', '%', 'len', '(', 'results', '[', "'deleteResults'", ']', ')', ')', 'for', 'itm', 'in', 
'results', '[', "'deleteResults'", ']', ':', 'if', 'itm', '[', "'success'", ']', '!=', 'True', ':', 'print', '(', 'itm', ')', 'else', ':', 'print', '(', 'results', ')', 'arcpy', '.', 'MakeFeatureLayer_management', '(', 'pathToFeatureClass', ',', 'tempaddlayer', ',', 'sqlLocalFC', ')', 'results', '=', 'fl', '.', 'addFeatures', '(', 'fc', '=', 'tempaddlayer', ')', 'if', "'error'", 'in', 'results', ':', 'raise', 'common', '.', 'ArcRestHelperError', '(', '{', '"function"', ':', '"RemoveAndAddFeatures"', ',', '"line"', ':', 'inspect', '.', 'currentframe', '(', ')', '.', 'f_back', '.', 'f_lineno', ',', '"filename"', ':', "'featureservicetools'", ',', '"synerror"', ':', 'results', '[', "'error'", ']', '}', ')', 'elif', "'addResults'", 'in', 'results', ':', 'print', '(', '"%s features added"', '%', 'len', '(', 'results', '[', "'addResults'", ']', ')', ')', 'for', 'itm', 'in', 'results', '[', "'addResults'", ']', ':', 'if', 'itm', '[', "'success'", ']', '!=', 'True', ':', 'print', '(', 'itm', ')', 'else', ':', 'print', '(', 'results', ')', 'idlist', '=', '[', ']', 'if', "'error'", 'in', 'results', ':', 'raise', 'common', '.', 'ArcRestHelperError', '(', '{', '"function"', ':', '"RemoveAndAddFeatures"', ',', '"line"', ':', 'inspect', '.', 'currentframe', '(', ')', '.', 'f_back', '.', 'f_lineno', ',', '"filename"', ':', "'featureservicetools'", ',', '"synerror"', ':', 'results', '[', "'error'", ']', '}', ')', 'else', ':', 'print', '(', 'results', ')', 'except', 'arcpy', '.', 'ExecuteError', ':', 'line', ',', 'filename', ',', 'synerror', '=', 'trace', '(', ')', 'raise', 'common', '.', 'ArcRestHelperError', '(', '{', '"function"', ':', '"create_report_layers_using_config"', ',', '"line"', ':', 'line', ',', '"filename"', ':', 'filename', ',', '"synerror"', ':', 'synerror', ',', '"arcpyError"', ':', 'arcpy', '.', 'GetMessages', '(', '2', ')', ',', '}', ')', 'except', ':', 'line', ',', 'filename', ',', 'synerror', '=', 'trace', '(', ')', 'raise', 'common', '.', 'ArcRestHelperError', '(', '{', '"function"', ':', '"AddFeaturesToFeatureLayer"', ',', '"line"', ':', 'line', ',', '"filename"', ':', 'filename', ',', '"synerror"', ':', 'synerror', ',', '}', ')', 'finally', ':', 'gc', '.', 'collect', '(', ')'] | Deletes all features in a feature service and uploads features from a feature class on disk.
Args:
url (str): The URL of the feature service.
pathToFeatureClass (str): The path of the feature class on disk.
id_field (str): The name of the field in the feature class to use for chunking.
chunksize (int): The maximum amount of features to upload at a time. Defaults to 1000.
Raises:
ArcRestHelperError: if ``arcpy`` can't be found. | ['Deletes', 'all', 'features', 'in', 'a', 'feature', 'service', 'and', 'uploads', 'features', 'from', 'a', 'feature', 'class', 'on', 'disk', '.'] | train | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcresthelper/featureservicetools.py#L60-L195 |
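The reusable core of the method above is the chunk-then-IN-clause pattern; a standalone sketch with a placeholder field name.
    def chunked_in_clauses(field, ids, chunksize=1000):
        for start in range(0, len(ids), chunksize):
            chunk = ids[start:start + chunksize]
            yield field + ' in (' + ','.join(map(str, chunk)) + ')'
    for where in chunked_in_clauses('OBJECTID', list(range(1, 2501))):
        pass   # feed each where clause to deleteFeatures(where=...) / MakeFeatureLayer_management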
6,865 | stephenmcd/django-forms-builder | forms_builder/forms/admin.py | FormAdmin.get_queryset | def get_queryset(self, request):
"""
Annotate the queryset with the entries count for use in the
admin list view.
"""
qs = super(FormAdmin, self).get_queryset(request)
return qs.annotate(total_entries=Count("entries")) | python | def get_queryset(self, request):
"""
Annotate the queryset with the entries count for use in the
admin list view.
"""
qs = super(FormAdmin, self).get_queryset(request)
return qs.annotate(total_entries=Count("entries")) | ['def', 'get_queryset', '(', 'self', ',', 'request', ')', ':', 'qs', '=', 'super', '(', 'FormAdmin', ',', 'self', ')', '.', 'get_queryset', '(', 'request', ')', 'return', 'qs', '.', 'annotate', '(', 'total_entries', '=', 'Count', '(', '"entries"', ')', ')'] | Annotate the queryset with the entries count for use in the
admin list view. | ['Annotate', 'the', 'queryset', 'with', 'the', 'entries', 'count', 'for', 'use', 'in', 'the', 'admin', 'list', 'view', '.'] | train | https://github.com/stephenmcd/django-forms-builder/blob/89fe03100ec09a6166cc0bf0022399bbbdca6298/forms_builder/forms/admin.py#L77-L83 |
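The same annotation works outside the admin; a hedged sketch assuming the model lives at forms_builder.forms.models.Form.
    from django.db.models import Count
    from forms_builder.forms.models import Form
    forms = Form.objects.annotate(total_entries=Count("entries")).order_by("-total_entries")
    for f in forms:
        print(f.pk, f.total_entries)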
6,866 | saltstack/salt | salt/modules/http.py | query | def query(url, **kwargs):
'''
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt '*' http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
For more information about the ``http.query`` module, refer to the
:ref:`HTTP Tutorial <tutorial-http>`.
'''
opts = __opts__.copy()
if 'opts' in kwargs:
opts.update(kwargs['opts'])
del kwargs['opts']
return salt.utils.http.query(url=url, opts=opts, **kwargs) | python | def query(url, **kwargs):
'''
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt '*' http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
For more information about the ``http.query`` module, refer to the
:ref:`HTTP Tutorial <tutorial-http>`.
'''
opts = __opts__.copy()
if 'opts' in kwargs:
opts.update(kwargs['opts'])
del kwargs['opts']
return salt.utils.http.query(url=url, opts=opts, **kwargs) | ['def', 'query', '(', 'url', ',', '*', '*', 'kwargs', ')', ':', 'opts', '=', '__opts__', '.', 'copy', '(', ')', 'if', "'opts'", 'in', 'kwargs', ':', 'opts', '.', 'update', '(', 'kwargs', '[', "'opts'", ']', ')', 'del', 'kwargs', '[', "'opts'", ']', 'return', 'salt', '.', 'utils', '.', 'http', '.', 'query', '(', 'url', '=', 'url', ',', 'opts', '=', 'opts', ',', '*', '*', 'kwargs', ')'] | Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt '*' http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
For more information about the ``http.query`` module, refer to the
:ref:`HTTP Tutorial <tutorial-http>`. | ['Query', 'a', 'resource', 'and', 'decode', 'the', 'return', 'data'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/http.py#L17-L44 |
6,867 | nion-software/nionswift | nion/swift/DocumentController.py | DocumentController.__update_display_items_model | def __update_display_items_model(self, display_items_model: ListModel.FilteredListModel, data_group: typing.Optional[DataGroup.DataGroup], filter_id: typing.Optional[str]) -> None:
"""Update the data item model with a new container, filter, and sorting.
This is called when the data item model is created or when the user changes
the data group or sorting settings.
"""
with display_items_model.changes(): # change filter and sort together
if data_group is not None:
display_items_model.container = data_group
display_items_model.filter = ListModel.Filter(True)
display_items_model.sort_key = None
display_items_model.filter_id = None
elif filter_id == "latest-session":
display_items_model.container = self.document_model
display_items_model.filter = ListModel.EqFilter("session_id", self.document_model.session_id)
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = filter_id
elif filter_id == "temporary":
display_items_model.container = self.document_model
display_items_model.filter = ListModel.NotEqFilter("category", "persistent")
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = filter_id
elif filter_id == "none": # not intended to be used directly
display_items_model.container = self.document_model
display_items_model.filter = ListModel.Filter(False)
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = filter_id
else: # "all"
display_items_model.container = self.document_model
display_items_model.filter = ListModel.EqFilter("category", "persistent")
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = None | python | def __update_display_items_model(self, display_items_model: ListModel.FilteredListModel, data_group: typing.Optional[DataGroup.DataGroup], filter_id: typing.Optional[str]) -> None:
"""Update the data item model with a new container, filter, and sorting.
This is called when the data item model is created or when the user changes
the data group or sorting settings.
"""
with display_items_model.changes(): # change filter and sort together
if data_group is not None:
display_items_model.container = data_group
display_items_model.filter = ListModel.Filter(True)
display_items_model.sort_key = None
display_items_model.filter_id = None
elif filter_id == "latest-session":
display_items_model.container = self.document_model
display_items_model.filter = ListModel.EqFilter("session_id", self.document_model.session_id)
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = filter_id
elif filter_id == "temporary":
display_items_model.container = self.document_model
display_items_model.filter = ListModel.NotEqFilter("category", "persistent")
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = filter_id
elif filter_id == "none": # not intended to be used directly
display_items_model.container = self.document_model
display_items_model.filter = ListModel.Filter(False)
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = filter_id
else: # "all"
display_items_model.container = self.document_model
display_items_model.filter = ListModel.EqFilter("category", "persistent")
display_items_model.sort_key = DataItem.sort_by_date_key
display_items_model.sort_reverse = True
display_items_model.filter_id = None | ['def', '__update_display_items_model', '(', 'self', ',', 'display_items_model', ':', 'ListModel', '.', 'FilteredListModel', ',', 'data_group', ':', 'typing', '.', 'Optional', '[', 'DataGroup', '.', 'DataGroup', ']', ',', 'filter_id', ':', 'typing', '.', 'Optional', '[', 'str', ']', ')', '->', 'None', ':', 'with', 'display_items_model', '.', 'changes', '(', ')', ':', '# change filter and sort together', 'if', 'data_group', 'is', 'not', 'None', ':', 'display_items_model', '.', 'container', '=', 'data_group', 'display_items_model', '.', 'filter', '=', 'ListModel', '.', 'Filter', '(', 'True', ')', 'display_items_model', '.', 'sort_key', '=', 'None', 'display_items_model', '.', 'filter_id', '=', 'None', 'elif', 'filter_id', '==', '"latest-session"', ':', 'display_items_model', '.', 'container', '=', 'self', '.', 'document_model', 'display_items_model', '.', 'filter', '=', 'ListModel', '.', 'EqFilter', '(', '"session_id"', ',', 'self', '.', 'document_model', '.', 'session_id', ')', 'display_items_model', '.', 'sort_key', '=', 'DataItem', '.', 'sort_by_date_key', 'display_items_model', '.', 'sort_reverse', '=', 'True', 'display_items_model', '.', 'filter_id', '=', 'filter_id', 'elif', 'filter_id', '==', '"temporary"', ':', 'display_items_model', '.', 'container', '=', 'self', '.', 'document_model', 'display_items_model', '.', 'filter', '=', 'ListModel', '.', 'NotEqFilter', '(', '"category"', ',', '"persistent"', ')', 'display_items_model', '.', 'sort_key', '=', 'DataItem', '.', 'sort_by_date_key', 'display_items_model', '.', 'sort_reverse', '=', 'True', 'display_items_model', '.', 'filter_id', '=', 'filter_id', 'elif', 'filter_id', '==', '"none"', ':', '# not intended to be used directly', 'display_items_model', '.', 'container', '=', 'self', '.', 'document_model', 'display_items_model', '.', 'filter', '=', 'ListModel', '.', 'Filter', '(', 'False', ')', 'display_items_model', '.', 'sort_key', '=', 'DataItem', '.', 'sort_by_date_key', 'display_items_model', '.', 'sort_reverse', '=', 'True', 'display_items_model', '.', 'filter_id', '=', 'filter_id', 'else', ':', '# "all"', 'display_items_model', '.', 'container', '=', 'self', '.', 'document_model', 'display_items_model', '.', 'filter', '=', 'ListModel', '.', 'EqFilter', '(', '"category"', ',', '"persistent"', ')', 'display_items_model', '.', 'sort_key', '=', 'DataItem', '.', 'sort_by_date_key', 'display_items_model', '.', 'sort_reverse', '=', 'True', 'display_items_model', '.', 'filter_id', '=', 'None'] | Update the data item model with a new container, filter, and sorting.
This is called when the data item model is created or when the user changes
the data group or sorting settings. | ['Update', 'the', 'data', 'item', 'model', 'with', 'a', 'new', 'container', 'filter', 'and', 'sorting', '.'] | train | https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/DocumentController.py#L658-L694 |
6,868 | cbclab/MOT | mot/lib/cl_function.py | apply_cl_function | def apply_cl_function(cl_function, kernel_data, nmr_instances, use_local_reduction=False, cl_runtime_info=None):
"""Run the given function/procedure on the given set of data.
This class will wrap the given CL function in a kernel call and execute that that for every data instance using
the provided kernel data. This class will respect the read write setting of the kernel data elements such that
output can be written back to the according kernel data elements.
Args:
cl_function (mot.lib.cl_function.CLFunction): the function to
run on the datasets. Either a name function tuple or an actual CLFunction object.
kernel_data (dict[str: mot.lib.kernel_data.KernelData]): the data to use as input to the function.
nmr_instances (int): the number of parallel threads to run (used as ``global_size``)
use_local_reduction (boolean): set this to True if you want to use local memory reduction in
your CL procedure. If this is set to True we will multiply the global size (given by the nmr_instances)
by the work group sizes.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information
"""
cl_runtime_info = cl_runtime_info or CLRuntimeInfo()
cl_environments = cl_runtime_info.cl_environments
for param in cl_function.get_parameters():
if param.name not in kernel_data:
names = [param.name for param in cl_function.get_parameters()]
missing_names = [name for name in names if name not in kernel_data]
raise ValueError('Some parameters are missing an input value, '
'required parameters are: {}, missing inputs are: {}'.format(names, missing_names))
if cl_function.get_return_type() != 'void':
kernel_data['_results'] = Zeros((nmr_instances,), cl_function.get_return_type())
workers = []
for ind, cl_environment in enumerate(cl_environments):
worker = _ProcedureWorker(cl_environment, cl_runtime_info.compile_flags,
cl_function, kernel_data, cl_runtime_info.double_precision, use_local_reduction)
workers.append(worker)
def enqueue_batch(batch_size, offset):
items_per_worker = [batch_size // len(cl_environments) for _ in range(len(cl_environments) - 1)]
items_per_worker.append(batch_size - sum(items_per_worker))
for ind, worker in enumerate(workers):
worker.calculate(offset, offset + items_per_worker[ind])
offset += items_per_worker[ind]
worker.cl_queue.flush()
for worker in workers:
worker.cl_queue.finish()
return offset
total_offset = 0
for batch_start, batch_end in split_in_batches(nmr_instances, 1e4 * len(workers)):
total_offset = enqueue_batch(batch_end - batch_start, total_offset)
if cl_function.get_return_type() != 'void':
return kernel_data['_results'].get_data() | python | def apply_cl_function(cl_function, kernel_data, nmr_instances, use_local_reduction=False, cl_runtime_info=None):
"""Run the given function/procedure on the given set of data.
This class will wrap the given CL function in a kernel call and execute it for every data instance using
the provided kernel data. This class will respect the read/write setting of the kernel data elements such that
output can be written back to the corresponding kernel data elements.
Args:
cl_function (mot.lib.cl_function.CLFunction): the function to
run on the datasets. Either a name function tuple or an actual CLFunction object.
kernel_data (dict[str: mot.lib.kernel_data.KernelData]): the data to use as input to the function.
nmr_instances (int): the number of parallel threads to run (used as ``global_size``)
use_local_reduction (boolean): set this to True if you want to use local memory reduction in
your CL procedure. If this is set to True we will multiply the global size (given by the nmr_instances)
by the work group sizes.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information
"""
cl_runtime_info = cl_runtime_info or CLRuntimeInfo()
cl_environments = cl_runtime_info.cl_environments
for param in cl_function.get_parameters():
if param.name not in kernel_data:
names = [param.name for param in cl_function.get_parameters()]
missing_names = [name for name in names if name not in kernel_data]
raise ValueError('Some parameters are missing an input value, '
'required parameters are: {}, missing inputs are: {}'.format(names, missing_names))
if cl_function.get_return_type() != 'void':
kernel_data['_results'] = Zeros((nmr_instances,), cl_function.get_return_type())
workers = []
for ind, cl_environment in enumerate(cl_environments):
worker = _ProcedureWorker(cl_environment, cl_runtime_info.compile_flags,
cl_function, kernel_data, cl_runtime_info.double_precision, use_local_reduction)
workers.append(worker)
def enqueue_batch(batch_size, offset):
items_per_worker = [batch_size // len(cl_environments) for _ in range(len(cl_environments) - 1)]
items_per_worker.append(batch_size - sum(items_per_worker))
for ind, worker in enumerate(workers):
worker.calculate(offset, offset + items_per_worker[ind])
offset += items_per_worker[ind]
worker.cl_queue.flush()
for worker in workers:
worker.cl_queue.finish()
return offset
total_offset = 0
for batch_start, batch_end in split_in_batches(nmr_instances, 1e4 * len(workers)):
total_offset = enqueue_batch(batch_end - batch_start, total_offset)
if cl_function.get_return_type() != 'void':
return kernel_data['_results'].get_data() | ['def', 'apply_cl_function', '(', 'cl_function', ',', 'kernel_data', ',', 'nmr_instances', ',', 'use_local_reduction', '=', 'False', ',', 'cl_runtime_info', '=', 'None', ')', ':', 'cl_runtime_info', '=', 'cl_runtime_info', 'or', 'CLRuntimeInfo', '(', ')', 'cl_environments', '=', 'cl_runtime_info', '.', 'cl_environments', 'for', 'param', 'in', 'cl_function', '.', 'get_parameters', '(', ')', ':', 'if', 'param', '.', 'name', 'not', 'in', 'kernel_data', ':', 'names', '=', '[', 'param', '.', 'name', 'for', 'param', 'in', 'cl_function', '.', 'get_parameters', '(', ')', ']', 'missing_names', '=', '[', 'name', 'for', 'name', 'in', 'names', 'if', 'name', 'not', 'in', 'kernel_data', ']', 'raise', 'ValueError', '(', "'Some parameters are missing an input value, '", "'required parameters are: {}, missing inputs are: {}'", '.', 'format', '(', 'names', ',', 'missing_names', ')', ')', 'if', 'cl_function', '.', 'get_return_type', '(', ')', '!=', "'void'", ':', 'kernel_data', '[', "'_results'", ']', '=', 'Zeros', '(', '(', 'nmr_instances', ',', ')', ',', 'cl_function', '.', 'get_return_type', '(', ')', ')', 'workers', '=', '[', ']', 'for', 'ind', ',', 'cl_environment', 'in', 'enumerate', '(', 'cl_environments', ')', ':', 'worker', '=', '_ProcedureWorker', '(', 'cl_environment', ',', 'cl_runtime_info', '.', 'compile_flags', ',', 'cl_function', ',', 'kernel_data', ',', 'cl_runtime_info', '.', 'double_precision', ',', 'use_local_reduction', ')', 'workers', '.', 'append', '(', 'worker', ')', 'def', 'enqueue_batch', '(', 'batch_size', ',', 'offset', ')', ':', 'items_per_worker', '=', '[', 'batch_size', '//', 'len', '(', 'cl_environments', ')', 'for', '_', 'in', 'range', '(', 'len', '(', 'cl_environments', ')', '-', '1', ')', ']', 'items_per_worker', '.', 'append', '(', 'batch_size', '-', 'sum', '(', 'items_per_worker', ')', ')', 'for', 'ind', ',', 'worker', 'in', 'enumerate', '(', 'workers', ')', ':', 'worker', '.', 'calculate', '(', 'offset', ',', 'offset', '+', 'items_per_worker', '[', 'ind', ']', ')', 'offset', '+=', 'items_per_worker', '[', 'ind', ']', 'worker', '.', 'cl_queue', '.', 'flush', '(', ')', 'for', 'worker', 'in', 'workers', ':', 'worker', '.', 'cl_queue', '.', 'finish', '(', ')', 'return', 'offset', 'total_offset', '=', '0', 'for', 'batch_start', ',', 'batch_end', 'in', 'split_in_batches', '(', 'nmr_instances', ',', '1e4', '*', 'len', '(', 'workers', ')', ')', ':', 'total_offset', '=', 'enqueue_batch', '(', 'batch_end', '-', 'batch_start', ',', 'total_offset', ')', 'if', 'cl_function', '.', 'get_return_type', '(', ')', '!=', "'void'", ':', 'return', 'kernel_data', '[', "'_results'", ']', '.', 'get_data', '(', ')'] | Run the given function/procedure on the given set of data.
This class will wrap the given CL function in a kernel call and execute it for every data instance using
the provided kernel data. This class will respect the read/write setting of the kernel data elements such that
output can be written back to the corresponding kernel data elements.
Args:
cl_function (mot.lib.cl_function.CLFunction): the function to
run on the datasets. Either a name function tuple or an actual CLFunction object.
kernel_data (dict[str: mot.lib.kernel_data.KernelData]): the data to use as input to the function.
nmr_instances (int): the number of parallel threads to run (used as ``global_size``)
use_local_reduction (boolean): set this to True if you want to use local memory reduction in
your CL procedure. If this is set to True we will multiply the global size (given by the nmr_instances)
by the work group sizes.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information | ['Run', 'the', 'given', 'function', '/', 'procedure', 'on', 'the', 'given', 'set', 'of', 'data', '.'] | train | https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_function.py#L578-L633 |
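A usage sketch for apply_cl_function; only its signature comes from the record above, while SimpleCLFunction.from_string and the Array kernel-data wrapper are assumptions about the surrounding mot.lib modules:

# Hedged sketch -- SimpleCLFunction.from_string and Array are assumed names from
# mot.lib; apply_cl_function's signature is taken from the record itself.
import numpy as np
from mot.lib.cl_function import SimpleCLFunction, apply_cl_function
from mot.lib.kernel_data import Array

double_it = SimpleCLFunction.from_string('''
    double double_it(double value){
        return value * 2;
    }
''')

values = np.arange(10, dtype=np.float64)
# One parallel instance per input value; the non-void return type means the
# results come back through the implicit '_results' buffer.
results = apply_cl_function(double_it, {'value': Array(values)}, nmr_instances=values.shape[0])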
6,869 | limodou/uliweb | uliweb/lib/werkzeug/datastructures.py | WWWAuthenticate.set_basic | def set_basic(self, realm='authentication required'):
"""Clear the auth info and enable basic auth."""
dict.clear(self)
dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
if self.on_update:
self.on_update(self) | python | def set_basic(self, realm='authentication required'):
"""Clear the auth info and enable basic auth."""
dict.clear(self)
dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
if self.on_update:
self.on_update(self) | ['def', 'set_basic', '(', 'self', ',', 'realm', '=', "'authentication required'", ')', ':', 'dict', '.', 'clear', '(', 'self', ')', 'dict', '.', 'update', '(', 'self', ',', '{', "'__auth_type__'", ':', "'basic'", ',', "'realm'", ':', 'realm', '}', ')', 'if', 'self', '.', 'on_update', ':', 'self', '.', 'on_update', '(', 'self', ')'] | Clear the auth info and enable basic auth. | ['Clear', 'the', 'auth', 'info', 'and', 'enable', 'basic', 'auth', '.'] | train | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/datastructures.py#L2359-L2364 |
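A short usage sketch for set_basic; the import uses werkzeug's public path rather than the copy bundled inside uliweb shown in the record:

from werkzeug.datastructures import WWWAuthenticate

www_auth = WWWAuthenticate()
www_auth.set_basic(realm='admin area')   # clears any previous auth info, switches to basic
# Typically sent back on a 401 response:
# response.headers['WWW-Authenticate'] = www_auth.to_header()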
6,870 | andymccurdy/redis-py | redis/client.py | Redis.flushall | def flushall(self, asynchronous=False):
"""
Delete all keys in all databases on the current host.
``asynchronous`` indicates whether the operation is
executed asynchronously by the server.
"""
args = []
if asynchronous:
args.append(Token.get_token('ASYNC'))
return self.execute_command('FLUSHALL', *args) | python | def flushall(self, asynchronous=False):
"""
Delete all keys in all databases on the current host.
``asynchronous`` indicates whether the operation is
executed asynchronously by the server.
"""
args = []
if asynchronous:
args.append(Token.get_token('ASYNC'))
return self.execute_command('FLUSHALL', *args) | ['def', 'flushall', '(', 'self', ',', 'asynchronous', '=', 'False', ')', ':', 'args', '=', '[', ']', 'if', 'asynchronous', ':', 'args', '.', 'append', '(', 'Token', '.', 'get_token', '(', "'ASYNC'", ')', ')', 'return', 'self', '.', 'execute_command', '(', "'FLUSHALL'", ',', '*', 'args', ')'] | Delete all keys in all databases on the current host.
``asynchronous`` indicates whether the operation is
executed asynchronously by the server. | ['Delete', 'all', 'keys', 'in', 'all', 'databases', 'on', 'the', 'current', 'host', '.'] | train | https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L930-L940 |
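A usage sketch for flushall against a local server; connection details are placeholders:

import redis

r = redis.Redis(host='localhost', port=6379, db=0)
r.set('key', 'value')
# ASYNC flushing (Redis >= 4.0) lets the server reclaim keys in the background.
r.flushall(asynchronous=True)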
6,871 | NicolasLM/spinach | spinach/brokers/redis.py | RedisBroker.inspect_periodic_tasks | def inspect_periodic_tasks(self) -> List[Tuple[int, str]]:
"""Get the next periodic task schedule.
Used only for debugging and during tests.
"""
rv = self._r.zrangebyscore(
self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
'-inf', '+inf', withscores=True
)
return [(int(r[1]), r[0].decode()) for r in rv] | python | def inspect_periodic_tasks(self) -> List[Tuple[int, str]]:
"""Get the next periodic task schedule.
Used only for debugging and during tests.
"""
rv = self._r.zrangebyscore(
self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
'-inf', '+inf', withscores=True
)
return [(int(r[1]), r[0].decode()) for r in rv] | ['def', 'inspect_periodic_tasks', '(', 'self', ')', '->', 'List', '[', 'Tuple', '[', 'int', ',', 'str', ']', ']', ':', 'rv', '=', 'self', '.', '_r', '.', 'zrangebyscore', '(', 'self', '.', '_to_namespaced', '(', 'PERIODIC_TASKS_QUEUE_KEY', ')', ',', "'-inf'", ',', "'+inf'", ',', 'withscores', '=', 'True', ')', 'return', '[', '(', 'int', '(', 'r', '[', '1', ']', ')', ',', 'r', '[', '0', ']', '.', 'decode', '(', ')', ')', 'for', 'r', 'in', 'rv', ']'] | Get the next periodic task schedule.
Used only for debugging and during tests. | ['Get', 'the', 'next', 'periodic', 'task', 'schedule', '.'] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/redis.py#L215-L224 |
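A debugging sketch for inspect_periodic_tasks; attaching the broker to an Engine first is assumed to be what gives it its namespace:

from spinach import Engine
from spinach.brokers.redis import RedisBroker

broker = RedisBroker()                    # assumes a Redis server on localhost
spin = Engine(broker, namespace='demo')   # assumption: the Engine sets the broker namespace
for next_run_ts, task_name in broker.inspect_periodic_tasks():
    print(next_run_ts, task_name)         # unix timestamp of the next scheduled run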
6,872 | ozak/georasters | georasters/georasters.py | GeoRaster.raster_weights | def raster_weights(self, **kwargs):
"""
Compute neighbor weights for GeoRaster.
See help(gr.raster_weights) for options
Usage:
geo.raster_weights(rook=True)
"""
if self.weights is None:
self.weights = raster_weights(self.raster, **kwargs)
pass | python | def raster_weights(self, **kwargs):
"""
Compute neighbor weights for GeoRaster.
See help(gr.raster_weights) for options
Usage:
geo.raster_weights(rook=True)
"""
if self.weights is None:
self.weights = raster_weights(self.raster, **kwargs)
pass | ['def', 'raster_weights', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'if', 'self', '.', 'weights', 'is', 'None', ':', 'self', '.', 'weights', '=', 'raster_weights', '(', 'self', '.', 'raster', ',', '*', '*', 'kwargs', ')', 'pass'] | Compute neighbor weights for GeoRaster.
See help(gr.raster_weights) for options
Usage:
geo.raster_weights(rook=True) | ['Compute', 'neighbor', 'weights', 'for', 'GeoRaster', '.', 'See', 'help', '(', 'gr', '.', 'raster_weights', ')', 'for', 'options'] | train | https://github.com/ozak/georasters/blob/0612bd91bb2a2cb2f1d59ba89c1ff131dae27d70/georasters/georasters.py#L944-L954 |
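A usage sketch; gr.from_file is georasters' loader, and the input GeoTIFF name is a placeholder:

import georasters as gr

dem = gr.from_file('elevation.tif')   # placeholder raster
dem.raster_weights(rook=True)         # builds and caches the neighbor weights
print(dem.weights)                    # populated only after the call above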
6,873 | nteract/papermill | papermill/s3.py | S3.cp_string | def cp_string(self, source, dest, **kwargs):
"""
Copies source string into the destination location.
Parameters
----------
source: string
the string with the content to copy
dest: string
the s3 location
"""
assert isinstance(source, six.string_types), "source must be a string"
assert self._is_s3(dest), "Destination must be s3 location"
return self._put_string(source, dest, **kwargs) | python | def cp_string(self, source, dest, **kwargs):
"""
Copies source string into the destination location.
Parameters
----------
source: string
the string with the content to copy
dest: string
the s3 location
"""
assert isinstance(source, six.string_types), "source must be a string"
assert self._is_s3(dest), "Destination must be s3 location"
return self._put_string(source, dest, **kwargs) | ['def', 'cp_string', '(', 'self', ',', 'source', ',', 'dest', ',', '*', '*', 'kwargs', ')', ':', 'assert', 'isinstance', '(', 'source', ',', 'six', '.', 'string_types', ')', ',', '"source must be a string"', 'assert', 'self', '.', '_is_s3', '(', 'dest', ')', ',', '"Destination must be s3 location"', 'return', 'self', '.', '_put_string', '(', 'source', ',', 'dest', ',', '*', '*', 'kwargs', ')'] | Copies source string into the destination location.
Parameters
----------
source: string
the string with the content to copy
dest: string
the s3 location | ['Copies', 'source', 'string', 'into', 'the', 'destination', 'location', '.'] | train | https://github.com/nteract/papermill/blob/7423a303f3fa22ec6d03edf5fd9700d659b5a6fa/papermill/s3.py#L355-L370 |
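A usage sketch for cp_string; the bucket is a placeholder and AWS credentials are assumed to be configured in the environment:

from papermill.s3 import S3

s3 = S3()
# Writes the literal string as the object body at the destination key.
s3.cp_string('hello from papermill', 's3://my-bucket/notes/hello.txt')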
6,874 | Rapptz/discord.py | discord/shard.py | AutoShardedClient.latency | def latency(self):
""":class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This operates similarly to :meth:`.Client.latency` except it uses the average
latency of every shard's latency. To get a list of shard latency, check the
:attr:`latencies` property. Returns ``nan`` if there are no shards ready.
"""
if not self.shards:
return float('nan')
return sum(latency for _, latency in self.latencies) / len(self.shards) | python | def latency(self):
""":class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This operates similarly to :meth:`.Client.latency` except it uses the average
latency of every shard's latency. To get a list of shard latency, check the
:attr:`latencies` property. Returns ``nan`` if there are no shards ready.
"""
if not self.shards:
return float('nan')
return sum(latency for _, latency in self.latencies) / len(self.shards) | ['def', 'latency', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'shards', ':', 'return', 'float', '(', "'nan'", ')', 'return', 'sum', '(', 'latency', 'for', '_', ',', 'latency', 'in', 'self', '.', 'latencies', ')', '/', 'len', '(', 'self', '.', 'shards', ')'] | :class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This operates similarly to :meth:`.Client.latency` except it uses the average
latency of every shard's latency. To get a list of shard latency, check the
:attr:`latencies` property. Returns ``nan`` if there are no shards ready. | [':', 'class', ':', 'float', ':', 'Measures', 'latency', 'between', 'a', 'HEARTBEAT', 'and', 'a', 'HEARTBEAT_ACK', 'in', 'seconds', '.'] | train | https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/shard.py#L163-L172 |
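A usage sketch reading the averaged latency next to the per-shard values; the token is a placeholder:

import discord

client = discord.AutoShardedClient()

@client.event
async def on_ready():
    print('average latency: {:.3f}s'.format(client.latency))
    for shard_id, shard_latency in client.latencies:
        print('shard {}: {:.3f}s'.format(shard_id, shard_latency))

client.run('YOUR_BOT_TOKEN')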
6,875 | vmware/pyvmomi | pyVim/connect.py | VimSessionOrientedStub.makeCredBearerTokenLoginMethod | def makeCredBearerTokenLoginMethod(username,
password,
stsUrl,
stsCert=None):
'''Return a function that will call the vim.SessionManager.LoginByToken()
after obtaining a Bearer token from the STS. The result of this function
can be passed as the "loginMethod" to a SessionOrientedStub constructor.
@param username: username of the user/service registered with STS.
@param password: password of the user/service registered with STS.
@param stsUrl: URL of the SAML Token issueing service. (i.e. SSO server).
@param stsCert: public key of the STS service.
'''
assert(username)
assert(password)
assert(stsUrl)
def _doLogin(soapStub):
from . import sso
cert = soapStub.schemeArgs['cert_file']
key = soapStub.schemeArgs['key_file']
authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
sts_cert=stsCert)
samlAssertion = authenticator.get_bearer_saml_assertion(username,
password,
cert,
key)
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
try:
soapStub.samlToken = samlAssertion
si.content.sessionManager.LoginByToken()
finally:
soapStub.samlToken = None
return _doLogin | python | def makeCredBearerTokenLoginMethod(username,
password,
stsUrl,
stsCert=None):
'''Return a function that will call the vim.SessionManager.LoginByToken()
after obtaining a Bearer token from the STS. The result of this function
can be passed as the "loginMethod" to a SessionOrientedStub constructor.
@param username: username of the user/service registered with STS.
@param password: password of the user/service registered with STS.
@param stsUrl: URL of the SAML Token issuing service. (i.e. SSO server).
@param stsCert: public key of the STS service.
'''
assert(username)
assert(password)
assert(stsUrl)
def _doLogin(soapStub):
from . import sso
cert = soapStub.schemeArgs['cert_file']
key = soapStub.schemeArgs['key_file']
authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
sts_cert=stsCert)
samlAssertion = authenticator.get_bearer_saml_assertion(username,
password,
cert,
key)
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
try:
soapStub.samlToken = samlAssertion
si.content.sessionManager.LoginByToken()
finally:
soapStub.samlToken = None
return _doLogin | ['def', 'makeCredBearerTokenLoginMethod', '(', 'username', ',', 'password', ',', 'stsUrl', ',', 'stsCert', '=', 'None', ')', ':', 'assert', '(', 'username', ')', 'assert', '(', 'password', ')', 'assert', '(', 'stsUrl', ')', 'def', '_doLogin', '(', 'soapStub', ')', ':', 'from', '.', 'import', 'sso', 'cert', '=', 'soapStub', '.', 'schemeArgs', '[', "'cert_file'", ']', 'key', '=', 'soapStub', '.', 'schemeArgs', '[', "'key_file'", ']', 'authenticator', '=', 'sso', '.', 'SsoAuthenticator', '(', 'sts_url', '=', 'stsUrl', ',', 'sts_cert', '=', 'stsCert', ')', 'samlAssertion', '=', 'authenticator', '.', 'get_bearer_saml_assertion', '(', 'username', ',', 'password', ',', 'cert', ',', 'key', ')', 'si', '=', 'vim', '.', 'ServiceInstance', '(', '"ServiceInstance"', ',', 'soapStub', ')', 'sm', '=', 'si', '.', 'content', '.', 'sessionManager', 'if', 'not', 'sm', '.', 'currentSession', ':', 'try', ':', 'soapStub', '.', 'samlToken', '=', 'samlAssertion', 'si', '.', 'content', '.', 'sessionManager', '.', 'LoginByToken', '(', ')', 'finally', ':', 'soapStub', '.', 'samlToken', '=', 'None', 'return', '_doLogin'] | Return a function that will call the vim.SessionManager.LoginByToken()
after obtaining a Bearer token from the STS. The result of this function
can be passed as the "loginMethod" to a SessionOrientedStub constructor.
@param username: username of the user/service registered with STS.
@param password: password of the user/service registered with STS.
@param stsUrl: URL of the SAML Token issuing service. (i.e. SSO server).
@param stsCert: public key of the STS service. | ['Return', 'a', 'function', 'that', 'will', 'call', 'the', 'vim', '.', 'SessionManager', '.', 'LoginByToken', '()', 'after', 'obtaining', 'a', 'Bearer', 'token', 'from', 'the', 'STS', '.', 'The', 'result', 'of', 'this', 'function', 'can', 'be', 'passed', 'as', 'the', 'loginMethod', 'to', 'a', 'SessionOrientedStub', 'constructor', '.'] | train | https://github.com/vmware/pyvmomi/blob/3ffcb23bf77d757175c0d5216ba9a25345d824cd/pyVim/connect.py#L154-L190 |
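A connection sketch using the returned login method; SmartStubAdapter and VimSessionOrientedStub live in the same pyVim.connect module as this record, but the exact constructor arguments below are assumptions:

from pyVim.connect import SmartStubAdapter, VimSessionOrientedStub
from pyVmomi import vim

# The cert/key pair must be on the raw stub because the login method reads them
# from its schemeArgs; host names and credentials are placeholders.
raw_stub = SmartStubAdapter(host='vcenter.example.com',
                            certFile='user.crt', certKeyFile='user.key')
login = VimSessionOrientedStub.makeCredBearerTokenLoginMethod(
    'administrator@vsphere.local', 'secret', 'https://sso.example.com/sts/STSService')
session_stub = VimSessionOrientedStub(raw_stub, login)
si = vim.ServiceInstance('ServiceInstance', session_stub)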
6,876 | ejhigson/nestcheck | setup.py | get_version | def get_version():
"""Get single-source __version__."""
pkg_dir = get_package_dir()
with open(os.path.join(pkg_dir, 'nestcheck/_version.py')) as ver_file:
string = ver_file.read()
return string.strip().replace('__version__ = ', '').replace('\'', '') | python | def get_version():
"""Get single-source __version__."""
pkg_dir = get_package_dir()
with open(os.path.join(pkg_dir, 'nestcheck/_version.py')) as ver_file:
string = ver_file.read()
return string.strip().replace('__version__ = ', '').replace('\'', '') | ['def', 'get_version', '(', ')', ':', 'pkg_dir', '=', 'get_package_dir', '(', ')', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'pkg_dir', ',', "'nestcheck/_version.py'", ')', ')', 'as', 'ver_file', ':', 'string', '=', 'ver_file', '.', 'read', '(', ')', 'return', 'string', '.', 'strip', '(', ')', '.', 'replace', '(', "'__version__ = '", ',', "''", ')', '.', 'replace', '(', "'\\''", ',', "''", ')'] | Get single-source __version__. | ['Get', 'single', '-', 'source', '__version__', '.'] | train | https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/setup.py#L24-L29 |
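How this helper is typically consumed inside the same setup.py (a sketch; only get_version itself comes from the record):

# Sketch of the single-source-version pattern: __version__ lives only in
# nestcheck/_version.py and setup() reads it through get_version().
from setuptools import setup

setup(name='nestcheck',
      version=get_version(),       # helper defined in the record above
      packages=['nestcheck'])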
6,877 | saltstack/salt | salt/modules/file.py | get_diff | def get_diff(file1,
file2,
saltenv='base',
show_filenames=True,
show_changes=True,
template=False,
source_hash_file1=None,
source_hash_file2=None):
'''
Return unified diff of two files
file1
The first file to feed into the diff utility
.. versionchanged:: 2018.3.0
Can now be either a local or remote file. In earlier releases,
this had to be a file local to the minion.
file2
The second file to feed into the diff utility
.. versionchanged:: 2018.3.0
Can now be either a local or remote file. In earlier releases, this
had to be a file on the salt fileserver (i.e.
``salt://somefile.txt``)
show_filenames : True
Set to ``False`` to hide the filenames in the top two lines of the
diff.
show_changes : True
If set to ``False``, and there are differences, then instead of a diff
a simple message stating that show_changes is set to ``False`` will be
returned.
template : False
Set to ``True`` if two templates are being compared. This is not useful
except for within states, with the ``obfuscate_templates`` option set
to ``True``.
.. versionadded:: 2018.3.0
source_hash_file1
If ``file1`` is an http(s)/ftp URL and the file exists in the minion's
file cache, this option can be passed to keep the minion from
re-downloading the archive if the cached copy matches the specified
hash.
.. versionadded:: 2018.3.0
source_hash_file2
If ``file2`` is an http(s)/ftp URL and the file exists in the minion's
file cache, this option can be passed to keep the minion from
re-downloading the archive if the cached copy matches the specified
hash.
.. versionadded:: 2018.3.0
CLI Examples:
.. code-block:: bash
salt '*' file.get_diff /home/fred/.vimrc salt://users/fred/.vimrc
salt '*' file.get_diff /tmp/foo.txt /tmp/bar.txt
'''
files = (file1, file2)
source_hashes = (source_hash_file1, source_hash_file2)
paths = []
errors = []
for filename, source_hash in zip(files, source_hashes):
try:
# Local file paths will just return the same path back when passed
# to cp.cache_file.
cached_path = __salt__['cp.cache_file'](filename,
saltenv,
source_hash=source_hash)
if cached_path is False:
errors.append(
'File {0} not found'.format(
salt.utils.stringutils.to_unicode(filename)
)
)
continue
paths.append(cached_path)
except MinionError as exc:
errors.append(salt.utils.stringutils.to_unicode(exc.__str__()))
continue
if errors:
raise CommandExecutionError(
'Failed to cache one or more files',
info=errors
)
args = []
for filename in paths:
try:
with salt.utils.files.fopen(filename, 'rb') as fp_:
args.append(fp_.readlines())
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Failed to read {0}: {1}'.format(
salt.utils.stringutils.to_unicode(filename),
exc.strerror
)
)
if args[0] != args[1]:
if template and __salt__['config.option']('obfuscate_templates'):
ret = '<Obfuscated Template>'
elif not show_changes:
ret = '<show_changes=False>'
else:
bdiff = _binary_replace(*paths) # pylint: disable=no-value-for-parameter
if bdiff:
ret = bdiff
else:
if show_filenames:
args.extend(paths)
ret = __utils__['stringutils.get_diff'](*args)
return ret
return '' | python | def get_diff(file1,
file2,
saltenv='base',
show_filenames=True,
show_changes=True,
template=False,
source_hash_file1=None,
source_hash_file2=None):
'''
Return unified diff of two files
file1
The first file to feed into the diff utility
.. versionchanged:: 2018.3.0
Can now be either a local or remote file. In earlier releases,
this had to be a file local to the minion.
file2
The second file to feed into the diff utility
.. versionchanged:: 2018.3.0
Can now be either a local or remote file. In earlier releases, this
had to be a file on the salt fileserver (i.e.
``salt://somefile.txt``)
show_filenames : True
Set to ``False`` to hide the filenames in the top two lines of the
diff.
show_changes : True
If set to ``False``, and there are differences, then instead of a diff
a simple message stating that show_changes is set to ``False`` will be
returned.
template : False
Set to ``True`` if two templates are being compared. This is not useful
except for within states, with the ``obfuscate_templates`` option set
to ``True``.
.. versionadded:: 2018.3.0
source_hash_file1
If ``file1`` is an http(s)/ftp URL and the file exists in the minion's
file cache, this option can be passed to keep the minion from
re-downloading the archive if the cached copy matches the specified
hash.
.. versionadded:: 2018.3.0
source_hash_file2
If ``file2`` is an http(s)/ftp URL and the file exists in the minion's
file cache, this option can be passed to keep the minion from
re-downloading the archive if the cached copy matches the specified
hash.
.. versionadded:: 2018.3.0
CLI Examples:
.. code-block:: bash
salt '*' file.get_diff /home/fred/.vimrc salt://users/fred/.vimrc
salt '*' file.get_diff /tmp/foo.txt /tmp/bar.txt
'''
files = (file1, file2)
source_hashes = (source_hash_file1, source_hash_file2)
paths = []
errors = []
for filename, source_hash in zip(files, source_hashes):
try:
# Local file paths will just return the same path back when passed
# to cp.cache_file.
cached_path = __salt__['cp.cache_file'](filename,
saltenv,
source_hash=source_hash)
if cached_path is False:
errors.append(
'File {0} not found'.format(
salt.utils.stringutils.to_unicode(filename)
)
)
continue
paths.append(cached_path)
except MinionError as exc:
errors.append(salt.utils.stringutils.to_unicode(exc.__str__()))
continue
if errors:
raise CommandExecutionError(
'Failed to cache one or more files',
info=errors
)
args = []
for filename in paths:
try:
with salt.utils.files.fopen(filename, 'rb') as fp_:
args.append(fp_.readlines())
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Failed to read {0}: {1}'.format(
salt.utils.stringutils.to_unicode(filename),
exc.strerror
)
)
if args[0] != args[1]:
if template and __salt__['config.option']('obfuscate_templates'):
ret = '<Obfuscated Template>'
elif not show_changes:
ret = '<show_changes=False>'
else:
bdiff = _binary_replace(*paths) # pylint: disable=no-value-for-parameter
if bdiff:
ret = bdiff
else:
if show_filenames:
args.extend(paths)
ret = __utils__['stringutils.get_diff'](*args)
return ret
return '' | ['def', 'get_diff', '(', 'file1', ',', 'file2', ',', 'saltenv', '=', "'base'", ',', 'show_filenames', '=', 'True', ',', 'show_changes', '=', 'True', ',', 'template', '=', 'False', ',', 'source_hash_file1', '=', 'None', ',', 'source_hash_file2', '=', 'None', ')', ':', 'files', '=', '(', 'file1', ',', 'file2', ')', 'source_hashes', '=', '(', 'source_hash_file1', ',', 'source_hash_file2', ')', 'paths', '=', '[', ']', 'errors', '=', '[', ']', 'for', 'filename', ',', 'source_hash', 'in', 'zip', '(', 'files', ',', 'source_hashes', ')', ':', 'try', ':', '# Local file paths will just return the same path back when passed', '# to cp.cache_file.', 'cached_path', '=', '__salt__', '[', "'cp.cache_file'", ']', '(', 'filename', ',', 'saltenv', ',', 'source_hash', '=', 'source_hash', ')', 'if', 'cached_path', 'is', 'False', ':', 'errors', '.', 'append', '(', "'File {0} not found'", '.', 'format', '(', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_unicode', '(', 'filename', ')', ')', ')', 'continue', 'paths', '.', 'append', '(', 'cached_path', ')', 'except', 'MinionError', 'as', 'exc', ':', 'errors', '.', 'append', '(', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_unicode', '(', 'exc', '.', '__str__', '(', ')', ')', ')', 'continue', 'if', 'errors', ':', 'raise', 'CommandExecutionError', '(', "'Failed to cache one or more files'", ',', 'info', '=', 'errors', ')', 'args', '=', '[', ']', 'for', 'filename', 'in', 'paths', ':', 'try', ':', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'filename', ',', "'rb'", ')', 'as', 'fp_', ':', 'args', '.', 'append', '(', 'fp_', '.', 'readlines', '(', ')', ')', 'except', '(', 'IOError', ',', 'OSError', ')', 'as', 'exc', ':', 'raise', 'CommandExecutionError', '(', "'Failed to read {0}: {1}'", '.', 'format', '(', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_unicode', '(', 'filename', ')', ',', 'exc', '.', 'strerror', ')', ')', 'if', 'args', '[', '0', ']', '!=', 'args', '[', '1', ']', ':', 'if', 'template', 'and', '__salt__', '[', "'config.option'", ']', '(', "'obfuscate_templates'", ')', ':', 'ret', '=', "'<Obfuscated Template>'", 'elif', 'not', 'show_changes', ':', 'ret', '=', "'<show_changes=False>'", 'else', ':', 'bdiff', '=', '_binary_replace', '(', '*', 'paths', ')', '# pylint: disable=no-value-for-parameter', 'if', 'bdiff', ':', 'ret', '=', 'bdiff', 'else', ':', 'if', 'show_filenames', ':', 'args', '.', 'extend', '(', 'paths', ')', 'ret', '=', '__utils__', '[', "'stringutils.get_diff'", ']', '(', '*', 'args', ')', 'return', 'ret', 'return', "''"] | Return unified diff of two files
file1
The first file to feed into the diff utility
.. versionchanged:: 2018.3.0
Can now be either a local or remote file. In earlier releases,
this had to be a file local to the minion.
file2
The second file to feed into the diff utility
.. versionchanged:: 2018.3.0
Can now be either a local or remote file. In earlier releases, this
had to be a file on the salt fileserver (i.e.
``salt://somefile.txt``)
show_filenames : True
Set to ``False`` to hide the filenames in the top two lines of the
diff.
show_changes : True
If set to ``False``, and there are differences, then instead of a diff
a simple message stating that show_changes is set to ``False`` will be
returned.
template : False
Set to ``True`` if two templates are being compared. This is not useful
except for within states, with the ``obfuscate_templates`` option set
to ``True``.
.. versionadded:: 2018.3.0
source_hash_file1
If ``file1`` is an http(s)/ftp URL and the file exists in the minion's
file cache, this option can be passed to keep the minion from
re-downloading the archive if the cached copy matches the specified
hash.
.. versionadded:: 2018.3.0
source_hash_file2
If ``file2`` is an http(s)/ftp URL and the file exists in the minion's
file cache, this option can be passed to keep the minion from
re-downloading the archive if the cached copy matches the specified
hash.
.. versionadded:: 2018.3.0
CLI Examples:
.. code-block:: bash
salt '*' file.get_diff /home/fred/.vimrc salt://users/fred/.vimrc
salt '*' file.get_diff /tmp/foo.txt /tmp/bar.txt | ['Return', 'unified', 'diff', 'of', 'two', 'files'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L5205-L5327 |
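Beyond the CLI examples in the docstring, the same execution module function can be called from Python via salt's LocalClient (run on the master); a minimal sketch:

import salt.client

local = salt.client.LocalClient()
# Diff a minion-local file against a file served from the salt fileserver.
diffs = local.cmd('*', 'file.get_diff',
                  ['/home/fred/.vimrc', 'salt://users/fred/.vimrc'])
for minion_id, diff_text in diffs.items():
    print(minion_id, diff_text)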
6,878 | twilio/twilio-python | twilio/rest/serverless/v1/service/function/function_version.py | FunctionVersionList.create | def create(self, path, visibility):
"""
Create a new FunctionVersionInstance
:param unicode path: The path
:param FunctionVersionInstance.Visibility visibility: The visibility
:returns: Newly created FunctionVersionInstance
:rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionInstance
"""
data = values.of({'Path': path, 'Visibility': visibility, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return FunctionVersionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
function_sid=self._solution['function_sid'],
) | python | def create(self, path, visibility):
"""
Create a new FunctionVersionInstance
:param unicode path: The path
:param FunctionVersionInstance.Visibility visibility: The visibility
:returns: Newly created FunctionVersionInstance
:rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionInstance
"""
data = values.of({'Path': path, 'Visibility': visibility, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return FunctionVersionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
function_sid=self._solution['function_sid'],
) | ['def', 'create', '(', 'self', ',', 'path', ',', 'visibility', ')', ':', 'data', '=', 'values', '.', 'of', '(', '{', "'Path'", ':', 'path', ',', "'Visibility'", ':', 'visibility', ',', '}', ')', 'payload', '=', 'self', '.', '_version', '.', 'create', '(', "'POST'", ',', 'self', '.', '_uri', ',', 'data', '=', 'data', ',', ')', 'return', 'FunctionVersionInstance', '(', 'self', '.', '_version', ',', 'payload', ',', 'service_sid', '=', 'self', '.', '_solution', '[', "'service_sid'", ']', ',', 'function_sid', '=', 'self', '.', '_solution', '[', "'function_sid'", ']', ',', ')'] | Create a new FunctionVersionInstance
:param unicode path: The path
:param FunctionVersionInstance.Visibility visibility: The visibility
:returns: Newly created FunctionVersionInstance
:rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionInstance | ['Create', 'a', 'new', 'FunctionVersionInstance'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/serverless/v1/service/function/function_version.py#L120-L143 |
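Reaching this list resource normally goes through the top-level REST client; the SIDs are placeholders and the accessor chain below is an assumption based on the serverless v1 path in the record:

from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
version = (client.serverless
           .services('ZSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
           .functions('ZHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
           .function_versions
           .create(path='/hello', visibility='public'))
print(version.sid)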
6,879 | pymc-devs/pymc | pymc/database/base.py | Trace._initialize | def _initialize(self, chain, length):
"""Prepare for tallying. Create a new chain."""
# If this db was loaded from the disk, it may not have its
# tallied step methods' getfuncs yet.
if self._getfunc is None:
self._getfunc = self.db.model._funs_to_tally[self.name] | python | def _initialize(self, chain, length):
"""Prepare for tallying. Create a new chain."""
# If this db was loaded from the disk, it may not have its
# tallied step methods' getfuncs yet.
if self._getfunc is None:
self._getfunc = self.db.model._funs_to_tally[self.name] | ['def', '_initialize', '(', 'self', ',', 'chain', ',', 'length', ')', ':', '# If this db was loaded from the disk, it may not have its', "# tallied step methods' getfuncs yet.", 'if', 'self', '.', '_getfunc', 'is', 'None', ':', 'self', '.', '_getfunc', '=', 'self', '.', 'db', '.', 'model', '.', '_funs_to_tally', '[', 'self', '.', 'name', ']'] | Prepare for tallying. Create a new chain. | ['Prepare', 'for', 'tallying', '.', 'Create', 'a', 'new', 'chain', '.'] | train | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/base.py#L88-L93 |
6,880 | allenai/allennlp | allennlp/data/dataset_readers/reading_comprehension/util.py | make_reading_comprehension_instance_quac | def make_reading_comprehension_instance_quac(question_list_tokens: List[List[Token]],
passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
token_span_lists: List[List[Tuple[int, int]]] = None,
yesno_list: List[int] = None,
followup_list: List[int] = None,
additional_metadata: Dict[str, Any] = None,
num_context_answers: int = 0) -> Instance:
"""
Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
Parameters
----------
question_list_tokens : ``List[List[Token]]``
An already-tokenized list of questions. Each dialog has multiple questions.
passage_tokens : ``List[Token]``
An already-tokenized passage that contains the answer to the given question.
token_indexers : ``Dict[str, TokenIndexer]``
Determines how the question and passage ``TextFields`` will be converted into tensors that
get input to a model. See :class:`TokenIndexer`.
passage_text : ``str``
The original passage text. We need this so that we can recover the actual span from the
original passage that the model predicts as the answer to the question. This is used in
official evaluation scripts.
token_span_lists : ``List[List[Tuple[int, int]]]``, optional
Indices into ``passage_tokens`` to use as the answer to the question for training. This is
a list of lists, first because there are multiple questions per dialog, and
because there might be several possible correct answer spans in the passage.
Currently, we just select the last span in this list (i.e., QuAC has multiple
annotations on the dev set; this will select the last span, which was given by the original annotator).
yesno_list : ``List[int]``
List of the affirmation bit for each question answer pairs.
followup_list : ``List[int]``
List of the continuation bit for each question answer pairs.
num_context_answers : ``int``, optional
How many answers to encode into the passage.
additional_metadata : ``Dict[str, Any]``, optional
The constructed ``metadata`` field will by default contain ``original_passage``,
``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
you want any other metadata to be associated with each instance, you can pass that in here.
This dictionary will get added to the ``metadata`` dictionary we already construct.
"""
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
fields['passage'] = passage_field
fields['question'] = ListField([TextField(q_tokens, token_indexers) for q_tokens in question_list_tokens])
metadata = {'original_passage': passage_text,
'token_offsets': passage_offsets,
'question_tokens': [[token.text for token in question_tokens] \
for question_tokens in question_list_tokens],
'passage_tokens': [token.text for token in passage_tokens], }
p1_answer_marker_list: List[Field] = []
p2_answer_marker_list: List[Field] = []
p3_answer_marker_list: List[Field] = []
def get_tag(i, i_name):
# Generate a tag to mark previous answer span in the passage.
return "<{0:d}_{1:s}>".format(i, i_name)
def mark_tag(span_start, span_end, passage_tags, prev_answer_distance):
try:
assert span_start >= 0
assert span_end >= 0
except:
raise ValueError("Previous {0:d}th answer span should have been updated!".format(prev_answer_distance))
# Modify "tags" to mark previous answer span.
if span_start == span_end:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "")
else:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "start")
passage_tags[prev_answer_distance][span_end] = get_tag(prev_answer_distance, "end")
for passage_index in range(span_start + 1, span_end):
passage_tags[prev_answer_distance][passage_index] = get_tag(prev_answer_distance, "in")
if token_span_lists:
span_start_list: List[Field] = []
span_end_list: List[Field] = []
p1_span_start, p1_span_end, p2_span_start = -1, -1, -1
p2_span_end, p3_span_start, p3_span_end = -1, -1, -1
# Looping each <<answers>>.
for question_index, answer_span_lists in enumerate(token_span_lists):
span_start, span_end = answer_span_lists[-1] # Last one is the original answer
span_start_list.append(IndexField(span_start, passage_field))
span_end_list.append(IndexField(span_end, passage_field))
prev_answer_marker_lists = [["O"] * len(passage_tokens), ["O"] * len(passage_tokens),
["O"] * len(passage_tokens), ["O"] * len(passage_tokens)]
if question_index > 0 and num_context_answers > 0:
mark_tag(p1_span_start, p1_span_end, prev_answer_marker_lists, 1)
if question_index > 1 and num_context_answers > 1:
mark_tag(p2_span_start, p2_span_end, prev_answer_marker_lists, 2)
if question_index > 2 and num_context_answers > 2:
mark_tag(p3_span_start, p3_span_end, prev_answer_marker_lists, 3)
p3_span_start = p2_span_start
p3_span_end = p2_span_end
p2_span_start = p1_span_start
p2_span_end = p1_span_end
p1_span_start = span_start
p1_span_end = span_end
if num_context_answers > 2:
p3_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[3],
passage_field,
label_namespace="answer_tags"))
if num_context_answers > 1:
p2_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[2],
passage_field,
label_namespace="answer_tags"))
if num_context_answers > 0:
p1_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[1],
passage_field,
label_namespace="answer_tags"))
fields['span_start'] = ListField(span_start_list)
fields['span_end'] = ListField(span_end_list)
if num_context_answers > 0:
fields['p1_answer_marker'] = ListField(p1_answer_marker_list)
if num_context_answers > 1:
fields['p2_answer_marker'] = ListField(p2_answer_marker_list)
if num_context_answers > 2:
fields['p3_answer_marker'] = ListField(p3_answer_marker_list)
fields['yesno_list'] = ListField( \
[LabelField(yesno, label_namespace="yesno_labels") for yesno in yesno_list])
fields['followup_list'] = ListField([LabelField(followup, label_namespace="followup_labels") \
for followup in followup_list])
metadata.update(additional_metadata)
fields['metadata'] = MetadataField(metadata)
return Instance(fields) | python | def make_reading_comprehension_instance_quac(question_list_tokens: List[List[Token]],
passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
token_span_lists: List[List[Tuple[int, int]]] = None,
yesno_list: List[int] = None,
followup_list: List[int] = None,
additional_metadata: Dict[str, Any] = None,
num_context_answers: int = 0) -> Instance:
"""
Converts a question, a passage, and an optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
Parameters
----------
question_list_tokens : ``List[List[Token]]``
An already-tokenized list of questions. Each dialog has multiple questions.
passage_tokens : ``List[Token]``
An already-tokenized passage that contains the answer to the given question.
token_indexers : ``Dict[str, TokenIndexer]``
Determines how the question and passage ``TextFields`` will be converted into tensors that
get input to a model. See :class:`TokenIndexer`.
passage_text : ``str``
The original passage text. We need this so that we can recover the actual span from the
original passage that the model predicts as the answer to the question. This is used in
official evaluation scripts.
token_span_lists : ``List[List[Tuple[int, int]]]``, optional
Indices into ``passage_tokens`` to use as the answer to the question for training. This is
a list of lists, first because there are multiple questions per dialog, and
because there might be several possible correct answer spans in the passage.
Currently, we just select the last span in this list (i.e., QuAC has multiple
annotations on the dev set; this will select the last span, which was given by the original annotator).
yesno_list : ``List[int]``
List of the affirmation bit for each question answer pairs.
followup_list : ``List[int]``
List of the continuation bit for each question answer pairs.
num_context_answers : ``int``, optional
How many answers to encode into the passage.
additional_metadata : ``Dict[str, Any]``, optional
The constructed ``metadata`` field will by default contain ``original_passage``,
``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
you want any other metadata to be associated with each instance, you can pass that in here.
This dictionary will get added to the ``metadata`` dictionary we already construct.
"""
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
fields['passage'] = passage_field
fields['question'] = ListField([TextField(q_tokens, token_indexers) for q_tokens in question_list_tokens])
metadata = {'original_passage': passage_text,
'token_offsets': passage_offsets,
'question_tokens': [[token.text for token in question_tokens] \
for question_tokens in question_list_tokens],
'passage_tokens': [token.text for token in passage_tokens], }
p1_answer_marker_list: List[Field] = []
p2_answer_marker_list: List[Field] = []
p3_answer_marker_list: List[Field] = []
def get_tag(i, i_name):
# Generate a tag to mark previous answer span in the passage.
return "<{0:d}_{1:s}>".format(i, i_name)
def mark_tag(span_start, span_end, passage_tags, prev_answer_distance):
try:
assert span_start >= 0
assert span_end >= 0
except:
raise ValueError("Previous {0:d}th answer span should have been updated!".format(prev_answer_distance))
# Modify "tags" to mark previous answer span.
if span_start == span_end:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "")
else:
passage_tags[prev_answer_distance][span_start] = get_tag(prev_answer_distance, "start")
passage_tags[prev_answer_distance][span_end] = get_tag(prev_answer_distance, "end")
for passage_index in range(span_start + 1, span_end):
passage_tags[prev_answer_distance][passage_index] = get_tag(prev_answer_distance, "in")
if token_span_lists:
span_start_list: List[Field] = []
span_end_list: List[Field] = []
p1_span_start, p1_span_end, p2_span_start = -1, -1, -1
p2_span_end, p3_span_start, p3_span_end = -1, -1, -1
# Looping each <<answers>>.
for question_index, answer_span_lists in enumerate(token_span_lists):
span_start, span_end = answer_span_lists[-1] # Last one is the original answer
span_start_list.append(IndexField(span_start, passage_field))
span_end_list.append(IndexField(span_end, passage_field))
prev_answer_marker_lists = [["O"] * len(passage_tokens), ["O"] * len(passage_tokens),
["O"] * len(passage_tokens), ["O"] * len(passage_tokens)]
if question_index > 0 and num_context_answers > 0:
mark_tag(p1_span_start, p1_span_end, prev_answer_marker_lists, 1)
if question_index > 1 and num_context_answers > 1:
mark_tag(p2_span_start, p2_span_end, prev_answer_marker_lists, 2)
if question_index > 2 and num_context_answers > 2:
mark_tag(p3_span_start, p3_span_end, prev_answer_marker_lists, 3)
p3_span_start = p2_span_start
p3_span_end = p2_span_end
p2_span_start = p1_span_start
p2_span_end = p1_span_end
p1_span_start = span_start
p1_span_end = span_end
if num_context_answers > 2:
p3_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[3],
passage_field,
label_namespace="answer_tags"))
if num_context_answers > 1:
p2_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[2],
passage_field,
label_namespace="answer_tags"))
if num_context_answers > 0:
p1_answer_marker_list.append(SequenceLabelField(prev_answer_marker_lists[1],
passage_field,
label_namespace="answer_tags"))
fields['span_start'] = ListField(span_start_list)
fields['span_end'] = ListField(span_end_list)
if num_context_answers > 0:
fields['p1_answer_marker'] = ListField(p1_answer_marker_list)
if num_context_answers > 1:
fields['p2_answer_marker'] = ListField(p2_answer_marker_list)
if num_context_answers > 2:
fields['p3_answer_marker'] = ListField(p3_answer_marker_list)
fields['yesno_list'] = ListField( \
[LabelField(yesno, label_namespace="yesno_labels") for yesno in yesno_list])
fields['followup_list'] = ListField([LabelField(followup, label_namespace="followup_labels") \
for followup in followup_list])
metadata.update(additional_metadata)
fields['metadata'] = MetadataField(metadata)
return Instance(fields) | ['def', 'make_reading_comprehension_instance_quac', '(', 'question_list_tokens', ':', 'List', '[', 'List', '[', 'Token', ']', ']', ',', 'passage_tokens', ':', 'List', '[', 'Token', ']', ',', 'token_indexers', ':', 'Dict', '[', 'str', ',', 'TokenIndexer', ']', ',', 'passage_text', ':', 'str', ',', 'token_span_lists', ':', 'List', '[', 'List', '[', 'Tuple', '[', 'int', ',', 'int', ']', ']', ']', '=', 'None', ',', 'yesno_list', ':', 'List', '[', 'int', ']', '=', 'None', ',', 'followup_list', ':', 'List', '[', 'int', ']', '=', 'None', ',', 'additional_metadata', ':', 'Dict', '[', 'str', ',', 'Any', ']', '=', 'None', ',', 'num_context_answers', ':', 'int', '=', '0', ')', '->', 'Instance', ':', 'additional_metadata', '=', 'additional_metadata', 'or', '{', '}', 'fields', ':', 'Dict', '[', 'str', ',', 'Field', ']', '=', '{', '}', 'passage_offsets', '=', '[', '(', 'token', '.', 'idx', ',', 'token', '.', 'idx', '+', 'len', '(', 'token', '.', 'text', ')', ')', 'for', 'token', 'in', 'passage_tokens', ']', '# This is separate so we can reference it later with a known type.', 'passage_field', '=', 'TextField', '(', 'passage_tokens', ',', 'token_indexers', ')', 'fields', '[', "'passage'", ']', '=', 'passage_field', 'fields', '[', "'question'", ']', '=', 'ListField', '(', '[', 'TextField', '(', 'q_tokens', ',', 'token_indexers', ')', 'for', 'q_tokens', 'in', 'question_list_tokens', ']', ')', 'metadata', '=', '{', "'original_passage'", ':', 'passage_text', ',', "'token_offsets'", ':', 'passage_offsets', ',', "'question_tokens'", ':', '[', '[', 'token', '.', 'text', 'for', 'token', 'in', 'question_tokens', ']', 'for', 'question_tokens', 'in', 'question_list_tokens', ']', ',', "'passage_tokens'", ':', '[', 'token', '.', 'text', 'for', 'token', 'in', 'passage_tokens', ']', ',', '}', 'p1_answer_marker_list', ':', 'List', '[', 'Field', ']', '=', '[', ']', 'p2_answer_marker_list', ':', 'List', '[', 'Field', ']', '=', '[', ']', 'p3_answer_marker_list', ':', 'List', '[', 'Field', ']', '=', '[', ']', 'def', 'get_tag', '(', 'i', ',', 'i_name', ')', ':', '# Generate a tag to mark previous answer span in the passage.', 'return', '"<{0:d}_{1:s}>"', '.', 'format', '(', 'i', ',', 'i_name', ')', 'def', 'mark_tag', '(', 'span_start', ',', 'span_end', ',', 'passage_tags', ',', 'prev_answer_distance', ')', ':', 'try', ':', 'assert', 'span_start', '>=', '0', 'assert', 'span_end', '>=', '0', 'except', ':', 'raise', 'ValueError', '(', '"Previous {0:d}th answer span should have been updated!"', '.', 'format', '(', 'prev_answer_distance', ')', ')', '# Modify "tags" to mark previous answer span.', 'if', 'span_start', '==', 'span_end', ':', 'passage_tags', '[', 'prev_answer_distance', ']', '[', 'span_start', ']', '=', 'get_tag', '(', 'prev_answer_distance', ',', '""', ')', 'else', ':', 'passage_tags', '[', 'prev_answer_distance', ']', '[', 'span_start', ']', '=', 'get_tag', '(', 'prev_answer_distance', ',', '"start"', ')', 'passage_tags', '[', 'prev_answer_distance', ']', '[', 'span_end', ']', '=', 'get_tag', '(', 'prev_answer_distance', ',', '"end"', ')', 'for', 'passage_index', 'in', 'range', '(', 'span_start', '+', '1', ',', 'span_end', ')', ':', 'passage_tags', '[', 'prev_answer_distance', ']', '[', 'passage_index', ']', '=', 'get_tag', '(', 'prev_answer_distance', ',', '"in"', ')', 'if', 'token_span_lists', ':', 'span_start_list', ':', 'List', '[', 'Field', ']', '=', '[', ']', 'span_end_list', ':', 'List', '[', 'Field', ']', '=', '[', ']', 'p1_span_start', ',', 'p1_span_end', ',', 'p2_span_start', 
'=', '-', '1', ',', '-', '1', ',', '-', '1', 'p2_span_end', ',', 'p3_span_start', ',', 'p3_span_end', '=', '-', '1', ',', '-', '1', ',', '-', '1', '# Looping each <<answers>>.', 'for', 'question_index', ',', 'answer_span_lists', 'in', 'enumerate', '(', 'token_span_lists', ')', ':', 'span_start', ',', 'span_end', '=', 'answer_span_lists', '[', '-', '1', ']', '# Last one is the original answer', 'span_start_list', '.', 'append', '(', 'IndexField', '(', 'span_start', ',', 'passage_field', ')', ')', 'span_end_list', '.', 'append', '(', 'IndexField', '(', 'span_end', ',', 'passage_field', ')', ')', 'prev_answer_marker_lists', '=', '[', '[', '"O"', ']', '*', 'len', '(', 'passage_tokens', ')', ',', '[', '"O"', ']', '*', 'len', '(', 'passage_tokens', ')', ',', '[', '"O"', ']', '*', 'len', '(', 'passage_tokens', ')', ',', '[', '"O"', ']', '*', 'len', '(', 'passage_tokens', ')', ']', 'if', 'question_index', '>', '0', 'and', 'num_context_answers', '>', '0', ':', 'mark_tag', '(', 'p1_span_start', ',', 'p1_span_end', ',', 'prev_answer_marker_lists', ',', '1', ')', 'if', 'question_index', '>', '1', 'and', 'num_context_answers', '>', '1', ':', 'mark_tag', '(', 'p2_span_start', ',', 'p2_span_end', ',', 'prev_answer_marker_lists', ',', '2', ')', 'if', 'question_index', '>', '2', 'and', 'num_context_answers', '>', '2', ':', 'mark_tag', '(', 'p3_span_start', ',', 'p3_span_end', ',', 'prev_answer_marker_lists', ',', '3', ')', 'p3_span_start', '=', 'p2_span_start', 'p3_span_end', '=', 'p2_span_end', 'p2_span_start', '=', 'p1_span_start', 'p2_span_end', '=', 'p1_span_end', 'p1_span_start', '=', 'span_start', 'p1_span_end', '=', 'span_end', 'if', 'num_context_answers', '>', '2', ':', 'p3_answer_marker_list', '.', 'append', '(', 'SequenceLabelField', '(', 'prev_answer_marker_lists', '[', '3', ']', ',', 'passage_field', ',', 'label_namespace', '=', '"answer_tags"', ')', ')', 'if', 'num_context_answers', '>', '1', ':', 'p2_answer_marker_list', '.', 'append', '(', 'SequenceLabelField', '(', 'prev_answer_marker_lists', '[', '2', ']', ',', 'passage_field', ',', 'label_namespace', '=', '"answer_tags"', ')', ')', 'if', 'num_context_answers', '>', '0', ':', 'p1_answer_marker_list', '.', 'append', '(', 'SequenceLabelField', '(', 'prev_answer_marker_lists', '[', '1', ']', ',', 'passage_field', ',', 'label_namespace', '=', '"answer_tags"', ')', ')', 'fields', '[', "'span_start'", ']', '=', 'ListField', '(', 'span_start_list', ')', 'fields', '[', "'span_end'", ']', '=', 'ListField', '(', 'span_end_list', ')', 'if', 'num_context_answers', '>', '0', ':', 'fields', '[', "'p1_answer_marker'", ']', '=', 'ListField', '(', 'p1_answer_marker_list', ')', 'if', 'num_context_answers', '>', '1', ':', 'fields', '[', "'p2_answer_marker'", ']', '=', 'ListField', '(', 'p2_answer_marker_list', ')', 'if', 'num_context_answers', '>', '2', ':', 'fields', '[', "'p3_answer_marker'", ']', '=', 'ListField', '(', 'p3_answer_marker_list', ')', 'fields', '[', "'yesno_list'", ']', '=', 'ListField', '(', '[', 'LabelField', '(', 'yesno', ',', 'label_namespace', '=', '"yesno_labels"', ')', 'for', 'yesno', 'in', 'yesno_list', ']', ')', 'fields', '[', "'followup_list'", ']', '=', 'ListField', '(', '[', 'LabelField', '(', 'followup', ',', 'label_namespace', '=', '"followup_labels"', ')', 'for', 'followup', 'in', 'followup_list', ']', ')', 'metadata', '.', 'update', '(', 'additional_metadata', ')', 'fields', '[', "'metadata'", ']', '=', 'MetadataField', '(', 'metadata', ')', 'return', 'Instance', '(', 'fields', ')'] | Converts a question, a passage, and an 
optional answer (or answers) to an ``Instance`` for use
in a reading comprehension model.
Creates an ``Instance`` with at least these fields: ``question`` and ``passage``, both
``TextFields``; and ``metadata``, a ``MetadataField``. Additionally, if both ``answer_texts``
and ``char_span_starts`` are given, the ``Instance`` has ``span_start`` and ``span_end``
fields, which are both ``IndexFields``.
Parameters
----------
question_list_tokens : ``List[List[Token]]``
An already-tokenized list of questions. Each dialog has multiple questions.
passage_tokens : ``List[Token]``
An already-tokenized passage that contains the answer to the given question.
token_indexers : ``Dict[str, TokenIndexer]``
Determines how the question and passage ``TextFields`` will be converted into tensors that
get input to a model. See :class:`TokenIndexer`.
passage_text : ``str``
The original passage text. We need this so that we can recover the actual span from the
original passage that the model predicts as the answer to the question. This is used in
official evaluation scripts.
token_span_lists : ``List[List[Tuple[int, int]]]``, optional
Indices into ``passage_tokens`` to use as the answer to the question for training. This is
a list of lists, first because there are multiple questions per dialog, and
because there might be several possible correct answer spans in the passage.
Currently, we just select the last span in this list (i.e., QuAC has multiple
annotations on the dev set; this will select the last span, which was given by the original annotator).
yesno_list : ``List[int]``
List of the affirmation bit for each question-answer pair.
followup_list : ``List[int]``
List of the continuation bit for each question-answer pair.
num_context_answers : ``int``, optional
How many answers to encode into the passage.
additional_metadata : ``Dict[str, Any]``, optional
The constructed ``metadata`` field will by default contain ``original_passage``,
``token_offsets``, ``question_tokens``, ``passage_tokens``, and ``answer_texts`` keys. If
you want any other metadata to be associated with each instance, you can pass that in here.
This dictionary will get added to the ``metadata`` dictionary we already construct. | ['Converts', 'a', 'question', 'a', 'passage', 'and', 'an', 'optional', 'answer', '(', 'or', 'answers', ')', 'to', 'an', 'Instance', 'for', 'use', 'in', 'a', 'reading', 'comprehension', 'model', '.'] | train | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/reading_comprehension/util.py#L217-L351 |
6,881 | numberoverzero/bloop | bloop/conditions.py | printable_name | def printable_name(column, path=None):
"""Provided for debug output when rendering conditions.
User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar
"""
pieces = [column.name]
path = path or path_of(column)
for segment in path:
if isinstance(segment, str):
pieces.append(segment)
else:
pieces[-1] += "[{}]".format(segment)
return ".".join(pieces) | python | def printable_name(column, path=None):
"""Provided for debug output when rendering conditions.
User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar
"""
pieces = [column.name]
path = path or path_of(column)
for segment in path:
if isinstance(segment, str):
pieces.append(segment)
else:
pieces[-1] += "[{}]".format(segment)
return ".".join(pieces) | ['def', 'printable_name', '(', 'column', ',', 'path', '=', 'None', ')', ':', 'pieces', '=', '[', 'column', '.', 'name', ']', 'path', '=', 'path', 'or', 'path_of', '(', 'column', ')', 'for', 'segment', 'in', 'path', ':', 'if', 'isinstance', '(', 'segment', ',', 'str', ')', ':', 'pieces', '.', 'append', '(', 'segment', ')', 'else', ':', 'pieces', '[', '-', '1', ']', '+=', '"[{}]"', '.', 'format', '(', 'segment', ')', 'return', '"."', '.', 'join', '(', 'pieces', ')'] | Provided for debug output when rendering conditions.
User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar | ['Provided', 'for', 'debug', 'output', 'when', 'rendering', 'conditions', '.'] | train | https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L887-L899 |
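As a quick illustration of the rendering behaviour described in the docstring above, the following standalone sketch rebuilds the same dotted/indexed name from a base column name and a path of string and integer segments (the helper name here is illustrative and not part of bloop):

def dotted_name(base, path):
    # Mirror the formatting rule: string segments become ".segment",
    # integer segments become "[i]" appended to the previous piece.
    pieces = [base]
    for segment in path:
        if isinstance(segment, str):
            pieces.append(segment)
        else:
            pieces[-1] += "[{}]".format(segment)
    return ".".join(pieces)

# Expected to print: name[3].foo[0].bar
print(dotted_name("name", [3, "foo", 0, "bar"]))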
6,882 | google/openhtf | openhtf/plugs/usb/shell_service.py | AsyncCommandHandle.wait | def wait(self, timeout_ms=None):
"""Block until this command has completed.
Args:
timeout_ms: Timeout, in milliseconds, to wait.
Returns:
Output of the command if it completed and self.stdout is a StringIO
object or was passed in as None. Returns True if the command completed but
stdout was provided (and was not a StringIO object). Returns None if the
timeout expired before the command completed. Be careful to check the
return value explicitly for None, as the output may be ''.
"""
closed = timeouts.loop_until_timeout_or_true(
timeouts.PolledTimeout.from_millis(timeout_ms),
self.stream.is_closed, .1)
if closed:
if hasattr(self.stdout, 'getvalue'):
return self.stdout.getvalue()
return True
return None | python | def wait(self, timeout_ms=None):
"""Block until this command has completed.
Args:
timeout_ms: Timeout, in milliseconds, to wait.
Returns:
Output of the command if it completed and self.stdout is a StringIO
object or was passed in as None. Returns True if the command completed but
stdout was provided (and was not a StringIO object). Returns None if the
timeout expired before the command completed. Be careful to check the
return value explicitly for None, as the output may be ''.
"""
closed = timeouts.loop_until_timeout_or_true(
timeouts.PolledTimeout.from_millis(timeout_ms),
self.stream.is_closed, .1)
if closed:
if hasattr(self.stdout, 'getvalue'):
return self.stdout.getvalue()
return True
return None | ['def', 'wait', '(', 'self', ',', 'timeout_ms', '=', 'None', ')', ':', 'closed', '=', 'timeouts', '.', 'loop_until_timeout_or_true', '(', 'timeouts', '.', 'PolledTimeout', '.', 'from_millis', '(', 'timeout_ms', ')', ',', 'self', '.', 'stream', '.', 'is_closed', ',', '.1', ')', 'if', 'closed', ':', 'if', 'hasattr', '(', 'self', '.', 'stdout', ',', "'getvalue'", ')', ':', 'return', 'self', '.', 'stdout', '.', 'getvalue', '(', ')', 'return', 'True', 'return', 'None'] | Block until this command has completed.
Args:
timeout_ms: Timeout, in milliseconds, to wait.
Returns:
Output of the command if it complete and self.stdout is a StringIO
object or was passed in as None. Returns True if the command completed but
stdout was provided (and was not a StringIO object). Returns None if the
timeout expired before the command completed. Be careful to check the
return value explicitly for None, as the output may be ''. | ['Block', 'until', 'this', 'command', 'has', 'completed', '.'] | train | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/shell_service.py#L169-L189 |
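The wait() above leans on a poll-until-timeout helper from openhtf's timeouts module; a minimal stand-alone sketch of that polling pattern (not the actual openhtf implementation) looks like this:

import time

def loop_until_timeout_or_true(timeout_s, predicate, poll_interval_s=0.1):
    # Poll predicate() until it returns True or the timeout expires.
    deadline = None if timeout_s is None else time.monotonic() + timeout_s
    while True:
        if predicate():
            return True
        if deadline is not None and time.monotonic() >= deadline:
            return False
        time.sleep(poll_interval_s)

# Example: wait up to 0.5 s for a condition that never becomes true.
print(loop_until_timeout_or_true(0.5, lambda: False))  # prints False after ~0.5 s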
6,883 | mfcloud/python-zvm-sdk | smtLayer/makeVM.py | createVM | def createVM(rh):
"""
Create a virtual machine in z/VM.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter makeVM.createVM")
dirLines = []
dirLines.append("USER " + rh.userid + " " + rh.parms['pw'] +
" " + rh.parms['priMemSize'] + " " +
rh.parms['maxMemSize'] + " " + rh.parms['privClasses'])
if 'profName' in rh.parms:
dirLines.append("INCLUDE " + rh.parms['profName'])
if 'maxCPU' in rh.parms:
dirLines.append("MACHINE ESA %i" % rh.parms['maxCPU'])
dirLines.append("CPU 00 BASE")
if 'cpuCnt' in rh.parms:
for i in range(1, rh.parms['cpuCnt']):
dirLines.append("CPU %0.2X" % i)
if 'ipl' in rh.parms:
ipl_string = "IPL %s " % rh.parms['ipl']
if 'iplParam' in rh.parms:
ipl_string += ("PARM %s " % rh.parms['iplParam'])
if 'iplLoadparam' in rh.parms:
ipl_string += ("LOADPARM %s " % rh.parms['iplLoadparam'])
dirLines.append(ipl_string)
if 'byUsers' in rh.parms:
for user in rh.parms['byUsers']:
dirLines.append("LOGONBY " + user)
priMem = rh.parms['priMemSize'].upper()
maxMem = rh.parms['maxMemSize'].upper()
if 'setReservedMem' in rh.parms and (priMem != maxMem):
reservedSize = getReservedMemSize(rh, priMem, maxMem)
if rh.results['overallRC'] != 0:
rh.printSysLog("Exit makeVM.createVM, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
if reservedSize != '0M':
dirLines.append("COMMAND DEF STOR RESERVED %s" % reservedSize)
# Construct the temporary file for the USER entry.
fd, tempFile = mkstemp()
to_write = '\n'.join(dirLines) + '\n'
os.write(fd, to_write.encode())
os.close(fd)
parms = ["-T", rh.userid, "-f", tempFile]
results = invokeSMCLI(rh, "Image_Create_DM", parms)
if results['overallRC'] != 0:
# SMAPI API failed.
rh.printLn("ES", results['response'])
rh.updateResults(results) # Use results from invokeSMCLI
os.remove(tempFile)
rh.printSysLog("Exit makeVM.createVM, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | python | def createVM(rh):
"""
Create a virtual machine in z/VM.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter makeVM.createVM")
dirLines = []
dirLines.append("USER " + rh.userid + " " + rh.parms['pw'] +
" " + rh.parms['priMemSize'] + " " +
rh.parms['maxMemSize'] + " " + rh.parms['privClasses'])
if 'profName' in rh.parms:
dirLines.append("INCLUDE " + rh.parms['profName'])
if 'maxCPU' in rh.parms:
dirLines.append("MACHINE ESA %i" % rh.parms['maxCPU'])
dirLines.append("CPU 00 BASE")
if 'cpuCnt' in rh.parms:
for i in range(1, rh.parms['cpuCnt']):
dirLines.append("CPU %0.2X" % i)
if 'ipl' in rh.parms:
ipl_string = "IPL %s " % rh.parms['ipl']
if 'iplParam' in rh.parms:
ipl_string += ("PARM %s " % rh.parms['iplParam'])
if 'iplLoadparam' in rh.parms:
ipl_string += ("LOADPARM %s " % rh.parms['iplLoadparam'])
dirLines.append(ipl_string)
if 'byUsers' in rh.parms:
for user in rh.parms['byUsers']:
dirLines.append("LOGONBY " + user)
priMem = rh.parms['priMemSize'].upper()
maxMem = rh.parms['maxMemSize'].upper()
if 'setReservedMem' in rh.parms and (priMem != maxMem):
reservedSize = getReservedMemSize(rh, priMem, maxMem)
if rh.results['overallRC'] != 0:
rh.printSysLog("Exit makeVM.createVM, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
if reservedSize != '0M':
dirLines.append("COMMAND DEF STOR RESERVED %s" % reservedSize)
# Construct the temporary file for the USER entry.
fd, tempFile = mkstemp()
to_write = '\n'.join(dirLines) + '\n'
os.write(fd, to_write.encode())
os.close(fd)
parms = ["-T", rh.userid, "-f", tempFile]
results = invokeSMCLI(rh, "Image_Create_DM", parms)
if results['overallRC'] != 0:
# SMAPI API failed.
rh.printLn("ES", results['response'])
rh.updateResults(results) # Use results from invokeSMCLI
os.remove(tempFile)
rh.printSysLog("Exit makeVM.createVM, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | ['def', 'createVM', '(', 'rh', ')', ':', 'rh', '.', 'printSysLog', '(', '"Enter makeVM.createVM"', ')', 'dirLines', '=', '[', ']', 'dirLines', '.', 'append', '(', '"USER "', '+', 'rh', '.', 'userid', '+', '" "', '+', 'rh', '.', 'parms', '[', "'pw'", ']', '+', '" "', '+', 'rh', '.', 'parms', '[', "'priMemSize'", ']', '+', '" "', '+', 'rh', '.', 'parms', '[', "'maxMemSize'", ']', '+', '" "', '+', 'rh', '.', 'parms', '[', "'privClasses'", ']', ')', 'if', "'profName'", 'in', 'rh', '.', 'parms', ':', 'dirLines', '.', 'append', '(', '"INCLUDE "', '+', 'rh', '.', 'parms', '[', "'profName'", ']', ')', 'if', "'maxCPU'", 'in', 'rh', '.', 'parms', ':', 'dirLines', '.', 'append', '(', '"MACHINE ESA %i"', '%', 'rh', '.', 'parms', '[', "'maxCPU'", ']', ')', 'dirLines', '.', 'append', '(', '"CPU 00 BASE"', ')', 'if', "'cpuCnt'", 'in', 'rh', '.', 'parms', ':', 'for', 'i', 'in', 'range', '(', '1', ',', 'rh', '.', 'parms', '[', "'cpuCnt'", ']', ')', ':', 'dirLines', '.', 'append', '(', '"CPU %0.2X"', '%', 'i', ')', 'if', "'ipl'", 'in', 'rh', '.', 'parms', ':', 'ipl_string', '=', '"IPL %s "', '%', 'rh', '.', 'parms', '[', "'ipl'", ']', 'if', "'iplParam'", 'in', 'rh', '.', 'parms', ':', 'ipl_string', '+=', '(', '"PARM %s "', '%', 'rh', '.', 'parms', '[', "'iplParam'", ']', ')', 'if', "'iplLoadparam'", 'in', 'rh', '.', 'parms', ':', 'ipl_string', '+=', '(', '"LOADPARM %s "', '%', 'rh', '.', 'parms', '[', "'iplLoadparam'", ']', ')', 'dirLines', '.', 'append', '(', 'ipl_string', ')', 'if', "'byUsers'", 'in', 'rh', '.', 'parms', ':', 'for', 'user', 'in', 'rh', '.', 'parms', '[', "'byUsers'", ']', ':', 'dirLines', '.', 'append', '(', '"LOGONBY "', '+', 'user', ')', 'priMem', '=', 'rh', '.', 'parms', '[', "'priMemSize'", ']', '.', 'upper', '(', ')', 'maxMem', '=', 'rh', '.', 'parms', '[', "'maxMemSize'", ']', '.', 'upper', '(', ')', 'if', "'setReservedMem'", 'in', 'rh', '.', 'parms', 'and', '(', 'priMem', '!=', 'maxMem', ')', ':', 'reservedSize', '=', 'getReservedMemSize', '(', 'rh', ',', 'priMem', ',', 'maxMem', ')', 'if', 'rh', '.', 'results', '[', "'overallRC'", ']', '!=', '0', ':', 'rh', '.', 'printSysLog', '(', '"Exit makeVM.createVM, rc: "', '+', 'str', '(', 'rh', '.', 'results', '[', "'overallRC'", ']', ')', ')', 'return', 'rh', '.', 'results', '[', "'overallRC'", ']', 'if', 'reservedSize', '!=', "'0M'", ':', 'dirLines', '.', 'append', '(', '"COMMAND DEF STOR RESERVED %s"', '%', 'reservedSize', ')', '# Construct the temporary file for the USER entry.', 'fd', ',', 'tempFile', '=', 'mkstemp', '(', ')', 'to_write', '=', "'\\n'", '.', 'join', '(', 'dirLines', ')', '+', "'\\n'", 'os', '.', 'write', '(', 'fd', ',', 'to_write', '.', 'encode', '(', ')', ')', 'os', '.', 'close', '(', 'fd', ')', 'parms', '=', '[', '"-T"', ',', 'rh', '.', 'userid', ',', '"-f"', ',', 'tempFile', ']', 'results', '=', 'invokeSMCLI', '(', 'rh', ',', '"Image_Create_DM"', ',', 'parms', ')', 'if', 'results', '[', "'overallRC'", ']', '!=', '0', ':', '# SMAPI API failed.', 'rh', '.', 'printLn', '(', '"ES"', ',', 'results', '[', "'response'", ']', ')', 'rh', '.', 'updateResults', '(', 'results', ')', '# Use results from invokeSMCLI', 'os', '.', 'remove', '(', 'tempFile', ')', 'rh', '.', 'printSysLog', '(', '"Exit makeVM.createVM, rc: "', '+', 'str', '(', 'rh', '.', 'results', '[', "'overallRC'", ']', ')', ')', 'return', 'rh', '.', 'results', '[', "'overallRC'", ']'] | Create a virtual machine in z/VM.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error | ['Create', 'a', 'virtual', 'machine', 'in', 'z', '/', 'VM', '.'] | train | https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/makeVM.py#L79-L154 |
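The directory-entry handling in createVM follows a common build-lines / write-tempfile / clean-up pattern; a reduced sketch of just that pattern, with a placeholder where the SMAPI call would go, is shown below (userid and password values are made up):

import os
from tempfile import mkstemp

def write_user_entry(userid, password, mem="1G", max_mem="2G", classes="G"):
    # Build the USER directory lines, write them to a temporary file and
    # return its path; the caller is responsible for deleting the file.
    dir_lines = [
        "USER {0} {1} {2} {3} {4}".format(userid, password, mem, max_mem, classes),
        "CPU 00 BASE",
    ]
    fd, temp_file = mkstemp()
    os.write(fd, ("\n".join(dir_lines) + "\n").encode())
    os.close(fd)
    return temp_file

path = write_user_entry("LINUX001", "PASSW0RD")
try:
    with open(path) as handle:
        print(handle.read())   # stand-in for invoking Image_Create_DM with "-f" path
finally:
    os.remove(path)            # mirror the cleanup in createVM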
6,884 | log2timeline/plaso | plaso/parsers/pls_recall.py | PlsRecallParser.ParseFileObject | def ParseFileObject(self, parser_mediator, file_object):
"""Parses a PLSRecall.dat file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_offset = 0
file_size = file_object.get_size()
record_map = self._GetDataTypeMap('pls_recall_record')
while file_offset < file_size:
try:
pls_record, record_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, record_map)
except (ValueError, errors.ParseError) as exception:
if file_offset == 0:
raise errors.UnableToParseFile('Unable to parse first record.')
parser_mediator.ProduceExtractionWarning((
'unable to parse record at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
break
if file_offset == 0 and not self._VerifyRecord(pls_record):
raise errors.UnableToParseFile('Verification of first record failed.')
event_data = PlsRecallEventData()
event_data.database_name = pls_record.database_name.rstrip('\x00')
event_data.sequence_number = pls_record.sequence_number
event_data.offset = file_offset
event_data.query = pls_record.query.rstrip('\x00')
event_data.username = pls_record.username.rstrip('\x00')
date_time = dfdatetime_delphi_date_time.DelphiDateTime(
timestamp=pls_record.last_written_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset += record_data_size | python | def ParseFileObject(self, parser_mediator, file_object):
"""Parses a PLSRecall.dat file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_offset = 0
file_size = file_object.get_size()
record_map = self._GetDataTypeMap('pls_recall_record')
while file_offset < file_size:
try:
pls_record, record_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, record_map)
except (ValueError, errors.ParseError) as exception:
if file_offset == 0:
raise errors.UnableToParseFile('Unable to parse first record.')
parser_mediator.ProduceExtractionWarning((
'unable to parse record at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
break
if file_offset == 0 and not self._VerifyRecord(pls_record):
raise errors.UnableToParseFile('Verification of first record failed.')
event_data = PlsRecallEventData()
event_data.database_name = pls_record.database_name.rstrip('\x00')
event_data.sequence_number = pls_record.sequence_number
event_data.offset = file_offset
event_data.query = pls_record.query.rstrip('\x00')
event_data.username = pls_record.username.rstrip('\x00')
date_time = dfdatetime_delphi_date_time.DelphiDateTime(
timestamp=pls_record.last_written_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
file_offset += record_data_size | ['def', 'ParseFileObject', '(', 'self', ',', 'parser_mediator', ',', 'file_object', ')', ':', 'file_offset', '=', '0', 'file_size', '=', 'file_object', '.', 'get_size', '(', ')', 'record_map', '=', 'self', '.', '_GetDataTypeMap', '(', "'pls_recall_record'", ')', 'while', 'file_offset', '<', 'file_size', ':', 'try', ':', 'pls_record', ',', 'record_data_size', '=', 'self', '.', '_ReadStructureFromFileObject', '(', 'file_object', ',', 'file_offset', ',', 'record_map', ')', 'except', '(', 'ValueError', ',', 'errors', '.', 'ParseError', ')', 'as', 'exception', ':', 'if', 'file_offset', '==', '0', ':', 'raise', 'errors', '.', 'UnableToParseFile', '(', "'Unable to parse first record.'", ')', 'parser_mediator', '.', 'ProduceExtractionWarning', '(', '(', "'unable to parse record at offset: 0x{0:08x} with error: '", "'{1!s}'", ')', '.', 'format', '(', 'file_offset', ',', 'exception', ')', ')', 'break', 'if', 'file_offset', '==', '0', 'and', 'not', 'self', '.', '_VerifyRecord', '(', 'pls_record', ')', ':', 'raise', 'errors', '.', 'UnableToParseFile', '(', "'Verification of first record failed.'", ')', 'event_data', '=', 'PlsRecallEventData', '(', ')', 'event_data', '.', 'database_name', '=', 'pls_record', '.', 'database_name', '.', 'rstrip', '(', "'\\x00'", ')', 'event_data', '.', 'sequence_number', '=', 'pls_record', '.', 'sequence_number', 'event_data', '.', 'offset', '=', 'file_offset', 'event_data', '.', 'query', '=', 'pls_record', '.', 'query', '.', 'rstrip', '(', "'\\x00'", ')', 'event_data', '.', 'username', '=', 'pls_record', '.', 'username', '.', 'rstrip', '(', "'\\x00'", ')', 'date_time', '=', 'dfdatetime_delphi_date_time', '.', 'DelphiDateTime', '(', 'timestamp', '=', 'pls_record', '.', 'last_written_time', ')', 'event', '=', 'time_events', '.', 'DateTimeValuesEvent', '(', 'date_time', ',', 'definitions', '.', 'TIME_DESCRIPTION_WRITTEN', ')', 'parser_mediator', '.', 'ProduceEventWithEventData', '(', 'event', ',', 'event_data', ')', 'file_offset', '+=', 'record_data_size'] | Parses a PLSRecall.dat file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. | ['Parses', 'a', 'PLSRecall', '.', 'dat', 'file', '-', 'like', 'object', '.'] | train | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/pls_recall.py#L98-L142 |
6,885 | ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_trajectory.py | NCEITrajectoryBase.check_trajectory_id | def check_trajectory_id(self, dataset):
'''
Checks that if a variable exists for the trajectory id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
exists_ctx = TestCtx(BaseCheck.MEDIUM, 'Variable defining "trajectory_id" exists')
trajectory_ids = dataset.get_variables_by_attributes(cf_role='trajectory_id')
# No need to check
exists_ctx.assert_true(trajectory_ids, 'variable defining cf_role="trajectory_id" exists')
if not trajectory_ids:
return exists_ctx.to_result()
results.append(exists_ctx.to_result())
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the {} variable'.format(trajectory_ids[0].name))
test_ctx.assert_true(
getattr(trajectory_ids[0], 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
results.append(test_ctx.to_result())
return results | python | def check_trajectory_id(self, dataset):
'''
Checks that if a variable exists for the trajectory id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
exists_ctx = TestCtx(BaseCheck.MEDIUM, 'Variable defining "trajectory_id" exists')
trajectory_ids = dataset.get_variables_by_attributes(cf_role='trajectory_id')
# No need to check
exists_ctx.assert_true(trajectory_ids, 'variable defining cf_role="trajectory_id" exists')
if not trajectory_ids:
return exists_ctx.to_result()
results.append(exists_ctx.to_result())
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the {} variable'.format(trajectory_ids[0].name))
test_ctx.assert_true(
getattr(trajectory_ids[0], 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
results.append(test_ctx.to_result())
return results | ['def', 'check_trajectory_id', '(', 'self', ',', 'dataset', ')', ':', 'results', '=', '[', ']', 'exists_ctx', '=', 'TestCtx', '(', 'BaseCheck', '.', 'MEDIUM', ',', '\'Variable defining "trajectory_id" exists\'', ')', 'trajectory_ids', '=', 'dataset', '.', 'get_variables_by_attributes', '(', 'cf_role', '=', "'trajectory_id'", ')', '# No need to check', 'exists_ctx', '.', 'assert_true', '(', 'trajectory_ids', ',', '\'variable defining cf_role="trajectory_id" exists\'', ')', 'if', 'not', 'trajectory_ids', ':', 'return', 'exists_ctx', '.', 'to_result', '(', ')', 'results', '.', 'append', '(', 'exists_ctx', '.', 'to_result', '(', ')', ')', 'test_ctx', '=', 'TestCtx', '(', 'BaseCheck', '.', 'MEDIUM', ',', "'Recommended attributes for the {} variable'", '.', 'format', '(', 'trajectory_ids', '[', '0', ']', '.', 'name', ')', ')', 'test_ctx', '.', 'assert_true', '(', 'getattr', '(', 'trajectory_ids', '[', '0', ']', ',', "'long_name'", ',', "''", ')', '!=', '""', ',', '"long_name attribute should exist and not be empty"', ')', 'results', '.', 'append', '(', 'test_ctx', '.', 'to_result', '(', ')', ')', 'return', 'results'] | Checks that if a variable exists for the trajectory id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset | ['Checks', 'that', 'if', 'a', 'variable', 'exists', 'for', 'the', 'trajectory', 'id', 'it', 'has', 'the', 'appropriate', 'attributes'] | train | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_trajectory.py#L41-L61 |
6,886 | SetBased/py-stratum | pystratum/MetadataDataLayer.py | MetadataDataLayer._log_query | def _log_query(query):
"""
Logs the query on the console.
:param str query: The query.
"""
query = query.strip()
if os.linesep in query:
# Query is a multi line query
MetadataDataLayer.io.log_very_verbose('Executing query:')
MetadataDataLayer.io.log_very_verbose('<sql>{0}</sql>'.format(query))
else:
# Query is a single line query.
MetadataDataLayer.io.log_very_verbose('Executing query: <sql>{0}</sql>'.format(query)) | python | def _log_query(query):
"""
Logs the query on the console.
:param str query: The query.
"""
query = query.strip()
if os.linesep in query:
# Query is a multi line query
MetadataDataLayer.io.log_very_verbose('Executing query:')
MetadataDataLayer.io.log_very_verbose('<sql>{0}</sql>'.format(query))
else:
# Query is a single line query.
MetadataDataLayer.io.log_very_verbose('Executing query: <sql>{0}</sql>'.format(query)) | ['def', '_log_query', '(', 'query', ')', ':', 'query', '=', 'query', '.', 'strip', '(', ')', 'if', 'os', '.', 'linesep', 'in', 'query', ':', '# Query is a multi line query', 'MetadataDataLayer', '.', 'io', '.', 'log_very_verbose', '(', "'Executing query:'", ')', 'MetadataDataLayer', '.', 'io', '.', 'log_very_verbose', '(', "'<sql>{0}</sql>'", '.', 'format', '(', 'query', ')', ')', 'else', ':', '# Query is a single line query.', 'MetadataDataLayer', '.', 'io', '.', 'log_very_verbose', '(', "'Executing query: <sql>{0}</sql>'", '.', 'format', '(', 'query', ')', ')'] | Logs the query on the console.
:param str query: The query. | ['Logs', 'the', 'query', 'on', 'the', 'console', '.'] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/MetadataDataLayer.py#L20-L34 |
6,887 | lucasmaystre/choix | choix/utils.py | log_likelihood_top1 | def log_likelihood_top1(data, params):
"""Compute the log-likelihood of model parameters."""
loglik = 0
params = np.asarray(params)
for winner, losers in data:
idx = np.append(winner, losers)
loglik -= logsumexp(params.take(idx) - params[winner])
return loglik | python | def log_likelihood_top1(data, params):
"""Compute the log-likelihood of model parameters."""
loglik = 0
params = np.asarray(params)
for winner, losers in data:
idx = np.append(winner, losers)
loglik -= logsumexp(params.take(idx) - params[winner])
return loglik | ['def', 'log_likelihood_top1', '(', 'data', ',', 'params', ')', ':', 'loglik', '=', '0', 'params', '=', 'np', '.', 'asarray', '(', 'params', ')', 'for', 'winner', ',', 'losers', 'in', 'data', ':', 'idx', '=', 'np', '.', 'append', '(', 'winner', ',', 'losers', ')', 'loglik', '-=', 'logsumexp', '(', 'params', '.', 'take', '(', 'idx', ')', '-', 'params', '[', 'winner', ']', ')', 'return', 'loglik'] | Compute the log-likelihood of model parameters. | ['Compute', 'the', 'log', '-', 'likelihood', 'of', 'model', 'parameters', '.'] | train | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/utils.py#L186-L193 |
6,888 | dropbox/stone | stone/backends/python_rsrc/stone_serializers.py | json_decode | def json_decode(data_type, serialized_obj, caller_permissions=None,
alias_validators=None, strict=True, old_style=False):
"""Performs the reverse operation of json_encode.
Args:
data_type (Validator): Validator for serialized_obj.
serialized_obj (str): The JSON string to deserialize.
caller_permissions (list): The list of raw-string caller permissions
with which to serialize.
alias_validators (Optional[Mapping[bv.Validator, Callable[[], None]]]):
Custom validation functions. These must raise bv.ValidationError on
failure.
strict (bool): If strict, then unknown struct fields will raise an
error, and unknown union variants will raise an error even if a
catch all field is specified. strict should only be used by a
recipient of serialized JSON if it's guaranteed that its Stone
specs are at least as recent as the senders it receives messages
from.
Returns:
The returned object depends on the input data_type.
- Boolean -> bool
- Bytes -> bytes
- Float -> float
- Integer -> long
- List -> list
- Map -> dict
- Nullable -> None or its wrapped type.
- String -> unicode (PY2) or str (PY3)
- Struct -> An instance of its definition attribute.
- Timestamp -> datetime.datetime
- Union -> An instance of its definition attribute.
"""
try:
deserialized_obj = json.loads(serialized_obj)
except ValueError:
raise bv.ValidationError('could not decode input as JSON')
else:
return json_compat_obj_decode(
data_type, deserialized_obj, caller_permissions=caller_permissions,
alias_validators=alias_validators, strict=strict, old_style=old_style) | python | def json_decode(data_type, serialized_obj, caller_permissions=None,
alias_validators=None, strict=True, old_style=False):
"""Performs the reverse operation of json_encode.
Args:
data_type (Validator): Validator for serialized_obj.
serialized_obj (str): The JSON string to deserialize.
caller_permissions (list): The list of raw-string caller permissions
with which to serialize.
alias_validators (Optional[Mapping[bv.Validator, Callable[[], None]]]):
Custom validation functions. These must raise bv.ValidationError on
failure.
strict (bool): If strict, then unknown struct fields will raise an
error, and unknown union variants will raise an error even if a
catch all field is specified. strict should only be used by a
recipient of serialized JSON if it's guaranteed that its Stone
specs are at least as recent as the senders it receives messages
from.
Returns:
The returned object depends on the input data_type.
- Boolean -> bool
- Bytes -> bytes
- Float -> float
- Integer -> long
- List -> list
- Map -> dict
- Nullable -> None or its wrapped type.
- String -> unicode (PY2) or str (PY3)
- Struct -> An instance of its definition attribute.
- Timestamp -> datetime.datetime
- Union -> An instance of its definition attribute.
"""
try:
deserialized_obj = json.loads(serialized_obj)
except ValueError:
raise bv.ValidationError('could not decode input as JSON')
else:
return json_compat_obj_decode(
data_type, deserialized_obj, caller_permissions=caller_permissions,
alias_validators=alias_validators, strict=strict, old_style=old_style) | ['def', 'json_decode', '(', 'data_type', ',', 'serialized_obj', ',', 'caller_permissions', '=', 'None', ',', 'alias_validators', '=', 'None', ',', 'strict', '=', 'True', ',', 'old_style', '=', 'False', ')', ':', 'try', ':', 'deserialized_obj', '=', 'json', '.', 'loads', '(', 'serialized_obj', ')', 'except', 'ValueError', ':', 'raise', 'bv', '.', 'ValidationError', '(', "'could not decode input as JSON'", ')', 'else', ':', 'return', 'json_compat_obj_decode', '(', 'data_type', ',', 'deserialized_obj', ',', 'caller_permissions', '=', 'caller_permissions', ',', 'alias_validators', '=', 'alias_validators', ',', 'strict', '=', 'strict', ',', 'old_style', '=', 'old_style', ')'] | Performs the reverse operation of json_encode.
Args:
data_type (Validator): Validator for serialized_obj.
serialized_obj (str): The JSON string to deserialize.
caller_permissions (list): The list of raw-string caller permissions
with which to serialize.
alias_validators (Optional[Mapping[bv.Validator, Callable[[], None]]]):
Custom validation functions. These must raise bv.ValidationError on
failure.
strict (bool): If strict, then unknown struct fields will raise an
error, and unknown union variants will raise an error even if a
catch all field is specified. strict should only be used by a
recipient of serialized JSON if it's guaranteed that its Stone
specs are at least as recent as the senders it receives messages
from.
Returns:
The returned object depends on the input data_type.
- Boolean -> bool
- Bytes -> bytes
- Float -> float
- Integer -> long
- List -> list
- Map -> dict
- Nullable -> None or its wrapped type.
- String -> unicode (PY2) or str (PY3)
- Struct -> An instance of its definition attribute.
- Timestamp -> datetime.datetime
- Union -> An instance of its definition attribute. | ['Performs', 'the', 'reverse', 'operation', 'of', 'json_encode', '.'] | train | https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/python_rsrc/stone_serializers.py#L911-L951 |
6,889 | scoutapp/scout_apm_python | src/scout_apm/core/remote_ip.py | RemoteIp.lookup_from_headers | def lookup_from_headers(cls, headers):
"""
Given a dictionary of headers (WSGI request.META for instance), look up
the most likely user's IP
"""
# A single address, set by this server, returned as an Array
remote_addr = cls.ips_from(headers.get("REMOTE_ADDR"))
# Could be a CSV list and/or repeated headers that were concatenated.
forwarded_ips = cls.ips_from(headers.get("HTTP_X_FORWARDED_FOR"))
client_ips = cls.ips_from(headers.get("HTTP_CLIENT_IP"))
# We assume these things about the IP headers:
#
# - X-Forwarded-For will be a list of IPs, one per proxy, or blank.
# in order: `client,proxy1,proxy2`
# - Client-Ip is propagated from the outermost proxy, or is blank
# - REMOTE_ADDR will be the IP that made the request to this server
#
# X-Forwarded-For and Client-Ip shouldn't be set at the same time, but
# if they are, use the one in Forwarded
ips = forwarded_ips + client_ips + remote_addr
try:
return ips[0]
except IndexError:
return None | python | def lookup_from_headers(cls, headers):
"""
Given a dictionary of headers (WSGI request.META for instance), look up
the most likely user's IP
"""
# A single address, set by this server, returned as an Array
remote_addr = cls.ips_from(headers.get("REMOTE_ADDR"))
# Could be a CSV list and/or repeated headers that were concatenated.
forwarded_ips = cls.ips_from(headers.get("HTTP_X_FORWARDED_FOR"))
client_ips = cls.ips_from(headers.get("HTTP_CLIENT_IP"))
# We assume these things about the IP headers:
#
# - X-Forwarded-For will be a list of IPs, one per proxy, or blank.
# in order: `client,proxy1,proxy2`
# - Client-Ip is propagated from the outermost proxy, or is blank
# - REMOTE_ADDR will be the IP that made the request to this server
#
# X-Forwarded-For and Client-Ip shouldn't be set at the same time, but
# if they are, use the one in Forwarded
ips = forwarded_ips + client_ips + remote_addr
try:
return ips[0]
except IndexError:
return None | ['def', 'lookup_from_headers', '(', 'cls', ',', 'headers', ')', ':', '# A single address, set by this server, returned as an Array', 'remote_addr', '=', 'cls', '.', 'ips_from', '(', 'headers', '.', 'get', '(', '"REMOTE_ADDR"', ')', ')', '# Could be a CSV list and/or repeated headers that were concatenated.', 'forwarded_ips', '=', 'cls', '.', 'ips_from', '(', 'headers', '.', 'get', '(', '"HTTP_X_FORWARDED_FOR"', ')', ')', 'client_ips', '=', 'cls', '.', 'ips_from', '(', 'headers', '.', 'get', '(', '"HTTP_CLIENT_IP"', ')', ')', '# We assume these things about the IP headers:', '#', '# - X-Forwarded-For will be a list of IPs, one per proxy, or blank.', '# in order: `client,proxy1,proxy2`', '# - Client-Ip is propagated from the outermost proxy, or is blank', '# - REMOTE_ADDR will be the IP that made the request to this server', '#', "# X-Forwarded-For and Client-Ip shouldn't be set at the same time, but", '# if they are, use the one in Forwarded', 'ips', '=', 'forwarded_ips', '+', 'client_ips', '+', 'remote_addr', 'try', ':', 'return', 'ips', '[', '0', ']', 'except', 'IndexError', ':', 'return', 'None'] | Given a dictionary of headers (WSGI request.META for instance), look up
the most likely user's IP | ['Given', 'a', 'dictionary', 'of', 'headers', '(', 'WSGI', 'request', '.', 'META', 'for', 'instance', ')', 'look', 'up', 'the', 'most', 'likely', 'user', 's', 'IP'] | train | https://github.com/scoutapp/scout_apm_python/blob/e5539ee23b8129be9b75d5007c88b6158b51294f/src/scout_apm/core/remote_ip.py#L16-L43 |
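The priority order described in the comments above (X-Forwarded-For first, then Client-Ip, then REMOTE_ADDR) can be sketched without the rest of scout_apm as follows; the header parsing here is a simplified stand-in for RemoteIp.ips_from:

def _ips_from(value):
    # Split a possibly comma-separated header value into stripped IPs.
    if not value:
        return []
    return [part.strip() for part in value.split(",") if part.strip()]

def lookup_remote_ip(headers):
    ips = (_ips_from(headers.get("HTTP_X_FORWARDED_FOR"))
           + _ips_from(headers.get("HTTP_CLIENT_IP"))
           + _ips_from(headers.get("REMOTE_ADDR")))
    return ips[0] if ips else None

meta = {"REMOTE_ADDR": "10.0.0.5",
        "HTTP_X_FORWARDED_FOR": "203.0.113.7, 10.0.0.1"}
print(lookup_remote_ip(meta))   # 203.0.113.7 (the client, not the proxy)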
6,890 | pyGrowler/Growler | growler/aio/http_protocol.py | GrowlerHTTPProtocol.body_storage_pair | def body_storage_pair(self):
"""
Return a reader/writer pair for storing received body data.
These are event-loop specific objects.
The reader should be an awaitable object that returns the
body data once created.
"""
future = Future()
def send_body():
nonlocal future
data = yield
future.set_result(data)
yield
sender = send_body()
next(sender)
return future, sender | python | def body_storage_pair(self):
"""
Return a reader/writer pair for storing received body data.
These are event-loop specific objects.
The reader should be an awaitable object that returns the
body data once created.
"""
future = Future()
def send_body():
nonlocal future
data = yield
future.set_result(data)
yield
sender = send_body()
next(sender)
return future, sender | ['def', 'body_storage_pair', '(', 'self', ')', ':', 'future', '=', 'Future', '(', ')', 'def', 'send_body', '(', ')', ':', 'nonlocal', 'future', 'data', '=', 'yield', 'future', '.', 'set_result', '(', 'data', ')', 'yield', 'sender', '=', 'send_body', '(', ')', 'next', '(', 'sender', ')', 'return', 'future', ',', 'sender'] | Return reader/writer pair for storing receiving body data.
These are event-loop specific objects.
The reader should be an awaitable object that returns the
body data once created. | ['Return', 'reader', '/', 'writer', 'pair', 'for', 'storing', 'receiving', 'body', 'data', '.', 'These', 'are', 'event', '-', 'loop', 'specific', 'objects', '.'] | train | https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/aio/http_protocol.py#L147-L165 |
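The future/generator pairing used above can be illustrated with a self-contained sketch; a concurrent.futures.Future stands in here for whichever event-loop future the protocol actually creates:

from concurrent.futures import Future

def body_storage_pair():
    # Reader: a future that resolves once the body arrives.
    # Writer: a primed generator whose send() stores the body.
    future = Future()
    def send_body():
        data = yield
        future.set_result(data)
        yield
    sender = send_body()
    next(sender)                # prime the generator up to the first yield
    return future, sender

reader, writer = body_storage_pair()
writer.send(b"hello body")
print(reader.result())          # b'hello body'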
6,891 | CalebBell/ht | ht/condensation.py | Shah | def Shah(m, x, D, rhol, mul, kl, Cpl, P, Pc):
r'''Calculates heat transfer coefficient for condensation
of a fluid inside a tube, as presented in [1]_ and again by the same
author in [2]_; also given in [3]_. Requires no properties of the gas.
Uses the Dittus-Boelter correlation for single phase heat transfer
coefficient, with a Reynolds number assuming all the flow is liquid.
.. math::
h_{TP} = h_L\left[(1-x)^{0.8} +\frac{3.8x^{0.76}(1-x)^{0.04}}
{P_r^{0.38}}\right]
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific interval [-]
D : float
Diameter of the channel [m]
rhol : float
Density of the liquid [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
P : float
Pressure of the fluid, [Pa]
Pc : float
Critical pressure of the fluid, [Pa]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ is well written an unambiguous as to how to apply this equation.
Examples
--------
>>> Shah(m=1, x=0.4, D=.3, rhol=800, mul=1E-5, kl=0.6, Cpl=2300, P=1E6, Pc=2E7)
2561.2593415479214
References
----------
.. [1] Shah, M. M. "A General Correlation for Heat Transfer during Film
Condensation inside Pipes." International Journal of Heat and Mass
Transfer 22, no. 4 (April 1, 1979): 547-56.
doi:10.1016/0017-9310(79)90058-9.
.. [2] Shah, M. M., Heat Transfer During Film Condensation in Tubes and
Annuli: A Review of the Literature, ASHRAE Transactions, vol. 87, no.
3, pp. 1086-1100, 1981.
.. [3] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1st.
Wiley-Interscience, 1991.
'''
VL = m/(rhol*pi/4*D**2)
ReL = Reynolds(V=VL, D=D, rho=rhol, mu=mul)
Prl = Prandtl(Cp=Cpl, k=kl, mu=mul)
hL = turbulent_Dittus_Boelter(ReL, Prl)*kl/D
Pr = P/Pc
return hL*((1-x)**0.8 + 3.8*x**0.76*(1-x)**0.04/Pr**0.38) | python | def Shah(m, x, D, rhol, mul, kl, Cpl, P, Pc):
r'''Calculates heat transfer coefficient for condensation
of a fluid inside a tube, as presented in [1]_ and again by the same
author in [2]_; also given in [3]_. Requires no properties of the gas.
Uses the Dittus-Boelter correlation for single phase heat transfer
coefficient, with a Reynolds number assuming all the flow is liquid.
.. math::
h_{TP} = h_L\left[(1-x)^{0.8} +\frac{3.8x^{0.76}(1-x)^{0.04}}
{P_r^{0.38}}\right]
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific interval [-]
D : float
Diameter of the channel [m]
rhol : float
Density of the liquid [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
P : float
Pressure of the fluid, [Pa]
Pc : float
Critical pressure of the fluid, [Pa]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ is well written and unambiguous as to how to apply this equation.
Examples
--------
>>> Shah(m=1, x=0.4, D=.3, rhol=800, mul=1E-5, kl=0.6, Cpl=2300, P=1E6, Pc=2E7)
2561.2593415479214
References
----------
.. [1] Shah, M. M. "A General Correlation for Heat Transfer during Film
Condensation inside Pipes." International Journal of Heat and Mass
Transfer 22, no. 4 (April 1, 1979): 547-56.
doi:10.1016/0017-9310(79)90058-9.
.. [2] Shah, M. M., Heat Transfer During Film Condensation in Tubes and
Annuli: A Review of the Literature, ASHRAE Transactions, vol. 87, no.
3, pp. 1086-1100, 1981.
.. [3] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1st.
Wiley-Interscience, 1991.
'''
VL = m/(rhol*pi/4*D**2)
ReL = Reynolds(V=VL, D=D, rho=rhol, mu=mul)
Prl = Prandtl(Cp=Cpl, k=kl, mu=mul)
hL = turbulent_Dittus_Boelter(ReL, Prl)*kl/D
Pr = P/Pc
return hL*((1-x)**0.8 + 3.8*x**0.76*(1-x)**0.04/Pr**0.38) | ['def', 'Shah', '(', 'm', ',', 'x', ',', 'D', ',', 'rhol', ',', 'mul', ',', 'kl', ',', 'Cpl', ',', 'P', ',', 'Pc', ')', ':', 'VL', '=', 'm', '/', '(', 'rhol', '*', 'pi', '/', '4', '*', 'D', '**', '2', ')', 'ReL', '=', 'Reynolds', '(', 'V', '=', 'VL', ',', 'D', '=', 'D', ',', 'rho', '=', 'rhol', ',', 'mu', '=', 'mul', ')', 'Prl', '=', 'Prandtl', '(', 'Cp', '=', 'Cpl', ',', 'k', '=', 'kl', ',', 'mu', '=', 'mul', ')', 'hL', '=', 'turbulent_Dittus_Boelter', '(', 'ReL', ',', 'Prl', ')', '*', 'kl', '/', 'D', 'Pr', '=', 'P', '/', 'Pc', 'return', 'hL', '*', '(', '(', '1', '-', 'x', ')', '**', '0.8', '+', '3.8', '*', 'x', '**', '0.76', '*', '(', '1', '-', 'x', ')', '**', '0.04', '/', 'Pr', '**', '0.38', ')'] | r'''Calculates heat transfer coefficient for condensation
of a fluid inside a tube, as presented in [1]_ and again by the same
author in [2]_; also given in [3]_. Requires no properties of the gas.
Uses the Dittus-Boelter correlation for single phase heat transfer
coefficient, with a Reynolds number assuming all the flow is liquid.
.. math::
h_{TP} = h_L\left[(1-x)^{0.8} +\frac{3.8x^{0.76}(1-x)^{0.04}}
{P_r^{0.38}}\right]
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific interval [-]
D : float
Diameter of the channel [m]
rhol : float
Density of the liquid [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
P : float
Pressure of the fluid, [Pa]
Pc : float
Critical pressure of the fluid, [Pa]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ is well written and unambiguous as to how to apply this equation.
Examples
--------
>>> Shah(m=1, x=0.4, D=.3, rhol=800, mul=1E-5, kl=0.6, Cpl=2300, P=1E6, Pc=2E7)
2561.2593415479214
References
----------
.. [1] Shah, M. M. "A General Correlation for Heat Transfer during Film
Condensation inside Pipes." International Journal of Heat and Mass
Transfer 22, no. 4 (April 1, 1979): 547-56.
doi:10.1016/0017-9310(79)90058-9.
.. [2] Shah, M. M., Heat Transfer During Film Condensation in Tubes and
Annuli: A Review of the Literature, ASHRAE Transactions, vol. 87, no.
3, pp. 1086-1100, 1981.
.. [3] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1st.
Wiley-Interscience, 1991. | ['r', 'Calculates', 'heat', 'transfer', 'coefficient', 'for', 'condensation', 'of', 'a', 'fluid', 'inside', 'a', 'tube', 'as', 'presented', 'in', '[', '1', ']', '_', 'and', 'again', 'by', 'the', 'same', 'author', 'in', '[', '2', ']', '_', ';', 'also', 'given', 'in', '[', '3', ']', '_', '.', 'Requires', 'no', 'properties', 'of', 'the', 'gas', '.', 'Uses', 'the', 'Dittus', '-', 'Boelter', 'correlation', 'for', 'single', 'phase', 'heat', 'transfer', 'coefficient', 'with', 'a', 'Reynolds', 'number', 'assuming', 'all', 'the', 'flow', 'is', 'liquid', '.'] | train | https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/condensation.py#L362-L425 |
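Because the correlation above depends on the local quality x, a typical use is to evaluate it at several qualities along the condenser and average; a short usage sketch built on the module path listed in the record (assuming the ht package is installed, and reusing the operating conditions from the docstring example as placeholders):

from ht.condensation import Shah

qualities = [0.05, 0.25, 0.5, 0.75, 0.95]
h_values = [Shah(m=1, x=x, D=0.3, rhol=800, mul=1E-5, kl=0.6,
                 Cpl=2300, P=1E6, Pc=2E7) for x in qualities]
h_mean = sum(h_values) / len(h_values)   # crude length-averaged coefficient
print(h_values)
print(h_mean)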
6,892 | bcbio/bcbio-nextgen | bcbio/bam/callable.py | NBlockRegionPicker.expand_block | def expand_block(self, feat):
"""Expand any blocks which are near the start or end of a contig.
"""
chrom_end = self._ref_sizes.get(feat.chrom)
if chrom_end:
if feat.start < self._end_buffer:
feat.start = 0
if feat.stop >= chrom_end - self._end_buffer:
feat.stop = chrom_end
return feat | python | def expand_block(self, feat):
"""Expand any blocks which are near the start or end of a contig.
"""
chrom_end = self._ref_sizes.get(feat.chrom)
if chrom_end:
if feat.start < self._end_buffer:
feat.start = 0
if feat.stop >= chrom_end - self._end_buffer:
feat.stop = chrom_end
return feat | ['def', 'expand_block', '(', 'self', ',', 'feat', ')', ':', 'chrom_end', '=', 'self', '.', '_ref_sizes', '.', 'get', '(', 'feat', '.', 'chrom', ')', 'if', 'chrom_end', ':', 'if', 'feat', '.', 'start', '<', 'self', '.', '_end_buffer', ':', 'feat', '.', 'start', '=', '0', 'if', 'feat', '.', 'stop', '>=', 'chrom_end', '-', 'self', '.', '_end_buffer', ':', 'feat', '.', 'stop', '=', 'chrom_end', 'return', 'feat'] | Expand any blocks which are near the start or end of a contig. | ['Expand', 'any', 'blocks', 'which', 'are', 'near', 'the', 'start', 'or', 'end', 'of', 'a', 'contig', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L171-L180 |
6,893 | jfilter/text-classification-keras | texcla/experiment.py | split_data | def split_data(X, y, ratio=(0.8, 0.1, 0.1)):
"""Splits data into a training, validation, and test set.
Args:
X: text data
y: data labels
ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1)
Returns:
split data: X_train, X_val, X_test, y_train, y_val, y_test
"""
assert(sum(ratio) == 1 and len(ratio) == 3)
X_train, X_rest, y_train, y_rest = train_test_split(
X, y, train_size=ratio[0])
X_val, X_test, y_val, y_test = train_test_split(
X_rest, y_rest, train_size=ratio[1])
return X_train, X_val, X_test, y_train, y_val, y_test | python | def split_data(X, y, ratio=(0.8, 0.1, 0.1)):
"""Splits data into a training, validation, and test set.
Args:
X: text data
y: data labels
ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1)
Returns:
split data: X_train, X_val, X_test, y_train, y_val, y_test
"""
assert(sum(ratio) == 1 and len(ratio) == 3)
X_train, X_rest, y_train, y_rest = train_test_split(
X, y, train_size=ratio[0])
X_val, X_test, y_val, y_test = train_test_split(
X_rest, y_rest, train_size=ratio[1])
return X_train, X_val, X_test, y_train, y_val, y_test | ['def', 'split_data', '(', 'X', ',', 'y', ',', 'ratio', '=', '(', '0.8', ',', '0.1', ',', '0.1', ')', ')', ':', 'assert', '(', 'sum', '(', 'ratio', ')', '==', '1', 'and', 'len', '(', 'ratio', ')', '==', '3', ')', 'X_train', ',', 'X_rest', ',', 'y_train', ',', 'y_rest', '=', 'train_test_split', '(', 'X', ',', 'y', ',', 'train_size', '=', 'ratio', '[', '0', ']', ')', 'X_val', ',', 'X_test', ',', 'y_val', ',', 'y_test', '=', 'train_test_split', '(', 'X_rest', ',', 'y_rest', ',', 'train_size', '=', 'ratio', '[', '1', ']', ')', 'return', 'X_train', ',', 'X_val', ',', 'X_test', ',', 'y_train', ',', 'y_val', ',', 'y_test'] | Splits data into a training, validation, and test set.
Args:
X: text data
y: data labels
ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1)
Returns:
split data: X_train, X_val, X_test, y_train, y_val, y_test | ['Splits', 'data', 'into', 'a', 'training', 'validation', 'and', 'test', 'set', '.'] | train | https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/experiment.py#L148-L164 |
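For context, here is a self-contained sketch of a three-way split built directly on scikit-learn's train_test_split; it rescales the second split so the final fractions refer to the original data set size (a design choice in this sketch, not a claim about the helper above):

from sklearn.model_selection import train_test_split

def three_way_split(X, y, ratio=(0.8, 0.1, 0.1)):
    assert abs(sum(ratio) - 1.0) < 1e-9 and len(ratio) == 3
    X_train, X_rest, y_train, y_rest = train_test_split(X, y, train_size=ratio[0])
    # ratio[1] of the whole equals ratio[1]/(ratio[1]+ratio[2]) of the remainder.
    val_frac = ratio[1] / (ratio[1] + ratio[2])
    X_val, X_test, y_val, y_test = train_test_split(X_rest, y_rest, train_size=val_frac)
    return X_train, X_val, X_test, y_train, y_val, y_test

X = ["doc %d" % i for i in range(100)]
y = [i % 2 for i in range(100)]
splits = three_way_split(X, y)
print([len(part) for part in splits[:3]])   # roughly [80, 10, 10]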
6,894 | manodeep/Corrfunc | setup.py | get_dict_from_buffer | def get_dict_from_buffer(buf, keys=['DISTNAME', 'MAJOR',
'MINOR', 'PATCHLEVEL',
'PYTHON',
'MIN_PYTHON_MAJOR',
'MIN_PYTHON_MINOR',
'MIN_NUMPY_MAJOR',
'MIN_NUMPY_MINOR']):
"""
Parses a string buffer for key-val pairs for the supplied keys.
Returns: Python dictionary with all the keys (all keys in buffer
if None is passed for keys) with the values being a list
corresponding to each key.
Note: Return dict will contain all keys supplied (if not None).
If any key was not found in the buffer, then the value for
that key will be [] such that dict[key] does not produce
a KeyError.
Slightly modified from:
"http://stackoverflow.com/questions/5323703/regex-how-to-"\
"match-sequence-of-key-value-pairs-at-end-of-string
"""
pairs = dict()
if keys is None:
keys = "\S+"
regex = re.compile(r'''
\n # all key-value pairs are on separate lines
\s* # there might be some leading spaces
( # start group to return
(?:{0}\s*) # placeholder for tags to detect '\S+' == all
\s*:*=\s* # optional spaces, optional colon, = , optional spaces
.* # the value
) # end group to return
'''.format(keys), re.VERBOSE)
validate = False
else:
keys = [k.strip() for k in keys]
regex = re.compile(r'''
\n # all key-value pairs are on separate lines
\s* # there might be some leading spaces
( # start group to return
(?:{0}\s*) # placeholder for tags to detect '\S+' == all
\s*:*=\s* # optional spaces, optional colon, = , optional spaces
.* # the value
) # end group to return
'''.format('|'.join(keys)), re.VERBOSE)
validate = True
for k in keys:
pairs[k] = []
matches = regex.findall(buf)
for match in matches:
key, val = match.split('=', 1)
# remove colon and leading/trailing whitespace from key
key = (strip_line(key, ':')).strip()
# remove newline and leading/trailing whitespace from value
val = (strip_line(val)).strip()
if validate and key not in keys:
msg = "regex produced incorrect match. regex pattern = {0} "\
"claims key = [{1}] while original set of search keys "\
"= {2}".format(regex.pattern, key, '|'.join(keys))
raise AssertionError(msg)
pairs.setdefault(key, []).append(val)
return pairs | python | def get_dict_from_buffer(buf, keys=['DISTNAME', 'MAJOR',
'MINOR', 'PATCHLEVEL',
'PYTHON',
'MIN_PYTHON_MAJOR',
'MIN_PYTHON_MINOR',
'MIN_NUMPY_MAJOR',
'MIN_NUMPY_MINOR']):
"""
Parses a string buffer for key-val pairs for the supplied keys.
Returns: Python dictionary with all the keys (all keys in buffer
if None is passed for keys) with the values being a list
corresponding to each key.
Note: Return dict will contain all keys supplied (if not None).
If any key was not found in the buffer, then the value for
that key will be [] such that dict[key] does not produce
a KeyError.
Slightly modified from:
"http://stackoverflow.com/questions/5323703/regex-how-to-"\
"match-sequence-of-key-value-pairs-at-end-of-string
"""
pairs = dict()
if keys is None:
keys = "\S+"
regex = re.compile(r'''
\n # all key-value pairs are on separate lines
\s* # there might be some leading spaces
( # start group to return
(?:{0}\s*) # placeholder for tags to detect '\S+' == all
\s*:*=\s* # optional spaces, optional colon, = , optional spaces
.* # the value
) # end group to return
'''.format(keys), re.VERBOSE)
validate = False
else:
keys = [k.strip() for k in keys]
regex = re.compile(r'''
\n # all key-value pairs are on separate lines
\s* # there might be some leading spaces
( # start group to return
(?:{0}\s*) # placeholder for tags to detect '\S+' == all
\s*:*=\s* # optional spaces, optional colon, = , optional spaces
.* # the value
) # end group to return
'''.format('|'.join(keys)), re.VERBOSE)
validate = True
for k in keys:
pairs[k] = []
matches = regex.findall(buf)
for match in matches:
key, val = match.split('=', 1)
# remove colon and leading/trailing whitespace from key
key = (strip_line(key, ':')).strip()
# remove newline and leading/trailing whitespace from value
val = (strip_line(val)).strip()
if validate and key not in keys:
msg = "regex produced incorrect match. regex pattern = {0} "\
"claims key = [{1}] while original set of search keys "\
"= {2}".format(regex.pattern, key, '|'.join(keys))
raise AssertionError(msg)
pairs.setdefault(key, []).append(val)
return pairs | ['def', 'get_dict_from_buffer', '(', 'buf', ',', 'keys', '=', '[', "'DISTNAME'", ',', "'MAJOR'", ',', "'MINOR'", ',', "'PATCHLEVEL'", ',', "'PYTHON'", ',', "'MIN_PYTHON_MAJOR'", ',', "'MIN_PYTHON_MINOR'", ',', "'MIN_NUMPY_MAJOR'", ',', "'MIN_NUMPY_MINOR'", ']', ')', ':', 'pairs', '=', 'dict', '(', ')', 'if', 'keys', 'is', 'None', ':', 'keys', '=', '"\\S+"', 'regex', '=', 're', '.', 'compile', '(', "r'''\n \\n # all key-value pairs are on separate lines\n \\s* # there might be some leading spaces\n ( # start group to return\n (?:{0}\\s*) # placeholder for tags to detect '\\S+' == all\n \\s*:*=\\s* # optional spaces, optional colon, = , optional spaces\n .* # the value\n ) # end group to return\n '''", '.', 'format', '(', 'keys', ')', ',', 're', '.', 'VERBOSE', ')', 'validate', '=', 'False', 'else', ':', 'keys', '=', '[', 'k', '.', 'strip', '(', ')', 'for', 'k', 'in', 'keys', ']', 'regex', '=', 're', '.', 'compile', '(', "r'''\n \\n # all key-value pairs are on separate lines\n \\s* # there might be some leading spaces\n ( # start group to return\n (?:{0}\\s*) # placeholder for tags to detect '\\S+' == all\n \\s*:*=\\s* # optional spaces, optional colon, = , optional spaces\n .* # the value\n ) # end group to return\n '''", '.', 'format', '(', "'|'", '.', 'join', '(', 'keys', ')', ')', ',', 're', '.', 'VERBOSE', ')', 'validate', '=', 'True', 'for', 'k', 'in', 'keys', ':', 'pairs', '[', 'k', ']', '=', '[', ']', 'matches', '=', 'regex', '.', 'findall', '(', 'buf', ')', 'for', 'match', 'in', 'matches', ':', 'key', ',', 'val', '=', 'match', '.', 'split', '(', "'='", ',', '1', ')', '# remove colon and leading/trailing whitespace from key', 'key', '=', '(', 'strip_line', '(', 'key', ',', "':'", ')', ')', '.', 'strip', '(', ')', '# remove newline and leading/trailing whitespace from value', 'val', '=', '(', 'strip_line', '(', 'val', ')', ')', '.', 'strip', '(', ')', 'if', 'validate', 'and', 'key', 'not', 'in', 'keys', ':', 'msg', '=', '"regex produced incorrect match. regex pattern = {0} "', '"claims key = [{1}] while original set of search keys "', '"= {2}"', '.', 'format', '(', 'regex', '.', 'pattern', ',', 'key', ',', "'|'", '.', 'join', '(', 'keys', ')', ')', 'raise', 'AssertionError', '(', 'msg', ')', 'pairs', '.', 'setdefault', '(', 'key', ',', '[', ']', ')', '.', 'append', '(', 'val', ')', 'return', 'pairs'] | Parses a string buffer for key-val pairs for the supplied keys.
Returns: Python dictionary with all the keys (all keys in buffer
if None is passed for keys) with the values being a list
corresponding to each key.
Note: Return dict will contain all keys supplied (if not None).
If any key was not found in the buffer, then the value for
that key will be [] such that dict[key] does not produce
a KeyError.
Slightly modified from:
"http://stackoverflow.com/questions/5323703/regex-how-to-"\
"match-sequence-of-key-value-pairs-at-end-of-string | ['Parses', 'a', 'string', 'buffer', 'for', 'key', '-', 'val', 'pairs', 'for', 'the', 'supplied', 'keys', '.'] | train | https://github.com/manodeep/Corrfunc/blob/753aa50b93eebfefc76a0b0cd61522536bd45d2a/setup.py#L74-L144 |
6,895 | etal/biofrills | biofrills/stats/chisq.py | _igamc | def _igamc(a, x):
"""Complemented incomplete Gamma integral.
SYNOPSIS:
double a, x, y, igamc();
y = igamc( a, x );
DESCRIPTION:
The function is defined by::
igamc(a,x) = 1 - igam(a,x)
inf.
-
1 | | -t a-1
= ----- | e t dt.
- | |
| (a) -
x
In this implementation both arguments must be positive.
The integral is evaluated by either a power series or
continued fraction expansion, depending on the relative
values of a and x.
"""
# Compute x**a * exp(-x) / Gamma(a)
ax = math.exp(a * math.log(x) - x - math.lgamma(a))
# Continued fraction
y = 1.0 - a
z = x + y + 1.0
c = 0.0
pkm2 = 1.0
qkm2 = x
pkm1 = x + 1.0
qkm1 = z * x
ans = pkm1 / qkm1
while True:
c += 1.0
y += 1.0
z += 2.0
yc = y * c
pk = pkm1 * z - pkm2 * yc
qk = qkm1 * z - qkm2 * yc
if qk != 0:
r = pk/qk
t = abs((ans - r) / r)
ans = r
else:
t = 1.0;
pkm2 = pkm1
pkm1 = pk
qkm2 = qkm1
qkm1 = qk
if abs(pk) > BIG:
pkm2 *= BIGINV;
pkm1 *= BIGINV;
qkm2 *= BIGINV;
qkm1 *= BIGINV;
if t <= MACHEP:
return ans * ax | python | def _igamc(a, x):
"""Complemented incomplete Gamma integral.
SYNOPSIS:
double a, x, y, igamc();
y = igamc( a, x );
DESCRIPTION:
The function is defined by::
igamc(a,x) = 1 - igam(a,x)
inf.
-
1 | | -t a-1
= ----- | e t dt.
- | |
| (a) -
x
In this implementation both arguments must be positive.
The integral is evaluated by either a power series or
continued fraction expansion, depending on the relative
values of a and x.
"""
# Compute x**a * exp(-x) / Gamma(a)
ax = math.exp(a * math.log(x) - x - math.lgamma(a))
# Continued fraction
y = 1.0 - a
z = x + y + 1.0
c = 0.0
pkm2 = 1.0
qkm2 = x
pkm1 = x + 1.0
qkm1 = z * x
ans = pkm1 / qkm1
while True:
c += 1.0
y += 1.0
z += 2.0
yc = y * c
pk = pkm1 * z - pkm2 * yc
qk = qkm1 * z - qkm2 * yc
if qk != 0:
r = pk/qk
t = abs((ans - r) / r)
ans = r
else:
t = 1.0;
pkm2 = pkm1
pkm1 = pk
qkm2 = qkm1
qkm1 = qk
if abs(pk) > BIG:
pkm2 *= BIGINV;
pkm1 *= BIGINV;
qkm2 *= BIGINV;
qkm1 *= BIGINV;
if t <= MACHEP:
return ans * ax | ['def', '_igamc', '(', 'a', ',', 'x', ')', ':', '# Compute x**a * exp(-x) / Gamma(a)', 'ax', '=', 'math', '.', 'exp', '(', 'a', '*', 'math', '.', 'log', '(', 'x', ')', '-', 'x', '-', 'math', '.', 'lgamma', '(', 'a', ')', ')', '# Continued fraction', 'y', '=', '1.0', '-', 'a', 'z', '=', 'x', '+', 'y', '+', '1.0', 'c', '=', '0.0', 'pkm2', '=', '1.0', 'qkm2', '=', 'x', 'pkm1', '=', 'x', '+', '1.0', 'qkm1', '=', 'z', '*', 'x', 'ans', '=', 'pkm1', '/', 'qkm1', 'while', 'True', ':', 'c', '+=', '1.0', 'y', '+=', '1.0', 'z', '+=', '2.0', 'yc', '=', 'y', '*', 'c', 'pk', '=', 'pkm1', '*', 'z', '-', 'pkm2', '*', 'yc', 'qk', '=', 'qkm1', '*', 'z', '-', 'qkm2', '*', 'yc', 'if', 'qk', '!=', '0', ':', 'r', '=', 'pk', '/', 'qk', 't', '=', 'abs', '(', '(', 'ans', '-', 'r', ')', '/', 'r', ')', 'ans', '=', 'r', 'else', ':', 't', '=', '1.0', 'pkm2', '=', 'pkm1', 'pkm1', '=', 'pk', 'qkm2', '=', 'qkm1', 'qkm1', '=', 'qk', 'if', 'abs', '(', 'pk', ')', '>', 'BIG', ':', 'pkm2', '*=', 'BIGINV', 'pkm1', '*=', 'BIGINV', 'qkm2', '*=', 'BIGINV', 'qkm1', '*=', 'BIGINV', 'if', 't', '<=', 'MACHEP', ':', 'return', 'ans', '*', 'ax'] | Complemented incomplete Gamma integral.
SYNOPSIS:
double a, x, y, igamc();
y = igamc( a, x );
DESCRIPTION:
The function is defined by::
igamc(a,x) = 1 - igam(a,x)
inf.
-
1 | | -t a-1
= ----- | e t dt.
- | |
| (a) -
x
In this implementation both arguments must be positive.
The integral is evaluated by either a power series or
continued fraction expansion, depending on the relative
values of a and x. | ['Complemented', 'incomplete', 'Gamma', 'integral', '.'] | train | https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/stats/chisq.py#L47-L110 |
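The `_igamc` row above implements the complemented incomplete Gamma integral, igamc(a, x) = 1 - igam(a, x). Below is a minimal numerical sketch of that identity using SciPy's regularized incomplete gamma functions and direct quadrature; it does not call the biofrills routine itself, and the values of a and x are arbitrary positive test values chosen for illustration.

```python
# Cross-check of igamc(a, x) = 1 - igam(a, x)
#                            = (1 / Gamma(a)) * integral from x to inf of exp(-t) * t**(a - 1) dt
# using SciPy only; a and x are arbitrary positive test values.
import math
from scipy import integrate, special

a, x = 2.5, 3.0

igamc_upper = special.gammaincc(a, x)            # regularized upper incomplete gamma
igamc_complement = 1.0 - special.gammainc(a, x)  # 1 - regularized lower incomplete gamma

# Direct quadrature of the defining integral, normalized by Gamma(a).
integral, _ = integrate.quad(lambda t: math.exp(-t) * t ** (a - 1.0), x, math.inf)
igamc_quadrature = integral / math.gamma(a)

print(igamc_upper, igamc_complement, igamc_quadrature)  # all three agree to quadrature tolerance
```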
6,896 | CalebBell/ht | ht/condensation.py | Boyko_Kruzhilin | def Boyko_Kruzhilin(m, rhog, rhol, kl, mul, Cpl, D, x):
r'''Calculates heat transfer coefficient for condensation
of a pure chemical inside a vertical tube or tube bundle, as presented in
[2]_ according to [1]_.
.. math::
h_f = h_{LO}\left[1 + x\left(\frac{\rho_L}{\rho_G} - 1\right)\right]^{0.5}
h_{LO} = 0.021 \frac{k_L}{L} Re_{LO}^{0.8} Pr^{0.43}
Parameters
----------
m : float
Mass flow rate [kg/s]
rhog : float
Density of the gas [kg/m^3]
rhol : float
Density of the liquid [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
D : float
Diameter of the tubing [m]
x : float
Quality at the specific interval [-]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
To calculate overall heat transfer coefficient during condensation,
simply average values at x = 1 and x = 0.
Examples
--------
Page 589 in [2]_, matches exactly.
>>> Boyko_Kruzhilin(m=500*pi/4*.03**2, rhog=6.36, rhol=582.9, kl=0.098,
... mul=159E-6, Cpl=2520., D=0.03, x=0.85)
10598.657227479956
References
----------
.. [1] Boyko, L. D., and G. N. Kruzhilin. "Heat Transfer and Hydraulic
Resistance during Condensation of Steam in a Horizontal Tube and in a
Bundle of Tubes." International Journal of Heat and Mass Transfer 10,
no. 3 (March 1, 1967): 361-73. doi:10.1016/0017-9310(67)90152-4.
.. [2] Hewitt, G. L. Shires T. Reg Bott G. F., George L. Shires, and
T. R. Bott. Process Heat Transfer. 1E. Boca Raton: CRC Press, 1994.
'''
Vlo = m/rhol/(pi/4.*D**2)
Relo = rhol*Vlo*D/mul
Prl = mul*Cpl/kl
hlo = 0.021*kl/D*Relo**0.8*Prl**0.43
return hlo*(1. + x*(rhol/rhog - 1.))**0.5 | python | def Boyko_Kruzhilin(m, rhog, rhol, kl, mul, Cpl, D, x):
r'''Calculates heat transfer coefficient for condensation
of a pure chemical inside a vertical tube or tube bundle, as presented in
[2]_ according to [1]_.
.. math::
h_f = h_{LO}\left[1 + x\left(\frac{\rho_L}{\rho_G} - 1\right)\right]^{0.5}
h_{LO} = 0.021 \frac{k_L}{L} Re_{LO}^{0.8} Pr^{0.43}
Parameters
----------
m : float
Mass flow rate [kg/s]
rhog : float
Density of the gas [kg/m^3]
rhol : float
Density of the liquid [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
D : float
Diameter of the tubing [m]
x : float
Quality at the specific interval [-]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
To calculate overall heat transfer coefficient during condensation,
simply average values at x = 1 and x = 0.
Examples
--------
Page 589 in [2]_, matches exactly.
>>> Boyko_Kruzhilin(m=500*pi/4*.03**2, rhog=6.36, rhol=582.9, kl=0.098,
... mul=159E-6, Cpl=2520., D=0.03, x=0.85)
10598.657227479956
References
----------
.. [1] Boyko, L. D., and G. N. Kruzhilin. "Heat Transfer and Hydraulic
Resistance during Condensation of Steam in a Horizontal Tube and in a
Bundle of Tubes." International Journal of Heat and Mass Transfer 10,
no. 3 (March 1, 1967): 361-73. doi:10.1016/0017-9310(67)90152-4.
.. [2] Hewitt, G. L. Shires T. Reg Bott G. F., George L. Shires, and
T. R. Bott. Process Heat Transfer. 1E. Boca Raton: CRC Press, 1994.
'''
Vlo = m/rhol/(pi/4.*D**2)
Relo = rhol*Vlo*D/mul
Prl = mul*Cpl/kl
hlo = 0.021*kl/D*Relo**0.8*Prl**0.43
return hlo*(1. + x*(rhol/rhog - 1.))**0.5 | ['def', 'Boyko_Kruzhilin', '(', 'm', ',', 'rhog', ',', 'rhol', ',', 'kl', ',', 'mul', ',', 'Cpl', ',', 'D', ',', 'x', ')', ':', 'Vlo', '=', 'm', '/', 'rhol', '/', '(', 'pi', '/', '4.', '*', 'D', '**', '2', ')', 'Relo', '=', 'rhol', '*', 'Vlo', '*', 'D', '/', 'mul', 'Prl', '=', 'mul', '*', 'Cpl', '/', 'kl', 'hlo', '=', '0.021', '*', 'kl', '/', 'D', '*', 'Relo', '**', '0.8', '*', 'Prl', '**', '0.43', 'return', 'hlo', '*', '(', '1.', '+', 'x', '*', '(', 'rhol', '/', 'rhog', '-', '1.', ')', ')', '**', '0.5'] | r'''Calculates heat transfer coefficient for condensation
of a pure chemical inside a vertical tube or tube bundle, as presented in
[2]_ according to [1]_.
.. math::
h_f = h_{LO}\left[1 + x\left(\frac{\rho_L}{\rho_G} - 1\right)\right]^{0.5}
h_{LO} = 0.021 \frac{k_L}{L} Re_{LO}^{0.8} Pr^{0.43}
Parameters
----------
m : float
Mass flow rate [kg/s]
rhog : float
Density of the gas [kg/m^3]
rhol : float
Density of the liquid [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
D : float
Diameter of the tubing [m]
x : float
Quality at the specific interval [-]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
To calculate overall heat transfer coefficient during condensation,
simply average values at x = 1 and x = 0.
Examples
--------
Page 589 in [2]_, matches exactly.
>>> Boyko_Kruzhilin(m=500*pi/4*.03**2, rhog=6.36, rhol=582.9, kl=0.098,
... mul=159E-6, Cpl=2520., D=0.03, x=0.85)
10598.657227479956
References
----------
.. [1] Boyko, L. D., and G. N. Kruzhilin. "Heat Transfer and Hydraulic
Resistance during Condensation of Steam in a Horizontal Tube and in a
Bundle of Tubes." International Journal of Heat and Mass Transfer 10,
no. 3 (March 1, 1967): 361-73. doi:10.1016/0017-9310(67)90152-4.
.. [2] Hewitt, G. L. Shires T. Reg Bott G. F., George L. Shires, and
T. R. Bott. Process Heat Transfer. 1E. Boca Raton: CRC Press, 1994. | ['r', 'Calculates', 'heat', 'transfer', 'coefficient', 'for', 'condensation', 'of', 'a', 'pure', 'chemical', 'inside', 'a', 'vertical', 'tube', 'or', 'tube', 'bundle', 'as', 'presented', 'in', '[', '2', ']', '_', 'according', 'to', '[', '1', ']', '_', '.'] | train | https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/condensation.py#L93-L153 |
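The Notes for `Boyko_Kruzhilin` state that the overall condensing coefficient is obtained by averaging the local values at x = 1 and x = 0. A hedged sketch of that averaging follows; the import path mirrors this row's file (ht/condensation.py), and the property values simply reuse the docstring example, so the numbers are illustrative only.

```python
# Overall condensing coefficient as the mean of the local coefficients at the
# tube inlet (x = 1, all vapor) and outlet (x = 0, all liquid), per the Notes.
from math import pi
from ht.condensation import Boyko_Kruzhilin

props = dict(m=500 * pi / 4 * 0.03 ** 2, rhog=6.36, rhol=582.9,
             kl=0.098, mul=159E-6, Cpl=2520., D=0.03)

h_inlet = Boyko_Kruzhilin(x=1.0, **props)
h_outlet = Boyko_Kruzhilin(x=0.0, **props)
h_overall = 0.5 * (h_inlet + h_outlet)
print(h_overall)
```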
6,897 | sibirrer/lenstronomy | lenstronomy/GalKin/light_profile.py | LightProfile.draw_light_2d_linear | def draw_light_2d_linear(self, kwargs_list, n=1, new_compute=False, r_eff=1.):
"""
constructs the CDF and draws from it random realizations of projected radii R
:param kwargs_list:
:return:
"""
if not hasattr(self, '_light_cdf') or new_compute is True:
r_array = np.linspace(self._min_interpolate, self._max_interpolate, self._interp_grid_num)
cum_sum = np.zeros_like(r_array)
sum = 0
for i, r in enumerate(r_array):
if i == 0:
cum_sum[i] = 0
else:
sum += self.light_2d(r, kwargs_list) * r
cum_sum[i] = copy.deepcopy(sum)
cum_sum_norm = cum_sum/cum_sum[-1]
f = interp1d(cum_sum_norm, r_array)
self._light_cdf = f
cdf_draw = np.random.uniform(0., 1, n)
r_draw = self._light_cdf(cdf_draw)
return r_draw | python | def draw_light_2d_linear(self, kwargs_list, n=1, new_compute=False, r_eff=1.):
"""
constructs the CDF and draws from it random realizations of projected radii R
:param kwargs_list:
:return:
"""
if not hasattr(self, '_light_cdf') or new_compute is True:
r_array = np.linspace(self._min_interpolate, self._max_interpolate, self._interp_grid_num)
cum_sum = np.zeros_like(r_array)
sum = 0
for i, r in enumerate(r_array):
if i == 0:
cum_sum[i] = 0
else:
sum += self.light_2d(r, kwargs_list) * r
cum_sum[i] = copy.deepcopy(sum)
cum_sum_norm = cum_sum/cum_sum[-1]
f = interp1d(cum_sum_norm, r_array)
self._light_cdf = f
cdf_draw = np.random.uniform(0., 1, n)
r_draw = self._light_cdf(cdf_draw)
return r_draw | ['def', 'draw_light_2d_linear', '(', 'self', ',', 'kwargs_list', ',', 'n', '=', '1', ',', 'new_compute', '=', 'False', ',', 'r_eff', '=', '1.', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', "'_light_cdf'", ')', 'or', 'new_compute', 'is', 'True', ':', 'r_array', '=', 'np', '.', 'linspace', '(', 'self', '.', '_min_interpolate', ',', 'self', '.', '_max_interpolate', ',', 'self', '.', '_interp_grid_num', ')', 'cum_sum', '=', 'np', '.', 'zeros_like', '(', 'r_array', ')', 'sum', '=', '0', 'for', 'i', ',', 'r', 'in', 'enumerate', '(', 'r_array', ')', ':', 'if', 'i', '==', '0', ':', 'cum_sum', '[', 'i', ']', '=', '0', 'else', ':', 'sum', '+=', 'self', '.', 'light_2d', '(', 'r', ',', 'kwargs_list', ')', '*', 'r', 'cum_sum', '[', 'i', ']', '=', 'copy', '.', 'deepcopy', '(', 'sum', ')', 'cum_sum_norm', '=', 'cum_sum', '/', 'cum_sum', '[', '-', '1', ']', 'f', '=', 'interp1d', '(', 'cum_sum_norm', ',', 'r_array', ')', 'self', '.', '_light_cdf', '=', 'f', 'cdf_draw', '=', 'np', '.', 'random', '.', 'uniform', '(', '0.', ',', '1', ',', 'n', ')', 'r_draw', '=', 'self', '.', '_light_cdf', '(', 'cdf_draw', ')', 'return', 'r_draw'] | constructs the CDF and draws from it random realizations of projected radii R
:param kwargs_list:
:return: | ['constructs', 'the', 'CDF', 'and', 'draws', 'from', 'it', 'random', 'realizations', 'of', 'projected', 'radii', 'R', ':', 'param', 'kwargs_list', ':', ':', 'return', ':'] | train | https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/GalKin/light_profile.py#L61-L82 |
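`draw_light_2d_linear` is inverse-transform sampling: it accumulates light_2d(r) * r on a radius grid, normalizes the cumulative curve, inverts it with `interp1d`, and maps uniform deviates through the inverse. The stand-alone sketch below shows the same technique with a toy exponential profile; `draw_projected_radii`, the grid limits, and the profile are illustrative stand-ins rather than part of the lenstronomy API.

```python
# Inverse-transform sampling of projected radii from a radial light profile.
import numpy as np
from scipy.interpolate import interp1d

def draw_projected_radii(profile_2d, r_min=1e-4, r_max=50.0, n_grid=2000, n_draw=5):
    r = np.linspace(r_min, r_max, n_grid)
    weights = profile_2d(r) * r            # projected light per annulus ~ I(R) * R
    cdf = np.cumsum(weights)
    cdf -= cdf[0]                          # CDF starts at zero, as in the loop above
    cdf /= cdf[-1]                         # normalize so the last value is one
    inverse_cdf = interp1d(cdf, r)         # invert by swapping the axes
    return inverse_cdf(np.random.uniform(0.0, 1.0, n_draw))

# Toy exponential disk with unit scale radius (arbitrary units).
radii = draw_projected_radii(lambda r: np.exp(-r), n_draw=5)
print(radii)
```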
6,898 | mcieslik-mctp/papy | src/papy/core.py | Dagger.connect | def connect(self, datas=None):
"""
Connects ``Pipers`` in the order input -> output. See ``Piper.connect``.
According to the pipes (topology). If "datas" is given will connect the
input ``Pipers`` to the input data see: ``Dagger.connect_inputs``.
Argumensts:
- datas(sequence) [default: ``None``] valid sequence of input data.
see: ``Dagger.connect_inputs``.
"""
# if data connect inputs
if datas:
self.connect_inputs(datas)
# connect the remaining pipers
postorder = self.postorder()
self.log.debug('%s trying to connect in the order %s' % \
(repr(self), repr(postorder)))
for piper in postorder:
if not piper.connected and self[piper].nodes():
# 1. sort inputs by index in postorder
inputs = [p for p in postorder if p in self[piper].nodes()]
# 2. sort postorder so that all parents come before children
# mind that the top of a pipeline is a child!
inputs.sort(cmp=self.children_after_parents)
# 2. branch age sorted inputs
piper.connect(inputs)
self.log.debug('%s succesfuly connected' % repr(self)) | python | def connect(self, datas=None):
"""
Connects ``Pipers`` in the order input -> output. See ``Piper.connect``.
According to the pipes (topology). If "datas" is given will connect the
input ``Pipers`` to the input data see: ``Dagger.connect_inputs``.
Argumensts:
- datas(sequence) [default: ``None``] valid sequence of input data.
see: ``Dagger.connect_inputs``.
"""
# if data connect inputs
if datas:
self.connect_inputs(datas)
# connect the remaining pipers
postorder = self.postorder()
self.log.debug('%s trying to connect in the order %s' % \
(repr(self), repr(postorder)))
for piper in postorder:
if not piper.connected and self[piper].nodes():
# 1. sort inputs by index in postorder
inputs = [p for p in postorder if p in self[piper].nodes()]
# 2. sort postorder so that all parents come before children
# mind that the top of a pipeline is a child!
inputs.sort(cmp=self.children_after_parents)
# 2. branch age sorted inputs
piper.connect(inputs)
self.log.debug('%s succesfuly connected' % repr(self)) | ['def', 'connect', '(', 'self', ',', 'datas', '=', 'None', ')', ':', '# if data connect inputs', 'if', 'datas', ':', 'self', '.', 'connect_inputs', '(', 'datas', ')', '# connect the remaining pipers', 'postorder', '=', 'self', '.', 'postorder', '(', ')', 'self', '.', 'log', '.', 'debug', '(', "'%s trying to connect in the order %s'", '%', '(', 'repr', '(', 'self', ')', ',', 'repr', '(', 'postorder', ')', ')', ')', 'for', 'piper', 'in', 'postorder', ':', 'if', 'not', 'piper', '.', 'connected', 'and', 'self', '[', 'piper', ']', '.', 'nodes', '(', ')', ':', '# 1. sort inputs by index in postorder', 'inputs', '=', '[', 'p', 'for', 'p', 'in', 'postorder', 'if', 'p', 'in', 'self', '[', 'piper', ']', '.', 'nodes', '(', ')', ']', '# 2. sort postorder so that all parents come before children', '# mind that the top of a pipeline is a child!', 'inputs', '.', 'sort', '(', 'cmp', '=', 'self', '.', 'children_after_parents', ')', '# 2. branch age sorted inputs', 'piper', '.', 'connect', '(', 'inputs', ')', 'self', '.', 'log', '.', 'debug', '(', "'%s succesfuly connected'", '%', 'repr', '(', 'self', ')', ')'] | Connects ``Pipers`` in the order input -> output. See ``Piper.connect``.
According to the pipes (topology). If "datas" is given will connect the
input ``Pipers`` to the input data see: ``Dagger.connect_inputs``.
Argumensts:
- datas(sequence) [default: ``None``] valid sequence of input data.
see: ``Dagger.connect_inputs``. | ['Connects', 'Pipers', 'in', 'the', 'order', 'input', '-', '>', 'output', '.', 'See', 'Piper', '.', 'connect', '.', 'According', 'to', 'the', 'pipes', '(', 'topology', ')', '.', 'If', 'datas', 'is', 'given', 'will', 'connect', 'the', 'input', 'Pipers', 'to', 'the', 'input', 'data', 'see', ':', 'Dagger', '.', 'connect_inputs', '.', 'Argumensts', ':', '-', 'datas', '(', 'sequence', ')', '[', 'default', ':', 'None', ']', 'valid', 'sequence', 'of', 'input', 'data', '.', 'see', ':', 'Dagger', '.', 'connect_inputs', '.'] | train | https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/papy/core.py#L158-L186 |
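`Dagger.connect` wires each piper only after its upstream pipers, sorting inputs so that parents come before children. Below is a generic sketch of that ordering constraint on a toy acyclic graph; the plain dict, the node names, and `connection_order` are made up for illustration and are not papy's `Dagger` API.

```python
# Emit nodes of an acyclic graph so every node appears after all of its
# upstream nodes ("parents before children"), via depth-first postorder.
def connection_order(upstream):
    order, seen = [], set()

    def visit(node):
        if node in seen:
            return
        seen.add(node)
        for parent in upstream.get(node, []):   # recurse into inputs first
            visit(parent)
        order.append(node)                      # node is emitted after its parents

    for node in upstream:                       # assumes the graph is acyclic
        visit(node)
    return order

# A -> C, B -> C, C -> D  (D consumes C, which consumes A and B)
pipeline = {"A": [], "B": [], "C": ["A", "B"], "D": ["C"]}
print(connection_order(pipeline))  # ['A', 'B', 'C', 'D']
```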
6,899 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender._get_data_schema | def _get_data_schema(self):
"""
Returns a dictionary of (column : type) for the data used in the
model.
"""
if not hasattr(self, "_data_schema"):
response = self.__proxy__.get_data_schema()
self._data_schema = {k : _turicreate._cython.cy_flexible_type.pytype_from_type_name(v)
for k, v in response["schema"].items()}
return self._data_schema | python | def _get_data_schema(self):
"""
Returns a dictionary of (column : type) for the data used in the
model.
"""
if not hasattr(self, "_data_schema"):
response = self.__proxy__.get_data_schema()
self._data_schema = {k : _turicreate._cython.cy_flexible_type.pytype_from_type_name(v)
for k, v in response["schema"].items()}
return self._data_schema | ['def', '_get_data_schema', '(', 'self', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', '"_data_schema"', ')', ':', 'response', '=', 'self', '.', '__proxy__', '.', 'get_data_schema', '(', ')', 'self', '.', '_data_schema', '=', '{', 'k', ':', '_turicreate', '.', '_cython', '.', 'cy_flexible_type', '.', 'pytype_from_type_name', '(', 'v', ')', 'for', 'k', ',', 'v', 'in', 'response', '[', '"schema"', ']', '.', 'items', '(', ')', '}', 'return', 'self', '.', '_data_schema'] | Returns a dictionary of (column : type) for the data used in the
model. | ['Returns', 'a', 'dictionary', 'of', '(', 'column', ':', 'type', ')', 'for', 'the', 'data', 'used', 'in', 'the', 'model', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L845-L857 |
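`_get_data_schema` is a lazy-caching accessor: the first call fetches the schema through the proxy, translates the type names, and stores the result on the instance; later calls return the cached dict. A minimal sketch of the same `hasattr`-based memoization follows, with the turicreate proxy and type translation stubbed out (the stub names and type strings are assumptions, not turicreate internals).

```python
# hasattr-based lazy caching of a computed attribute.
class SchemaCache:
    def _fetch_schema_from_backend(self):
        # Stand-in for the proxy call; returns column name -> type-name strings.
        return {"user_id": "integer", "item_id": "integer", "rating": "float"}

    def _get_data_schema(self):
        if not hasattr(self, "_data_schema"):
            type_map = {"integer": int, "float": float, "string": str}
            raw = self._fetch_schema_from_backend()
            self._data_schema = {k: type_map[v] for k, v in raw.items()}
        return self._data_schema

cache = SchemaCache()
print(cache._get_data_schema())                               # triggers the backend call once
print(cache._get_data_schema() is cache._get_data_schema())   # True: cached dict is reused
```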