Columns:
code: string, lengths 75 to 104k
code_sememe: string, lengths 47 to 309k
token_type: string, lengths 215 to 214k
code_dependency: string, lengths 75 to 155k
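A minimal sketch of how rows with this schema could be loaded and previewed, assuming the dump is published as a Hugging Face dataset; the repository id below is a placeholder, not a name taken from this dump:

from datasets import load_dataset

# "user/python-code-sememe" is a hypothetical repository id for this dump
ds = load_dataset("user/python-code-sememe", split="train")
row = ds[0]
for column in ("code", "code_sememe", "token_type", "code_dependency"):
    # preview the first 80 characters of each parallel representation
    print(column, "=>", row[column][:80])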
def nsx_controller_activate(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(nsx_controller, "name") name_key.text = kwargs.pop('name') activate = ET.SubElement(nsx_controller, "activate") callback = kwargs.pop('callback', self._callback) return callback(config)
def function[nsx_controller_activate, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[nsx_controller] assign[=] call[name[ET].SubElement, parameter[name[config], constant[nsx-controller]]] variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[nsx_controller], constant[name]]] name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]] variable[activate] assign[=] call[name[ET].SubElement, parameter[name[nsx_controller], constant[activate]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[nsx_controller_activate] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[nsx_controller] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[nsx_controller] , literal[string] ) identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[activate] = identifier[ET] . identifier[SubElement] ( identifier[nsx_controller] , literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def nsx_controller_activate(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') nsx_controller = ET.SubElement(config, 'nsx-controller', xmlns='urn:brocade.com:mgmt:brocade-tunnels') name_key = ET.SubElement(nsx_controller, 'name') name_key.text = kwargs.pop('name') activate = ET.SubElement(nsx_controller, 'activate') callback = kwargs.pop('callback', self._callback) return callback(config)
def is_valid_ip(ip_address): """ Check Validity of an IP address """ valid = True try: socket.inet_aton(ip_address.strip()) except: valid = False return valid
def function[is_valid_ip, parameter[ip_address]]: constant[ Check Validity of an IP address ] variable[valid] assign[=] constant[True] <ast.Try object at 0x7da1b1080d60> return[name[valid]]
keyword[def] identifier[is_valid_ip] ( identifier[ip_address] ): literal[string] identifier[valid] = keyword[True] keyword[try] : identifier[socket] . identifier[inet_aton] ( identifier[ip_address] . identifier[strip] ()) keyword[except] : identifier[valid] = keyword[False] keyword[return] identifier[valid]
def is_valid_ip(ip_address): """ Check Validity of an IP address """ valid = True try: socket.inet_aton(ip_address.strip()) # depends on [control=['try'], data=[]] except: valid = False # depends on [control=['except'], data=[]] return valid
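The code_sememe column reads like a custom rendering of Python's abstract syntax tree. As a rough point of comparison only (the serializer that produced the column is not part of this dump), the standard ast module yields a structurally similar dump for the same function; the source is only parsed, never executed, so the unresolved socket reference is harmless:

from ast import parse, dump

source = (
    "def is_valid_ip(ip_address):\n"
    "    valid = True\n"
    "    try:\n"
    "        socket.inet_aton(ip_address.strip())\n"
    "    except:\n"
    "        valid = False\n"
    "    return valid\n"
)
# dump() prints nested nodes such as FunctionDef, Try and Return,
# the same constructs the code_sememe column spells out
print(dump(parse(source)))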
def join(self, timeout=None): """Blocks until all items in the Queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls task_done() to indicate the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks. """ self.all_tasks_done.acquire() try: while self.unfinished_tasks: self.all_tasks_done.wait(timeout) finally: self.all_tasks_done.release()
def function[join, parameter[self, timeout]]: constant[Blocks until all items in the Queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls task_done() to indicate the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks. ] call[name[self].all_tasks_done.acquire, parameter[]] <ast.Try object at 0x7da207f98a90>
keyword[def] identifier[join] ( identifier[self] , identifier[timeout] = keyword[None] ): literal[string] identifier[self] . identifier[all_tasks_done] . identifier[acquire] () keyword[try] : keyword[while] identifier[self] . identifier[unfinished_tasks] : identifier[self] . identifier[all_tasks_done] . identifier[wait] ( identifier[timeout] ) keyword[finally] : identifier[self] . identifier[all_tasks_done] . identifier[release] ()
def join(self, timeout=None): """Blocks until all items in the Queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls task_done() to indicate the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks. """ self.all_tasks_done.acquire() try: while self.unfinished_tasks: self.all_tasks_done.wait(timeout) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]] finally: self.all_tasks_done.release()
def query_series_episodes(self, id, absolute_number=None, aired_season=None, aired_episode=None, dvd_season=None, dvd_episode=None, imdb_id=None, page=1): """Query series episodes""" # perform the request params = {'absoluteNumber': absolute_number, 'airedSeason': aired_season, 'airedEpisode': aired_episode, 'dvdSeason': dvd_season, 'dvdEpisode': dvd_episode, 'imdbId': imdb_id, 'page': page} r = self.session.get(self.base_url + '/series/{}/episodes/query'.format(id), params=params) if r.status_code == 404: return None r.raise_for_status() return r.json()
def function[query_series_episodes, parameter[self, id, absolute_number, aired_season, aired_episode, dvd_season, dvd_episode, imdb_id, page]]: constant[Query series episodes] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1eb5e40>, <ast.Constant object at 0x7da1b1eb4070>, <ast.Constant object at 0x7da1b1eb4040>, <ast.Constant object at 0x7da1b1eb71c0>, <ast.Constant object at 0x7da1b1eb4760>, <ast.Constant object at 0x7da1b1eb7160>, <ast.Constant object at 0x7da1b1eb61d0>], [<ast.Name object at 0x7da1b1eb60e0>, <ast.Name object at 0x7da1b1eb48e0>, <ast.Name object at 0x7da1b1eb4850>, <ast.Name object at 0x7da1b1eb48b0>, <ast.Name object at 0x7da1b1eb6650>, <ast.Name object at 0x7da1b1eb61a0>, <ast.Name object at 0x7da1b1eb6170>]] variable[r] assign[=] call[name[self].session.get, parameter[binary_operation[name[self].base_url + call[constant[/series/{}/episodes/query].format, parameter[name[id]]]]]] if compare[name[r].status_code equal[==] constant[404]] begin[:] return[constant[None]] call[name[r].raise_for_status, parameter[]] return[call[name[r].json, parameter[]]]
keyword[def] identifier[query_series_episodes] ( identifier[self] , identifier[id] , identifier[absolute_number] = keyword[None] , identifier[aired_season] = keyword[None] , identifier[aired_episode] = keyword[None] , identifier[dvd_season] = keyword[None] , identifier[dvd_episode] = keyword[None] , identifier[imdb_id] = keyword[None] , identifier[page] = literal[int] ): literal[string] identifier[params] ={ literal[string] : identifier[absolute_number] , literal[string] : identifier[aired_season] , literal[string] : identifier[aired_episode] , literal[string] : identifier[dvd_season] , literal[string] : identifier[dvd_episode] , literal[string] : identifier[imdb_id] , literal[string] : identifier[page] } identifier[r] = identifier[self] . identifier[session] . identifier[get] ( identifier[self] . identifier[base_url] + literal[string] . identifier[format] ( identifier[id] ), identifier[params] = identifier[params] ) keyword[if] identifier[r] . identifier[status_code] == literal[int] : keyword[return] keyword[None] identifier[r] . identifier[raise_for_status] () keyword[return] identifier[r] . identifier[json] ()
def query_series_episodes(self, id, absolute_number=None, aired_season=None, aired_episode=None, dvd_season=None, dvd_episode=None, imdb_id=None, page=1): """Query series episodes""" # perform the request params = {'absoluteNumber': absolute_number, 'airedSeason': aired_season, 'airedEpisode': aired_episode, 'dvdSeason': dvd_season, 'dvdEpisode': dvd_episode, 'imdbId': imdb_id, 'page': page} r = self.session.get(self.base_url + '/series/{}/episodes/query'.format(id), params=params) if r.status_code == 404: return None # depends on [control=['if'], data=[]] r.raise_for_status() return r.json()
def predict(self, quadruplets): """Predicts the ordering between sample distances in input quadruplets. For each quadruplet, returns 1 if the quadruplet is in the right order ( first pair is more similar than second pair), and -1 if not. Parameters ---------- quadruplets : array-like, shape=(n_quadruplets, 4, n_features) or (n_quadruplets, 4) 3D Array of quadruplets to predict, with each row corresponding to four points, or 2D array of indices of quadruplets if the metric learner uses a preprocessor. Returns ------- prediction : `numpy.ndarray` of floats, shape=(n_constraints,) Predictions of the ordering of pairs, for each quadruplet. """ check_is_fitted(self, 'transformer_') quadruplets = check_input(quadruplets, type_of_inputs='tuples', preprocessor=self.preprocessor_, estimator=self, tuple_size=self._tuple_size) return np.sign(self.decision_function(quadruplets))
def function[predict, parameter[self, quadruplets]]: constant[Predicts the ordering between sample distances in input quadruplets. For each quadruplet, returns 1 if the quadruplet is in the right order ( first pair is more similar than second pair), and -1 if not. Parameters ---------- quadruplets : array-like, shape=(n_quadruplets, 4, n_features) or (n_quadruplets, 4) 3D Array of quadruplets to predict, with each row corresponding to four points, or 2D array of indices of quadruplets if the metric learner uses a preprocessor. Returns ------- prediction : `numpy.ndarray` of floats, shape=(n_constraints,) Predictions of the ordering of pairs, for each quadruplet. ] call[name[check_is_fitted], parameter[name[self], constant[transformer_]]] variable[quadruplets] assign[=] call[name[check_input], parameter[name[quadruplets]]] return[call[name[np].sign, parameter[call[name[self].decision_function, parameter[name[quadruplets]]]]]]
keyword[def] identifier[predict] ( identifier[self] , identifier[quadruplets] ): literal[string] identifier[check_is_fitted] ( identifier[self] , literal[string] ) identifier[quadruplets] = identifier[check_input] ( identifier[quadruplets] , identifier[type_of_inputs] = literal[string] , identifier[preprocessor] = identifier[self] . identifier[preprocessor_] , identifier[estimator] = identifier[self] , identifier[tuple_size] = identifier[self] . identifier[_tuple_size] ) keyword[return] identifier[np] . identifier[sign] ( identifier[self] . identifier[decision_function] ( identifier[quadruplets] ))
def predict(self, quadruplets): """Predicts the ordering between sample distances in input quadruplets. For each quadruplet, returns 1 if the quadruplet is in the right order ( first pair is more similar than second pair), and -1 if not. Parameters ---------- quadruplets : array-like, shape=(n_quadruplets, 4, n_features) or (n_quadruplets, 4) 3D Array of quadruplets to predict, with each row corresponding to four points, or 2D array of indices of quadruplets if the metric learner uses a preprocessor. Returns ------- prediction : `numpy.ndarray` of floats, shape=(n_constraints,) Predictions of the ordering of pairs, for each quadruplet. """ check_is_fitted(self, 'transformer_') quadruplets = check_input(quadruplets, type_of_inputs='tuples', preprocessor=self.preprocessor_, estimator=self, tuple_size=self._tuple_size) return np.sign(self.decision_function(quadruplets))
def start(**kwargs): ''' Start KodeDrive daemon. ''' output, err = cli_syncthing_adapter.start(**kwargs) click.echo("%s" % output, err=err)
def function[start, parameter[]]: constant[ Start KodeDrive daemon. ] <ast.Tuple object at 0x7da1b209e7a0> assign[=] call[name[cli_syncthing_adapter].start, parameter[]] call[name[click].echo, parameter[binary_operation[constant[%s] <ast.Mod object at 0x7da2590d6920> name[output]]]]
keyword[def] identifier[start] (** identifier[kwargs] ): literal[string] identifier[output] , identifier[err] = identifier[cli_syncthing_adapter] . identifier[start] (** identifier[kwargs] ) identifier[click] . identifier[echo] ( literal[string] % identifier[output] , identifier[err] = identifier[err] )
def start(**kwargs): """ Start KodeDrive daemon. """ (output, err) = cli_syncthing_adapter.start(**kwargs) click.echo('%s' % output, err=err)
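The token_type column appears to replace keywords, identifiers and literals with their token classes while keeping operators verbatim. A rough approximation with the standard tokenize module, assuming that convention (the exact tokenizer behind the column is not shown in this dump):

import io
import keyword
import tokenize

def approx_token_types(source):
    # map NAME tokens to keyword[...] or identifier[...], literals to literal[...]
    out = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.NAME:
            kind = "keyword" if keyword.iskeyword(tok.string) else "identifier"
            out.append("%s[%s]" % (kind, tok.string))
        elif tok.type == tokenize.STRING:
            out.append("literal[string]")
        elif tok.type == tokenize.NUMBER:
            out.append("literal[int]")
        elif tok.type == tokenize.OP:
            out.append(tok.string)
    return " ".join(out)

# prints roughly: keyword[def] identifier[start] ( ** identifier[kwargs] ) : keyword[return] literal[int]
print(approx_token_types("def start(**kwargs): return 1\n"))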
def run(self, inputs, **kwargs): """Run model inference and return the result Parameters ---------- inputs : numpy array input to run a layer on Returns ------- params : numpy array result obtained after running the inference on mxnet """ input_data = np.asarray(inputs[0], dtype='f') # create module, passing cpu context if self.device == 'CPU': ctx = mx.cpu() else: raise NotImplementedError("Only CPU context is supported for now") mod = mx.mod.Module(symbol=self.symbol, data_names=['input_0'], context=ctx, label_names=None) mod.bind(for_training=False, data_shapes=[('input_0', input_data.shape)], label_shapes=None) mod.set_params(arg_params=self.params, aux_params=None) # run inference batch = namedtuple('Batch', ['data']) mod.forward(batch([mx.nd.array(input_data)])) result = mod.get_outputs()[0].asnumpy() return [result]
def function[run, parameter[self, inputs]]: constant[Run model inference and return the result Parameters ---------- inputs : numpy array input to run a layer on Returns ------- params : numpy array result obtained after running the inference on mxnet ] variable[input_data] assign[=] call[name[np].asarray, parameter[call[name[inputs]][constant[0]]]] if compare[name[self].device equal[==] constant[CPU]] begin[:] variable[ctx] assign[=] call[name[mx].cpu, parameter[]] variable[mod] assign[=] call[name[mx].mod.Module, parameter[]] call[name[mod].bind, parameter[]] call[name[mod].set_params, parameter[]] variable[batch] assign[=] call[name[namedtuple], parameter[constant[Batch], list[[<ast.Constant object at 0x7da1b26ae1a0>]]]] call[name[mod].forward, parameter[call[name[batch], parameter[list[[<ast.Call object at 0x7da1b26acb80>]]]]]] variable[result] assign[=] call[call[call[name[mod].get_outputs, parameter[]]][constant[0]].asnumpy, parameter[]] return[list[[<ast.Name object at 0x7da1b26ae440>]]]
keyword[def] identifier[run] ( identifier[self] , identifier[inputs] ,** identifier[kwargs] ): literal[string] identifier[input_data] = identifier[np] . identifier[asarray] ( identifier[inputs] [ literal[int] ], identifier[dtype] = literal[string] ) keyword[if] identifier[self] . identifier[device] == literal[string] : identifier[ctx] = identifier[mx] . identifier[cpu] () keyword[else] : keyword[raise] identifier[NotImplementedError] ( literal[string] ) identifier[mod] = identifier[mx] . identifier[mod] . identifier[Module] ( identifier[symbol] = identifier[self] . identifier[symbol] , identifier[data_names] =[ literal[string] ], identifier[context] = identifier[ctx] , identifier[label_names] = keyword[None] ) identifier[mod] . identifier[bind] ( identifier[for_training] = keyword[False] , identifier[data_shapes] =[( literal[string] , identifier[input_data] . identifier[shape] )], identifier[label_shapes] = keyword[None] ) identifier[mod] . identifier[set_params] ( identifier[arg_params] = identifier[self] . identifier[params] , identifier[aux_params] = keyword[None] ) identifier[batch] = identifier[namedtuple] ( literal[string] ,[ literal[string] ]) identifier[mod] . identifier[forward] ( identifier[batch] ([ identifier[mx] . identifier[nd] . identifier[array] ( identifier[input_data] )])) identifier[result] = identifier[mod] . identifier[get_outputs] ()[ literal[int] ]. identifier[asnumpy] () keyword[return] [ identifier[result] ]
def run(self, inputs, **kwargs): """Run model inference and return the result Parameters ---------- inputs : numpy array input to run a layer on Returns ------- params : numpy array result obtained after running the inference on mxnet """ input_data = np.asarray(inputs[0], dtype='f') # create module, passing cpu context if self.device == 'CPU': ctx = mx.cpu() # depends on [control=['if'], data=[]] else: raise NotImplementedError('Only CPU context is supported for now') mod = mx.mod.Module(symbol=self.symbol, data_names=['input_0'], context=ctx, label_names=None) mod.bind(for_training=False, data_shapes=[('input_0', input_data.shape)], label_shapes=None) mod.set_params(arg_params=self.params, aux_params=None) # run inference batch = namedtuple('Batch', ['data']) mod.forward(batch([mx.nd.array(input_data)])) result = mod.get_outputs()[0].asnumpy() return [result]
def apply_rectwv_coeff(reduced_image, rectwv_coeff, args_resampling=2, args_ignore_dtu_configuration=True, debugplot=0): """Compute rectification and wavelength calibration coefficients. Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. args_resampling : int 1: nearest neighbour, 2: flux preserving interpolation. args_ignore_dtu_configuration : bool If True, ignore differences in DTU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_image : HDUList object Rectified and wavelength calibrated image. """ logger = logging.getLogger(__name__) # header and data array (use deepcopy to avoid modifying # reduced_image[0].header as a side effect) header = copy.deepcopy(reduced_image[0].header) image2d = reduced_image[0].data # apply global offsets image2d = apply_integer_offsets( image2d=image2d, offx=rectwv_coeff.global_integer_offset_x_pix, offy=rectwv_coeff.global_integer_offset_y_pix ) # check grism and filter filter_name = header['filter'] logger.info('Filter: ' + filter_name) if filter_name != rectwv_coeff.tags['filter']: raise ValueError('Filter name does not match!') grism_name = header['grism'] logger.info('Grism: ' + grism_name) if grism_name != rectwv_coeff.tags['grism']: raise ValueError('Grism name does not match!') # read the DTU configuration from the image header dtu_conf = DtuConfiguration.define_from_header(header) # retrieve DTU configuration from RectWaveCoeff object dtu_conf_calib = DtuConfiguration.define_from_dictionary( rectwv_coeff.meta_info['dtu_configuration'] ) # check that the DTU configuration employed to obtain the calibration # corresponds to the DTU configuration in the input FITS file if dtu_conf != dtu_conf_calib: if args_ignore_dtu_configuration: logger.warning('DTU configuration differences found!') else: logger.warning('DTU configuration from image header:') logger.warning(dtu_conf) logger.warning('DTU configuration from master calibration:') logger.warning(dtu_conf_calib) raise ValueError("DTU configurations do not match!") else: logger.info('DTU configuration match!') # valid slitlet numbers list_valid_islitlets = list(range(1, EMIR_NBARS + 1)) for idel in rectwv_coeff.missing_slitlets: list_valid_islitlets.remove(idel) logger.debug('Valid slitlet numbers:\n' + str(list_valid_islitlets)) # --- # relevant wavelength calibration parameters for rectified and wavelength # calibrated image wv_parameters = set_wv_parameters(filter_name, grism_name) crpix1_enlarged = wv_parameters['crpix1_enlarged'] crval1_enlarged = wv_parameters['crval1_enlarged'] cdelt1_enlarged = wv_parameters['cdelt1_enlarged'] naxis1_enlarged = wv_parameters['naxis1_enlarged'] # initialize rectified and wavelength calibrated image naxis2_enlarged = EMIR_NBARS * EMIR_NPIXPERSLIT_RECTIFIED image2d_rectwv = np.zeros((naxis2_enlarged, naxis1_enlarged), dtype='float32') # main loop logger.info('Applying rectification and wavelength calibration') logger.info('RectWaveCoeff uuid={}'.format(rectwv_coeff.uuid)) cout = '0' for islitlet in range(1, EMIR_NBARS + 1): if islitlet in list_valid_islitlets: # define Slitlet2D object slt = Slitlet2D(islitlet=islitlet, rectwv_coeff=rectwv_coeff, debugplot=debugplot) # extract (distorted) slitlet from the initial image slitlet2d = slt.extract_slitlet2d(image2d) # rectify slitlet slitlet2d_rect = slt.rectify(slitlet2d, resampling=args_resampling) # wavelength calibration of the rectifed slitlet slitlet2d_rect_wv = resample_image2d_flux( image2d_orig=slitlet2d_rect, naxis1=naxis1_enlarged, cdelt1=cdelt1_enlarged, crval1=crval1_enlarged, crpix1=crpix1_enlarged, coeff=slt.wpoly ) # minimum and maximum useful row in the full 2d rectified image # (starting from 0) i1 = slt.iminslt - 1 i2 = slt.imaxslt # minimum and maximum scan in the rectified slitlet # (in pixels, from 1 to NAXIS2) ii1 = slt.min_row_rectified ii2 = slt.max_row_rectified + 1 # save rectified slitlet in its corresponding location within # the full 2d rectified image image2d_rectwv[i1:i2, :] = slitlet2d_rect_wv[ii1:ii2, :] # include scan range in FITS header header['imnslt' + str(islitlet).zfill(2)] = \ slt.iminslt, 'minimum Y pixel of useful slitlet region' header['imxslt' + str(islitlet).zfill(2)] = \ slt.imaxslt, 'maximum Y pixel of useful slitlet region' # determine useful channel region in each spectrum and include # that information in FITS header jminslt = [] jmaxslt = [] for idum in range(ii1, ii2 + 1): jminmax = find_pix_borders( slitlet2d_rect_wv[idum, :], sought_value=0 ) if jminmax != (-1, naxis1_enlarged): jminslt.append(jminmax[0]) jmaxslt.append(jminmax[1]) if len(jminslt) > 0: slt.jminslt = min(jminslt) + 1 slt.jmaxslt = max(jmaxslt) + 1 header['jmnslt' + str(islitlet).zfill(2)] = \ slt.jminslt, 'minimum X pixel of useful slitlet region' header['jmxslt' + str(islitlet).zfill(2)] = \ slt.jmaxslt, 'maximum X pixel of useful slitlet region' cout += '.' else: # include scan and channel range in FITS header header['imnslt' + str(islitlet).zfill(2)] = \ 0, 'minimum Y pixel of useful slitlet region' header['imxslt' + str(islitlet).zfill(2)] = \ 0, 'maximum Y pixel of useful slitlet region' header['jmnslt' + str(islitlet).zfill(2)] = \ 0, 'minimum X pixel of useful slitlet region' header['jmxslt' + str(islitlet).zfill(2)] = \ 0, 'maximum X pixel of useful slitlet region' cout += 'i' if islitlet % 10 == 0: if cout != 'i': cout = str(islitlet // 10) logger.info(cout) # update wavelength calibration in FITS header logger.info('Updating image header') for keyword in ['crval1', 'crpix1', 'crval2', 'crpix2']: if keyword in header: header.remove(keyword) header['crpix1'] = (crpix1_enlarged, 'reference pixel') header['crval1'] = (crval1_enlarged, 'central wavelength at crpix1') header['cdelt1'] = (cdelt1_enlarged, 'linear dispersion (Angstrom/pixel)') header['cunit1'] = ('Angstrom', 'units along axis1') header['ctype1'] = 'WAVELENGTH' header['crpix2'] = (0.0, 'reference pixel') header['crval2'] = (0.0, 'central value at crpix2') header['cdelt2'] = (1.0, 'increment') header['ctype2'] = 'PIXEL' header['cunit2'] = ('Pixel', 'units along axis2') for keyword in ['cd1_1', 'cd1_2', 'cd2_1', 'cd2_2', 'PCD1_1', 'PCD1_2', 'PCD2_1', 'PCD2_2', 'PCRPIX1', 'PCRPIX2']: if keyword in header: header.remove(keyword) # update history in FITS header header['history'] = 'Boundary parameters uuid:' + \ rectwv_coeff.meta_info['origin']['bound_param'][4:] if 'master_rectwv' in rectwv_coeff.meta_info['origin']: header['history'] = \ 'MasterRectWave uuid:' + \ rectwv_coeff.meta_info['origin']['master_rectwv'][4:] header['history'] = 'RectWaveCoeff uuid:' + rectwv_coeff.uuid header['history'] = 'Rectification and wavelength calibration time ' \ + datetime.now().isoformat() logger.info('Generating rectified and wavelength calibrated image') rectwv_image = fits.PrimaryHDU(data=image2d_rectwv, header=header) return fits.HDUList([rectwv_image])
def function[apply_rectwv_coeff, parameter[reduced_image, rectwv_coeff, args_resampling, args_ignore_dtu_configuration, debugplot]]: constant[Compute rectification and wavelength calibration coefficients. Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. args_resampling : int 1: nearest neighbour, 2: flux preserving interpolation. args_ignore_dtu_configuration : bool If True, ignore differences in DTU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_image : HDUList object Rectified and wavelength calibrated image. ] variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]] variable[header] assign[=] call[name[copy].deepcopy, parameter[call[name[reduced_image]][constant[0]].header]] variable[image2d] assign[=] call[name[reduced_image]][constant[0]].data variable[image2d] assign[=] call[name[apply_integer_offsets], parameter[]] variable[filter_name] assign[=] call[name[header]][constant[filter]] call[name[logger].info, parameter[binary_operation[constant[Filter: ] + name[filter_name]]]] if compare[name[filter_name] not_equal[!=] call[name[rectwv_coeff].tags][constant[filter]]] begin[:] <ast.Raise object at 0x7da18dc981c0> variable[grism_name] assign[=] call[name[header]][constant[grism]] call[name[logger].info, parameter[binary_operation[constant[Grism: ] + name[grism_name]]]] if compare[name[grism_name] not_equal[!=] call[name[rectwv_coeff].tags][constant[grism]]] begin[:] <ast.Raise object at 0x7da18dc9add0> variable[dtu_conf] assign[=] call[name[DtuConfiguration].define_from_header, parameter[name[header]]] variable[dtu_conf_calib] assign[=] call[name[DtuConfiguration].define_from_dictionary, parameter[call[name[rectwv_coeff].meta_info][constant[dtu_configuration]]]] if compare[name[dtu_conf] not_equal[!=] name[dtu_conf_calib]] begin[:] if name[args_ignore_dtu_configuration] begin[:] call[name[logger].warning, parameter[constant[DTU configuration differences found!]]] variable[list_valid_islitlets] assign[=] call[name[list], parameter[call[name[range], parameter[constant[1], binary_operation[name[EMIR_NBARS] + constant[1]]]]]] for taget[name[idel]] in starred[name[rectwv_coeff].missing_slitlets] begin[:] call[name[list_valid_islitlets].remove, parameter[name[idel]]] call[name[logger].debug, parameter[binary_operation[constant[Valid slitlet numbers: ] + call[name[str], parameter[name[list_valid_islitlets]]]]]] variable[wv_parameters] assign[=] call[name[set_wv_parameters], parameter[name[filter_name], name[grism_name]]] variable[crpix1_enlarged] assign[=] call[name[wv_parameters]][constant[crpix1_enlarged]] variable[crval1_enlarged] assign[=] call[name[wv_parameters]][constant[crval1_enlarged]] variable[cdelt1_enlarged] assign[=] call[name[wv_parameters]][constant[cdelt1_enlarged]] variable[naxis1_enlarged] assign[=] call[name[wv_parameters]][constant[naxis1_enlarged]] variable[naxis2_enlarged] assign[=] binary_operation[name[EMIR_NBARS] * name[EMIR_NPIXPERSLIT_RECTIFIED]] variable[image2d_rectwv] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da18fe92140>, <ast.Name object at 0x7da18fe90dc0>]]]] call[name[logger].info, parameter[constant[Applying rectification and wavelength calibration]]] call[name[logger].info, parameter[call[constant[RectWaveCoeff uuid={}].format, parameter[name[rectwv_coeff].uuid]]]] variable[cout] assign[=] constant[0] for taget[name[islitlet]] in starred[call[name[range], parameter[constant[1], binary_operation[name[EMIR_NBARS] + constant[1]]]]] begin[:] if compare[name[islitlet] in name[list_valid_islitlets]] begin[:] variable[slt] assign[=] call[name[Slitlet2D], parameter[]] variable[slitlet2d] assign[=] call[name[slt].extract_slitlet2d, parameter[name[image2d]]] variable[slitlet2d_rect] assign[=] call[name[slt].rectify, parameter[name[slitlet2d]]] variable[slitlet2d_rect_wv] assign[=] call[name[resample_image2d_flux], parameter[]] variable[i1] assign[=] binary_operation[name[slt].iminslt - constant[1]] variable[i2] assign[=] name[slt].imaxslt variable[ii1] assign[=] name[slt].min_row_rectified variable[ii2] assign[=] binary_operation[name[slt].max_row_rectified + constant[1]] call[name[image2d_rectwv]][tuple[[<ast.Slice object at 0x7da18dc99210>, <ast.Slice object at 0x7da18dc9a110>]]] assign[=] call[name[slitlet2d_rect_wv]][tuple[[<ast.Slice object at 0x7da18dc98190>, <ast.Slice object at 0x7da18dc995a0>]]] call[name[header]][binary_operation[constant[imnslt] + call[call[name[str], parameter[name[islitlet]]].zfill, parameter[constant[2]]]]] assign[=] tuple[[<ast.Attribute object at 0x7da18dc98a90>, <ast.Constant object at 0x7da18dc985e0>]] call[name[header]][binary_operation[constant[imxslt] + call[call[name[str], parameter[name[islitlet]]].zfill, parameter[constant[2]]]]] assign[=] tuple[[<ast.Attribute object at 0x7da18f09c460>, <ast.Constant object at 0x7da18f09d660>]] variable[jminslt] assign[=] list[[]] variable[jmaxslt] assign[=] list[[]] for taget[name[idum]] in starred[call[name[range], parameter[name[ii1], binary_operation[name[ii2] + constant[1]]]]] begin[:] variable[jminmax] assign[=] call[name[find_pix_borders], parameter[call[name[slitlet2d_rect_wv]][tuple[[<ast.Name object at 0x7da18f09fa60>, <ast.Slice object at 0x7da18f09ca00>]]]]] if compare[name[jminmax] not_equal[!=] tuple[[<ast.UnaryOp object at 0x7da18f09c580>, <ast.Name object at 0x7da18f09db70>]]] begin[:] call[name[jminslt].append, parameter[call[name[jminmax]][constant[0]]]] call[name[jmaxslt].append, parameter[call[name[jminmax]][constant[1]]]] if compare[call[name[len], parameter[name[jminslt]]] greater[>] constant[0]] begin[:] name[slt].jminslt assign[=] binary_operation[call[name[min], parameter[name[jminslt]]] + constant[1]] name[slt].jmaxslt assign[=] binary_operation[call[name[max], parameter[name[jmaxslt]]] + constant[1]] call[name[header]][binary_operation[constant[jmnslt] + call[call[name[str], parameter[name[islitlet]]].zfill, parameter[constant[2]]]]] assign[=] tuple[[<ast.Attribute object at 0x7da18f09dc90>, <ast.Constant object at 0x7da18f09cfa0>]] call[name[header]][binary_operation[constant[jmxslt] + call[call[name[str], parameter[name[islitlet]]].zfill, parameter[constant[2]]]]] assign[=] tuple[[<ast.Attribute object at 0x7da18f09f490>, <ast.Constant object at 0x7da18f09e830>]] <ast.AugAssign object at 0x7da18f09d120> if compare[binary_operation[name[islitlet] <ast.Mod object at 0x7da2590d6920> constant[10]] equal[==] constant[0]] begin[:] if compare[name[cout] not_equal[!=] constant[i]] begin[:] variable[cout] assign[=] call[name[str], parameter[binary_operation[name[islitlet] <ast.FloorDiv object at 0x7da2590d6bc0> constant[10]]]] call[name[logger].info, parameter[name[cout]]] call[name[logger].info, parameter[constant[Updating image header]]] for taget[name[keyword]] in starred[list[[<ast.Constant object at 0x7da18f09cd30>, <ast.Constant object at 0x7da18f09d3f0>, <ast.Constant object at 0x7da18f09cbe0>, <ast.Constant object at 0x7da18f09de40>]]] begin[:] if compare[name[keyword] in name[header]] begin[:] call[name[header].remove, parameter[name[keyword]]] call[name[header]][constant[crpix1]] assign[=] tuple[[<ast.Name object at 0x7da18f09da20>, <ast.Constant object at 0x7da18f09f310>]] call[name[header]][constant[crval1]] assign[=] tuple[[<ast.Name object at 0x7da18f09d4e0>, <ast.Constant object at 0x7da18f09c5e0>]] call[name[header]][constant[cdelt1]] assign[=] tuple[[<ast.Name object at 0x7da18f09cb20>, <ast.Constant object at 0x7da18f09e590>]] call[name[header]][constant[cunit1]] assign[=] tuple[[<ast.Constant object at 0x7da18f09d990>, <ast.Constant object at 0x7da18f09e6b0>]] call[name[header]][constant[ctype1]] assign[=] constant[WAVELENGTH] call[name[header]][constant[crpix2]] assign[=] tuple[[<ast.Constant object at 0x7da18f09cd90>, <ast.Constant object at 0x7da18f09ed40>]] call[name[header]][constant[crval2]] assign[=] tuple[[<ast.Constant object at 0x7da18f09e3b0>, <ast.Constant object at 0x7da18f09d6f0>]] call[name[header]][constant[cdelt2]] assign[=] tuple[[<ast.Constant object at 0x7da18f09e320>, <ast.Constant object at 0x7da18f09c910>]] call[name[header]][constant[ctype2]] assign[=] constant[PIXEL] call[name[header]][constant[cunit2]] assign[=] tuple[[<ast.Constant object at 0x7da18f09f340>, <ast.Constant object at 0x7da18f09e080>]] for taget[name[keyword]] in starred[list[[<ast.Constant object at 0x7da18f09e1d0>, <ast.Constant object at 0x7da18f09eda0>, <ast.Constant object at 0x7da18f09dae0>, <ast.Constant object at 0x7da18f09f010>, <ast.Constant object at 0x7da18f09f6d0>, <ast.Constant object at 0x7da18f09fa30>, <ast.Constant object at 0x7da18f09f7f0>, <ast.Constant object at 0x7da18f09e200>, <ast.Constant object at 0x7da18f09f580>, <ast.Constant object at 0x7da18f09e770>]]] begin[:] if compare[name[keyword] in name[header]] begin[:] call[name[header].remove, parameter[name[keyword]]] call[name[header]][constant[history]] assign[=] binary_operation[constant[Boundary parameters uuid:] + call[call[call[name[rectwv_coeff].meta_info][constant[origin]]][constant[bound_param]]][<ast.Slice object at 0x7da18f09d750>]] if compare[constant[master_rectwv] in call[name[rectwv_coeff].meta_info][constant[origin]]] begin[:] call[name[header]][constant[history]] assign[=] binary_operation[constant[MasterRectWave uuid:] + call[call[call[name[rectwv_coeff].meta_info][constant[origin]]][constant[master_rectwv]]][<ast.Slice object at 0x7da20c76d390>]] call[name[header]][constant[history]] assign[=] binary_operation[constant[RectWaveCoeff uuid:] + name[rectwv_coeff].uuid] call[name[header]][constant[history]] assign[=] binary_operation[constant[Rectification and wavelength calibration time ] + call[call[name[datetime].now, parameter[]].isoformat, parameter[]]] call[name[logger].info, parameter[constant[Generating rectified and wavelength calibrated image]]] variable[rectwv_image] assign[=] call[name[fits].PrimaryHDU, parameter[]] return[call[name[fits].HDUList, parameter[list[[<ast.Name object at 0x7da20c76f910>]]]]]
keyword[def] identifier[apply_rectwv_coeff] ( identifier[reduced_image] , identifier[rectwv_coeff] , identifier[args_resampling] = literal[int] , identifier[args_ignore_dtu_configuration] = keyword[True] , identifier[debugplot] = literal[int] ): literal[string] identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] ) identifier[header] = identifier[copy] . identifier[deepcopy] ( identifier[reduced_image] [ literal[int] ]. identifier[header] ) identifier[image2d] = identifier[reduced_image] [ literal[int] ]. identifier[data] identifier[image2d] = identifier[apply_integer_offsets] ( identifier[image2d] = identifier[image2d] , identifier[offx] = identifier[rectwv_coeff] . identifier[global_integer_offset_x_pix] , identifier[offy] = identifier[rectwv_coeff] . identifier[global_integer_offset_y_pix] ) identifier[filter_name] = identifier[header] [ literal[string] ] identifier[logger] . identifier[info] ( literal[string] + identifier[filter_name] ) keyword[if] identifier[filter_name] != identifier[rectwv_coeff] . identifier[tags] [ literal[string] ]: keyword[raise] identifier[ValueError] ( literal[string] ) identifier[grism_name] = identifier[header] [ literal[string] ] identifier[logger] . identifier[info] ( literal[string] + identifier[grism_name] ) keyword[if] identifier[grism_name] != identifier[rectwv_coeff] . identifier[tags] [ literal[string] ]: keyword[raise] identifier[ValueError] ( literal[string] ) identifier[dtu_conf] = identifier[DtuConfiguration] . identifier[define_from_header] ( identifier[header] ) identifier[dtu_conf_calib] = identifier[DtuConfiguration] . identifier[define_from_dictionary] ( identifier[rectwv_coeff] . identifier[meta_info] [ literal[string] ] ) keyword[if] identifier[dtu_conf] != identifier[dtu_conf_calib] : keyword[if] identifier[args_ignore_dtu_configuration] : identifier[logger] . identifier[warning] ( literal[string] ) keyword[else] : identifier[logger] . identifier[warning] ( literal[string] ) identifier[logger] . identifier[warning] ( identifier[dtu_conf] ) identifier[logger] . identifier[warning] ( literal[string] ) identifier[logger] . identifier[warning] ( identifier[dtu_conf_calib] ) keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : identifier[logger] . identifier[info] ( literal[string] ) identifier[list_valid_islitlets] = identifier[list] ( identifier[range] ( literal[int] , identifier[EMIR_NBARS] + literal[int] )) keyword[for] identifier[idel] keyword[in] identifier[rectwv_coeff] . identifier[missing_slitlets] : identifier[list_valid_islitlets] . identifier[remove] ( identifier[idel] ) identifier[logger] . identifier[debug] ( literal[string] + identifier[str] ( identifier[list_valid_islitlets] )) identifier[wv_parameters] = identifier[set_wv_parameters] ( identifier[filter_name] , identifier[grism_name] ) identifier[crpix1_enlarged] = identifier[wv_parameters] [ literal[string] ] identifier[crval1_enlarged] = identifier[wv_parameters] [ literal[string] ] identifier[cdelt1_enlarged] = identifier[wv_parameters] [ literal[string] ] identifier[naxis1_enlarged] = identifier[wv_parameters] [ literal[string] ] identifier[naxis2_enlarged] = identifier[EMIR_NBARS] * identifier[EMIR_NPIXPERSLIT_RECTIFIED] identifier[image2d_rectwv] = identifier[np] . identifier[zeros] (( identifier[naxis2_enlarged] , identifier[naxis1_enlarged] ), identifier[dtype] = literal[string] ) identifier[logger] . identifier[info] ( literal[string] ) identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[rectwv_coeff] . identifier[uuid] )) identifier[cout] = literal[string] keyword[for] identifier[islitlet] keyword[in] identifier[range] ( literal[int] , identifier[EMIR_NBARS] + literal[int] ): keyword[if] identifier[islitlet] keyword[in] identifier[list_valid_islitlets] : identifier[slt] = identifier[Slitlet2D] ( identifier[islitlet] = identifier[islitlet] , identifier[rectwv_coeff] = identifier[rectwv_coeff] , identifier[debugplot] = identifier[debugplot] ) identifier[slitlet2d] = identifier[slt] . identifier[extract_slitlet2d] ( identifier[image2d] ) identifier[slitlet2d_rect] = identifier[slt] . identifier[rectify] ( identifier[slitlet2d] , identifier[resampling] = identifier[args_resampling] ) identifier[slitlet2d_rect_wv] = identifier[resample_image2d_flux] ( identifier[image2d_orig] = identifier[slitlet2d_rect] , identifier[naxis1] = identifier[naxis1_enlarged] , identifier[cdelt1] = identifier[cdelt1_enlarged] , identifier[crval1] = identifier[crval1_enlarged] , identifier[crpix1] = identifier[crpix1_enlarged] , identifier[coeff] = identifier[slt] . identifier[wpoly] ) identifier[i1] = identifier[slt] . identifier[iminslt] - literal[int] identifier[i2] = identifier[slt] . identifier[imaxslt] identifier[ii1] = identifier[slt] . identifier[min_row_rectified] identifier[ii2] = identifier[slt] . identifier[max_row_rectified] + literal[int] identifier[image2d_rectwv] [ identifier[i1] : identifier[i2] ,:]= identifier[slitlet2d_rect_wv] [ identifier[ii1] : identifier[ii2] ,:] identifier[header] [ literal[string] + identifier[str] ( identifier[islitlet] ). identifier[zfill] ( literal[int] )]= identifier[slt] . identifier[iminslt] , literal[string] identifier[header] [ literal[string] + identifier[str] ( identifier[islitlet] ). identifier[zfill] ( literal[int] )]= identifier[slt] . identifier[imaxslt] , literal[string] identifier[jminslt] =[] identifier[jmaxslt] =[] keyword[for] identifier[idum] keyword[in] identifier[range] ( identifier[ii1] , identifier[ii2] + literal[int] ): identifier[jminmax] = identifier[find_pix_borders] ( identifier[slitlet2d_rect_wv] [ identifier[idum] ,:], identifier[sought_value] = literal[int] ) keyword[if] identifier[jminmax] !=(- literal[int] , identifier[naxis1_enlarged] ): identifier[jminslt] . identifier[append] ( identifier[jminmax] [ literal[int] ]) identifier[jmaxslt] . identifier[append] ( identifier[jminmax] [ literal[int] ]) keyword[if] identifier[len] ( identifier[jminslt] )> literal[int] : identifier[slt] . identifier[jminslt] = identifier[min] ( identifier[jminslt] )+ literal[int] identifier[slt] . identifier[jmaxslt] = identifier[max] ( identifier[jmaxslt] )+ literal[int] identifier[header] [ literal[string] + identifier[str] ( identifier[islitlet] ). identifier[zfill] ( literal[int] )]= identifier[slt] . identifier[jminslt] , literal[string] identifier[header] [ literal[string] + identifier[str] ( identifier[islitlet] ). identifier[zfill] ( literal[int] )]= identifier[slt] . identifier[jmaxslt] , literal[string] identifier[cout] += literal[string] keyword[else] : identifier[header] [ literal[string] + identifier[str] ( identifier[islitlet] ). identifier[zfill] ( literal[int] )]= literal[int] , literal[string] identifier[header] [ literal[string] + identifier[str] ( identifier[islitlet] ). identifier[zfill] ( literal[int] )]= literal[int] , literal[string] identifier[header] [ literal[string] + identifier[str] ( identifier[islitlet] ). identifier[zfill] ( literal[int] )]= literal[int] , literal[string] identifier[header] [ literal[string] + identifier[str] ( identifier[islitlet] ). identifier[zfill] ( literal[int] )]= literal[int] , literal[string] identifier[cout] += literal[string] keyword[if] identifier[islitlet] % literal[int] == literal[int] : keyword[if] identifier[cout] != literal[string] : identifier[cout] = identifier[str] ( identifier[islitlet] // literal[int] ) identifier[logger] . identifier[info] ( identifier[cout] ) identifier[logger] . identifier[info] ( literal[string] ) keyword[for] identifier[keyword] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[if] identifier[keyword] keyword[in] identifier[header] : identifier[header] . identifier[remove] ( identifier[keyword] ) identifier[header] [ literal[string] ]=( identifier[crpix1_enlarged] , literal[string] ) identifier[header] [ literal[string] ]=( identifier[crval1_enlarged] , literal[string] ) identifier[header] [ literal[string] ]=( identifier[cdelt1_enlarged] , literal[string] ) identifier[header] [ literal[string] ]=( literal[string] , literal[string] ) identifier[header] [ literal[string] ]= literal[string] identifier[header] [ literal[string] ]=( literal[int] , literal[string] ) identifier[header] [ literal[string] ]=( literal[int] , literal[string] ) identifier[header] [ literal[string] ]=( literal[int] , literal[string] ) identifier[header] [ literal[string] ]= literal[string] identifier[header] [ literal[string] ]=( literal[string] , literal[string] ) keyword[for] identifier[keyword] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[if] identifier[keyword] keyword[in] identifier[header] : identifier[header] . identifier[remove] ( identifier[keyword] ) identifier[header] [ literal[string] ]= literal[string] + identifier[rectwv_coeff] . identifier[meta_info] [ literal[string] ][ literal[string] ][ literal[int] :] keyword[if] literal[string] keyword[in] identifier[rectwv_coeff] . identifier[meta_info] [ literal[string] ]: identifier[header] [ literal[string] ]= literal[string] + identifier[rectwv_coeff] . identifier[meta_info] [ literal[string] ][ literal[string] ][ literal[int] :] identifier[header] [ literal[string] ]= literal[string] + identifier[rectwv_coeff] . identifier[uuid] identifier[header] [ literal[string] ]= literal[string] + identifier[datetime] . identifier[now] (). identifier[isoformat] () identifier[logger] . identifier[info] ( literal[string] ) identifier[rectwv_image] = identifier[fits] . identifier[PrimaryHDU] ( identifier[data] = identifier[image2d_rectwv] , identifier[header] = identifier[header] ) keyword[return] identifier[fits] . identifier[HDUList] ([ identifier[rectwv_image] ])
def apply_rectwv_coeff(reduced_image, rectwv_coeff, args_resampling=2, args_ignore_dtu_configuration=True, debugplot=0): """Compute rectification and wavelength calibration coefficients. Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. args_resampling : int 1: nearest neighbour, 2: flux preserving interpolation. args_ignore_dtu_configuration : bool If True, ignore differences in DTU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_image : HDUList object Rectified and wavelength calibrated image. """ logger = logging.getLogger(__name__) # header and data array (use deepcopy to avoid modifying # reduced_image[0].header as a side effect) header = copy.deepcopy(reduced_image[0].header) image2d = reduced_image[0].data # apply global offsets image2d = apply_integer_offsets(image2d=image2d, offx=rectwv_coeff.global_integer_offset_x_pix, offy=rectwv_coeff.global_integer_offset_y_pix) # check grism and filter filter_name = header['filter'] logger.info('Filter: ' + filter_name) if filter_name != rectwv_coeff.tags['filter']: raise ValueError('Filter name does not match!') # depends on [control=['if'], data=[]] grism_name = header['grism'] logger.info('Grism: ' + grism_name) if grism_name != rectwv_coeff.tags['grism']: raise ValueError('Grism name does not match!') # depends on [control=['if'], data=[]] # read the DTU configuration from the image header dtu_conf = DtuConfiguration.define_from_header(header) # retrieve DTU configuration from RectWaveCoeff object dtu_conf_calib = DtuConfiguration.define_from_dictionary(rectwv_coeff.meta_info['dtu_configuration']) # check that the DTU configuration employed to obtain the calibration # corresponds to the DTU configuration in the input FITS file if dtu_conf != dtu_conf_calib: if args_ignore_dtu_configuration: logger.warning('DTU configuration differences found!') # depends on [control=['if'], data=[]] else: logger.warning('DTU configuration from image header:') logger.warning(dtu_conf) logger.warning('DTU configuration from master calibration:') logger.warning(dtu_conf_calib) raise ValueError('DTU configurations do not match!') # depends on [control=['if'], data=['dtu_conf', 'dtu_conf_calib']] else: logger.info('DTU configuration match!') # valid slitlet numbers list_valid_islitlets = list(range(1, EMIR_NBARS + 1)) for idel in rectwv_coeff.missing_slitlets: list_valid_islitlets.remove(idel) # depends on [control=['for'], data=['idel']] logger.debug('Valid slitlet numbers:\n' + str(list_valid_islitlets)) # --- # relevant wavelength calibration parameters for rectified and wavelength # calibrated image wv_parameters = set_wv_parameters(filter_name, grism_name) crpix1_enlarged = wv_parameters['crpix1_enlarged'] crval1_enlarged = wv_parameters['crval1_enlarged'] cdelt1_enlarged = wv_parameters['cdelt1_enlarged'] naxis1_enlarged = wv_parameters['naxis1_enlarged'] # initialize rectified and wavelength calibrated image naxis2_enlarged = EMIR_NBARS * EMIR_NPIXPERSLIT_RECTIFIED image2d_rectwv = np.zeros((naxis2_enlarged, naxis1_enlarged), dtype='float32') # main loop logger.info('Applying rectification and wavelength calibration') logger.info('RectWaveCoeff uuid={}'.format(rectwv_coeff.uuid)) cout = '0' for islitlet in range(1, EMIR_NBARS + 1): if islitlet in list_valid_islitlets: # define Slitlet2D object slt = Slitlet2D(islitlet=islitlet, rectwv_coeff=rectwv_coeff, debugplot=debugplot) # extract (distorted) slitlet from the initial image slitlet2d = slt.extract_slitlet2d(image2d) # rectify slitlet slitlet2d_rect = slt.rectify(slitlet2d, resampling=args_resampling) # wavelength calibration of the rectifed slitlet slitlet2d_rect_wv = resample_image2d_flux(image2d_orig=slitlet2d_rect, naxis1=naxis1_enlarged, cdelt1=cdelt1_enlarged, crval1=crval1_enlarged, crpix1=crpix1_enlarged, coeff=slt.wpoly) # minimum and maximum useful row in the full 2d rectified image # (starting from 0) i1 = slt.iminslt - 1 i2 = slt.imaxslt # minimum and maximum scan in the rectified slitlet # (in pixels, from 1 to NAXIS2) ii1 = slt.min_row_rectified ii2 = slt.max_row_rectified + 1 # save rectified slitlet in its corresponding location within # the full 2d rectified image image2d_rectwv[i1:i2, :] = slitlet2d_rect_wv[ii1:ii2, :] # include scan range in FITS header header['imnslt' + str(islitlet).zfill(2)] = (slt.iminslt, 'minimum Y pixel of useful slitlet region') header['imxslt' + str(islitlet).zfill(2)] = (slt.imaxslt, 'maximum Y pixel of useful slitlet region') # determine useful channel region in each spectrum and include # that information in FITS header jminslt = [] jmaxslt = [] for idum in range(ii1, ii2 + 1): jminmax = find_pix_borders(slitlet2d_rect_wv[idum, :], sought_value=0) if jminmax != (-1, naxis1_enlarged): jminslt.append(jminmax[0]) jmaxslt.append(jminmax[1]) # depends on [control=['if'], data=['jminmax']] # depends on [control=['for'], data=['idum']] if len(jminslt) > 0: slt.jminslt = min(jminslt) + 1 slt.jmaxslt = max(jmaxslt) + 1 # depends on [control=['if'], data=[]] header['jmnslt' + str(islitlet).zfill(2)] = (slt.jminslt, 'minimum X pixel of useful slitlet region') header['jmxslt' + str(islitlet).zfill(2)] = (slt.jmaxslt, 'maximum X pixel of useful slitlet region') cout += '.' # depends on [control=['if'], data=['islitlet']] else: # include scan and channel range in FITS header header['imnslt' + str(islitlet).zfill(2)] = (0, 'minimum Y pixel of useful slitlet region') header['imxslt' + str(islitlet).zfill(2)] = (0, 'maximum Y pixel of useful slitlet region') header['jmnslt' + str(islitlet).zfill(2)] = (0, 'minimum X pixel of useful slitlet region') header['jmxslt' + str(islitlet).zfill(2)] = (0, 'maximum X pixel of useful slitlet region') cout += 'i' if islitlet % 10 == 0: if cout != 'i': cout = str(islitlet // 10) # depends on [control=['if'], data=['cout']] # depends on [control=['if'], data=[]] logger.info(cout) # depends on [control=['for'], data=['islitlet']] # update wavelength calibration in FITS header logger.info('Updating image header') for keyword in ['crval1', 'crpix1', 'crval2', 'crpix2']: if keyword in header: header.remove(keyword) # depends on [control=['if'], data=['keyword', 'header']] # depends on [control=['for'], data=['keyword']] header['crpix1'] = (crpix1_enlarged, 'reference pixel') header['crval1'] = (crval1_enlarged, 'central wavelength at crpix1') header['cdelt1'] = (cdelt1_enlarged, 'linear dispersion (Angstrom/pixel)') header['cunit1'] = ('Angstrom', 'units along axis1') header['ctype1'] = 'WAVELENGTH' header['crpix2'] = (0.0, 'reference pixel') header['crval2'] = (0.0, 'central value at crpix2') header['cdelt2'] = (1.0, 'increment') header['ctype2'] = 'PIXEL' header['cunit2'] = ('Pixel', 'units along axis2') for keyword in ['cd1_1', 'cd1_2', 'cd2_1', 'cd2_2', 'PCD1_1', 'PCD1_2', 'PCD2_1', 'PCD2_2', 'PCRPIX1', 'PCRPIX2']: if keyword in header: header.remove(keyword) # depends on [control=['if'], data=['keyword', 'header']] # depends on [control=['for'], data=['keyword']] # update history in FITS header header['history'] = 'Boundary parameters uuid:' + rectwv_coeff.meta_info['origin']['bound_param'][4:] if 'master_rectwv' in rectwv_coeff.meta_info['origin']: header['history'] = 'MasterRectWave uuid:' + rectwv_coeff.meta_info['origin']['master_rectwv'][4:] # depends on [control=['if'], data=[]] header['history'] = 'RectWaveCoeff uuid:' + rectwv_coeff.uuid header['history'] = 'Rectification and wavelength calibration time ' + datetime.now().isoformat() logger.info('Generating rectified and wavelength calibrated image') rectwv_image = fits.PrimaryHDU(data=image2d_rectwv, header=header) return fits.HDUList([rectwv_image])
def _fullCloneOrFallback(self): """Wrapper for _fullClone(). In the case of failure, if clobberOnFailure is set to True remove the build directory and try a full clone again. """ res = yield self._fullClone() if res != RC_SUCCESS: if not self.clobberOnFailure: raise buildstep.BuildStepFailed() res = yield self.clobber() return res
def function[_fullCloneOrFallback, parameter[self]]: constant[Wrapper for _fullClone(). In the case of failure, if clobberOnFailure is set to True remove the build directory and try a full clone again. ] variable[res] assign[=] <ast.Yield object at 0x7da1b21e31f0> if compare[name[res] not_equal[!=] name[RC_SUCCESS]] begin[:] if <ast.UnaryOp object at 0x7da1b21e23e0> begin[:] <ast.Raise object at 0x7da1b21e0b80> variable[res] assign[=] <ast.Yield object at 0x7da1b21e3820> return[name[res]]
keyword[def] identifier[_fullCloneOrFallback] ( identifier[self] ): literal[string] identifier[res] = keyword[yield] identifier[self] . identifier[_fullClone] () keyword[if] identifier[res] != identifier[RC_SUCCESS] : keyword[if] keyword[not] identifier[self] . identifier[clobberOnFailure] : keyword[raise] identifier[buildstep] . identifier[BuildStepFailed] () identifier[res] = keyword[yield] identifier[self] . identifier[clobber] () keyword[return] identifier[res]
def _fullCloneOrFallback(self): """Wrapper for _fullClone(). In the case of failure, if clobberOnFailure is set to True remove the build directory and try a full clone again. """ res = (yield self._fullClone()) if res != RC_SUCCESS: if not self.clobberOnFailure: raise buildstep.BuildStepFailed() # depends on [control=['if'], data=[]] res = (yield self.clobber()) # depends on [control=['if'], data=['res']] return res
def _set_as_int(self, addr, val, numBytes = 1): """Convenience method. Oftentimes we need to set a range of registers to represent an int. This method will automatically set @numBytes registers starting at @addr. It will convert the int @val into an array of bytes.""" if not isinstance(val, int): raise ValueError("val must be an int. You provided: %s" % str(val)) buf = [] for i in range(numBytes): buf.append(cast_to_byte(val >> 8 * i)) self.set_register(addr, buf)
def function[_set_as_int, parameter[self, addr, val, numBytes]]: constant[Convenience method. Oftentimes we need to set a range of registers to represent an int. This method will automatically set @numBytes registers starting at @addr. It will convert the int @val into an array of bytes.] if <ast.UnaryOp object at 0x7da18eb54700> begin[:] <ast.Raise object at 0x7da18eb55030> variable[buf] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[name[numBytes]]]] begin[:] call[name[buf].append, parameter[call[name[cast_to_byte], parameter[binary_operation[name[val] <ast.RShift object at 0x7da2590d6a40> binary_operation[constant[8] * name[i]]]]]]] call[name[self].set_register, parameter[name[addr], name[buf]]]
keyword[def] identifier[_set_as_int] ( identifier[self] , identifier[addr] , identifier[val] , identifier[numBytes] = literal[int] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[val] , identifier[int] ): keyword[raise] identifier[ValueError] ( literal[string] % identifier[str] ( identifier[val] )) identifier[buf] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[numBytes] ): identifier[buf] . identifier[append] ( identifier[cast_to_byte] ( identifier[val] >> literal[int] * identifier[i] )) identifier[self] . identifier[set_register] ( identifier[addr] , identifier[buf] )
def _set_as_int(self, addr, val, numBytes=1): """Convenience method. Oftentimes we need to set a range of registers to represent an int. This method will automatically set @numBytes registers starting at @addr. It will convert the int @val into an array of bytes.""" if not isinstance(val, int): raise ValueError('val must be an int. You provided: %s' % str(val)) # depends on [control=['if'], data=[]] buf = [] for i in range(numBytes): buf.append(cast_to_byte(val >> 8 * i)) # depends on [control=['for'], data=['i']] self.set_register(addr, buf)
def import_name(modulename, name=None): """ Import identifier ``name`` from module ``modulename``. If ``name`` is omitted, ``modulename`` must contain the name after the module path, delimited by a colon. Parameters: modulename (str): Fully qualified module name, e.g. ``x.y.z``. name (str): Name to import from ``modulename``. Returns: object: Requested object. """ if name is None: modulename, name = modulename.rsplit(':', 1) module = __import__(modulename, globals(), {}, [name]) return getattr(module, name)
def function[import_name, parameter[modulename, name]]: constant[ Import identifier ``name`` from module ``modulename``. If ``name`` is omitted, ``modulename`` must contain the name after the module path, delimited by a colon. Parameters: modulename (str): Fully qualified module name, e.g. ``x.y.z``. name (str): Name to import from ``modulename``. Returns: object: Requested object. ] if compare[name[name] is constant[None]] begin[:] <ast.Tuple object at 0x7da20c6e5fc0> assign[=] call[name[modulename].rsplit, parameter[constant[:], constant[1]]] variable[module] assign[=] call[name[__import__], parameter[name[modulename], call[name[globals], parameter[]], dictionary[[], []], list[[<ast.Name object at 0x7da20cabe380>]]]] return[call[name[getattr], parameter[name[module], name[name]]]]
keyword[def] identifier[import_name] ( identifier[modulename] , identifier[name] = keyword[None] ): literal[string] keyword[if] identifier[name] keyword[is] keyword[None] : identifier[modulename] , identifier[name] = identifier[modulename] . identifier[rsplit] ( literal[string] , literal[int] ) identifier[module] = identifier[__import__] ( identifier[modulename] , identifier[globals] (),{},[ identifier[name] ]) keyword[return] identifier[getattr] ( identifier[module] , identifier[name] )
def import_name(modulename, name=None): """ Import identifier ``name`` from module ``modulename``. If ``name`` is omitted, ``modulename`` must contain the name after the module path, delimited by a colon. Parameters: modulename (str): Fully qualified module name, e.g. ``x.y.z``. name (str): Name to import from ``modulename``. Returns: object: Requested object. """ if name is None: (modulename, name) = modulename.rsplit(':', 1) # depends on [control=['if'], data=['name']] module = __import__(modulename, globals(), {}, [name]) return getattr(module, name)
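Usage is straightforward; the colon form and the explicit name argument resolve to the same attribute:

join = import_name('os.path:join')  # colon-delimited form
print(join('a', 'b'))               # 'a/b' on POSIX

pi = import_name('math', 'pi')      # explicit name argument
print(pi)                           # 3.141592653589793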
def remember_forever(self, key, callback): """ Get an item from the cache, or store the default value forever. :param key: The cache key :type key: str :param callback: The default function :type callback: mixed :rtype: mixed """ # If the item exists in the cache we will just return this immediately # otherwise we will execute the given callback and cache the result # of that execution forever. val = self.get(key) if val is not None: return val val = value(callback) self.forever(key, val) return val
def function[remember_forever, parameter[self, key, callback]]: constant[ Get an item from the cache, or store the default value forever. :param key: The cache key :type key: str :param callback: The default function :type callback: mixed :rtype: mixed ] variable[val] assign[=] call[name[self].get, parameter[name[key]]] if compare[name[val] is_not constant[None]] begin[:] return[name[val]] variable[val] assign[=] call[name[value], parameter[name[callback]]] call[name[self].forever, parameter[name[key], name[val]]] return[name[val]]
keyword[def] identifier[remember_forever] ( identifier[self] , identifier[key] , identifier[callback] ): literal[string] identifier[val] = identifier[self] . identifier[get] ( identifier[key] ) keyword[if] identifier[val] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[val] identifier[val] = identifier[value] ( identifier[callback] ) identifier[self] . identifier[forever] ( identifier[key] , identifier[val] ) keyword[return] identifier[val]
def remember_forever(self, key, callback): """ Get an item from the cache, or store the default value forever. :param key: The cache key :type key: str :param callback: The default function :type callback: mixed :rtype: mixed """ # If the item exists in the cache we will just return this immediately # otherwise we will execute the given callback and cache the result # of that execution forever. val = self.get(key) if val is not None: return val # depends on [control=['if'], data=['val']] val = value(callback) self.forever(key, val) return val
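A quick illustration with a toy dictionary-backed store; the value() helper and DictCache class below are stand-ins invented for the demo, reusing the function above as a method:

def value(callback):
    # stand-in for the module's value() helper: resolve callables
    return callback() if callable(callback) else callback

class DictCache:
    def __init__(self):
        self._store = {}

    def get(self, key):
        return self._store.get(key)

    def forever(self, key, val):
        self._store[key] = val

    remember_forever = remember_forever  # reuse the function defined above

cache = DictCache()
print(cache.remember_forever('users.count', lambda: 42))  # computes: 42
print(cache.remember_forever('users.count', lambda: 99))  # cached: 42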
def to_shapely_line_string(self, closed=False, interpolate=0): """ Convert this polygon to a Shapely LineString object. Parameters ---------- closed : bool, optional Whether to return the line string with the last point being identical to the first point. interpolate : int, optional Number of points to interpolate between any pair of two consecutive points. These points are added to the final line string. Returns ------- shapely.geometry.LineString The Shapely LineString matching the polygon's exterior. """ return _convert_points_to_shapely_line_string(self.exterior, closed=closed, interpolate=interpolate)
def function[to_shapely_line_string, parameter[self, closed, interpolate]]: constant[ Convert this polygon to a Shapely LineString object. Parameters ---------- closed : bool, optional Whether to return the line string with the last point being identical to the first point. interpolate : int, optional Number of points to interpolate between any pair of two consecutive points. These points are added to the final line string. Returns ------- shapely.geometry.LineString The Shapely LineString matching the polygon's exterior. ] return[call[name[_convert_points_to_shapely_line_string], parameter[name[self].exterior]]]
keyword[def] identifier[to_shapely_line_string] ( identifier[self] , identifier[closed] = keyword[False] , identifier[interpolate] = literal[int] ): literal[string] keyword[return] identifier[_convert_points_to_shapely_line_string] ( identifier[self] . identifier[exterior] , identifier[closed] = identifier[closed] , identifier[interpolate] = identifier[interpolate] )
def to_shapely_line_string(self, closed=False, interpolate=0): """ Convert this polygon to a Shapely LineString object. Parameters ---------- closed : bool, optional Whether to return the line string with the last point being identical to the first point. interpolate : int, optional Number of points to interpolate between any pair of two consecutive points. These points are added to the final line string. Returns ------- shapely.geometry.LineString The Shapely LineString matching the polygon's exterior. """ return _convert_points_to_shapely_line_string(self.exterior, closed=closed, interpolate=interpolate)
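A usage sketch, assuming imgaug (which provides this Polygon class) and shapely are installed:

from imgaug.augmentables.polys import Polygon

poly = Polygon([(0, 0), (4, 0), (4, 3)])
ls = poly.to_shapely_line_string(closed=True)
print(ls.length)  # perimeter of the 3-4-5 triangle: 12.0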
def _get_object(data, position, obj_end, opts, dummy): """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" obj_size, end = _get_object_size(data, position, obj_end) if _raw_document_class(opts.document_class): return (opts.document_class(data[position:end + 1], opts), position + obj_size) obj = _elements_to_dict(data, position + 4, end, opts) position += obj_size if "$ref" in obj: return (DBRef(obj.pop("$ref"), obj.pop("$id", None), obj.pop("$db", None), obj), position) return obj, position
def function[_get_object, parameter[data, position, obj_end, opts, dummy]]: constant[Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.] <ast.Tuple object at 0x7da20c992620> assign[=] call[name[_get_object_size], parameter[name[data], name[position], name[obj_end]]] if call[name[_raw_document_class], parameter[name[opts].document_class]] begin[:] return[tuple[[<ast.Call object at 0x7da20c990580>, <ast.BinOp object at 0x7da20c993100>]]] variable[obj] assign[=] call[name[_elements_to_dict], parameter[name[data], binary_operation[name[position] + constant[4]], name[end], name[opts]]] <ast.AugAssign object at 0x7da20c991030> if compare[constant[$ref] in name[obj]] begin[:] return[tuple[[<ast.Call object at 0x7da20c993fa0>, <ast.Name object at 0x7da20c991180>]]] return[tuple[[<ast.Name object at 0x7da20c991600>, <ast.Name object at 0x7da20c992230>]]]
keyword[def] identifier[_get_object] ( identifier[data] , identifier[position] , identifier[obj_end] , identifier[opts] , identifier[dummy] ): literal[string] identifier[obj_size] , identifier[end] = identifier[_get_object_size] ( identifier[data] , identifier[position] , identifier[obj_end] ) keyword[if] identifier[_raw_document_class] ( identifier[opts] . identifier[document_class] ): keyword[return] ( identifier[opts] . identifier[document_class] ( identifier[data] [ identifier[position] : identifier[end] + literal[int] ], identifier[opts] ), identifier[position] + identifier[obj_size] ) identifier[obj] = identifier[_elements_to_dict] ( identifier[data] , identifier[position] + literal[int] , identifier[end] , identifier[opts] ) identifier[position] += identifier[obj_size] keyword[if] literal[string] keyword[in] identifier[obj] : keyword[return] ( identifier[DBRef] ( identifier[obj] . identifier[pop] ( literal[string] ), identifier[obj] . identifier[pop] ( literal[string] , keyword[None] ), identifier[obj] . identifier[pop] ( literal[string] , keyword[None] ), identifier[obj] ), identifier[position] ) keyword[return] identifier[obj] , identifier[position]
def _get_object(data, position, obj_end, opts, dummy): """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" (obj_size, end) = _get_object_size(data, position, obj_end) if _raw_document_class(opts.document_class): return (opts.document_class(data[position:end + 1], opts), position + obj_size) # depends on [control=['if'], data=[]] obj = _elements_to_dict(data, position + 4, end, opts) position += obj_size if '$ref' in obj: return (DBRef(obj.pop('$ref'), obj.pop('$id', None), obj.pop('$db', None), obj), position) # depends on [control=['if'], data=['obj']] return (obj, position)
def __decode_data_time(self, payload): """Extract time and decode payload (based on mime type) from payload. Applies to E_FEEDDATA and E_RECENTDATA. Returns tuple of data, mime, time.""" data, mime = self.__bytes_to_share_data(payload) try: time = datetime.strptime(payload.get(P_TIME), self.__share_time_fmt) except (ValueError, TypeError): logger.warning('Share payload from container has invalid timestamp (%s), will use naive local time', payload.get(P_TIME)) time = datetime.utcnow() return data, mime, time
def function[__decode_data_time, parameter[self, payload]]: constant[Extract time and decode payload (based on mime type) from payload. Applies to E_FEEDDATA and E_RECENTDATA. Returns tuple of data, mime, time.] <ast.Tuple object at 0x7da1b1b17070> assign[=] call[name[self].__bytes_to_share_data, parameter[name[payload]]] <ast.Try object at 0x7da1b1b17f70> return[tuple[[<ast.Name object at 0x7da1b1b1ada0>, <ast.Name object at 0x7da1b1b1a2c0>, <ast.Name object at 0x7da1b1b19f90>]]]
keyword[def] identifier[__decode_data_time] ( identifier[self] , identifier[payload] ): literal[string] identifier[data] , identifier[mime] = identifier[self] . identifier[__bytes_to_share_data] ( identifier[payload] ) keyword[try] : identifier[time] = identifier[datetime] . identifier[strptime] ( identifier[payload] . identifier[get] ( identifier[P_TIME] ), identifier[self] . identifier[__share_time_fmt] ) keyword[except] ( identifier[ValueError] , identifier[TypeError] ): identifier[logger] . identifier[warning] ( literal[string] , identifier[payload] . identifier[get] ( identifier[P_TIME] )) identifier[time] = identifier[datetime] . identifier[utcnow] () keyword[return] identifier[data] , identifier[mime] , identifier[time]
def __decode_data_time(self, payload): """Extract time and decode payload (based on mime type) from payload. Applies to E_FEEDDATA and E_RECENTDATA. Returns tuple of data, mime, time.""" (data, mime) = self.__bytes_to_share_data(payload) try: time = datetime.strptime(payload.get(P_TIME), self.__share_time_fmt) # depends on [control=['try'], data=[]] except (ValueError, TypeError): logger.warning('Share payload from container has invalid timestamp (%s), will use naive local time', payload.get(P_TIME)) time = datetime.utcnow() # depends on [control=['except'], data=[]] return (data, mime, time)
def on_background_image(self, *args): """When I get a new ``background_image``, store its texture in ``background_texture``. """ if self.background_image is not None: self.background_texture = self.background_image.texture
def function[on_background_image, parameter[self]]: constant[When I get a new ``background_image``, store its texture in ``background_texture``. ] if compare[name[self].background_image is_not constant[None]] begin[:] name[self].background_texture assign[=] name[self].background_image.texture
keyword[def] identifier[on_background_image] ( identifier[self] ,* identifier[args] ): literal[string] keyword[if] identifier[self] . identifier[background_image] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[background_texture] = identifier[self] . identifier[background_image] . identifier[texture]
def on_background_image(self, *args): """When I get a new ``background_image``, store its texture in ``background_texture``. """ if self.background_image is not None: self.background_texture = self.background_image.texture # depends on [control=['if'], data=[]]
def find(self, **kwargs):
    """Returns List(typeof=).
    Executes collection's find method based on keyword args and maps
    results (dict to list of entity instances).
    Set max_limit parameter to limit the amount of data sent back through
    the network.

    Example::

        manager = EntityManager(Product)
        products = yield manager.find(age={'$gt': 17}, max_limit=100)
    """
    max_limit = None
    if 'max_limit' in kwargs:
        max_limit = kwargs.pop('max_limit')
    cursor = self.__collection.find(kwargs)
    instances = []
    for doc in (yield cursor.to_list(max_limit)):
        instance = self.__entity()
        instance.map_dict(doc)
        instances.append(instance)
    return instances
def function[find, parameter[self]]:
    constant[Returns List(typeof=).
        Executes collection's find method based on keyword args and maps results (dict to list of entity instances).
        Set max_limit parameter to limit the amount of data sent back through the network.

        Example::
            manager = EntityManager(Product)
            products = yield manager.find(age={'$gt': 17}, max_limit=100)
    ]
    variable[max_limit] assign[=] constant[None]
    if compare[constant[max_limit] in name[kwargs]] begin[:]
    variable[max_limit] assign[=] call[name[kwargs].pop, parameter[constant[max_limit]]]
    variable[cursor] assign[=] call[name[self].__collection.find, parameter[name[kwargs]]]
    variable[instances] assign[=] list[[]]
    for taget[name[doc]] in starred[<ast.Yield object at 0x7da2047e90f0>] begin[:]
    variable[instance] assign[=] call[name[self].__entity, parameter[]]
    call[name[instance].map_dict, parameter[name[doc]]]
    call[name[instances].append, parameter[name[instance]]]
    return[name[instances]]
keyword[def] identifier[find] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[max_limit] = keyword[None] keyword[if] literal[string] keyword[in] identifier[kwargs] : identifier[max_limit] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[cursor] = identifier[self] . identifier[__collection] . identifier[find] ( identifier[kwargs] ) identifier[instances] =[] keyword[for] identifier[doc] keyword[in] ( keyword[yield] identifier[cursor] . identifier[to_list] ( identifier[max_limit] )): identifier[instance] = identifier[self] . identifier[__entity] () identifier[instance] . identifier[map_dict] ( identifier[doc] ) identifier[instances] . identifier[append] ( identifier[instance] ) keyword[return] identifier[instances]
def find(self, **kwargs):
    """Returns List(typeof=).
    Executes collection's find method based on keyword args and maps
    results (dict to list of entity instances).
    Set max_limit parameter to limit the amount of data sent back through
    the network.

    Example::

        manager = EntityManager(Product)
        products = yield manager.find(age={'$gt': 17}, max_limit=100)
    """
    max_limit = None
    if 'max_limit' in kwargs:
        max_limit = kwargs.pop('max_limit') # depends on [control=['if'], data=['kwargs']]
    cursor = self.__collection.find(kwargs)
    instances = []
    for doc in (yield cursor.to_list(max_limit)):
        instance = self.__entity()
        instance.map_dict(doc)
        instances.append(instance) # depends on [control=['for'], data=['doc']]
    return instances
def update_event_source_mapping(UUID,
                                FunctionName=None, Enabled=None, BatchSize=None,
                                region=None, key=None, keyid=None, profile=None):
    '''
    Update the event source mapping identified by the UUID.

    Returns {updated: true} if the event source mapping was updated and
    returns {updated: False} if it was not updated.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_lambda.update_event_source_mapping uuid FunctionName=new_function

    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

        args = {}
        if FunctionName is not None:
            args['FunctionName'] = FunctionName
        if Enabled is not None:
            args['Enabled'] = Enabled
        if BatchSize is not None:
            args['BatchSize'] = BatchSize
        r = conn.update_event_source_mapping(UUID=UUID, **args)
        if r:
            keys = ('UUID', 'BatchSize', 'EventSourceArn', 'FunctionArn',
                    'LastModified', 'LastProcessingResult', 'State',
                    'StateTransitionReason')
            return {'updated': True, 'event_source_mapping': dict([(k, r.get(k)) for k in keys])}
        else:
            log.warning('Mapping was not updated')
            return {'updated': False}
    except ClientError as e:
        return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
def function[update_event_source_mapping, parameter[UUID, FunctionName, Enabled, BatchSize, region, key, keyid, profile]]:
    constant[
    Update the event source mapping identified by the UUID.

    Returns {updated: true} if the event source mapping was updated and
    returns {updated: False} if it was not updated.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_lambda.update_event_source_mapping uuid FunctionName=new_function

    ]
    <ast.Try object at 0x7da1b2347430>
keyword[def] identifier[update_event_source_mapping] ( identifier[UUID] , identifier[FunctionName] = keyword[None] , identifier[Enabled] = keyword[None] , identifier[BatchSize] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ): literal[string] keyword[try] : identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) identifier[args] ={} keyword[if] identifier[FunctionName] keyword[is] keyword[not] keyword[None] : identifier[args] [ literal[string] ]= identifier[FunctionName] keyword[if] identifier[Enabled] keyword[is] keyword[not] keyword[None] : identifier[args] [ literal[string] ]= identifier[Enabled] keyword[if] identifier[BatchSize] keyword[is] keyword[not] keyword[None] : identifier[args] [ literal[string] ]= identifier[BatchSize] identifier[r] = identifier[conn] . identifier[update_event_source_mapping] ( identifier[UUID] = identifier[UUID] ,** identifier[args] ) keyword[if] identifier[r] : identifier[keys] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ) keyword[return] { literal[string] : keyword[True] , literal[string] : identifier[dict] ([( identifier[k] , identifier[r] . identifier[get] ( identifier[k] )) keyword[for] identifier[k] keyword[in] identifier[keys] ])} keyword[else] : identifier[log] . identifier[warning] ( literal[string] ) keyword[return] { literal[string] : keyword[False] } keyword[except] identifier[ClientError] keyword[as] identifier[e] : keyword[return] { literal[string] : keyword[False] , literal[string] : identifier[__utils__] [ literal[string] ]( identifier[e] )}
def update_event_source_mapping(UUID, FunctionName=None, Enabled=None, BatchSize=None, region=None, key=None, keyid=None, profile=None):
    """
    Update the event source mapping identified by the UUID.

    Returns {updated: true} if the event source mapping was updated and
    returns {updated: False} if it was not updated.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_lambda.update_event_source_mapping uuid FunctionName=new_function

    """
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        args = {}
        if FunctionName is not None:
            args['FunctionName'] = FunctionName # depends on [control=['if'], data=['FunctionName']]
        if Enabled is not None:
            args['Enabled'] = Enabled # depends on [control=['if'], data=['Enabled']]
        if BatchSize is not None:
            args['BatchSize'] = BatchSize # depends on [control=['if'], data=['BatchSize']]
        r = conn.update_event_source_mapping(UUID=UUID, **args)
        if r:
            keys = ('UUID', 'BatchSize', 'EventSourceArn', 'FunctionArn', 'LastModified', 'LastProcessingResult', 'State', 'StateTransitionReason')
            return {'updated': True, 'event_source_mapping': dict([(k, r.get(k)) for k in keys])} # depends on [control=['if'], data=[]]
        else:
            log.warning('Mapping was not updated')
            return {'updated': False} # depends on [control=['try'], data=[]]
    except ClientError as e:
        return {'updated': False, 'error': __utils__['boto3.get_error'](e)} # depends on [control=['except'], data=['e']]
def singleChoiceParam(parameters, name, type_converter=str):
    """Single choice parameter value. Returns -1 if no value was chosen.

    :param parameters: the parameters tree.
    :param name: the name of the parameter.
    :param type_converter: function to convert the chosen value to a
        different type (e.g. str, float, int). default = str"""
    param = parameters.find(".//SingleChoiceParam[@Name='{name}']".format(name=name))
    value = int(param.find('Value').text)
    values = param.find('Values')
    if value < 0:
        return value
    return type_converter(values[value].text)
def function[singleChoiceParam, parameter[parameters, name, type_converter]]:
    constant[Single choice parameter value. Returns -1 if no value was chosen.
    :param parameters: the parameters tree.
    :param name: the name of the parameter.
    :param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = str]
    variable[param] assign[=] call[name[parameters].find, parameter[call[constant[.//SingleChoiceParam[@Name='{name}']].format, parameter[]]]]
    variable[value] assign[=] call[name[int], parameter[call[name[param].find, parameter[constant[Value]]].text]]
    variable[values] assign[=] call[name[param].find, parameter[constant[Values]]]
    if compare[name[value] less[<] constant[0]] begin[:]
    return[name[value]]
    return[call[name[type_converter], parameter[call[name[values]][name[value]].text]]]
keyword[def] identifier[singleChoiceParam] ( identifier[parameters] , identifier[name] , identifier[type_converter] = identifier[str] ): literal[string] identifier[param] = identifier[parameters] . identifier[find] ( literal[string] . identifier[format] ( identifier[name] = identifier[name] )) identifier[value] = identifier[int] ( identifier[param] . identifier[find] ( literal[string] ). identifier[text] ) identifier[values] = identifier[param] . identifier[find] ( literal[string] ) keyword[if] identifier[value] < literal[int] : keyword[return] identifier[value] keyword[return] identifier[type_converter] ( identifier[values] [ identifier[value] ]. identifier[text] )
def singleChoiceParam(parameters, name, type_converter=str):
    """Single choice parameter value. Returns -1 if no value was chosen.
    :param parameters: the parameters tree.
    :param name: the name of the parameter.
    :param type_converter: function to convert the chosen value to a different type (e.g. str, float, int). default = str"""
    param = parameters.find(".//SingleChoiceParam[@Name='{name}']".format(name=name))
    value = int(param.find('Value').text)
    values = param.find('Values')
    if value < 0:
        return value # depends on [control=['if'], data=['value']]
    return type_converter(values[value].text)
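A self-contained demonstration with xml.etree.ElementTree; the <String> child tags inside Values are an assumption about the parameter-file layout, chosen only so the indexing works:

import xml.etree.ElementTree as ET

doc = ET.fromstring("""
<Parameters>
  <SingleChoiceParam Name="Quantification">
    <Value>1</Value>
    <Values>
      <String>None</String>
      <String>LFQ</String>
    </Values>
  </SingleChoiceParam>
</Parameters>
""")
print(singleChoiceParam(doc, 'Quantification'))  # -> 'LFQ'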
def _getJsonOrig(url): '''internal''' url = _URL_PREFIX + url resp = requests.get(urlparse(url).geturl(), proxies=_PYEX_PROXIES) if resp.status_code == 200: return resp.json() raise PyEXception('Response %d - ' % resp.status_code, resp.text)
def function[_getJsonOrig, parameter[url]]: constant[internal] variable[url] assign[=] binary_operation[name[_URL_PREFIX] + name[url]] variable[resp] assign[=] call[name[requests].get, parameter[call[call[name[urlparse], parameter[name[url]]].geturl, parameter[]]]] if compare[name[resp].status_code equal[==] constant[200]] begin[:] return[call[name[resp].json, parameter[]]] <ast.Raise object at 0x7da1b0153ac0>
keyword[def] identifier[_getJsonOrig] ( identifier[url] ): literal[string] identifier[url] = identifier[_URL_PREFIX] + identifier[url] identifier[resp] = identifier[requests] . identifier[get] ( identifier[urlparse] ( identifier[url] ). identifier[geturl] (), identifier[proxies] = identifier[_PYEX_PROXIES] ) keyword[if] identifier[resp] . identifier[status_code] == literal[int] : keyword[return] identifier[resp] . identifier[json] () keyword[raise] identifier[PyEXception] ( literal[string] % identifier[resp] . identifier[status_code] , identifier[resp] . identifier[text] )
def _getJsonOrig(url): """internal""" url = _URL_PREFIX + url resp = requests.get(urlparse(url).geturl(), proxies=_PYEX_PROXIES) if resp.status_code == 200: return resp.json() # depends on [control=['if'], data=[]] raise PyEXception('Response %d - ' % resp.status_code, resp.text)
def i2c_bitrate(self): """I2C bitrate in kHz. Not every bitrate is supported by the host adapter. Therefore, the actual bitrate may be less than the value which is set. The power-on default value is 100 kHz. """ ret = api.py_aa_i2c_bitrate(self.handle, 0) _raise_error_if_negative(ret) return ret
def function[i2c_bitrate, parameter[self]]: constant[I2C bitrate in kHz. Not every bitrate is supported by the host adapter. Therefore, the actual bitrate may be less than the value which is set. The power-on default value is 100 kHz. ] variable[ret] assign[=] call[name[api].py_aa_i2c_bitrate, parameter[name[self].handle, constant[0]]] call[name[_raise_error_if_negative], parameter[name[ret]]] return[name[ret]]
keyword[def] identifier[i2c_bitrate] ( identifier[self] ): literal[string] identifier[ret] = identifier[api] . identifier[py_aa_i2c_bitrate] ( identifier[self] . identifier[handle] , literal[int] ) identifier[_raise_error_if_negative] ( identifier[ret] ) keyword[return] identifier[ret]
def i2c_bitrate(self): """I2C bitrate in kHz. Not every bitrate is supported by the host adapter. Therefore, the actual bitrate may be less than the value which is set. The power-on default value is 100 kHz. """ ret = api.py_aa_i2c_bitrate(self.handle, 0) _raise_error_if_negative(ret) return ret
def get_history(self, exp, rep, tags): """ returns the whole history for one experiment and one repetition. tags can be a string or a list of strings. if tags is a string, the history is returned as list of values, if tags is a list of strings or 'all', history is returned as a dictionary of lists of values. """ params = self.get_params(exp) if params == None: raise SystemExit('experiment %s not found.'%exp) # make list of tags, even if it is only one if tags != 'all' and not hasattr(tags, '__iter__'): tags = [tags] results = {} logfile = os.path.join(exp, '%i.log'%rep) try: f = open(logfile) except IOError: if len(tags) == 1: return [] else: return {} for line in f: dic = json.loads(line) for tag in tags: if not tag in results: results[tag] = [] if tag in dic: results[tag].append(dic[tag]) else: results[tag].append(None) f.close() if len(results) == 0: if len(tags) == 1: return [] else: return {} # raise ValueError('tag(s) not found: %s'%str(tags)) if len(tags) == 1: return results[results.keys()[0]] else: return results
def function[get_history, parameter[self, exp, rep, tags]]: constant[ returns the whole history for one experiment and one repetition. tags can be a string or a list of strings. if tags is a string, the history is returned as list of values, if tags is a list of strings or 'all', history is returned as a dictionary of lists of values. ] variable[params] assign[=] call[name[self].get_params, parameter[name[exp]]] if compare[name[params] equal[==] constant[None]] begin[:] <ast.Raise object at 0x7da1b08f6a70> if <ast.BoolOp object at 0x7da1b08f6bc0> begin[:] variable[tags] assign[=] list[[<ast.Name object at 0x7da1b0889810>]] variable[results] assign[=] dictionary[[], []] variable[logfile] assign[=] call[name[os].path.join, parameter[name[exp], binary_operation[constant[%i.log] <ast.Mod object at 0x7da2590d6920> name[rep]]]] <ast.Try object at 0x7da2043456f0> for taget[name[line]] in starred[name[f]] begin[:] variable[dic] assign[=] call[name[json].loads, parameter[name[line]]] for taget[name[tag]] in starred[name[tags]] begin[:] if <ast.UnaryOp object at 0x7da204345f60> begin[:] call[name[results]][name[tag]] assign[=] list[[]] if compare[name[tag] in name[dic]] begin[:] call[call[name[results]][name[tag]].append, parameter[call[name[dic]][name[tag]]]] call[name[f].close, parameter[]] if compare[call[name[len], parameter[name[results]]] equal[==] constant[0]] begin[:] if compare[call[name[len], parameter[name[tags]]] equal[==] constant[1]] begin[:] return[list[[]]] if compare[call[name[len], parameter[name[tags]]] equal[==] constant[1]] begin[:] return[call[name[results]][call[call[name[results].keys, parameter[]]][constant[0]]]]
keyword[def] identifier[get_history] ( identifier[self] , identifier[exp] , identifier[rep] , identifier[tags] ): literal[string] identifier[params] = identifier[self] . identifier[get_params] ( identifier[exp] ) keyword[if] identifier[params] == keyword[None] : keyword[raise] identifier[SystemExit] ( literal[string] % identifier[exp] ) keyword[if] identifier[tags] != literal[string] keyword[and] keyword[not] identifier[hasattr] ( identifier[tags] , literal[string] ): identifier[tags] =[ identifier[tags] ] identifier[results] ={} identifier[logfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[exp] , literal[string] % identifier[rep] ) keyword[try] : identifier[f] = identifier[open] ( identifier[logfile] ) keyword[except] identifier[IOError] : keyword[if] identifier[len] ( identifier[tags] )== literal[int] : keyword[return] [] keyword[else] : keyword[return] {} keyword[for] identifier[line] keyword[in] identifier[f] : identifier[dic] = identifier[json] . identifier[loads] ( identifier[line] ) keyword[for] identifier[tag] keyword[in] identifier[tags] : keyword[if] keyword[not] identifier[tag] keyword[in] identifier[results] : identifier[results] [ identifier[tag] ]=[] keyword[if] identifier[tag] keyword[in] identifier[dic] : identifier[results] [ identifier[tag] ]. identifier[append] ( identifier[dic] [ identifier[tag] ]) keyword[else] : identifier[results] [ identifier[tag] ]. identifier[append] ( keyword[None] ) identifier[f] . identifier[close] () keyword[if] identifier[len] ( identifier[results] )== literal[int] : keyword[if] identifier[len] ( identifier[tags] )== literal[int] : keyword[return] [] keyword[else] : keyword[return] {} keyword[if] identifier[len] ( identifier[tags] )== literal[int] : keyword[return] identifier[results] [ identifier[results] . identifier[keys] ()[ literal[int] ]] keyword[else] : keyword[return] identifier[results]
def get_history(self, exp, rep, tags): """ returns the whole history for one experiment and one repetition. tags can be a string or a list of strings. if tags is a string, the history is returned as list of values, if tags is a list of strings or 'all', history is returned as a dictionary of lists of values. """ params = self.get_params(exp) if params == None: raise SystemExit('experiment %s not found.' % exp) # depends on [control=['if'], data=[]] # make list of tags, even if it is only one if tags != 'all' and (not hasattr(tags, '__iter__')): tags = [tags] # depends on [control=['if'], data=[]] results = {} logfile = os.path.join(exp, '%i.log' % rep) try: f = open(logfile) # depends on [control=['try'], data=[]] except IOError: if len(tags) == 1: return [] # depends on [control=['if'], data=[]] else: return {} # depends on [control=['except'], data=[]] for line in f: dic = json.loads(line) for tag in tags: if not tag in results: results[tag] = [] # depends on [control=['if'], data=[]] if tag in dic: results[tag].append(dic[tag]) # depends on [control=['if'], data=['tag', 'dic']] else: results[tag].append(None) # depends on [control=['for'], data=['tag']] # depends on [control=['for'], data=['line']] f.close() if len(results) == 0: if len(tags) == 1: return [] # depends on [control=['if'], data=[]] else: return {} # depends on [control=['if'], data=[]] # raise ValueError('tag(s) not found: %s'%str(tags)) if len(tags) == 1: return results[results.keys()[0]] # depends on [control=['if'], data=[]] else: return results
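The per-repetition log consumed above is a JSON-lines file (one JSON object per line, stored as <exp>/<rep>.log). A minimal standalone reader for a single tag, mirroring the inner loop:

import io
import json

log = io.StringIO('{"iteration": 0, "reward": 1.5}\n'
                  '{"iteration": 1, "reward": 2.0}\n')
history = [json.loads(line).get('reward') for line in log]
print(history)  # [1.5, 2.0]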
def select_features(cls, features_id, file_struct, annot_beats, framesync): """Selects the features from the given parameters. Parameters ---------- features_id: str The identifier of the features (it must be a key inside the `features_registry`) file_struct: msaf.io.FileStruct The file struct containing the files to extract the features from annot_beats: boolean Whether to use annotated (`True`) or estimated (`False`) beats framesync: boolean Whether to use framesync (`True`) or beatsync (`False`) features Returns ------- features: obj The actual features object that inherits from `msaf.Features` """ if not annot_beats and framesync: feat_type = FeatureTypes.framesync elif annot_beats and not framesync: feat_type = FeatureTypes.ann_beatsync elif not annot_beats and not framesync: feat_type = FeatureTypes.est_beatsync else: raise FeatureTypeNotFound("Type of features not valid.") # Select features with default parameters if features_id not in features_registry.keys(): raise FeaturesNotFound( "The features '%s' are invalid (valid features are %s)" % (features_id, features_registry.keys())) return features_registry[features_id](file_struct, feat_type)
def function[select_features, parameter[cls, features_id, file_struct, annot_beats, framesync]]: constant[Selects the features from the given parameters. Parameters ---------- features_id: str The identifier of the features (it must be a key inside the `features_registry`) file_struct: msaf.io.FileStruct The file struct containing the files to extract the features from annot_beats: boolean Whether to use annotated (`True`) or estimated (`False`) beats framesync: boolean Whether to use framesync (`True`) or beatsync (`False`) features Returns ------- features: obj The actual features object that inherits from `msaf.Features` ] if <ast.BoolOp object at 0x7da1b0219000> begin[:] variable[feat_type] assign[=] name[FeatureTypes].framesync if compare[name[features_id] <ast.NotIn object at 0x7da2590d7190> call[name[features_registry].keys, parameter[]]] begin[:] <ast.Raise object at 0x7da1b0219570> return[call[call[name[features_registry]][name[features_id]], parameter[name[file_struct], name[feat_type]]]]
keyword[def] identifier[select_features] ( identifier[cls] , identifier[features_id] , identifier[file_struct] , identifier[annot_beats] , identifier[framesync] ): literal[string] keyword[if] keyword[not] identifier[annot_beats] keyword[and] identifier[framesync] : identifier[feat_type] = identifier[FeatureTypes] . identifier[framesync] keyword[elif] identifier[annot_beats] keyword[and] keyword[not] identifier[framesync] : identifier[feat_type] = identifier[FeatureTypes] . identifier[ann_beatsync] keyword[elif] keyword[not] identifier[annot_beats] keyword[and] keyword[not] identifier[framesync] : identifier[feat_type] = identifier[FeatureTypes] . identifier[est_beatsync] keyword[else] : keyword[raise] identifier[FeatureTypeNotFound] ( literal[string] ) keyword[if] identifier[features_id] keyword[not] keyword[in] identifier[features_registry] . identifier[keys] (): keyword[raise] identifier[FeaturesNotFound] ( literal[string] %( identifier[features_id] , identifier[features_registry] . identifier[keys] ())) keyword[return] identifier[features_registry] [ identifier[features_id] ]( identifier[file_struct] , identifier[feat_type] )
def select_features(cls, features_id, file_struct, annot_beats, framesync): """Selects the features from the given parameters. Parameters ---------- features_id: str The identifier of the features (it must be a key inside the `features_registry`) file_struct: msaf.io.FileStruct The file struct containing the files to extract the features from annot_beats: boolean Whether to use annotated (`True`) or estimated (`False`) beats framesync: boolean Whether to use framesync (`True`) or beatsync (`False`) features Returns ------- features: obj The actual features object that inherits from `msaf.Features` """ if not annot_beats and framesync: feat_type = FeatureTypes.framesync # depends on [control=['if'], data=[]] elif annot_beats and (not framesync): feat_type = FeatureTypes.ann_beatsync # depends on [control=['if'], data=[]] elif not annot_beats and (not framesync): feat_type = FeatureTypes.est_beatsync # depends on [control=['if'], data=[]] else: raise FeatureTypeNotFound('Type of features not valid.') # Select features with default parameters if features_id not in features_registry.keys(): raise FeaturesNotFound("The features '%s' are invalid (valid features are %s)" % (features_id, features_registry.keys())) # depends on [control=['if'], data=['features_id']] return features_registry[features_id](file_struct, feat_type)
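A hypothetical call, assuming 'pcp' is a key in features_registry and file_struct points at one track's audio and annotation files:

feats = Features.select_features('pcp', file_struct,
                                 annot_beats=False, framesync=True)
# feats is now a framesync Features instance for that track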
def get_credentials(self): """Get read-only credentials. Returns: class: Read-only credentials. """ return ReadOnlyCredentials( self.access_token, self.client_id, self.client_secret, self.refresh_token )
def function[get_credentials, parameter[self]]: constant[Get read-only credentials. Returns: class: Read-only credentials. ] return[call[name[ReadOnlyCredentials], parameter[name[self].access_token, name[self].client_id, name[self].client_secret, name[self].refresh_token]]]
keyword[def] identifier[get_credentials] ( identifier[self] ): literal[string] keyword[return] identifier[ReadOnlyCredentials] ( identifier[self] . identifier[access_token] , identifier[self] . identifier[client_id] , identifier[self] . identifier[client_secret] , identifier[self] . identifier[refresh_token] )
def get_credentials(self): """Get read-only credentials. Returns: class: Read-only credentials. """ return ReadOnlyCredentials(self.access_token, self.client_id, self.client_secret, self.refresh_token)
def K2onSilicon_main(args=None): """Function called when `K2onSilicon` is executed on the command line.""" import argparse parser = argparse.ArgumentParser( description="Run K2onSilicon to find which targets in a " "list call on active silicon for a given K2 campaign.") parser.add_argument('csv_file', type=str, help="Name of input csv file with targets, column are " "Ra_degrees, Dec_degrees, Kepmag") parser.add_argument('campaign', type=int, help='K2 Campaign number') args = parser.parse_args(args) K2onSilicon(args.csv_file, args.campaign)
def function[K2onSilicon_main, parameter[args]]: constant[Function called when `K2onSilicon` is executed on the command line.] import module[argparse] variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]] call[name[parser].add_argument, parameter[constant[csv_file]]] call[name[parser].add_argument, parameter[constant[campaign]]] variable[args] assign[=] call[name[parser].parse_args, parameter[name[args]]] call[name[K2onSilicon], parameter[name[args].csv_file, name[args].campaign]]
keyword[def] identifier[K2onSilicon_main] ( identifier[args] = keyword[None] ): literal[string] keyword[import] identifier[argparse] identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = literal[string] literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[str] , identifier[help] = literal[string] literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[help] = literal[string] ) identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[args] ) identifier[K2onSilicon] ( identifier[args] . identifier[csv_file] , identifier[args] . identifier[campaign] )
def K2onSilicon_main(args=None): """Function called when `K2onSilicon` is executed on the command line.""" import argparse parser = argparse.ArgumentParser(description='Run K2onSilicon to find which targets in a list call on active silicon for a given K2 campaign.') parser.add_argument('csv_file', type=str, help='Name of input csv file with targets, column are Ra_degrees, Dec_degrees, Kepmag') parser.add_argument('campaign', type=int, help='K2 Campaign number') args = parser.parse_args(args) K2onSilicon(args.csv_file, args.campaign)
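Invocation is identical from the console entry point or from Python; targets.csv is a hypothetical input file with Ra_degrees, Dec_degrees, Kepmag columns:

# from the shell:
#   K2onSilicon targets.csv 9
# or from Python, passing argv explicitly:
K2onSilicon_main(['targets.csv', '9'])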
def get_ipython_module_path(module_str): """Find the path to an IPython module in this version of IPython. This will always find the version of the module that is in this importable IPython package. This will always return the path to the ``.py`` version of the module. """ if module_str == 'IPython': return os.path.join(get_ipython_package_dir(), '__init__.py') mod = import_item(module_str) the_path = mod.__file__.replace('.pyc', '.py') the_path = the_path.replace('.pyo', '.py') return py3compat.cast_unicode(the_path, fs_encoding)
def function[get_ipython_module_path, parameter[module_str]]: constant[Find the path to an IPython module in this version of IPython. This will always find the version of the module that is in this importable IPython package. This will always return the path to the ``.py`` version of the module. ] if compare[name[module_str] equal[==] constant[IPython]] begin[:] return[call[name[os].path.join, parameter[call[name[get_ipython_package_dir], parameter[]], constant[__init__.py]]]] variable[mod] assign[=] call[name[import_item], parameter[name[module_str]]] variable[the_path] assign[=] call[name[mod].__file__.replace, parameter[constant[.pyc], constant[.py]]] variable[the_path] assign[=] call[name[the_path].replace, parameter[constant[.pyo], constant[.py]]] return[call[name[py3compat].cast_unicode, parameter[name[the_path], name[fs_encoding]]]]
keyword[def] identifier[get_ipython_module_path] ( identifier[module_str] ): literal[string] keyword[if] identifier[module_str] == literal[string] : keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[get_ipython_package_dir] (), literal[string] ) identifier[mod] = identifier[import_item] ( identifier[module_str] ) identifier[the_path] = identifier[mod] . identifier[__file__] . identifier[replace] ( literal[string] , literal[string] ) identifier[the_path] = identifier[the_path] . identifier[replace] ( literal[string] , literal[string] ) keyword[return] identifier[py3compat] . identifier[cast_unicode] ( identifier[the_path] , identifier[fs_encoding] )
def get_ipython_module_path(module_str): """Find the path to an IPython module in this version of IPython. This will always find the version of the module that is in this importable IPython package. This will always return the path to the ``.py`` version of the module. """ if module_str == 'IPython': return os.path.join(get_ipython_package_dir(), '__init__.py') # depends on [control=['if'], data=[]] mod = import_item(module_str) the_path = mod.__file__.replace('.pyc', '.py') the_path = the_path.replace('.pyo', '.py') return py3compat.cast_unicode(the_path, fs_encoding)
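Example calls, assuming IPython is importable; note that a compiled .pyc/.pyo path is always rewritten to the .py source path:

print(get_ipython_module_path('IPython'))
# e.g. .../site-packages/IPython/__init__.py
print(get_ipython_module_path('IPython.utils.path'))
# e.g. .../site-packages/IPython/utils/path.py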
def create_secgroup_rule(self, protocol, from_port, to_port, source, target): """ Creates a new server security group rule. :param str protocol: E.g. ``tcp``, ``icmp``, etc... :param int from_port: E.g. ``1`` :param int to_port: E.g. ``65535`` :param str source: :param str target: """ nova = self.nova def get_id(gname): sg = nova.security_groups.find(name=gname) if not sg: raise BangError("Security group not found, %s" % gname) return str(sg.id) kwargs = { 'ip_protocol': protocol, 'from_port': str(from_port), 'to_port': str(to_port), 'parent_group_id': get_id(target), } if '/' in source: kwargs['cidr'] = source else: kwargs['group_id'] = get_id(source) # not sure if this is an openstack hack or an hpcloud hack, but # this is definitely required to get it working on hpcloud: kwargs['cidr'] = 'null' nova.security_group_rules.create(**kwargs)
def function[create_secgroup_rule, parameter[self, protocol, from_port, to_port, source, target]]: constant[ Creates a new server security group rule. :param str protocol: E.g. ``tcp``, ``icmp``, etc... :param int from_port: E.g. ``1`` :param int to_port: E.g. ``65535`` :param str source: :param str target: ] variable[nova] assign[=] name[self].nova def function[get_id, parameter[gname]]: variable[sg] assign[=] call[name[nova].security_groups.find, parameter[]] if <ast.UnaryOp object at 0x7da1b1414550> begin[:] <ast.Raise object at 0x7da1b1415b10> return[call[name[str], parameter[name[sg].id]]] variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b1417640>, <ast.Constant object at 0x7da1b1417d00>, <ast.Constant object at 0x7da1b1417790>, <ast.Constant object at 0x7da1b14162c0>], [<ast.Name object at 0x7da1b1414100>, <ast.Call object at 0x7da1b14178b0>, <ast.Call object at 0x7da1b1416f20>, <ast.Call object at 0x7da1b1416c50>]] if compare[constant[/] in name[source]] begin[:] call[name[kwargs]][constant[cidr]] assign[=] name[source] call[name[nova].security_group_rules.create, parameter[]]
keyword[def] identifier[create_secgroup_rule] ( identifier[self] , identifier[protocol] , identifier[from_port] , identifier[to_port] , identifier[source] , identifier[target] ): literal[string] identifier[nova] = identifier[self] . identifier[nova] keyword[def] identifier[get_id] ( identifier[gname] ): identifier[sg] = identifier[nova] . identifier[security_groups] . identifier[find] ( identifier[name] = identifier[gname] ) keyword[if] keyword[not] identifier[sg] : keyword[raise] identifier[BangError] ( literal[string] % identifier[gname] ) keyword[return] identifier[str] ( identifier[sg] . identifier[id] ) identifier[kwargs] ={ literal[string] : identifier[protocol] , literal[string] : identifier[str] ( identifier[from_port] ), literal[string] : identifier[str] ( identifier[to_port] ), literal[string] : identifier[get_id] ( identifier[target] ), } keyword[if] literal[string] keyword[in] identifier[source] : identifier[kwargs] [ literal[string] ]= identifier[source] keyword[else] : identifier[kwargs] [ literal[string] ]= identifier[get_id] ( identifier[source] ) identifier[kwargs] [ literal[string] ]= literal[string] identifier[nova] . identifier[security_group_rules] . identifier[create] (** identifier[kwargs] )
def create_secgroup_rule(self, protocol, from_port, to_port, source, target): """ Creates a new server security group rule. :param str protocol: E.g. ``tcp``, ``icmp``, etc... :param int from_port: E.g. ``1`` :param int to_port: E.g. ``65535`` :param str source: :param str target: """ nova = self.nova def get_id(gname): sg = nova.security_groups.find(name=gname) if not sg: raise BangError('Security group not found, %s' % gname) # depends on [control=['if'], data=[]] return str(sg.id) kwargs = {'ip_protocol': protocol, 'from_port': str(from_port), 'to_port': str(to_port), 'parent_group_id': get_id(target)} if '/' in source: kwargs['cidr'] = source # depends on [control=['if'], data=['source']] else: kwargs['group_id'] = get_id(source) # not sure if this is an openstack hack or an hpcloud hack, but # this is definitely required to get it working on hpcloud: kwargs['cidr'] = 'null' nova.security_group_rules.create(**kwargs)
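A hypothetical call, assuming stack is a provider instance with an authenticated nova client and that the named security groups already exist; a CIDR source and a group-name source exercise the two branches above:

stack.create_secgroup_rule('tcp', 22, 22, '0.0.0.0/0', 'web')  # CIDR source
stack.create_secgroup_rule('tcp', 5432, 5432, 'web', 'db')     # group source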
def execute(self, command, *args, **kwargs): """Execute sentinel command.""" # TODO: choose pool # kwargs can be used to control which sentinel to use if self.closed: raise PoolClosedError("Sentinel pool is closed") for pool in self._pools: return pool.execute(command, *args, **kwargs)
def function[execute, parameter[self, command]]: constant[Execute sentinel command.] if name[self].closed begin[:] <ast.Raise object at 0x7da18bc70340> for taget[name[pool]] in starred[name[self]._pools] begin[:] return[call[name[pool].execute, parameter[name[command], <ast.Starred object at 0x7da20c7ca950>]]]
keyword[def] identifier[execute] ( identifier[self] , identifier[command] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[self] . identifier[closed] : keyword[raise] identifier[PoolClosedError] ( literal[string] ) keyword[for] identifier[pool] keyword[in] identifier[self] . identifier[_pools] : keyword[return] identifier[pool] . identifier[execute] ( identifier[command] ,* identifier[args] ,** identifier[kwargs] )
def execute(self, command, *args, **kwargs): """Execute sentinel command.""" # TODO: choose pool # kwargs can be used to control which sentinel to use if self.closed: raise PoolClosedError('Sentinel pool is closed') # depends on [control=['if'], data=[]] for pool in self._pools: return pool.execute(command, *args, **kwargs) # depends on [control=['for'], data=['pool']]
def plot_return_on_dollar(rets, title='Return on $1', show_maxdd=0, figsize=None, ax=None, append=0, label=None, **plot_args): """ Show the cumulative return of specified rets and max drawdowns if selected.""" crets = (1. + returns_cumulative(rets, expanding=1)) if isinstance(crets, pd.DataFrame): tmp = crets.copy() for c in tmp.columns: s = tmp[c] fv = s.first_valid_index() fi = s.index.get_loc(fv) if fi != 0: tmp.ix[fi - 1, c] = 1. else: if not s.index.freq: # no frequency set freq = guess_freq(s.index) s = s.asfreq(freq) first = s.index.shift(-1)[0] tmp = pd.concat([pd.DataFrame({c: [1.]}, index=[first]), tmp]) crets = tmp if append: toadd = crets.index.shift(1)[-1] crets = pd.concat([crets, pd.DataFrame(np.nan, columns=crets.columns, index=[toadd])]) else: fv = crets.first_valid_index() fi = crets.index.get_loc(fv) if fi != 0: crets = crets.copy() crets.iloc[fi - 1] = 1. else: if not crets.index.freq: first = crets.asfreq(guess_freq(crets.index)).index.shift(-1)[0] else: first = crets.index.shift(-1)[0] tmp = pd.Series([1.], index=[first]) tmp = tmp.append(crets) crets = tmp if append: toadd = pd.Series(np.nan, index=[crets.index.shift(1)[-1]]) crets = crets.append(toadd) ax = crets.plot(figsize=figsize, title=title, ax=ax, label=label, **plot_args) AxesFormat().Y.apply_format(new_float_formatter()).X.label("").apply(ax) #ax.tick_params(labelsize=14) if show_maxdd: # find the max drawdown available by using original rets if isinstance(rets, pd.DataFrame): iterator = rets.iteritems() else: iterator = iter([('', rets)]) for c, col in iterator: dd, dt = max_drawdown(col, inc_date=1) lbl = c and c + ' maxdd' or 'maxdd' # get cret to place annotation correctly if isinstance(crets, pd.DataFrame): amt = crets.ix[dt, c] else: amt = crets[dt] bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.7) # sub = lambda c: c and len(c) > 2 and c[:2] or c try: dtstr = '{0}'.format(dt.to_period()) except: dtstr = '{0}'.format(dt) ax.text(dt, amt, "mdd {0}".format(dtstr).strip(), ha="center", va="center", size=10, bbox=bbox_props) plt.tight_layout()
def function[plot_return_on_dollar, parameter[rets, title, show_maxdd, figsize, ax, append, label]]: constant[ Show the cumulative return of specified rets and max drawdowns if selected.] variable[crets] assign[=] binary_operation[constant[1.0] + call[name[returns_cumulative], parameter[name[rets]]]] if call[name[isinstance], parameter[name[crets], name[pd].DataFrame]] begin[:] variable[tmp] assign[=] call[name[crets].copy, parameter[]] for taget[name[c]] in starred[name[tmp].columns] begin[:] variable[s] assign[=] call[name[tmp]][name[c]] variable[fv] assign[=] call[name[s].first_valid_index, parameter[]] variable[fi] assign[=] call[name[s].index.get_loc, parameter[name[fv]]] if compare[name[fi] not_equal[!=] constant[0]] begin[:] call[name[tmp].ix][tuple[[<ast.BinOp object at 0x7da1b1edef20>, <ast.Name object at 0x7da1b1edee90>]]] assign[=] constant[1.0] variable[crets] assign[=] name[tmp] if name[append] begin[:] variable[toadd] assign[=] call[call[name[crets].index.shift, parameter[constant[1]]]][<ast.UnaryOp object at 0x7da1b1ede380>] variable[crets] assign[=] call[name[pd].concat, parameter[list[[<ast.Name object at 0x7da1b1ede200>, <ast.Call object at 0x7da1b1ede1d0>]]]] variable[ax] assign[=] call[name[crets].plot, parameter[]] call[call[call[call[name[AxesFormat], parameter[]].Y.apply_format, parameter[call[name[new_float_formatter], parameter[]]]].X.label, parameter[constant[]]].apply, parameter[name[ax]]] if name[show_maxdd] begin[:] if call[name[isinstance], parameter[name[rets], name[pd].DataFrame]] begin[:] variable[iterator] assign[=] call[name[rets].iteritems, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b1edc1f0>, <ast.Name object at 0x7da1b1edc1c0>]]] in starred[name[iterator]] begin[:] <ast.Tuple object at 0x7da1b1edc100> assign[=] call[name[max_drawdown], parameter[name[col]]] variable[lbl] assign[=] <ast.BoolOp object at 0x7da1b1e78130> if call[name[isinstance], parameter[name[crets], name[pd].DataFrame]] begin[:] variable[amt] assign[=] call[name[crets].ix][tuple[[<ast.Name object at 0x7da1b1e784c0>, <ast.Name object at 0x7da1b1e784f0>]]] variable[bbox_props] assign[=] call[name[dict], parameter[]] <ast.Try object at 0x7da1b1e78850> call[name[ax].text, parameter[name[dt], name[amt], call[call[constant[mdd {0}].format, parameter[name[dtstr]]].strip, parameter[]]]] call[name[plt].tight_layout, parameter[]]
keyword[def] identifier[plot_return_on_dollar] ( identifier[rets] , identifier[title] = literal[string] , identifier[show_maxdd] = literal[int] , identifier[figsize] = keyword[None] , identifier[ax] = keyword[None] , identifier[append] = literal[int] , identifier[label] = keyword[None] ,** identifier[plot_args] ): literal[string] identifier[crets] =( literal[int] + identifier[returns_cumulative] ( identifier[rets] , identifier[expanding] = literal[int] )) keyword[if] identifier[isinstance] ( identifier[crets] , identifier[pd] . identifier[DataFrame] ): identifier[tmp] = identifier[crets] . identifier[copy] () keyword[for] identifier[c] keyword[in] identifier[tmp] . identifier[columns] : identifier[s] = identifier[tmp] [ identifier[c] ] identifier[fv] = identifier[s] . identifier[first_valid_index] () identifier[fi] = identifier[s] . identifier[index] . identifier[get_loc] ( identifier[fv] ) keyword[if] identifier[fi] != literal[int] : identifier[tmp] . identifier[ix] [ identifier[fi] - literal[int] , identifier[c] ]= literal[int] keyword[else] : keyword[if] keyword[not] identifier[s] . identifier[index] . identifier[freq] : identifier[freq] = identifier[guess_freq] ( identifier[s] . identifier[index] ) identifier[s] = identifier[s] . identifier[asfreq] ( identifier[freq] ) identifier[first] = identifier[s] . identifier[index] . identifier[shift] (- literal[int] )[ literal[int] ] identifier[tmp] = identifier[pd] . identifier[concat] ([ identifier[pd] . identifier[DataFrame] ({ identifier[c] :[ literal[int] ]}, identifier[index] =[ identifier[first] ]), identifier[tmp] ]) identifier[crets] = identifier[tmp] keyword[if] identifier[append] : identifier[toadd] = identifier[crets] . identifier[index] . identifier[shift] ( literal[int] )[- literal[int] ] identifier[crets] = identifier[pd] . identifier[concat] ([ identifier[crets] , identifier[pd] . identifier[DataFrame] ( identifier[np] . identifier[nan] , identifier[columns] = identifier[crets] . identifier[columns] , identifier[index] =[ identifier[toadd] ])]) keyword[else] : identifier[fv] = identifier[crets] . identifier[first_valid_index] () identifier[fi] = identifier[crets] . identifier[index] . identifier[get_loc] ( identifier[fv] ) keyword[if] identifier[fi] != literal[int] : identifier[crets] = identifier[crets] . identifier[copy] () identifier[crets] . identifier[iloc] [ identifier[fi] - literal[int] ]= literal[int] keyword[else] : keyword[if] keyword[not] identifier[crets] . identifier[index] . identifier[freq] : identifier[first] = identifier[crets] . identifier[asfreq] ( identifier[guess_freq] ( identifier[crets] . identifier[index] )). identifier[index] . identifier[shift] (- literal[int] )[ literal[int] ] keyword[else] : identifier[first] = identifier[crets] . identifier[index] . identifier[shift] (- literal[int] )[ literal[int] ] identifier[tmp] = identifier[pd] . identifier[Series] ([ literal[int] ], identifier[index] =[ identifier[first] ]) identifier[tmp] = identifier[tmp] . identifier[append] ( identifier[crets] ) identifier[crets] = identifier[tmp] keyword[if] identifier[append] : identifier[toadd] = identifier[pd] . identifier[Series] ( identifier[np] . identifier[nan] , identifier[index] =[ identifier[crets] . identifier[index] . identifier[shift] ( literal[int] )[- literal[int] ]]) identifier[crets] = identifier[crets] . identifier[append] ( identifier[toadd] ) identifier[ax] = identifier[crets] . identifier[plot] ( identifier[figsize] = identifier[figsize] , identifier[title] = identifier[title] , identifier[ax] = identifier[ax] , identifier[label] = identifier[label] ,** identifier[plot_args] ) identifier[AxesFormat] (). identifier[Y] . identifier[apply_format] ( identifier[new_float_formatter] ()). identifier[X] . identifier[label] ( literal[string] ). identifier[apply] ( identifier[ax] ) keyword[if] identifier[show_maxdd] : keyword[if] identifier[isinstance] ( identifier[rets] , identifier[pd] . identifier[DataFrame] ): identifier[iterator] = identifier[rets] . identifier[iteritems] () keyword[else] : identifier[iterator] = identifier[iter] ([( literal[string] , identifier[rets] )]) keyword[for] identifier[c] , identifier[col] keyword[in] identifier[iterator] : identifier[dd] , identifier[dt] = identifier[max_drawdown] ( identifier[col] , identifier[inc_date] = literal[int] ) identifier[lbl] = identifier[c] keyword[and] identifier[c] + literal[string] keyword[or] literal[string] keyword[if] identifier[isinstance] ( identifier[crets] , identifier[pd] . identifier[DataFrame] ): identifier[amt] = identifier[crets] . identifier[ix] [ identifier[dt] , identifier[c] ] keyword[else] : identifier[amt] = identifier[crets] [ identifier[dt] ] identifier[bbox_props] = identifier[dict] ( identifier[boxstyle] = literal[string] , identifier[fc] = literal[string] , identifier[ec] = literal[string] , identifier[alpha] = literal[int] ) keyword[try] : identifier[dtstr] = literal[string] . identifier[format] ( identifier[dt] . identifier[to_period] ()) keyword[except] : identifier[dtstr] = literal[string] . identifier[format] ( identifier[dt] ) identifier[ax] . identifier[text] ( identifier[dt] , identifier[amt] , literal[string] . identifier[format] ( identifier[dtstr] ). identifier[strip] (), identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[size] = literal[int] , identifier[bbox] = identifier[bbox_props] ) identifier[plt] . identifier[tight_layout] ()
def plot_return_on_dollar(rets, title='Return on $1', show_maxdd=0, figsize=None, ax=None, append=0, label=None, **plot_args): """ Show the cumulative return of specified rets and max drawdowns if selected.""" crets = 1.0 + returns_cumulative(rets, expanding=1) if isinstance(crets, pd.DataFrame): tmp = crets.copy() for c in tmp.columns: s = tmp[c] fv = s.first_valid_index() fi = s.index.get_loc(fv) if fi != 0: tmp.ix[fi - 1, c] = 1.0 # depends on [control=['if'], data=['fi']] else: if not s.index.freq: # no frequency set freq = guess_freq(s.index) s = s.asfreq(freq) # depends on [control=['if'], data=[]] first = s.index.shift(-1)[0] tmp = pd.concat([pd.DataFrame({c: [1.0]}, index=[first]), tmp]) # depends on [control=['for'], data=['c']] crets = tmp if append: toadd = crets.index.shift(1)[-1] crets = pd.concat([crets, pd.DataFrame(np.nan, columns=crets.columns, index=[toadd])]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: fv = crets.first_valid_index() fi = crets.index.get_loc(fv) if fi != 0: crets = crets.copy() crets.iloc[fi - 1] = 1.0 # depends on [control=['if'], data=['fi']] else: if not crets.index.freq: first = crets.asfreq(guess_freq(crets.index)).index.shift(-1)[0] # depends on [control=['if'], data=[]] else: first = crets.index.shift(-1)[0] tmp = pd.Series([1.0], index=[first]) tmp = tmp.append(crets) crets = tmp if append: toadd = pd.Series(np.nan, index=[crets.index.shift(1)[-1]]) crets = crets.append(toadd) # depends on [control=['if'], data=[]] ax = crets.plot(figsize=figsize, title=title, ax=ax, label=label, **plot_args) AxesFormat().Y.apply_format(new_float_formatter()).X.label('').apply(ax) #ax.tick_params(labelsize=14) if show_maxdd: # find the max drawdown available by using original rets if isinstance(rets, pd.DataFrame): iterator = rets.iteritems() # depends on [control=['if'], data=[]] else: iterator = iter([('', rets)]) for (c, col) in iterator: (dd, dt) = max_drawdown(col, inc_date=1) lbl = c and c + ' maxdd' or 'maxdd' # get cret to place annotation correctly if isinstance(crets, pd.DataFrame): amt = crets.ix[dt, c] # depends on [control=['if'], data=[]] else: amt = crets[dt] bbox_props = dict(boxstyle='round', fc='w', ec='0.5', alpha=0.7) # sub = lambda c: c and len(c) > 2 and c[:2] or c try: dtstr = '{0}'.format(dt.to_period()) # depends on [control=['try'], data=[]] except: dtstr = '{0}'.format(dt) # depends on [control=['except'], data=[]] ax.text(dt, amt, 'mdd {0}'.format(dtstr).strip(), ha='center', va='center', size=10, bbox=bbox_props) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] plt.tight_layout()
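A minimal usage sketch for the plotting routine above, with simulated returns; the dates and column names are illustrative, and the helpers it relies on (returns_cumulative, max_drawdown, AxesFormat, guess_freq, new_float_formatter) are assumed to be importable from the same module.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Simulated business-day returns for two strategies (illustrative data).
idx = pd.date_range('2020-01-02', periods=250, freq='B')
rets = pd.DataFrame(
    np.random.default_rng(0).normal(0.0005, 0.01, size=(250, 2)),
    index=idx,
    columns=['strat_a', 'strat_b'],
)

plot_return_on_dollar(rets, title='Growth of $1', show_maxdd=1)
plt.show()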
def cli_info(data, title='Info'):
    '''
    Format an informational message with a title for CLI output. Useful for
    info messages, general errors etc. Note that the formatted string is
    returned rather than printed.

    :param data: message body text
    :param title: message title
    :return: the formatted message string
    '''
    wrapper = textwrap.TextWrapper()
    wrapper.initial_indent = ' ' * 4
    wrapper.subsequent_indent = wrapper.initial_indent

    return '{title}:\n\n{text}'.format(title=title, text=wrapper.fill(data))
def function[cli_info, parameter[data, title]]: constant[ Prints an info on CLI with the title. Useful for infos, general errors etc. :param data: :param title: :return: ] variable[wrapper] assign[=] call[name[textwrap].TextWrapper, parameter[]] name[wrapper].initial_indent assign[=] binary_operation[constant[ ] * constant[4]] name[wrapper].subsequent_indent assign[=] name[wrapper].initial_indent return[call[constant[{title}: {text}].format, parameter[]]]
keyword[def] identifier[cli_info] ( identifier[data] , identifier[title] = literal[string] ): literal[string] identifier[wrapper] = identifier[textwrap] . identifier[TextWrapper] () identifier[wrapper] . identifier[initial_indent] = literal[string] * literal[int] identifier[wrapper] . identifier[subsequent_indent] = identifier[wrapper] . identifier[initial_indent] keyword[return] literal[string] . identifier[format] ( identifier[title] = identifier[title] , identifier[text] = identifier[wrapper] . identifier[fill] ( identifier[data] ))
def cli_info(data, title='Info'): """ Prints an info on CLI with the title. Useful for infos, general errors etc. :param data: :param title: :return: """ wrapper = textwrap.TextWrapper() wrapper.initial_indent = ' ' * 4 wrapper.subsequent_indent = wrapper.initial_indent return '{title}:\n\n{text}'.format(title=title, text=wrapper.fill(data))
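A quick usage sketch for cli_info; since the function returns the formatted string, the caller prints it.

msg = cli_info('Disk quota exceeded on /var. Free at least 2 GB before retrying.',
               title='Warning')
print(msg)
# Warning:
#
#     Disk quota exceeded on /var. Free at least 2 GB before retrying.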
def compute_Rk(L, A, n_samples):  # TODO: inspect compute_Rk more closely.
    """
    Compute sparse L matrix and neighbors.

    Returns
    -------
    Rk_tensor : array-like. Length = n
        each component corresponds to the sparse matrix Lk, generated by
        extracting the kth row of the Laplacian and removing zeros.
    nbk : array-like. Length = n
        each component corresponds to the neighbor indices of point k, which
        are used in slicing the gradient, Y or S arrays.
    """
    laplacian_matrix = L.copy()
    laplacian_matrix.setdiag(0)
    laplacian_matrix.eliminate_zeros()
    n = n_samples

    Rk_tensor = []
    nbk = []
    row_A, column_A = A.T.nonzero()
    row, column = laplacian_matrix.nonzero()
    nnz_val = np.squeeze(np.asarray(laplacian_matrix.T[(row, column)]))

    sorted_col_args = np.argsort(column)
    sorted_col_vals = column[sorted_col_args]
    breaks_row_A = np.diff(row_A).nonzero()[0]
    breaks_col = np.diff(sorted_col_vals).nonzero()[0]

    for k in range(n_samples):
        if k == 0:
            nbk.append(column_A[:breaks_row_A[k] + 1].T)
            Rk_tensor.append(nnz_val[np.sort(sorted_col_args[:breaks_col[k] + 1])])
        elif k == n_samples - 1:
            nbk.append(column_A[breaks_row_A[k - 1] + 1:].T)
            Rk_tensor.append(nnz_val[np.sort(sorted_col_args[breaks_col[k - 1] + 1:])])
        else:
            nbk.append(column_A[breaks_row_A[k - 1] + 1:breaks_row_A[k] + 1].T)
            Rk_tensor.append(nnz_val[np.sort(
                sorted_col_args[breaks_col[k - 1] + 1:breaks_col[k] + 1])])
    return Rk_tensor, nbk
def function[compute_Rk, parameter[L, A, n_samples]]: constant[ Compute sparse L matrix and neighbors. Returns ------- Rk_tensor : array-like. Length = n each component correspond to the sparse matrix of Lk, which is generated by extracting the kth row of laplacian and removing zeros. nbk : array-like. Length = n each component correspond to the neighbor index of point k, which is used in slicing the gradient, Y or S arrays. ] variable[laplacian_matrix] assign[=] call[name[L].copy, parameter[]] call[name[laplacian_matrix].setdiag, parameter[constant[0]]] call[name[laplacian_matrix].eliminate_zeros, parameter[]] variable[n] assign[=] name[n_samples] variable[Rk_tensor] assign[=] list[[]] variable[nbk] assign[=] list[[]] <ast.Tuple object at 0x7da1b26afc70> assign[=] call[name[A].T.nonzero, parameter[]] <ast.Tuple object at 0x7da1b26ad3f0> assign[=] call[name[laplacian_matrix].nonzero, parameter[]] variable[nnz_val] assign[=] call[name[np].squeeze, parameter[call[name[np].asarray, parameter[call[name[laplacian_matrix].T][tuple[[<ast.Name object at 0x7da1b26ad720>, <ast.Name object at 0x7da1b26aedd0>]]]]]]] variable[sorted_col_args] assign[=] call[name[np].argsort, parameter[name[column]]] variable[sorted_col_vals] assign[=] call[name[column]][name[sorted_col_args]] variable[breaks_row_A] assign[=] call[call[call[name[np].diff, parameter[name[row_A]]].nonzero, parameter[]]][constant[0]] variable[breaks_col] assign[=] call[call[call[name[np].diff, parameter[name[sorted_col_vals]]].nonzero, parameter[]]][constant[0]] for taget[name[k]] in starred[call[name[range], parameter[name[n_samples]]]] begin[:] if compare[name[k] equal[==] constant[0]] begin[:] call[name[nbk].append, parameter[call[name[column_A]][<ast.Slice object at 0x7da1b1391c30>].T]] call[name[Rk_tensor].append, parameter[call[name[nnz_val]][call[name[np].sort, parameter[call[name[sorted_col_args]][<ast.Slice object at 0x7da1b1390b80>]]]]]] return[tuple[[<ast.Name object at 0x7da1b135bb50>, <ast.Name object at 0x7da1b135be20>]]]
keyword[def] identifier[compute_Rk] ( identifier[L] , identifier[A] , identifier[n_samples] ): literal[string] identifier[laplacian_matrix] = identifier[L] . identifier[copy] () identifier[laplacian_matrix] . identifier[setdiag] ( literal[int] ) identifier[laplacian_matrix] . identifier[eliminate_zeros] () identifier[n] = identifier[n_samples] identifier[Rk_tensor] =[] identifier[nbk] =[] identifier[row_A] , identifier[column_A] = identifier[A] . identifier[T] . identifier[nonzero] () identifier[row] , identifier[column] = identifier[laplacian_matrix] . identifier[nonzero] () identifier[nnz_val] = identifier[np] . identifier[squeeze] ( identifier[np] . identifier[asarray] ( identifier[laplacian_matrix] . identifier[T] [( identifier[row] , identifier[column] )])) identifier[sorted_col_args] = identifier[np] . identifier[argsort] ( identifier[column] ) identifier[sorted_col_vals] = identifier[column] [ identifier[sorted_col_args] ] identifier[breaks_row_A] = identifier[np] . identifier[diff] ( identifier[row_A] ). identifier[nonzero] ()[ literal[int] ] identifier[breaks_col] = identifier[np] . identifier[diff] ( identifier[sorted_col_vals] ). identifier[nonzero] ()[ literal[int] ] keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[n_samples] ): keyword[if] identifier[k] == literal[int] : identifier[nbk] . identifier[append] ( identifier[column_A] [: identifier[breaks_row_A] [ identifier[k] ]+ literal[int] ]. identifier[T] ) identifier[Rk_tensor] . identifier[append] ( identifier[nnz_val] [ identifier[np] . identifier[sort] ( identifier[sorted_col_args] [: identifier[breaks_col] [ identifier[k] ]+ literal[int] ])]) keyword[elif] identifier[k] == identifier[n_samples] - literal[int] : identifier[nbk] . identifier[append] ( identifier[column_A] [ identifier[breaks_row_A] [ identifier[k] - literal[int] ]+ literal[int] :]. identifier[T] ) identifier[Rk_tensor] . identifier[append] ( identifier[nnz_val] [ identifier[np] . identifier[sort] ( identifier[sorted_col_args] [ identifier[breaks_col] [ identifier[k] - literal[int] ]+ literal[int] :])]) keyword[else] : identifier[nbk] . identifier[append] ( identifier[column_A] [ identifier[breaks_row_A] [ identifier[k] - literal[int] ]+ literal[int] : identifier[breaks_row_A] [ identifier[k] ]+ literal[int] ]. identifier[T] ) identifier[Rk_tensor] . identifier[append] ( identifier[nnz_val] [ identifier[np] . identifier[sort] ( identifier[sorted_col_args] [ identifier[breaks_col] [ identifier[k] - literal[int] ]+ literal[int] : identifier[breaks_col] [ identifier[k] ]+ literal[int] ])]) keyword[return] identifier[Rk_tensor] , identifier[nbk]
def compute_Rk(L, A, n_samples): # TODO: need to inspect more into compute Rk. '\n Compute sparse L matrix and neighbors.\n\n Returns\n -------\n Rk_tensor : array-like. Length = n\n each component correspond to the sparse matrix of Lk, which is\n generated by extracting the kth row of laplacian and removing zeros.\n nbk : array-like. Length = n\n each component correspond to the neighbor index of point k, which is\n used in slicing the gradient, Y or S arrays.\n ' laplacian_matrix = L.copy() laplacian_matrix.setdiag(0) laplacian_matrix.eliminate_zeros() n = n_samples Rk_tensor = [] nbk = [] (row_A, column_A) = A.T.nonzero() (row, column) = laplacian_matrix.nonzero() nnz_val = np.squeeze(np.asarray(laplacian_matrix.T[row, column])) sorted_col_args = np.argsort(column) sorted_col_vals = column[sorted_col_args] breaks_row_A = np.diff(row_A).nonzero()[0] breaks_col = np.diff(sorted_col_vals).nonzero()[0] for k in range(n_samples): if k == 0: nbk.append(column_A[:breaks_row_A[k] + 1].T) Rk_tensor.append(nnz_val[np.sort(sorted_col_args[:breaks_col[k] + 1])]) # depends on [control=['if'], data=['k']] elif k == n_samples - 1: nbk.append(column_A[breaks_row_A[k - 1] + 1:].T) Rk_tensor.append(nnz_val[np.sort(sorted_col_args[breaks_col[k - 1] + 1:])]) # depends on [control=['if'], data=['k']] else: nbk.append(column_A[breaks_row_A[k - 1] + 1:breaks_row_A[k] + 1].T) Rk_tensor.append(nnz_val[np.sort(sorted_col_args[breaks_col[k - 1] + 1:breaks_col[k] + 1])]) # depends on [control=['for'], data=['k']] return (Rk_tensor, nbk)
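A toy invocation of compute_Rk on a three-node graph; how L and A are meant to be built is an assumption here (A as a sparse affinity matrix, L as the corresponding graph Laplacian with a nonzero diagonal).

import numpy as np
import scipy.sparse as sps

A = sps.csr_matrix(np.array([[0., 1., 1.],
                             [1., 0., 1.],
                             [1., 1., 0.]]))
degrees = np.asarray(A.sum(axis=1)).ravel()
L = sps.csr_matrix(sps.diags(degrees) - A)  # combinatorial graph Laplacian

Rk_tensor, nbk = compute_Rk(L, A, n_samples=3)
print(nbk[0])        # neighbor indices of point 0: [1 2]
print(Rk_tensor[0])  # matching off-diagonal Laplacian entries: [-1. -1.]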
def is_domain_class_collection_attribute(ent, attr_name):
    """
    Checks if the given attribute name is a collection attribute of the given
    registered resource.
    """
    attr = get_domain_class_attribute(ent, attr_name)
    return attr.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION
def function[is_domain_class_collection_attribute, parameter[ent, attr_name]]: constant[ Checks if the given attribute name is a aggregate attribute of the given registered resource. ] variable[attr] assign[=] call[name[get_domain_class_attribute], parameter[name[ent], name[attr_name]]] return[compare[name[attr].kind equal[==] name[RESOURCE_ATTRIBUTE_KINDS].COLLECTION]]
keyword[def] identifier[is_domain_class_collection_attribute] ( identifier[ent] , identifier[attr_name] ): literal[string] identifier[attr] = identifier[get_domain_class_attribute] ( identifier[ent] , identifier[attr_name] ) keyword[return] identifier[attr] . identifier[kind] == identifier[RESOURCE_ATTRIBUTE_KINDS] . identifier[COLLECTION]
def is_domain_class_collection_attribute(ent, attr_name): """ Checks if the given attribute name is a aggregate attribute of the given registered resource. """ attr = get_domain_class_attribute(ent, attr_name) return attr.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION
def dataset_path_iterator(file_path: str) -> Iterator[str]: """ An iterator returning file_paths in a directory containing CONLL-formatted files. """ logger.info("Reading CONLL sentences from dataset files at: %s", file_path) for root, _, files in list(os.walk(file_path)): for data_file in files: # These are a relic of the dataset pre-processing. Every # file will be duplicated - one file called filename.gold_skel # and one generated from the preprocessing called filename.gold_conll. if not data_file.endswith("gold_conll"): continue yield os.path.join(root, data_file)
def function[dataset_path_iterator, parameter[file_path]]: constant[ An iterator returning file_paths in a directory containing CONLL-formatted files. ] call[name[logger].info, parameter[constant[Reading CONLL sentences from dataset files at: %s], name[file_path]]] for taget[tuple[[<ast.Name object at 0x7da1b1f97160>, <ast.Name object at 0x7da1b1f97af0>, <ast.Name object at 0x7da1b1f97880>]]] in starred[call[name[list], parameter[call[name[os].walk, parameter[name[file_path]]]]]] begin[:] for taget[name[data_file]] in starred[name[files]] begin[:] if <ast.UnaryOp object at 0x7da1b1f956c0> begin[:] continue <ast.Yield object at 0x7da1b1f95870>
keyword[def] identifier[dataset_path_iterator] ( identifier[file_path] : identifier[str] )-> identifier[Iterator] [ identifier[str] ]: literal[string] identifier[logger] . identifier[info] ( literal[string] , identifier[file_path] ) keyword[for] identifier[root] , identifier[_] , identifier[files] keyword[in] identifier[list] ( identifier[os] . identifier[walk] ( identifier[file_path] )): keyword[for] identifier[data_file] keyword[in] identifier[files] : keyword[if] keyword[not] identifier[data_file] . identifier[endswith] ( literal[string] ): keyword[continue] keyword[yield] identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[data_file] )
def dataset_path_iterator(file_path: str) -> Iterator[str]: """ An iterator returning file_paths in a directory containing CONLL-formatted files. """ logger.info('Reading CONLL sentences from dataset files at: %s', file_path) for (root, _, files) in list(os.walk(file_path)): for data_file in files: # These are a relic of the dataset pre-processing. Every # file will be duplicated - one file called filename.gold_skel # and one generated from the preprocessing called filename.gold_conll. if not data_file.endswith('gold_conll'): continue # depends on [control=['if'], data=[]] yield os.path.join(root, data_file) # depends on [control=['for'], data=['data_file']] # depends on [control=['for'], data=[]]
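A usage sketch for the iterator above (the directory path is a placeholder for a CONLL-formatted dataset root):

for conll_path in dataset_path_iterator('/data/conll-2012/v4/data/train'):
    print(conll_path)  # only the *.gold_conll files are yielded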
def record_to_objects(self, preference=None): """Create objects from files, or merge the files into the objects. """ from ambry.orm.file import File for f in self.list_records(): pref = preference if preference else f.record.preference if pref == File.PREFERENCE.FILE: self._bundle.logger.debug(' Cleaning objects for file {}'.format(f.path)) f.clean_objects() if pref in (File.PREFERENCE.FILE, File.PREFERENCE.MERGE): self._bundle.logger.debug(' rto {}'.format(f.path)) f.record_to_objects()
def function[record_to_objects, parameter[self, preference]]: constant[Create objects from files, or merge the files into the objects. ] from relative_module[ambry.orm.file] import module[File] for taget[name[f]] in starred[call[name[self].list_records, parameter[]]] begin[:] variable[pref] assign[=] <ast.IfExp object at 0x7da20c7961d0> if compare[name[pref] equal[==] name[File].PREFERENCE.FILE] begin[:] call[name[self]._bundle.logger.debug, parameter[call[constant[ Cleaning objects for file {}].format, parameter[name[f].path]]]] call[name[f].clean_objects, parameter[]] if compare[name[pref] in tuple[[<ast.Attribute object at 0x7da18c4cf280>, <ast.Attribute object at 0x7da18c4cfb50>]]] begin[:] call[name[self]._bundle.logger.debug, parameter[call[constant[ rto {}].format, parameter[name[f].path]]]] call[name[f].record_to_objects, parameter[]]
keyword[def] identifier[record_to_objects] ( identifier[self] , identifier[preference] = keyword[None] ): literal[string] keyword[from] identifier[ambry] . identifier[orm] . identifier[file] keyword[import] identifier[File] keyword[for] identifier[f] keyword[in] identifier[self] . identifier[list_records] (): identifier[pref] = identifier[preference] keyword[if] identifier[preference] keyword[else] identifier[f] . identifier[record] . identifier[preference] keyword[if] identifier[pref] == identifier[File] . identifier[PREFERENCE] . identifier[FILE] : identifier[self] . identifier[_bundle] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[f] . identifier[path] )) identifier[f] . identifier[clean_objects] () keyword[if] identifier[pref] keyword[in] ( identifier[File] . identifier[PREFERENCE] . identifier[FILE] , identifier[File] . identifier[PREFERENCE] . identifier[MERGE] ): identifier[self] . identifier[_bundle] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[f] . identifier[path] )) identifier[f] . identifier[record_to_objects] ()
def record_to_objects(self, preference=None): """Create objects from files, or merge the files into the objects. """ from ambry.orm.file import File for f in self.list_records(): pref = preference if preference else f.record.preference if pref == File.PREFERENCE.FILE: self._bundle.logger.debug(' Cleaning objects for file {}'.format(f.path)) f.clean_objects() # depends on [control=['if'], data=[]] if pref in (File.PREFERENCE.FILE, File.PREFERENCE.MERGE): self._bundle.logger.debug(' rto {}'.format(f.path)) f.record_to_objects() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
def strip_escape(string='', encoding="utf-8"):  # pylint: disable=redefined-outer-name
    """
    Strip escape characters from string.

    :param string: string to work on
    :param encoding: string name of the encoding used.
    :return: stripped string
    """
    matches = []
    try:
        if hasattr(string, "decode"):
            string = string.decode(encoding)
    except Exception:  # pylint: disable=broad-except
        # Tried to decode something that is not decodable in the specified encoding. Let's just
        # move on.
        pass
    try:
        for match in ansi_eng.finditer(string):
            matches.append(match)
    except TypeError as error:
        raise TypeError("Unable to strip escape characters from data {}: {}".format(
            string, error))
    matches.reverse()
    for match in matches:
        start = match.start()
        end = match.end()
        string = string[0:start] + string[end:]
    return string
def function[strip_escape, parameter[string, encoding]]: constant[ Strip escape characters from string. :param string: string to work on :param encoding: string name of the encoding used. :return: stripped string ] variable[matches] assign[=] list[[]] <ast.Try object at 0x7da1b0c355d0> <ast.Try object at 0x7da1b0c375e0> call[name[matches].reverse, parameter[]] for taget[name[match]] in starred[name[matches]] begin[:] variable[start] assign[=] call[name[match].start, parameter[]] variable[end] assign[=] call[name[match].end, parameter[]] variable[string] assign[=] binary_operation[call[name[string]][<ast.Slice object at 0x7da1b0d0cca0>] + call[name[string]][<ast.Slice object at 0x7da1b0d0c5e0>]] return[name[string]]
keyword[def] identifier[strip_escape] ( identifier[string] = literal[string] , identifier[encoding] = literal[string] ): literal[string] identifier[matches] =[] keyword[try] : keyword[if] identifier[hasattr] ( identifier[string] , literal[string] ): identifier[string] = identifier[string] . identifier[decode] ( identifier[encoding] ) keyword[except] identifier[Exception] : keyword[pass] keyword[try] : keyword[for] identifier[match] keyword[in] identifier[ansi_eng] . identifier[finditer] ( identifier[string] ): identifier[matches] . identifier[append] ( identifier[match] ) keyword[except] identifier[TypeError] keyword[as] identifier[error] : keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[string] , identifier[error] )) identifier[matches] . identifier[reverse] () keyword[for] identifier[match] keyword[in] identifier[matches] : identifier[start] = identifier[match] . identifier[start] () identifier[end] = identifier[match] . identifier[end] () identifier[string] = identifier[string] [ literal[int] : identifier[start] ]+ identifier[string] [ identifier[end] :] keyword[return] identifier[string]
def strip_escape(string='', encoding='utf-8'): # pylint: disable=redefined-outer-name '\n Strip escape characters from string.\n\n :param string: string to work on\n :param encoding: string name of the encoding used.\n :return: stripped string\n ' matches = [] try: if hasattr(string, 'decode'): string = string.decode(encoding) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except Exception: # pylint: disable=broad-except # Tried to decode something that is not decodeable in the specified encoding. Let's just # move on. pass # depends on [control=['except'], data=[]] try: for match in ansi_eng.finditer(string): matches.append(match) # depends on [control=['for'], data=['match']] # depends on [control=['try'], data=[]] except TypeError as error: raise TypeError('Unable to strip escape characters from data {}: {}'.format(string, error)) # depends on [control=['except'], data=['error']] matches.reverse() for match in matches: start = match.start() end = match.end() string = string[0:start] + string[end:] # depends on [control=['for'], data=['match']] return string
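A self-contained sketch of strip_escape; ansi_eng is a module-level compiled pattern in the original source, so a plausible stand-in is defined here (an assumption, the real pattern may be broader).

import re

# Stand-in for the module-level `ansi_eng` regex (assumed; not the original).
ansi_eng = re.compile(r'\x1b\[[0-9;]*[A-Za-z]')

print(strip_escape(b'\x1b[31mred text\x1b[0m'))  # -> red text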
def read_namespaced_service_status(self, name, namespace, **kwargs): # noqa: E501 """read_namespaced_service_status # noqa: E501 read status of the specified Service # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_service_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Service (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Service If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_service_status_with_http_info(name, namespace, **kwargs) # noqa: E501 else: (data) = self.read_namespaced_service_status_with_http_info(name, namespace, **kwargs) # noqa: E501 return data
def function[read_namespaced_service_status, parameter[self, name, namespace]]: constant[read_namespaced_service_status # noqa: E501 read status of the specified Service # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_service_status(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Service (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Service If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[async_req]]] begin[:] return[call[name[self].read_namespaced_service_status_with_http_info, parameter[name[name], name[namespace]]]]
keyword[def] identifier[read_namespaced_service_status] ( identifier[self] , identifier[name] , identifier[namespace] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[read_namespaced_service_status_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[read_namespaced_service_status_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] ) keyword[return] identifier[data]
def read_namespaced_service_status(self, name, namespace, **kwargs): # noqa: E501 "read_namespaced_service_status # noqa: E501\n\n read status of the specified Service # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.read_namespaced_service_status(name, namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the Service (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param str pretty: If 'true', then the output is pretty printed.\n :return: V1Service\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_service_status_with_http_info(name, namespace, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]] else: data = self.read_namespaced_service_status_with_http_info(name, namespace, **kwargs) # noqa: E501 return data
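This has the shape of the auto-generated Kubernetes Python client; a usage sketch against a live cluster (the service and namespace names are placeholders):

from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() inside a pod
v1 = client.CoreV1Api()
svc = v1.read_namespaced_service_status('my-service', 'default')
print(svc.status.load_balancer)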
def _get_folder(gi, folder_name, library, libitems): """Retrieve or create a folder inside the library with the specified name. """ for item in libitems: if item["type"] == "folder" and item["name"] == "/%s" % folder_name: return item return gi.libraries.create_folder(library.id, folder_name)[0]
def function[_get_folder, parameter[gi, folder_name, library, libitems]]: constant[Retrieve or create a folder inside the library with the specified name. ] for taget[name[item]] in starred[name[libitems]] begin[:] if <ast.BoolOp object at 0x7da1b1849bd0> begin[:] return[name[item]] return[call[call[name[gi].libraries.create_folder, parameter[name[library].id, name[folder_name]]]][constant[0]]]
keyword[def] identifier[_get_folder] ( identifier[gi] , identifier[folder_name] , identifier[library] , identifier[libitems] ): literal[string] keyword[for] identifier[item] keyword[in] identifier[libitems] : keyword[if] identifier[item] [ literal[string] ]== literal[string] keyword[and] identifier[item] [ literal[string] ]== literal[string] % identifier[folder_name] : keyword[return] identifier[item] keyword[return] identifier[gi] . identifier[libraries] . identifier[create_folder] ( identifier[library] . identifier[id] , identifier[folder_name] )[ literal[int] ]
def _get_folder(gi, folder_name, library, libitems): """Retrieve or create a folder inside the library with the specified name. """ for item in libitems: if item['type'] == 'folder' and item['name'] == '/%s' % folder_name: return item # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] return gi.libraries.create_folder(library.id, folder_name)[0]
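A hypothetical call, assuming a bioblend-style GalaxyInstance gi and a library object with an id attribute, as the code implies:

# Hypothetical usage; `gi` and `library` come from a bioblend-style client.
libitems = gi.libraries.show_library(library.id, contents=True)
folder = _get_folder(gi, 'fastq_inputs', library, libitems)
print(folder['id'])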
def _build(self, inputs): """Dynamic unroll across input objects. Args: inputs: tensor (batch x num_objects x feature). Objects to sort. Returns: Tensor (batch x num_objects); logits indicating the reference objects. """ batch_size = inputs.get_shape()[0] output_sequence, _ = tf.nn.dynamic_rnn( cell=self._core, inputs=inputs, time_major=False, initial_state=self._core.initial_state( batch_size, trainable=False) ) outputs = snt.BatchFlatten()(output_sequence[:, -1, :]) outputs = self._final_mlp(outputs) logits = snt.Linear(self._target_size)(outputs) return logits
def function[_build, parameter[self, inputs]]: constant[Dynamic unroll across input objects. Args: inputs: tensor (batch x num_objects x feature). Objects to sort. Returns: Tensor (batch x num_objects); logits indicating the reference objects. ] variable[batch_size] assign[=] call[call[name[inputs].get_shape, parameter[]]][constant[0]] <ast.Tuple object at 0x7da1b1cc82e0> assign[=] call[name[tf].nn.dynamic_rnn, parameter[]] variable[outputs] assign[=] call[call[name[snt].BatchFlatten, parameter[]], parameter[call[name[output_sequence]][tuple[[<ast.Slice object at 0x7da1b1cc89a0>, <ast.UnaryOp object at 0x7da1b1cc89d0>, <ast.Slice object at 0x7da1b1cc8a30>]]]]] variable[outputs] assign[=] call[name[self]._final_mlp, parameter[name[outputs]]] variable[logits] assign[=] call[call[name[snt].Linear, parameter[name[self]._target_size]], parameter[name[outputs]]] return[name[logits]]
keyword[def] identifier[_build] ( identifier[self] , identifier[inputs] ): literal[string] identifier[batch_size] = identifier[inputs] . identifier[get_shape] ()[ literal[int] ] identifier[output_sequence] , identifier[_] = identifier[tf] . identifier[nn] . identifier[dynamic_rnn] ( identifier[cell] = identifier[self] . identifier[_core] , identifier[inputs] = identifier[inputs] , identifier[time_major] = keyword[False] , identifier[initial_state] = identifier[self] . identifier[_core] . identifier[initial_state] ( identifier[batch_size] , identifier[trainable] = keyword[False] ) ) identifier[outputs] = identifier[snt] . identifier[BatchFlatten] ()( identifier[output_sequence] [:,- literal[int] ,:]) identifier[outputs] = identifier[self] . identifier[_final_mlp] ( identifier[outputs] ) identifier[logits] = identifier[snt] . identifier[Linear] ( identifier[self] . identifier[_target_size] )( identifier[outputs] ) keyword[return] identifier[logits]
def _build(self, inputs): """Dynamic unroll across input objects. Args: inputs: tensor (batch x num_objects x feature). Objects to sort. Returns: Tensor (batch x num_objects); logits indicating the reference objects. """ batch_size = inputs.get_shape()[0] (output_sequence, _) = tf.nn.dynamic_rnn(cell=self._core, inputs=inputs, time_major=False, initial_state=self._core.initial_state(batch_size, trainable=False)) outputs = snt.BatchFlatten()(output_sequence[:, -1, :]) outputs = self._final_mlp(outputs) logits = snt.Linear(self._target_size)(outputs) return logits
def _serialize(cls, key, value, fields): """ Marshal outgoing data into Taskwarrior's JSON format.""" converter = cls._get_converter_for_field(key, None, fields) return converter.serialize(value)
def function[_serialize, parameter[cls, key, value, fields]]: constant[ Marshal outgoing data into Taskwarrior's JSON format.] variable[converter] assign[=] call[name[cls]._get_converter_for_field, parameter[name[key], constant[None], name[fields]]] return[call[name[converter].serialize, parameter[name[value]]]]
keyword[def] identifier[_serialize] ( identifier[cls] , identifier[key] , identifier[value] , identifier[fields] ): literal[string] identifier[converter] = identifier[cls] . identifier[_get_converter_for_field] ( identifier[key] , keyword[None] , identifier[fields] ) keyword[return] identifier[converter] . identifier[serialize] ( identifier[value] )
def _serialize(cls, key, value, fields): """ Marshal outgoing data into Taskwarrior's JSON format.""" converter = cls._get_converter_for_field(key, None, fields) return converter.serialize(value)
def _get_search_page( self, query, page, per_page=1000, mentions=3, data=False, ): """ Retrieve one page of search results from the DocumentCloud API. """ if mentions > 10: raise ValueError("You cannot search for more than 10 mentions") params = { 'q': query, 'page': page, 'per_page': per_page, 'mentions': mentions, } if data: params['data'] = 'true' response = self.fetch('search.json', params) return response.get("documents")
def function[_get_search_page, parameter[self, query, page, per_page, mentions, data]]: constant[ Retrieve one page of search results from the DocumentCloud API. ] if compare[name[mentions] greater[>] constant[10]] begin[:] <ast.Raise object at 0x7da1b0cff250> variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0cff730>, <ast.Constant object at 0x7da1b0cff760>, <ast.Constant object at 0x7da1b0cff2b0>, <ast.Constant object at 0x7da1b0cffd30>], [<ast.Name object at 0x7da1b0cff700>, <ast.Name object at 0x7da1b0cff880>, <ast.Name object at 0x7da1b0cff3d0>, <ast.Name object at 0x7da1b0cff9d0>]] if name[data] begin[:] call[name[params]][constant[data]] assign[=] constant[true] variable[response] assign[=] call[name[self].fetch, parameter[constant[search.json], name[params]]] return[call[name[response].get, parameter[constant[documents]]]]
keyword[def] identifier[_get_search_page] ( identifier[self] , identifier[query] , identifier[page] , identifier[per_page] = literal[int] , identifier[mentions] = literal[int] , identifier[data] = keyword[False] , ): literal[string] keyword[if] identifier[mentions] > literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[params] ={ literal[string] : identifier[query] , literal[string] : identifier[page] , literal[string] : identifier[per_page] , literal[string] : identifier[mentions] , } keyword[if] identifier[data] : identifier[params] [ literal[string] ]= literal[string] identifier[response] = identifier[self] . identifier[fetch] ( literal[string] , identifier[params] ) keyword[return] identifier[response] . identifier[get] ( literal[string] )
def _get_search_page(self, query, page, per_page=1000, mentions=3, data=False): """ Retrieve one page of search results from the DocumentCloud API. """ if mentions > 10: raise ValueError('You cannot search for more than 10 mentions') # depends on [control=['if'], data=[]] params = {'q': query, 'page': page, 'per_page': per_page, 'mentions': mentions} if data: params['data'] = 'true' # depends on [control=['if'], data=[]] response = self.fetch('search.json', params) return response.get('documents')
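A hedged sketch of paging through results with this private helper; the client object and the document field names are assumptions in the python-documentcloud style.

# Hypothetical paging loop; `client` stands in for the API client instance.
page = 1
while True:
    docs = client._get_search_page('annual report', page=page, per_page=100)
    if not docs:
        break
    for doc in docs:
        print(doc['id'])  # field names are illustrative
    page += 1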
def cycle_dist(x, y, perimeter):
    """Find the distance between x and y along a cycle of length ``perimeter``.

    :param x:
    :param y:
    :param perimeter:

    Example:
        >>> cycle_dist(1, 23, 24) = 2
        >>> cycle_dist(5, 13, 24) = 8
        >>> cycle_dist(0.0, 2.4, 1.0) = 0.4
        >>> cycle_dist(0.0, 2.6, 1.0) = 0.4

    Treat the coordinate axis as a ring and compute the shortest distance
    between the two points along the ring.
    """
    dist = abs(x - y) % perimeter
    if dist > 0.5 * perimeter:
        dist = perimeter - dist
    return dist
def function[cycle_dist, parameter[x, y, perimeter]]: constant[Find Distance between x, y by means of a n-length cycle. :param x: :param y: :param perimeter: Example: >>> cycle_dist(1, 23, 24) = 2 >>> cycle_dist(5, 13, 24) = 8 >>> cycle_dist(0.0, 2.4, 1.0) = 0.4 >>> cycle_dist(0.0, 2.6, 1.0) = 0.4 **中文文档** 假设坐标轴是一个环, 计算两点之间在环上的最短距离。 ] variable[dist] assign[=] binary_operation[call[name[abs], parameter[binary_operation[name[x] - name[y]]]] <ast.Mod object at 0x7da2590d6920> name[perimeter]] if compare[name[dist] greater[>] binary_operation[constant[0.5] * name[perimeter]]] begin[:] variable[dist] assign[=] binary_operation[name[perimeter] - name[dist]] return[name[dist]]
keyword[def] identifier[cycle_dist] ( identifier[x] , identifier[y] , identifier[perimeter] ): literal[string] identifier[dist] = identifier[abs] ( identifier[x] - identifier[y] )% identifier[perimeter] keyword[if] identifier[dist] > literal[int] * identifier[perimeter] : identifier[dist] = identifier[perimeter] - identifier[dist] keyword[return] identifier[dist]
def cycle_dist(x, y, perimeter): """Find Distance between x, y by means of a n-length cycle. :param x: :param y: :param perimeter: Example: >>> cycle_dist(1, 23, 24) = 2 >>> cycle_dist(5, 13, 24) = 8 >>> cycle_dist(0.0, 2.4, 1.0) = 0.4 >>> cycle_dist(0.0, 2.6, 1.0) = 0.4 **中文文档** 假设坐标轴是一个环, 计算两点之间在环上的最短距离。 """ dist = abs(x - y) % perimeter if dist > 0.5 * perimeter: dist = perimeter - dist # depends on [control=['if'], data=['dist']] return dist
def is_condition_met(self, hand, win_tile, melds, is_tsumo):
    """
    Three closed pon sets; the other sets need not be closed
    :param hand: list of hand's sets
    :param win_tile: 136 tiles format
    :param melds: list of Meld objects
    :param is_tsumo:
    :return: true|false
    """
    win_tile //= 4

    open_sets = [x.tiles_34 for x in melds if x.opened]

    chi_sets = [x for x in hand if (is_chi(x) and win_tile in x and x not in open_sets)]
    pon_sets = [x for x in hand if is_pon(x)]

    closed_pon_sets = []
    for item in pon_sets:
        if item in open_sets:
            continue

        # if we win by ron on a shanpon wait, the pon is considered open
        # (unless the winning tile also completes a chi, as in a 789999 shape)
        if win_tile in item and not is_tsumo and not len(chi_sets):
            continue

        closed_pon_sets.append(item)

    return len(closed_pon_sets) == 3
def function[is_condition_met, parameter[self, hand, win_tile, melds, is_tsumo]]: constant[ Three closed pon sets, the other sets need not to be closed :param hand: list of hand's sets :param win_tile: 136 tiles format :param melds: list Meld objects :param is_tsumo: :return: true|false ] <ast.AugAssign object at 0x7da1b07e38e0> variable[open_sets] assign[=] <ast.ListComp object at 0x7da1b07e3c40> variable[chi_sets] assign[=] <ast.ListComp object at 0x7da1b07e20e0> variable[pon_sets] assign[=] <ast.ListComp object at 0x7da1b07e35e0> variable[closed_pon_sets] assign[=] list[[]] for taget[name[item]] in starred[name[pon_sets]] begin[:] if compare[name[item] in name[open_sets]] begin[:] continue if <ast.BoolOp object at 0x7da1b07e1ab0> begin[:] continue call[name[closed_pon_sets].append, parameter[name[item]]] return[compare[call[name[len], parameter[name[closed_pon_sets]]] equal[==] constant[3]]]
keyword[def] identifier[is_condition_met] ( identifier[self] , identifier[hand] , identifier[win_tile] , identifier[melds] , identifier[is_tsumo] ): literal[string] identifier[win_tile] //= literal[int] identifier[open_sets] =[ identifier[x] . identifier[tiles_34] keyword[for] identifier[x] keyword[in] identifier[melds] keyword[if] identifier[x] . identifier[opened] ] identifier[chi_sets] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[hand] keyword[if] ( identifier[is_chi] ( identifier[x] ) keyword[and] identifier[win_tile] keyword[in] identifier[x] keyword[and] identifier[x] keyword[not] keyword[in] identifier[open_sets] )] identifier[pon_sets] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[hand] keyword[if] identifier[is_pon] ( identifier[x] )] identifier[closed_pon_sets] =[] keyword[for] identifier[item] keyword[in] identifier[pon_sets] : keyword[if] identifier[item] keyword[in] identifier[open_sets] : keyword[continue] keyword[if] identifier[win_tile] keyword[in] identifier[item] keyword[and] keyword[not] identifier[is_tsumo] keyword[and] keyword[not] identifier[len] ( identifier[chi_sets] ): keyword[continue] identifier[closed_pon_sets] . identifier[append] ( identifier[item] ) keyword[return] identifier[len] ( identifier[closed_pon_sets] )== literal[int]
def is_condition_met(self, hand, win_tile, melds, is_tsumo): """ Three closed pon sets, the other sets need not to be closed :param hand: list of hand's sets :param win_tile: 136 tiles format :param melds: list Meld objects :param is_tsumo: :return: true|false """ win_tile //= 4 open_sets = [x.tiles_34 for x in melds if x.opened] chi_sets = [x for x in hand if is_chi(x) and win_tile in x and (x not in open_sets)] pon_sets = [x for x in hand if is_pon(x)] closed_pon_sets = [] for item in pon_sets: if item in open_sets: continue # depends on [control=['if'], data=[]] # if we do the ron on syanpon wait our pon will be consider as open # and it is not 789999 set if win_tile in item and (not is_tsumo) and (not len(chi_sets)): continue # depends on [control=['if'], data=[]] closed_pon_sets.append(item) # depends on [control=['for'], data=['item']] return len(closed_pon_sets) == 3
def _outer_distance_mod_n(ref, est, modulus=12): """Compute the absolute outer distance modulo n. Using this distance, d(11, 0) = 1 (modulo 12) Parameters ---------- ref : np.ndarray, shape=(n,) Array of reference values. est : np.ndarray, shape=(m,) Array of estimated values. modulus : int The modulus. 12 by default for octave equivalence. Returns ------- outer_distance : np.ndarray, shape=(n, m) The outer circular distance modulo n. """ ref_mod_n = np.mod(ref, modulus) est_mod_n = np.mod(est, modulus) abs_diff = np.abs(np.subtract.outer(ref_mod_n, est_mod_n)) return np.minimum(abs_diff, modulus - abs_diff)
def function[_outer_distance_mod_n, parameter[ref, est, modulus]]: constant[Compute the absolute outer distance modulo n. Using this distance, d(11, 0) = 1 (modulo 12) Parameters ---------- ref : np.ndarray, shape=(n,) Array of reference values. est : np.ndarray, shape=(m,) Array of estimated values. modulus : int The modulus. 12 by default for octave equivalence. Returns ------- outer_distance : np.ndarray, shape=(n, m) The outer circular distance modulo n. ] variable[ref_mod_n] assign[=] call[name[np].mod, parameter[name[ref], name[modulus]]] variable[est_mod_n] assign[=] call[name[np].mod, parameter[name[est], name[modulus]]] variable[abs_diff] assign[=] call[name[np].abs, parameter[call[name[np].subtract.outer, parameter[name[ref_mod_n], name[est_mod_n]]]]] return[call[name[np].minimum, parameter[name[abs_diff], binary_operation[name[modulus] - name[abs_diff]]]]]
keyword[def] identifier[_outer_distance_mod_n] ( identifier[ref] , identifier[est] , identifier[modulus] = literal[int] ): literal[string] identifier[ref_mod_n] = identifier[np] . identifier[mod] ( identifier[ref] , identifier[modulus] ) identifier[est_mod_n] = identifier[np] . identifier[mod] ( identifier[est] , identifier[modulus] ) identifier[abs_diff] = identifier[np] . identifier[abs] ( identifier[np] . identifier[subtract] . identifier[outer] ( identifier[ref_mod_n] , identifier[est_mod_n] )) keyword[return] identifier[np] . identifier[minimum] ( identifier[abs_diff] , identifier[modulus] - identifier[abs_diff] )
def _outer_distance_mod_n(ref, est, modulus=12): """Compute the absolute outer distance modulo n. Using this distance, d(11, 0) = 1 (modulo 12) Parameters ---------- ref : np.ndarray, shape=(n,) Array of reference values. est : np.ndarray, shape=(m,) Array of estimated values. modulus : int The modulus. 12 by default for octave equivalence. Returns ------- outer_distance : np.ndarray, shape=(n, m) The outer circular distance modulo n. """ ref_mod_n = np.mod(ref, modulus) est_mod_n = np.mod(est, modulus) abs_diff = np.abs(np.subtract.outer(ref_mod_n, est_mod_n)) return np.minimum(abs_diff, modulus - abs_diff)
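A quick worked example of the modular outer distance, covering the d(11, 0) = 1 case from the docstring:

import numpy as np

ref = np.array([0, 11, 5])
est = np.array([1, 0])
print(_outer_distance_mod_n(ref, est))
# [[1 0]
#  [2 1]
#  [4 5]]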
def org_task(task_key, lock_timeout=None): """ Decorator to create an org task. :param task_key: the task key used for state storage and locking, e.g. 'do-stuff' :param lock_timeout: the lock timeout in seconds """ def _org_task(task_func): def _decorator(org_id): org = apps.get_model("orgs", "Org").objects.get(pk=org_id) maybe_run_for_org(org, task_func, task_key, lock_timeout) return shared_task(wraps(task_func)(_decorator)) return _org_task
def function[org_task, parameter[task_key, lock_timeout]]: constant[ Decorator to create an org task. :param task_key: the task key used for state storage and locking, e.g. 'do-stuff' :param lock_timeout: the lock timeout in seconds ] def function[_org_task, parameter[task_func]]: def function[_decorator, parameter[org_id]]: variable[org] assign[=] call[call[name[apps].get_model, parameter[constant[orgs], constant[Org]]].objects.get, parameter[]] call[name[maybe_run_for_org], parameter[name[org], name[task_func], name[task_key], name[lock_timeout]]] return[call[name[shared_task], parameter[call[call[name[wraps], parameter[name[task_func]]], parameter[name[_decorator]]]]]] return[name[_org_task]]
keyword[def] identifier[org_task] ( identifier[task_key] , identifier[lock_timeout] = keyword[None] ): literal[string] keyword[def] identifier[_org_task] ( identifier[task_func] ): keyword[def] identifier[_decorator] ( identifier[org_id] ): identifier[org] = identifier[apps] . identifier[get_model] ( literal[string] , literal[string] ). identifier[objects] . identifier[get] ( identifier[pk] = identifier[org_id] ) identifier[maybe_run_for_org] ( identifier[org] , identifier[task_func] , identifier[task_key] , identifier[lock_timeout] ) keyword[return] identifier[shared_task] ( identifier[wraps] ( identifier[task_func] )( identifier[_decorator] )) keyword[return] identifier[_org_task]
def org_task(task_key, lock_timeout=None): """ Decorator to create an org task. :param task_key: the task key used for state storage and locking, e.g. 'do-stuff' :param lock_timeout: the lock timeout in seconds """ def _org_task(task_func): def _decorator(org_id): org = apps.get_model('orgs', 'Org').objects.get(pk=org_id) maybe_run_for_org(org, task_func, task_key, lock_timeout) return shared_task(wraps(task_func)(_decorator)) return _org_task
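A hedged usage sketch for the decorator; the wrapped callable's exact signature is dictated by maybe_run_for_org, which is not shown, so the body is elided.

# Hypothetical task definition (signature depends on maybe_run_for_org).
@org_task('daily-metrics', lock_timeout=600)
def calculate_daily_metrics(org, *args, **kwargs):
    ...

# Celery then invokes the shared task per org id:
calculate_daily_metrics.delay(12)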
def help_string(): """Generate help string with contents of registry.""" help_str = """ Registry contents: ------------------ Models: %s HParams: %s RangedHParams: %s Problems: %s Optimizers: %s Attacks: %s Attack HParams: %s Pruning HParams: %s Pruning Strategies: %s Env Problems: %s """ lists = tuple( display_list_by_prefix(entries, starting_spaces=4) for entries in [ # pylint: disable=g-complex-comprehension list_models(), list_hparams(), list_ranged_hparams(), list_base_problems(), list_optimizers(), list_attacks(), list_attack_params(), list_pruning_params(), list_pruning_strategies(), list_env_problems(), ]) return help_str % lists
def function[help_string, parameter[]]: constant[Generate help string with contents of registry.] variable[help_str] assign[=] constant[ Registry contents: ------------------ Models: %s HParams: %s RangedHParams: %s Problems: %s Optimizers: %s Attacks: %s Attack HParams: %s Pruning HParams: %s Pruning Strategies: %s Env Problems: %s ] variable[lists] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20e957040>]] return[binary_operation[name[help_str] <ast.Mod object at 0x7da2590d6920> name[lists]]]
keyword[def] identifier[help_string] (): literal[string] identifier[help_str] = literal[string] identifier[lists] = identifier[tuple] ( identifier[display_list_by_prefix] ( identifier[entries] , identifier[starting_spaces] = literal[int] ) keyword[for] identifier[entries] keyword[in] [ identifier[list_models] (), identifier[list_hparams] (), identifier[list_ranged_hparams] (), identifier[list_base_problems] (), identifier[list_optimizers] (), identifier[list_attacks] (), identifier[list_attack_params] (), identifier[list_pruning_params] (), identifier[list_pruning_strategies] (), identifier[list_env_problems] (), ]) keyword[return] identifier[help_str] % identifier[lists]
def help_string(): """Generate help string with contents of registry.""" help_str = '\nRegistry contents:\n------------------\n\n Models:\n%s\n\n HParams:\n%s\n\n RangedHParams:\n%s\n\n Problems:\n%s\n\n Optimizers:\n%s\n\n Attacks:\n%s\n\n Attack HParams:\n%s\n\n Pruning HParams:\n%s\n\n Pruning Strategies:\n%s\n\n Env Problems:\n%s\n' # pylint: disable=g-complex-comprehension lists = tuple((display_list_by_prefix(entries, starting_spaces=4) for entries in [list_models(), list_hparams(), list_ranged_hparams(), list_base_problems(), list_optimizers(), list_attacks(), list_attack_params(), list_pruning_params(), list_pruning_strategies(), list_env_problems()])) return help_str % lists
def build_keyjar(key_conf, kid_template="", keyjar=None, owner=''):
    """
    Builds a :py:class:`oidcmsg.key_jar.KeyJar` instance or adds keys to an
    existing KeyJar based on a key specification.

    An example of such a specification::

        keys = [
            {"type": "RSA", "key": "cp_keys/key.pem", "use": ["enc", "sig"]},
            {"type": "EC", "crv": "P-256", "use": ["sig"], "kid": "ec.1"},
            {"type": "EC", "crv": "P-256", "use": ["enc"], "kid": "ec.2"}
        ]

    Keys in this specification are:

    type
        The type of key. Presently only 'rsa' and 'ec' are supported.
    key
        The name of a file where a key can be found. Only works with PEM
        encoded RSA keys.
    use
        What the key should be used for
    crv
        The elliptic curve that should be used. Only applies to elliptic curve
        keys :-)
    kid
        Key ID; can only be used when a single usage type is specified. If
        more than one usage type is specified, 'kid' is ignored.

    :param key_conf: The key configuration
    :param kid_template: A template by which to build the key IDs. If no
        kid_template is given then the built-in function add_kid() will be used.
    :param keyjar: If a KeyJar instance is given, the new keys are added to
        this key jar.
    :param owner: The default owner of the keys in the key jar.
    :return: A KeyJar instance
    """
    if keyjar is None:
        keyjar = KeyJar()

    tot_kb = build_key_bundle(key_conf, kid_template)

    keyjar.add_kb(owner, tot_kb)

    return keyjar
def function[build_keyjar, parameter[key_conf, kid_template, keyjar, owner]]: constant[ Builds a :py:class:`oidcmsg.key_jar.KeyJar` instance or adds keys to an existing KeyJar based on a key specification. An example of such a specification:: keys = [ {"type": "RSA", "key": "cp_keys/key.pem", "use": ["enc", "sig"]}, {"type": "EC", "crv": "P-256", "use": ["sig"], "kid": "ec.1"}, {"type": "EC", "crv": "P-256", "use": ["enc"], "kid": "ec.2"} ] Keys in this specification are: type The type of key. Presently only 'rsa' and 'ec' supported. key A name of a file where a key can be found. Only works with PEM encoded RSA keys use What the key should be used for crv The elliptic curve that should be used. Only applies to elliptic curve keys :-) kid Key ID, can only be used with one usage type is specified. If there are more the one usage type specified 'kid' will just be ignored. :param key_conf: The key configuration :param kid_template: A template by which to build the key IDs. If no kid_template is given then the built-in function add_kid() will be used. :param keyjar: If an KeyJar instance the new keys are added to this key jar. :param owner: The default owner of the keys in the key jar. :return: A KeyJar instance ] if compare[name[keyjar] is constant[None]] begin[:] variable[keyjar] assign[=] call[name[KeyJar], parameter[]] variable[tot_kb] assign[=] call[name[build_key_bundle], parameter[name[key_conf], name[kid_template]]] call[name[keyjar].add_kb, parameter[name[owner], name[tot_kb]]] return[name[keyjar]]
keyword[def] identifier[build_keyjar] ( identifier[key_conf] , identifier[kid_template] = literal[string] , identifier[keyjar] = keyword[None] , identifier[owner] = literal[string] ): literal[string] keyword[if] identifier[keyjar] keyword[is] keyword[None] : identifier[keyjar] = identifier[KeyJar] () identifier[tot_kb] = identifier[build_key_bundle] ( identifier[key_conf] , identifier[kid_template] ) identifier[keyjar] . identifier[add_kb] ( identifier[owner] , identifier[tot_kb] ) keyword[return] identifier[keyjar]
def build_keyjar(key_conf, kid_template='', keyjar=None, owner=''): """ Builds a :py:class:`oidcmsg.key_jar.KeyJar` instance or adds keys to an existing KeyJar based on a key specification. An example of such a specification:: keys = [ {"type": "RSA", "key": "cp_keys/key.pem", "use": ["enc", "sig"]}, {"type": "EC", "crv": "P-256", "use": ["sig"], "kid": "ec.1"}, {"type": "EC", "crv": "P-256", "use": ["enc"], "kid": "ec.2"} ] Keys in this specification are: type The type of key. Presently only 'rsa' and 'ec' supported. key A name of a file where a key can be found. Only works with PEM encoded RSA keys use What the key should be used for crv The elliptic curve that should be used. Only applies to elliptic curve keys :-) kid Key ID, can only be used with one usage type is specified. If there are more the one usage type specified 'kid' will just be ignored. :param key_conf: The key configuration :param kid_template: A template by which to build the key IDs. If no kid_template is given then the built-in function add_kid() will be used. :param keyjar: If an KeyJar instance the new keys are added to this key jar. :param owner: The default owner of the keys in the key jar. :return: A KeyJar instance """ if keyjar is None: keyjar = KeyJar() # depends on [control=['if'], data=['keyjar']] tot_kb = build_key_bundle(key_conf, kid_template) keyjar.add_kb(owner, tot_kb) return keyjar
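A usage sketch assembled directly from the specification in the docstring (the key file path and issuer are placeholders):

key_conf = [
    {"type": "RSA", "key": "cp_keys/key.pem", "use": ["enc", "sig"]},
    {"type": "EC", "crv": "P-256", "use": ["sig"], "kid": "ec.1"},
    {"type": "EC", "crv": "P-256", "use": ["enc"], "kid": "ec.2"},
]
keyjar = build_keyjar(key_conf, owner='https://op.example.com')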
def flake(self, message):
    """Record a pyflakes message (of the form <filename>:<lineno>: <msg>)
    as a diagnostic."""
    err_range = {
        'start': {'line': message.lineno - 1, 'character': message.col},
        'end': {'line': message.lineno - 1, 'character': len(self.lines[message.lineno - 1])},
    }

    severity = lsp.DiagnosticSeverity.Warning
    for message_type in PYFLAKES_ERROR_MESSAGES:
        if isinstance(message, message_type):
            severity = lsp.DiagnosticSeverity.Error
            break

    self.diagnostics.append({
        'source': 'pyflakes',
        'range': err_range,
        'message': message.message % message.message_args,
        'severity': severity
    })
def function[flake, parameter[self, message]]: constant[ Get message like <filename>:<lineno>: <msg> ] variable[err_range] assign[=] dictionary[[<ast.Constant object at 0x7da18c4ccfa0>, <ast.Constant object at 0x7da18c4cdc00>], [<ast.Dict object at 0x7da18c4cd9f0>, <ast.Dict object at 0x7da18c4cc910>]] variable[severity] assign[=] name[lsp].DiagnosticSeverity.Warning for taget[name[message_type]] in starred[name[PYFLAKES_ERROR_MESSAGES]] begin[:] if call[name[isinstance], parameter[name[message], name[message_type]]] begin[:] variable[severity] assign[=] name[lsp].DiagnosticSeverity.Error break call[name[self].diagnostics.append, parameter[dictionary[[<ast.Constant object at 0x7da2054a5ea0>, <ast.Constant object at 0x7da2054a53c0>, <ast.Constant object at 0x7da2054a7490>, <ast.Constant object at 0x7da2054a6560>], [<ast.Constant object at 0x7da2054a6c80>, <ast.Name object at 0x7da2054a4250>, <ast.BinOp object at 0x7da2054a55d0>, <ast.Name object at 0x7da2054a5a20>]]]]
keyword[def] identifier[flake] ( identifier[self] , identifier[message] ): literal[string] identifier[err_range] ={ literal[string] :{ literal[string] : identifier[message] . identifier[lineno] - literal[int] , literal[string] : identifier[message] . identifier[col] }, literal[string] :{ literal[string] : identifier[message] . identifier[lineno] - literal[int] , literal[string] : identifier[len] ( identifier[self] . identifier[lines] [ identifier[message] . identifier[lineno] - literal[int] ])}, } identifier[severity] = identifier[lsp] . identifier[DiagnosticSeverity] . identifier[Warning] keyword[for] identifier[message_type] keyword[in] identifier[PYFLAKES_ERROR_MESSAGES] : keyword[if] identifier[isinstance] ( identifier[message] , identifier[message_type] ): identifier[severity] = identifier[lsp] . identifier[DiagnosticSeverity] . identifier[Error] keyword[break] identifier[self] . identifier[diagnostics] . identifier[append] ({ literal[string] : literal[string] , literal[string] : identifier[err_range] , literal[string] : identifier[message] . identifier[message] % identifier[message] . identifier[message_args] , literal[string] : identifier[severity] })
def flake(self, message): """ Get message like <filename>:<lineno>: <msg> """ err_range = {'start': {'line': message.lineno - 1, 'character': message.col}, 'end': {'line': message.lineno - 1, 'character': len(self.lines[message.lineno - 1])}} severity = lsp.DiagnosticSeverity.Warning for message_type in PYFLAKES_ERROR_MESSAGES: if isinstance(message, message_type): severity = lsp.DiagnosticSeverity.Error break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['message_type']] self.diagnostics.append({'source': 'pyflakes', 'range': err_range, 'message': message.message % message.message_args, 'severity': severity})
def is_published(self):
    """Check field 773 to see if the record has already been
    published.

    :return: True if published, else False
    """
    field773 = record_get_field_instances(self.record, '773')
    for f773 in field773:
        if 'c' in field_get_subfields(f773):
            return True
    return False
def function[is_published, parameter[self]]: constant[Check fields 980 and 773 to see if the record has already been published. :return: True is published, else False ] variable[field773] assign[=] call[name[record_get_field_instances], parameter[name[self].record, constant[773]]] for taget[name[f773]] in starred[name[field773]] begin[:] if compare[constant[c] in call[name[field_get_subfields], parameter[name[f773]]]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[is_published] ( identifier[self] ): literal[string] identifier[field773] = identifier[record_get_field_instances] ( identifier[self] . identifier[record] , literal[string] ) keyword[for] identifier[f773] keyword[in] identifier[field773] : keyword[if] literal[string] keyword[in] identifier[field_get_subfields] ( identifier[f773] ): keyword[return] keyword[True] keyword[return] keyword[False]
def is_published(self):
    """Check fields 980 and 773 to see if the record has already
    been published.

    :return: True if published, else False
    """
    field773 = record_get_field_instances(self.record, '773')
    for f773 in field773:
        if 'c' in field_get_subfields(f773):
            return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f773']]
    return False
def keywords(self): """ Get the plot keywords for a specific movie id. Returns: A dict representation of the JSON returned from the API. """ path = self._get_id_path('keywords') response = self._GET(path) self._set_attrs_to_values(response) return response
def function[keywords, parameter[self]]: constant[ Get the plot keywords for a specific movie id. Returns: A dict representation of the JSON returned from the API. ] variable[path] assign[=] call[name[self]._get_id_path, parameter[constant[keywords]]] variable[response] assign[=] call[name[self]._GET, parameter[name[path]]] call[name[self]._set_attrs_to_values, parameter[name[response]]] return[name[response]]
keyword[def] identifier[keywords] ( identifier[self] ): literal[string] identifier[path] = identifier[self] . identifier[_get_id_path] ( literal[string] ) identifier[response] = identifier[self] . identifier[_GET] ( identifier[path] ) identifier[self] . identifier[_set_attrs_to_values] ( identifier[response] ) keyword[return] identifier[response]
def keywords(self): """ Get the plot keywords for a specific movie id. Returns: A dict representation of the JSON returned from the API. """ path = self._get_id_path('keywords') response = self._GET(path) self._set_attrs_to_values(response) return response
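If this getter sits on a tmdbsimple-style Movies wrapper (an assumption; only the method body above is given), a minimal usage sketch could look like the following, with the API key and movie id as placeholders:

# Hedged sketch: tmdbsimple, the API key, and id 603 are assumptions.
import tmdbsimple as tmdb

tmdb.API_KEY = 'YOUR_API_KEY'   # placeholder
movie = tmdb.Movies(603)        # example movie id
response = movie.keywords()     # GET /movie/603/keywords

# _set_attrs_to_values() copies the response keys onto the instance, so
# the payload's 'keywords' list also becomes reachable as an attribute.
for kw in response.get('keywords', []):
    print(kw['id'], kw['name'])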
def restart(self, offset: int): '''Send restart command. Coroutine. ''' yield from self._control_stream.write_command(Command('REST', str(offset))) reply = yield from self._control_stream.read_reply() self.raise_if_not_match('Restart', ReplyCodes.requested_file_action_pending_further_information, reply)
def function[restart, parameter[self, offset]]: constant[Send restart command. Coroutine. ] <ast.YieldFrom object at 0x7da2054a49a0> variable[reply] assign[=] <ast.YieldFrom object at 0x7da20e962860> call[name[self].raise_if_not_match, parameter[constant[Restart], name[ReplyCodes].requested_file_action_pending_further_information, name[reply]]]
keyword[def] identifier[restart] ( identifier[self] , identifier[offset] : identifier[int] ): literal[string] keyword[yield] keyword[from] identifier[self] . identifier[_control_stream] . identifier[write_command] ( identifier[Command] ( literal[string] , identifier[str] ( identifier[offset] ))) identifier[reply] = keyword[yield] keyword[from] identifier[self] . identifier[_control_stream] . identifier[read_reply] () identifier[self] . identifier[raise_if_not_match] ( literal[string] , identifier[ReplyCodes] . identifier[requested_file_action_pending_further_information] , identifier[reply] )
def restart(self, offset: int): """Send restart command. Coroutine. """ yield from self._control_stream.write_command(Command('REST', str(offset))) reply = (yield from self._control_stream.read_reply()) self.raise_if_not_match('Restart', ReplyCodes.requested_file_action_pending_further_information, reply)
def get_subdomains_owned_by_address(address, db_path=None, zonefiles_dir=None): """ Static method for getting the list of subdomains for a given address """ opts = get_blockstack_opts() if not is_subdomains_enabled(opts): return [] if db_path is None: db_path = opts['subdomaindb_path'] if zonefiles_dir is None: zonefiles_dir = opts['zonefiles'] db = SubdomainDB(db_path, zonefiles_dir) return db.get_subdomains_owned_by_address(address)
def function[get_subdomains_owned_by_address, parameter[address, db_path, zonefiles_dir]]: constant[ Static method for getting the list of subdomains for a given address ] variable[opts] assign[=] call[name[get_blockstack_opts], parameter[]] if <ast.UnaryOp object at 0x7da20e963ac0> begin[:] return[list[[]]] if compare[name[db_path] is constant[None]] begin[:] variable[db_path] assign[=] call[name[opts]][constant[subdomaindb_path]] if compare[name[zonefiles_dir] is constant[None]] begin[:] variable[zonefiles_dir] assign[=] call[name[opts]][constant[zonefiles]] variable[db] assign[=] call[name[SubdomainDB], parameter[name[db_path], name[zonefiles_dir]]] return[call[name[db].get_subdomains_owned_by_address, parameter[name[address]]]]
keyword[def] identifier[get_subdomains_owned_by_address] ( identifier[address] , identifier[db_path] = keyword[None] , identifier[zonefiles_dir] = keyword[None] ): literal[string] identifier[opts] = identifier[get_blockstack_opts] () keyword[if] keyword[not] identifier[is_subdomains_enabled] ( identifier[opts] ): keyword[return] [] keyword[if] identifier[db_path] keyword[is] keyword[None] : identifier[db_path] = identifier[opts] [ literal[string] ] keyword[if] identifier[zonefiles_dir] keyword[is] keyword[None] : identifier[zonefiles_dir] = identifier[opts] [ literal[string] ] identifier[db] = identifier[SubdomainDB] ( identifier[db_path] , identifier[zonefiles_dir] ) keyword[return] identifier[db] . identifier[get_subdomains_owned_by_address] ( identifier[address] )
def get_subdomains_owned_by_address(address, db_path=None, zonefiles_dir=None): """ Static method for getting the list of subdomains for a given address """ opts = get_blockstack_opts() if not is_subdomains_enabled(opts): return [] # depends on [control=['if'], data=[]] if db_path is None: db_path = opts['subdomaindb_path'] # depends on [control=['if'], data=['db_path']] if zonefiles_dir is None: zonefiles_dir = opts['zonefiles'] # depends on [control=['if'], data=['zonefiles_dir']] db = SubdomainDB(db_path, zonefiles_dir) return db.get_subdomains_owned_by_address(address)
def log_request(handler):
    """
    Logging the request is the opposite of logging the response;
    sometimes it's necessary, so feel free to enable it.
    """
    block = 'Request Information:\n' + _format_headers_log(handler.request.headers)
    if handler.request.arguments:
        block += '+----Arguments----+\n'
        for k, v in handler.request.arguments.items():
            block += '| {0:<15} | {1:<15} \n'.format(repr(k), repr(v))

    app_log.info(block)
def function[log_request, parameter[handler]]:
    constant[
    Logging the request is the opposite of logging the response;
    sometimes it's necessary, so feel free to enable it.
    ]
    variable[block] assign[=] binary_operation[constant[Request Information:
] + call[name[_format_headers_log], parameter[name[handler].request.headers]]]
    if name[handler].request.arguments begin[:]
        <ast.AugAssign object at 0x7da1b22560e0>
        for taget[tuple[[<ast.Name object at 0x7da1b2255090>, <ast.Name object at 0x7da1b2254370>]]] in starred[call[name[handler].request.arguments.items, parameter[]]] begin[:]
            <ast.AugAssign object at 0x7da1b2257850>
    call[name[app_log].info, parameter[name[block]]]
keyword[def] identifier[log_request] ( identifier[handler] ): literal[string] identifier[block] = literal[string] + identifier[_format_headers_log] ( identifier[handler] . identifier[request] . identifier[headers] ) keyword[if] identifier[handler] . identifier[request] . identifier[arguments] : identifier[block] += literal[string] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[handler] . identifier[request] . identifier[arguments] . identifier[items] (): identifier[block] += literal[string] . identifier[format] ( identifier[repr] ( identifier[k] ), identifier[repr] ( identifier[v] )) identifier[app_log] . identifier[info] ( identifier[block] )
def log_request(handler):
    """
    Logging the request is the opposite of logging the response;
    sometimes it's necessary, so feel free to enable it.
    """
    block = 'Request Information:\n' + _format_headers_log(handler.request.headers)
    if handler.request.arguments:
        block += '+----Arguments----+\n'
        for (k, v) in handler.request.arguments.items():
            block += '| {0:<15} | {1:<15} \n'.format(repr(k), repr(v)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
    app_log.info(block)
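The log block is plain string formatting; this standalone sketch reproduces just the arguments table with a made-up dict standing in for Tornado's handler.request.arguments (the headers part and the _format_headers_log helper are omitted):

# Stand-in for handler.request.arguments (Tornado uses bytes keys/values).
arguments = {b'q': [b'cats'], b'page': [b'2']}

block = '+----Arguments----+\n'
for k, v in arguments.items():
    block += '| {0:<15} | {1:<15} \n'.format(repr(k), repr(v))
print(block)
# | b'q'            | [b'cats']
# | b'page'         | [b'2']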
def _read_mol(self): """-V3000""" self.system = dict() if self.file_content[2] != '\n': self.system['remarks'] = self.file_content[2] file_body = [i.split() for i in self.file_content] elements = [] coordinates = [] atom_data = False for line in file_body: if len(line) > 2: if line[2] == 'END' and line[3] == 'ATOM': atom_data = False if atom_data is True: elements.append(line[3]) coordinates.append(line[4:7]) if line[2] == 'BEGIN' and line[3] == 'ATOM': atom_data = True self.system['elements'] = np.array(elements) self.system['coordinates'] = np.array(coordinates, dtype=float) return self.system
def function[_read_mol, parameter[self]]: constant[-V3000] name[self].system assign[=] call[name[dict], parameter[]] if compare[call[name[self].file_content][constant[2]] not_equal[!=] constant[ ]] begin[:] call[name[self].system][constant[remarks]] assign[=] call[name[self].file_content][constant[2]] variable[file_body] assign[=] <ast.ListComp object at 0x7da18c4cf4f0> variable[elements] assign[=] list[[]] variable[coordinates] assign[=] list[[]] variable[atom_data] assign[=] constant[False] for taget[name[line]] in starred[name[file_body]] begin[:] if compare[call[name[len], parameter[name[line]]] greater[>] constant[2]] begin[:] if <ast.BoolOp object at 0x7da18dc98c40> begin[:] variable[atom_data] assign[=] constant[False] if compare[name[atom_data] is constant[True]] begin[:] call[name[elements].append, parameter[call[name[line]][constant[3]]]] call[name[coordinates].append, parameter[call[name[line]][<ast.Slice object at 0x7da18dc99b40>]]] if <ast.BoolOp object at 0x7da18dc9a7d0> begin[:] variable[atom_data] assign[=] constant[True] call[name[self].system][constant[elements]] assign[=] call[name[np].array, parameter[name[elements]]] call[name[self].system][constant[coordinates]] assign[=] call[name[np].array, parameter[name[coordinates]]] return[name[self].system]
keyword[def] identifier[_read_mol] ( identifier[self] ): literal[string] identifier[self] . identifier[system] = identifier[dict] () keyword[if] identifier[self] . identifier[file_content] [ literal[int] ]!= literal[string] : identifier[self] . identifier[system] [ literal[string] ]= identifier[self] . identifier[file_content] [ literal[int] ] identifier[file_body] =[ identifier[i] . identifier[split] () keyword[for] identifier[i] keyword[in] identifier[self] . identifier[file_content] ] identifier[elements] =[] identifier[coordinates] =[] identifier[atom_data] = keyword[False] keyword[for] identifier[line] keyword[in] identifier[file_body] : keyword[if] identifier[len] ( identifier[line] )> literal[int] : keyword[if] identifier[line] [ literal[int] ]== literal[string] keyword[and] identifier[line] [ literal[int] ]== literal[string] : identifier[atom_data] = keyword[False] keyword[if] identifier[atom_data] keyword[is] keyword[True] : identifier[elements] . identifier[append] ( identifier[line] [ literal[int] ]) identifier[coordinates] . identifier[append] ( identifier[line] [ literal[int] : literal[int] ]) keyword[if] identifier[line] [ literal[int] ]== literal[string] keyword[and] identifier[line] [ literal[int] ]== literal[string] : identifier[atom_data] = keyword[True] identifier[self] . identifier[system] [ literal[string] ]= identifier[np] . identifier[array] ( identifier[elements] ) identifier[self] . identifier[system] [ literal[string] ]= identifier[np] . identifier[array] ( identifier[coordinates] , identifier[dtype] = identifier[float] ) keyword[return] identifier[self] . identifier[system]
def _read_mol(self): """-V3000""" self.system = dict() if self.file_content[2] != '\n': self.system['remarks'] = self.file_content[2] # depends on [control=['if'], data=[]] file_body = [i.split() for i in self.file_content] elements = [] coordinates = [] atom_data = False for line in file_body: if len(line) > 2: if line[2] == 'END' and line[3] == 'ATOM': atom_data = False # depends on [control=['if'], data=[]] if atom_data is True: elements.append(line[3]) coordinates.append(line[4:7]) # depends on [control=['if'], data=[]] if line[2] == 'BEGIN' and line[3] == 'ATOM': atom_data = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] self.system['elements'] = np.array(elements) self.system['coordinates'] = np.array(coordinates, dtype=float) return self.system
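The parser only looks at whitespace-split tokens between the BEGIN ATOM / END ATOM markers, so the loop can be exercised on a toy fragment; the two-atom block below is illustrative, not a complete molfile:

import numpy as np

# Toy V3000 atom block; only token positions matter to the loop above.
file_content = [
    'M  V30 BEGIN ATOM\n',
    'M  V30 1 C 0.0000 0.0000 0.0000 0\n',
    'M  V30 2 O 1.2000 0.0000 0.0000 0\n',
    'M  V30 END ATOM\n',
]

elements, coordinates, atom_data = [], [], False
for line in (raw.split() for raw in file_content):
    if len(line) > 2:
        if line[2] == 'END' and line[3] == 'ATOM':
            atom_data = False
        if atom_data is True:
            elements.append(line[3])       # element symbol is token 3
            coordinates.append(line[4:7])  # x, y, z are tokens 4-6
        if line[2] == 'BEGIN' and line[3] == 'ATOM':
            atom_data = True

print(np.array(elements))                  # ['C' 'O']
print(np.array(coordinates, dtype=float))  # [[0.  0.  0. ] [1.2 0.  0. ]]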
def _hide_tick_lines_and_labels(axis): """ Set visible property of ticklines and ticklabels of an axis to False """ for item in axis.get_ticklines() + axis.get_ticklabels(): item.set_visible(False)
def function[_hide_tick_lines_and_labels, parameter[axis]]: constant[ Set visible property of ticklines and ticklabels of an axis to False ] for taget[name[item]] in starred[binary_operation[call[name[axis].get_ticklines, parameter[]] + call[name[axis].get_ticklabels, parameter[]]]] begin[:] call[name[item].set_visible, parameter[constant[False]]]
keyword[def] identifier[_hide_tick_lines_and_labels] ( identifier[axis] ): literal[string] keyword[for] identifier[item] keyword[in] identifier[axis] . identifier[get_ticklines] ()+ identifier[axis] . identifier[get_ticklabels] (): identifier[item] . identifier[set_visible] ( keyword[False] )
def _hide_tick_lines_and_labels(axis): """ Set visible property of ticklines and ticklabels of an axis to False """ for item in axis.get_ticklines() + axis.get_ticklabels(): item.set_visible(False) # depends on [control=['for'], data=['item']]
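Axis.get_ticklines() and Axis.get_ticklabels() both return lists of artists, so concatenating them and flipping set_visible(False) blanks one axis at a time; a small self-contained example:

import matplotlib
matplotlib.use('Agg')  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])

# Same loop as the helper above, applied to the x axis only.
for item in ax.xaxis.get_ticklines() + ax.xaxis.get_ticklabels():
    item.set_visible(False)

fig.savefig('no_x_ticks.png')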
def args_to_dict(args): # type: (str) -> DictUpperBound[str,str] """Convert command line arguments in a comma separated string to a dictionary Args: args (str): Command line arguments Returns: DictUpperBound[str,str]: Dictionary of arguments """ arguments = dict() for arg in args.split(','): key, value = arg.split('=') arguments[key] = value return arguments
def function[args_to_dict, parameter[args]]: constant[Convert command line arguments in a comma separated string to a dictionary Args: args (str): Command line arguments Returns: DictUpperBound[str,str]: Dictionary of arguments ] variable[arguments] assign[=] call[name[dict], parameter[]] for taget[name[arg]] in starred[call[name[args].split, parameter[constant[,]]]] begin[:] <ast.Tuple object at 0x7da1b10227a0> assign[=] call[name[arg].split, parameter[constant[=]]] call[name[arguments]][name[key]] assign[=] name[value] return[name[arguments]]
keyword[def] identifier[args_to_dict] ( identifier[args] ): literal[string] identifier[arguments] = identifier[dict] () keyword[for] identifier[arg] keyword[in] identifier[args] . identifier[split] ( literal[string] ): identifier[key] , identifier[value] = identifier[arg] . identifier[split] ( literal[string] ) identifier[arguments] [ identifier[key] ]= identifier[value] keyword[return] identifier[arguments]
def args_to_dict(args): # type: (str) -> DictUpperBound[str,str] 'Convert command line arguments in a comma separated string to a dictionary\n\n Args:\n args (str): Command line arguments\n\n Returns:\n DictUpperBound[str,str]: Dictionary of arguments\n\n ' arguments = dict() for arg in args.split(','): (key, value) = arg.split('=') arguments[key] = value # depends on [control=['for'], data=['arg']] return arguments
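A quick check of the parser (note that, as written, it assumes exactly one '=' per pair and no commas inside values):

# Assuming args_to_dict from above is in scope.
print(args_to_dict('resource=acled,hdx_site=prod'))
# {'resource': 'acled', 'hdx_site': 'prod'}

# A value containing '=' would raise ValueError from the tuple unpacking;
# a more defensive variant could split with arg.split('=', 1).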
def load_children(self, f): """Load the children of this section from a file-like object""" while True: line = self.readline(f) if line[0] == '&': if line[1:].startswith("END"): check_name = line[4:].strip().upper() if check_name != self.__name: raise FileFormatError("CP2KSection end mismatch, pos=%s", f.tell()) break else: section = CP2KSection() section.load(f, line) self.append(section) else: keyword = CP2KKeyword() keyword.load(line) self.append(keyword)
def function[load_children, parameter[self, f]]: constant[Load the children of this section from a file-like object] while constant[True] begin[:] variable[line] assign[=] call[name[self].readline, parameter[name[f]]] if compare[call[name[line]][constant[0]] equal[==] constant[&]] begin[:] if call[call[name[line]][<ast.Slice object at 0x7da20c7c8550>].startswith, parameter[constant[END]]] begin[:] variable[check_name] assign[=] call[call[call[name[line]][<ast.Slice object at 0x7da20c7c9930>].strip, parameter[]].upper, parameter[]] if compare[name[check_name] not_equal[!=] name[self].__name] begin[:] <ast.Raise object at 0x7da20c7ca620> break
keyword[def] identifier[load_children] ( identifier[self] , identifier[f] ): literal[string] keyword[while] keyword[True] : identifier[line] = identifier[self] . identifier[readline] ( identifier[f] ) keyword[if] identifier[line] [ literal[int] ]== literal[string] : keyword[if] identifier[line] [ literal[int] :]. identifier[startswith] ( literal[string] ): identifier[check_name] = identifier[line] [ literal[int] :]. identifier[strip] (). identifier[upper] () keyword[if] identifier[check_name] != identifier[self] . identifier[__name] : keyword[raise] identifier[FileFormatError] ( literal[string] , identifier[f] . identifier[tell] ()) keyword[break] keyword[else] : identifier[section] = identifier[CP2KSection] () identifier[section] . identifier[load] ( identifier[f] , identifier[line] ) identifier[self] . identifier[append] ( identifier[section] ) keyword[else] : identifier[keyword] = identifier[CP2KKeyword] () identifier[keyword] . identifier[load] ( identifier[line] ) identifier[self] . identifier[append] ( identifier[keyword] )
def load_children(self, f): """Load the children of this section from a file-like object""" while True: line = self.readline(f) if line[0] == '&': if line[1:].startswith('END'): check_name = line[4:].strip().upper() if check_name != self.__name: raise FileFormatError('CP2KSection end mismatch, pos=%s', f.tell()) # depends on [control=['if'], data=[]] break # depends on [control=['if'], data=[]] else: section = CP2KSection() section.load(f, line) self.append(section) # depends on [control=['if'], data=[]] else: keyword = CP2KKeyword() keyword.load(line) self.append(keyword) # depends on [control=['while'], data=[]]
def _get_module_via_sys_modules(self, fullname): """ Attempt to fetch source code via sys.modules. This is specifically to support __main__, but it may catch a few more cases. """ module = sys.modules.get(fullname) LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module) if not isinstance(module, types.ModuleType): LOG.debug('sys.modules[%r] absent or not a regular module', fullname) return path = self._py_filename(getattr(module, '__file__', '')) if not path: return is_pkg = hasattr(module, '__path__') try: source = inspect.getsource(module) except IOError: # Work around inspect.getsourcelines() bug for 0-byte __init__.py # files. if not is_pkg: raise source = '\n' if isinstance(source, mitogen.core.UnicodeType): # get_source() returns "string" according to PEP-302, which was # reinterpreted for Python 3 to mean a Unicode string. source = source.encode('utf-8') return path, source, is_pkg
def function[_get_module_via_sys_modules, parameter[self, fullname]]: constant[ Attempt to fetch source code via sys.modules. This is specifically to support __main__, but it may catch a few more cases. ] variable[module] assign[=] call[name[sys].modules.get, parameter[name[fullname]]] call[name[LOG].debug, parameter[constant[_get_module_via_sys_modules(%r) -> %r], name[fullname], name[module]]] if <ast.UnaryOp object at 0x7da1b1d89030> begin[:] call[name[LOG].debug, parameter[constant[sys.modules[%r] absent or not a regular module], name[fullname]]] return[None] variable[path] assign[=] call[name[self]._py_filename, parameter[call[name[getattr], parameter[name[module], constant[__file__], constant[]]]]] if <ast.UnaryOp object at 0x7da1b1d88a00> begin[:] return[None] variable[is_pkg] assign[=] call[name[hasattr], parameter[name[module], constant[__path__]]] <ast.Try object at 0x7da1b1d89e10> if call[name[isinstance], parameter[name[source], name[mitogen].core.UnicodeType]] begin[:] variable[source] assign[=] call[name[source].encode, parameter[constant[utf-8]]] return[tuple[[<ast.Name object at 0x7da1b1d0ccd0>, <ast.Name object at 0x7da1b1d0c0d0>, <ast.Name object at 0x7da1b1d0eb60>]]]
keyword[def] identifier[_get_module_via_sys_modules] ( identifier[self] , identifier[fullname] ): literal[string] identifier[module] = identifier[sys] . identifier[modules] . identifier[get] ( identifier[fullname] ) identifier[LOG] . identifier[debug] ( literal[string] , identifier[fullname] , identifier[module] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[module] , identifier[types] . identifier[ModuleType] ): identifier[LOG] . identifier[debug] ( literal[string] , identifier[fullname] ) keyword[return] identifier[path] = identifier[self] . identifier[_py_filename] ( identifier[getattr] ( identifier[module] , literal[string] , literal[string] )) keyword[if] keyword[not] identifier[path] : keyword[return] identifier[is_pkg] = identifier[hasattr] ( identifier[module] , literal[string] ) keyword[try] : identifier[source] = identifier[inspect] . identifier[getsource] ( identifier[module] ) keyword[except] identifier[IOError] : keyword[if] keyword[not] identifier[is_pkg] : keyword[raise] identifier[source] = literal[string] keyword[if] identifier[isinstance] ( identifier[source] , identifier[mitogen] . identifier[core] . identifier[UnicodeType] ): identifier[source] = identifier[source] . identifier[encode] ( literal[string] ) keyword[return] identifier[path] , identifier[source] , identifier[is_pkg]
def _get_module_via_sys_modules(self, fullname): """ Attempt to fetch source code via sys.modules. This is specifically to support __main__, but it may catch a few more cases. """ module = sys.modules.get(fullname) LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module) if not isinstance(module, types.ModuleType): LOG.debug('sys.modules[%r] absent or not a regular module', fullname) return # depends on [control=['if'], data=[]] path = self._py_filename(getattr(module, '__file__', '')) if not path: return # depends on [control=['if'], data=[]] is_pkg = hasattr(module, '__path__') try: source = inspect.getsource(module) # depends on [control=['try'], data=[]] except IOError: # Work around inspect.getsourcelines() bug for 0-byte __init__.py # files. if not is_pkg: raise # depends on [control=['if'], data=[]] source = '\n' # depends on [control=['except'], data=[]] if isinstance(source, mitogen.core.UnicodeType): # get_source() returns "string" according to PEP-302, which was # reinterpreted for Python 3 to mean a Unicode string. source = source.encode('utf-8') # depends on [control=['if'], data=[]] return (path, source, is_pkg)
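The sys.modules plus inspect.getsource() core of this loader can be tried standalone on any pure-Python module; the suffix check below is a rough stand-in for the _py_filename() helper:

import inspect
import sys
import types

fullname = 'json'  # any pure-Python module works
module = sys.modules.get(fullname) or __import__(fullname)
assert isinstance(module, types.ModuleType)

path = getattr(module, '__file__', '')
assert path.endswith('.py')           # crude stand-in for _py_filename()

is_pkg = hasattr(module, '__path__')  # packages expose __path__
source = inspect.getsource(module)    # may raise IOError on 0-byte files
print(path, is_pkg, len(source))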
def conditional_expected_average_profit(self, frequency=None, monetary_value=None): """ Conditional expectation of the average profit. This method computes the conditional expectation of the average profit per transaction for a group of one or more customers. Parameters ---------- frequency: array_like, optional a vector containing the customers' frequencies. Defaults to the whole set of frequencies used for fitting the model. monetary_value: array_like, optional a vector containing the customers' monetary values. Defaults to the whole set of monetary values used for fitting the model. Returns ------- array_like: The conditional expectation of the average profit per transaction """ if monetary_value is None: monetary_value = self.data["monetary_value"] if frequency is None: frequency = self.data["frequency"] p, q, v = self._unload_params("p", "q", "v") # The expected average profit is a weighted average of individual # monetary value and the population mean. individual_weight = p * frequency / (p * frequency + q - 1) population_mean = v * p / (q - 1) return (1 - individual_weight) * population_mean + individual_weight * monetary_value
def function[conditional_expected_average_profit, parameter[self, frequency, monetary_value]]: constant[ Conditional expectation of the average profit. This method computes the conditional expectation of the average profit per transaction for a group of one or more customers. Parameters ---------- frequency: array_like, optional a vector containing the customers' frequencies. Defaults to the whole set of frequencies used for fitting the model. monetary_value: array_like, optional a vector containing the customers' monetary values. Defaults to the whole set of monetary values used for fitting the model. Returns ------- array_like: The conditional expectation of the average profit per transaction ] if compare[name[monetary_value] is constant[None]] begin[:] variable[monetary_value] assign[=] call[name[self].data][constant[monetary_value]] if compare[name[frequency] is constant[None]] begin[:] variable[frequency] assign[=] call[name[self].data][constant[frequency]] <ast.Tuple object at 0x7da1b1d28e80> assign[=] call[name[self]._unload_params, parameter[constant[p], constant[q], constant[v]]] variable[individual_weight] assign[=] binary_operation[binary_operation[name[p] * name[frequency]] / binary_operation[binary_operation[binary_operation[name[p] * name[frequency]] + name[q]] - constant[1]]] variable[population_mean] assign[=] binary_operation[binary_operation[name[v] * name[p]] / binary_operation[name[q] - constant[1]]] return[binary_operation[binary_operation[binary_operation[constant[1] - name[individual_weight]] * name[population_mean]] + binary_operation[name[individual_weight] * name[monetary_value]]]]
keyword[def] identifier[conditional_expected_average_profit] ( identifier[self] , identifier[frequency] = keyword[None] , identifier[monetary_value] = keyword[None] ): literal[string] keyword[if] identifier[monetary_value] keyword[is] keyword[None] : identifier[monetary_value] = identifier[self] . identifier[data] [ literal[string] ] keyword[if] identifier[frequency] keyword[is] keyword[None] : identifier[frequency] = identifier[self] . identifier[data] [ literal[string] ] identifier[p] , identifier[q] , identifier[v] = identifier[self] . identifier[_unload_params] ( literal[string] , literal[string] , literal[string] ) identifier[individual_weight] = identifier[p] * identifier[frequency] /( identifier[p] * identifier[frequency] + identifier[q] - literal[int] ) identifier[population_mean] = identifier[v] * identifier[p] /( identifier[q] - literal[int] ) keyword[return] ( literal[int] - identifier[individual_weight] )* identifier[population_mean] + identifier[individual_weight] * identifier[monetary_value]
def conditional_expected_average_profit(self, frequency=None, monetary_value=None): """ Conditional expectation of the average profit. This method computes the conditional expectation of the average profit per transaction for a group of one or more customers. Parameters ---------- frequency: array_like, optional a vector containing the customers' frequencies. Defaults to the whole set of frequencies used for fitting the model. monetary_value: array_like, optional a vector containing the customers' monetary values. Defaults to the whole set of monetary values used for fitting the model. Returns ------- array_like: The conditional expectation of the average profit per transaction """ if monetary_value is None: monetary_value = self.data['monetary_value'] # depends on [control=['if'], data=['monetary_value']] if frequency is None: frequency = self.data['frequency'] # depends on [control=['if'], data=['frequency']] (p, q, v) = self._unload_params('p', 'q', 'v') # The expected average profit is a weighted average of individual # monetary value and the population mean. individual_weight = p * frequency / (p * frequency + q - 1) population_mean = v * p / (q - 1) return (1 - individual_weight) * population_mean + individual_weight * monetary_value
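The return line is a shrinkage estimate: the weight w = p*x / (p*x + q - 1) blends the customer's own average spend with the population mean v*p / (q - 1). A standalone numeric check with illustrative (not fitted) parameters:

# Illustrative gamma-gamma parameters, not fitted values.
p, q, v = 6.25, 3.74, 15.44
frequency, monetary_value = 4, 42.0

individual_weight = p * frequency / (p * frequency + q - 1)
population_mean = v * p / (q - 1)
expected = ((1 - individual_weight) * population_mean
            + individual_weight * monetary_value)

# A frequent customer (large p*x) is pulled only slightly toward the mean.
print(round(individual_weight, 3), round(population_mean, 2), round(expected, 2))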
def get_bucket_page(page):
    """
    Returns all the keys in an S3 bucket paginator page.
    """
    key_list = page.get('Contents', [])
    logger.debug("Retrieving page with {} keys".format(
        len(key_list),
    ))
    return dict((k.get('Key'), k) for k in key_list)
def function[get_bucket_page, parameter[page]]:
    constant[
    Returns all the keys in an S3 bucket paginator page.
    ]
    variable[key_list] assign[=] call[name[page].get, parameter[constant[Contents], list[[]]]]
    call[name[logger].debug, parameter[call[constant[Retrieving page with {} keys].format, parameter[call[name[len], parameter[name[key_list]]]]]]]
    return[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da2054a5cc0>]]]
keyword[def] identifier[get_bucket_page] ( identifier[page] ): literal[string] identifier[key_list] = identifier[page] . identifier[get] ( literal[string] ,[]) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[key_list] ), )) keyword[return] identifier[dict] (( identifier[k] . identifier[get] ( literal[string] ), identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[key_list] )
def get_bucket_page(page):
    """
    Returns all the keys in an S3 bucket paginator page.
    """
    key_list = page.get('Contents', [])
    logger.debug('Retrieving page with {} keys'.format(len(key_list)))
    return dict(((k.get('Key'), k) for k in key_list))
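The transformation is just "index the Contents list by Key"; with a made-up page dict in the boto3 list_objects_v2 shape (the logger call is dropped so the sketch has no module-level dependencies):

# Made-up paginator page in the boto3 list_objects_v2 shape.
page = {
    'Contents': [
        {'Key': 'logs/a.txt', 'Size': 10},
        {'Key': 'logs/b.txt', 'Size': 20},
    ]
}

key_list = page.get('Contents', [])
by_key = dict((k.get('Key'), k) for k in key_list)
print(sorted(by_key))          # ['logs/a.txt', 'logs/b.txt']
print(by_key['logs/b.txt'])    # {'Key': 'logs/b.txt', 'Size': 20}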
def get_objects(self, path, marker=None, limit=settings.CLOUD_BROWSER_DEFAULT_LIST_LIMIT): """Get objects. Certain upload clients may add a 0-byte object (e.g., ``FOLDER`` object for path ``path/to/FOLDER`` - ``path/to/FOLDER/FOLDER``). We add an extra +1 limit query and ignore any such file objects. """ # Get basename of implied folder. folder = path.split(SEP)[-1] # Query extra objects, then strip 0-byte dummy object if present. objs = super(GsContainer, self).get_objects(path, marker, limit + 1) objs = [o for o in objs if not (o.size == 0 and o.name == folder)] return objs[:limit]
def function[get_objects, parameter[self, path, marker, limit]]: constant[Get objects. Certain upload clients may add a 0-byte object (e.g., ``FOLDER`` object for path ``path/to/FOLDER`` - ``path/to/FOLDER/FOLDER``). We add an extra +1 limit query and ignore any such file objects. ] variable[folder] assign[=] call[call[name[path].split, parameter[name[SEP]]]][<ast.UnaryOp object at 0x7da18bccbc10>] variable[objs] assign[=] call[call[name[super], parameter[name[GsContainer], name[self]]].get_objects, parameter[name[path], name[marker], binary_operation[name[limit] + constant[1]]]] variable[objs] assign[=] <ast.ListComp object at 0x7da18f00df60> return[call[name[objs]][<ast.Slice object at 0x7da18dc077c0>]]
keyword[def] identifier[get_objects] ( identifier[self] , identifier[path] , identifier[marker] = keyword[None] , identifier[limit] = identifier[settings] . identifier[CLOUD_BROWSER_DEFAULT_LIST_LIMIT] ): literal[string] identifier[folder] = identifier[path] . identifier[split] ( identifier[SEP] )[- literal[int] ] identifier[objs] = identifier[super] ( identifier[GsContainer] , identifier[self] ). identifier[get_objects] ( identifier[path] , identifier[marker] , identifier[limit] + literal[int] ) identifier[objs] =[ identifier[o] keyword[for] identifier[o] keyword[in] identifier[objs] keyword[if] keyword[not] ( identifier[o] . identifier[size] == literal[int] keyword[and] identifier[o] . identifier[name] == identifier[folder] )] keyword[return] identifier[objs] [: identifier[limit] ]
def get_objects(self, path, marker=None, limit=settings.CLOUD_BROWSER_DEFAULT_LIST_LIMIT): """Get objects. Certain upload clients may add a 0-byte object (e.g., ``FOLDER`` object for path ``path/to/FOLDER`` - ``path/to/FOLDER/FOLDER``). We add an extra +1 limit query and ignore any such file objects. """ # Get basename of implied folder. folder = path.split(SEP)[-1] # Query extra objects, then strip 0-byte dummy object if present. objs = super(GsContainer, self).get_objects(path, marker, limit + 1) objs = [o for o in objs if not (o.size == 0 and o.name == folder)] return objs[:limit]
def forward_ocr(self, img_):
    """Forward the image through the LSTM network model

    Parameters
    ----------
    img_: array of int

    Returns
    ----------
    label_list: list of string
    """
    img_ = cv2.resize(img_, (80, 30))
    img_ = img_.transpose(1, 0)
    print(img_.shape)
    img_ = img_.reshape((1, 80, 30))
    print(img_.shape)
    # img_ = img_.reshape((80 * 30))
    img_ = np.multiply(img_, 1 / 255.0)
    self.predictor.forward(data=img_, **self.init_state_dict)
    prob = self.predictor.get_output(0)
    label_list = []
    for p in prob:
        print(np.argsort(p))
        max_index = np.argsort(p)[::-1][0]
        label_list.append(max_index)
    return self.__get_string(label_list)
def function[forward_ocr, parameter[self, img_]]:
    constant[Forward the image through the LSTM network model

    Parameters
    ----------
    img_: array of int

    Returns
    ----------
    label_list: list of string
    ]
    variable[img_] assign[=] call[name[cv2].resize, parameter[name[img_], tuple[[<ast.Constant object at 0x7da1b2064820>, <ast.Constant object at 0x7da1b2065900>]]]]
    variable[img_] assign[=] call[name[img_].transpose, parameter[constant[1], constant[0]]]
    call[name[print], parameter[name[img_].shape]]
    variable[img_] assign[=] call[name[img_].reshape, parameter[tuple[[<ast.Constant object at 0x7da1b2089240>, <ast.Constant object at 0x7da1b2089720>, <ast.Constant object at 0x7da1b208b1c0>]]]]
    call[name[print], parameter[name[img_].shape]]
    variable[img_] assign[=] call[name[np].multiply, parameter[name[img_], binary_operation[constant[1] / constant[255.0]]]]
    call[name[self].predictor.forward, parameter[]]
    variable[prob] assign[=] call[name[self].predictor.get_output, parameter[constant[0]]]
    variable[label_list] assign[=] list[[]]
    for taget[name[p]] in starred[name[prob]] begin[:]
        call[name[print], parameter[call[name[np].argsort, parameter[name[p]]]]]
        variable[max_index] assign[=] call[call[call[name[np].argsort, parameter[name[p]]]][<ast.Slice object at 0x7da1b20ed4b0>]][constant[0]]
        call[name[label_list].append, parameter[name[max_index]]]
    return[call[name[self].__get_string, parameter[name[label_list]]]]
keyword[def] identifier[forward_ocr] ( identifier[self] , identifier[img_] ): literal[string] identifier[img_] = identifier[cv2] . identifier[resize] ( identifier[img_] ,( literal[int] , literal[int] )) identifier[img_] = identifier[img_] . identifier[transpose] ( literal[int] , literal[int] ) identifier[print] ( identifier[img_] . identifier[shape] ) identifier[img_] = identifier[img_] . identifier[reshape] (( literal[int] , literal[int] , literal[int] )) identifier[print] ( identifier[img_] . identifier[shape] ) identifier[img_] = identifier[np] . identifier[multiply] ( identifier[img_] , literal[int] / literal[int] ) identifier[self] . identifier[predictor] . identifier[forward] ( identifier[data] = identifier[img_] ,** identifier[self] . identifier[init_state_dict] ) identifier[prob] = identifier[self] . identifier[predictor] . identifier[get_output] ( literal[int] ) identifier[label_list] =[] keyword[for] identifier[p] keyword[in] identifier[prob] : identifier[print] ( identifier[np] . identifier[argsort] ( identifier[p] )) identifier[max_index] = identifier[np] . identifier[argsort] ( identifier[p] )[::- literal[int] ][ literal[int] ] identifier[label_list] . identifier[append] ( identifier[max_index] ) keyword[return] identifier[self] . identifier[__get_string] ( identifier[label_list] )
def forward_ocr(self, img_): """Forward the image through the LSTM network model Parameters ---------- img_: array of int Returns ---------- label_list: list of string """ img_ = cv2.resize(img_, (80, 30)) img_ = img_.transpose(1, 0) print(img_.shape) img_ = img_.reshape((1, 80, 30)) print(img_.shape) # img_ = img_.reshape((80 * 30)) img_ = np.multiply(img_, 1 / 255.0) self.predictor.forward(data=img_, **self.init_state_dict) prob = self.predictor.get_output(0) label_list = [] for p in prob: print(np.argsort(p)) max_index = np.argsort(p)[::-1][0] label_list.append(max_index) # depends on [control=['for'], data=['p']] return self.__get_string(label_list)
def render_field_error(self, obj_id, obj, exception, request):
    """
    Default rendering for items in the field where the usual rendering
    method raised an exception.
    """
    if obj is None:
        msg = 'No match for ID={0}'.format(obj_id)
    else:
        msg = unicode(exception)

    return u'<p class="error">{0}</p>'.format(msg)
def function[render_field_error, parameter[self, obj_id, obj, exception, request]]:
    constant[
    Default rendering for items in the field where the usual rendering
    method raised an exception.
    ]
    if compare[name[obj] is constant[None]] begin[:]
        variable[msg] assign[=] call[constant[No match for ID={0}].format, parameter[name[obj_id]]]
    return[call[constant[<p class="error">{0}</p>].format, parameter[name[msg]]]]
keyword[def] identifier[render_field_error] ( identifier[self] , identifier[obj_id] , identifier[obj] , identifier[exception] , identifier[request] ): literal[string] keyword[if] identifier[obj] keyword[is] keyword[None] : identifier[msg] = literal[string] . identifier[format] ( identifier[obj_id] ) keyword[else] : identifier[msg] = identifier[unicode] ( identifier[exception] ) keyword[return] literal[string] . identifier[format] ( identifier[msg] )
def render_field_error(self, obj_id, obj, exception, request):
    """
    Default rendering for items in the field where the usual rendering
    method raised an exception.
    """
    if obj is None:
        msg = 'No match for ID={0}'.format(obj_id) # depends on [control=['if'], data=[]]
    else:
        msg = unicode(exception)
    return u'<p class="error">{0}</p>'.format(msg)
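Both branches produce the same error paragraph shape; restated in Python 3 terms (unicode becomes str), the two outcomes look like the sketch below, which is illustrative rather than the class's actual call sites:

# Python 3 restatement of the two branches (unicode -> str).
obj_id, exception = 42, ValueError('boom')

missing = '<p class="error">{0}</p>'.format('No match for ID={0}'.format(obj_id))
errored = '<p class="error">{0}</p>'.format(str(exception))
print(missing)  # <p class="error">No match for ID=42</p>
print(errored)  # <p class="error">boom</p>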
def sync_to(self):
    """Wrapper method that synchronizes configuration to DG.

    Executes the containing object's cm
    :meth:`~f5.bigip.cm.Cm.exec_cmd` method to sync the configuration
    TO the device-group.

    :note:: Both the sync_to and sync_from methods are convenience
            methods, which is not something this SDK usually offers.
            It is best to execute config-sync by using the exec_cmd()
            method on the cm endpoint.
    """
    device_group_collection = self._meta_data['container']
    cm = device_group_collection._meta_data['container']
    sync_cmd = 'config-sync to-group %s' % self.name
    cm.exec_cmd('run', utilCmdArgs=sync_cmd)
def function[sync_to, parameter[self]]:
    constant[Wrapper method that synchronizes configuration to DG.

    Executes the containing object's cm
    :meth:`~f5.bigip.cm.Cm.exec_cmd` method to sync the configuration
    TO the device-group.

    :note:: Both the sync_to and sync_from methods are convenience
    methods, which is not something this SDK usually offers.
    It is best to execute config-sync by using the exec_cmd()
    method on the cm endpoint.
    ]
    variable[device_group_collection] assign[=] call[name[self]._meta_data][constant[container]]
    variable[cm] assign[=] call[name[device_group_collection]._meta_data][constant[container]]
    variable[sync_cmd] assign[=] binary_operation[constant[config-sync to-group %s] <ast.Mod object at 0x7da2590d6920> name[self].name]
    call[name[cm].exec_cmd, parameter[constant[run]]]
keyword[def] identifier[sync_to] ( identifier[self] ): literal[string] identifier[device_group_collection] = identifier[self] . identifier[_meta_data] [ literal[string] ] identifier[cm] = identifier[device_group_collection] . identifier[_meta_data] [ literal[string] ] identifier[sync_cmd] = literal[string] % identifier[self] . identifier[name] identifier[cm] . identifier[exec_cmd] ( literal[string] , identifier[utilCmdArgs] = identifier[sync_cmd] )
def sync_to(self):
    """Wrapper method that synchronizes configuration to DG.

    Executes the containing object's cm
    :meth:`~f5.bigip.cm.Cm.exec_cmd` method to sync the configuration
    TO the device-group.

    :note:: Both the sync_to and sync_from methods are convenience
            methods, which is not something this SDK usually offers.
            It is best to execute config-sync by using the exec_cmd()
            method on the cm endpoint.
    """
    device_group_collection = self._meta_data['container']
    cm = device_group_collection._meta_data['container'] 
    sync_cmd = 'config-sync to-group %s' % self.name 
    cm.exec_cmd('run', utilCmdArgs=sync_cmd)
def deleteable(self, request):
    '''
    Checks both check_deleteable and apply_deleteable against the owned model and its instance set
    '''
    return self.apply_deleteable(self.get_queryset(), request) if self.check_deleteable(self.model, request) is not False else self.get_queryset().none()
def function[deleteable, parameter[self, request]]:
    constant[
    Checks both check_deleteable and apply_deleteable against the owned model and its instance set
    ]
    return[<ast.IfExp object at 0x7da20c991fc0>]
keyword[def] identifier[deleteable] ( identifier[self] , identifier[request] ): literal[string] keyword[return] identifier[self] . identifier[apply_deleteable] ( identifier[self] . identifier[get_queryset] (), identifier[request] ) keyword[if] identifier[self] . identifier[check_deleteable] ( identifier[self] . identifier[model] , identifier[request] ) keyword[is] keyword[not] keyword[False] keyword[else] identifier[self] . identifier[get_queryset] (). identifier[none] ()
def deleteable(self, request): """ Checks both check_deleteable and apply_deleteable against the owned model and its instance set """ return self.apply_deleteable(self.get_queryset(), request) if self.check_deleteable(self.model, request) is not False else self.get_queryset().none()
def summarize_grading(samples, vkey="validate"): """Provide summaries of grading results across all samples. Handles both traditional pipelines (validation part of variants) and CWL pipelines (validation at top level) """ samples = list(utils.flatten(samples)) if not _has_grading_info(samples, vkey): return [[d] for d in samples] validate_dir = utils.safe_makedir(os.path.join(samples[0]["dirs"]["work"], vkey)) header = ["sample", "caller", "variant.type", "category", "value"] _summarize_combined(samples, vkey) validated, out = _group_validate_samples(samples, vkey, (["metadata", "validate_batch"], ["metadata", "batch"], ["description"])) for vname, vitems in validated.items(): out_csv = os.path.join(validate_dir, "grading-summary-%s.csv" % vname) with open(out_csv, "w") as out_handle: writer = csv.writer(out_handle) writer.writerow(header) plot_data = [] plot_files = [] for data in sorted(vitems, key=lambda x: x.get("lane", dd.get_sample_name(x)) or ""): validations = [variant.get(vkey) for variant in data.get("variants", []) if isinstance(variant, dict)] validations = [v for v in validations if v] if len(validations) == 0 and vkey in data: validations = [data.get(vkey)] for validate in validations: if validate: validate["grading_summary"] = out_csv if validate.get("grading"): for row in _get_validate_plotdata_yaml(validate["grading"], data): writer.writerow(row) plot_data.append(row) elif validate.get("summary") and not validate.get("summary") == "None": if isinstance(validate["summary"], (list, tuple)): plot_files.extend(list(set(validate["summary"]))) else: plot_files.append(validate["summary"]) if plot_files: plots = validateplot.classifyplot_from_plotfiles(plot_files, out_csv) elif plot_data: plots = validateplot.create(plot_data, header, 0, data["config"], os.path.splitext(out_csv)[0]) else: plots = [] for data in vitems: if data.get(vkey): data[vkey]["grading_plots"] = plots for variant in data.get("variants", []): if isinstance(variant, dict) and variant.get(vkey): variant[vkey]["grading_plots"] = plots out.append([data]) return out
def function[summarize_grading, parameter[samples, vkey]]: constant[Provide summaries of grading results across all samples. Handles both traditional pipelines (validation part of variants) and CWL pipelines (validation at top level) ] variable[samples] assign[=] call[name[list], parameter[call[name[utils].flatten, parameter[name[samples]]]]] if <ast.UnaryOp object at 0x7da1b18cbc70> begin[:] return[<ast.ListComp object at 0x7da1b18cbb50>] variable[validate_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[call[call[call[name[samples]][constant[0]]][constant[dirs]]][constant[work]], name[vkey]]]]] variable[header] assign[=] list[[<ast.Constant object at 0x7da1b18cb5e0>, <ast.Constant object at 0x7da1b18cb5b0>, <ast.Constant object at 0x7da1b18cb580>, <ast.Constant object at 0x7da1b18cb550>, <ast.Constant object at 0x7da1b18cb520>]] call[name[_summarize_combined], parameter[name[samples], name[vkey]]] <ast.Tuple object at 0x7da1b18cb3a0> assign[=] call[name[_group_validate_samples], parameter[name[samples], name[vkey], tuple[[<ast.List object at 0x7da1b18cb220>, <ast.List object at 0x7da1b18cb190>, <ast.List object at 0x7da1b18cb100>]]]] for taget[tuple[[<ast.Name object at 0x7da1b18cb040>, <ast.Name object at 0x7da1b18cb010>]]] in starred[call[name[validated].items, parameter[]]] begin[:] variable[out_csv] assign[=] call[name[os].path.join, parameter[name[validate_dir], binary_operation[constant[grading-summary-%s.csv] <ast.Mod object at 0x7da2590d6920> name[vname]]]] with call[name[open], parameter[name[out_csv], constant[w]]] begin[:] variable[writer] assign[=] call[name[csv].writer, parameter[name[out_handle]]] call[name[writer].writerow, parameter[name[header]]] variable[plot_data] assign[=] list[[]] variable[plot_files] assign[=] list[[]] for taget[name[data]] in starred[call[name[sorted], parameter[name[vitems]]]] begin[:] variable[validations] assign[=] <ast.ListComp object at 0x7da1b18ca470> variable[validations] assign[=] <ast.ListComp object at 0x7da1b18ca0e0> if <ast.BoolOp object at 0x7da1b18c9f60> begin[:] variable[validations] assign[=] list[[<ast.Call object at 0x7da1b19b9a50>]] for taget[name[validate]] in starred[name[validations]] begin[:] if name[validate] begin[:] call[name[validate]][constant[grading_summary]] assign[=] name[out_csv] if call[name[validate].get, parameter[constant[grading]]] begin[:] for taget[name[row]] in starred[call[name[_get_validate_plotdata_yaml], parameter[call[name[validate]][constant[grading]], name[data]]]] begin[:] call[name[writer].writerow, parameter[name[row]]] call[name[plot_data].append, parameter[name[row]]] if name[plot_files] begin[:] variable[plots] assign[=] call[name[validateplot].classifyplot_from_plotfiles, parameter[name[plot_files], name[out_csv]]] for taget[name[data]] in starred[name[vitems]] begin[:] if call[name[data].get, parameter[name[vkey]]] begin[:] call[call[name[data]][name[vkey]]][constant[grading_plots]] assign[=] name[plots] for taget[name[variant]] in starred[call[name[data].get, parameter[constant[variants], list[[]]]]] begin[:] if <ast.BoolOp object at 0x7da1b18c9990> begin[:] call[call[name[variant]][name[vkey]]][constant[grading_plots]] assign[=] name[plots] call[name[out].append, parameter[list[[<ast.Name object at 0x7da1b18c95a0>]]]] return[name[out]]
keyword[def] identifier[summarize_grading] ( identifier[samples] , identifier[vkey] = literal[string] ): literal[string] identifier[samples] = identifier[list] ( identifier[utils] . identifier[flatten] ( identifier[samples] )) keyword[if] keyword[not] identifier[_has_grading_info] ( identifier[samples] , identifier[vkey] ): keyword[return] [[ identifier[d] ] keyword[for] identifier[d] keyword[in] identifier[samples] ] identifier[validate_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[samples] [ literal[int] ][ literal[string] ][ literal[string] ], identifier[vkey] )) identifier[header] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[_summarize_combined] ( identifier[samples] , identifier[vkey] ) identifier[validated] , identifier[out] = identifier[_group_validate_samples] ( identifier[samples] , identifier[vkey] , ([ literal[string] , literal[string] ],[ literal[string] , literal[string] ],[ literal[string] ])) keyword[for] identifier[vname] , identifier[vitems] keyword[in] identifier[validated] . identifier[items] (): identifier[out_csv] = identifier[os] . identifier[path] . identifier[join] ( identifier[validate_dir] , literal[string] % identifier[vname] ) keyword[with] identifier[open] ( identifier[out_csv] , literal[string] ) keyword[as] identifier[out_handle] : identifier[writer] = identifier[csv] . identifier[writer] ( identifier[out_handle] ) identifier[writer] . identifier[writerow] ( identifier[header] ) identifier[plot_data] =[] identifier[plot_files] =[] keyword[for] identifier[data] keyword[in] identifier[sorted] ( identifier[vitems] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[get] ( literal[string] , identifier[dd] . identifier[get_sample_name] ( identifier[x] )) keyword[or] literal[string] ): identifier[validations] =[ identifier[variant] . identifier[get] ( identifier[vkey] ) keyword[for] identifier[variant] keyword[in] identifier[data] . identifier[get] ( literal[string] ,[]) keyword[if] identifier[isinstance] ( identifier[variant] , identifier[dict] )] identifier[validations] =[ identifier[v] keyword[for] identifier[v] keyword[in] identifier[validations] keyword[if] identifier[v] ] keyword[if] identifier[len] ( identifier[validations] )== literal[int] keyword[and] identifier[vkey] keyword[in] identifier[data] : identifier[validations] =[ identifier[data] . identifier[get] ( identifier[vkey] )] keyword[for] identifier[validate] keyword[in] identifier[validations] : keyword[if] identifier[validate] : identifier[validate] [ literal[string] ]= identifier[out_csv] keyword[if] identifier[validate] . identifier[get] ( literal[string] ): keyword[for] identifier[row] keyword[in] identifier[_get_validate_plotdata_yaml] ( identifier[validate] [ literal[string] ], identifier[data] ): identifier[writer] . identifier[writerow] ( identifier[row] ) identifier[plot_data] . identifier[append] ( identifier[row] ) keyword[elif] identifier[validate] . identifier[get] ( literal[string] ) keyword[and] keyword[not] identifier[validate] . identifier[get] ( literal[string] )== literal[string] : keyword[if] identifier[isinstance] ( identifier[validate] [ literal[string] ],( identifier[list] , identifier[tuple] )): identifier[plot_files] . identifier[extend] ( identifier[list] ( identifier[set] ( identifier[validate] [ literal[string] ]))) keyword[else] : identifier[plot_files] . identifier[append] ( identifier[validate] [ literal[string] ]) keyword[if] identifier[plot_files] : identifier[plots] = identifier[validateplot] . identifier[classifyplot_from_plotfiles] ( identifier[plot_files] , identifier[out_csv] ) keyword[elif] identifier[plot_data] : identifier[plots] = identifier[validateplot] . identifier[create] ( identifier[plot_data] , identifier[header] , literal[int] , identifier[data] [ literal[string] ], identifier[os] . identifier[path] . identifier[splitext] ( identifier[out_csv] )[ literal[int] ]) keyword[else] : identifier[plots] =[] keyword[for] identifier[data] keyword[in] identifier[vitems] : keyword[if] identifier[data] . identifier[get] ( identifier[vkey] ): identifier[data] [ identifier[vkey] ][ literal[string] ]= identifier[plots] keyword[for] identifier[variant] keyword[in] identifier[data] . identifier[get] ( literal[string] ,[]): keyword[if] identifier[isinstance] ( identifier[variant] , identifier[dict] ) keyword[and] identifier[variant] . identifier[get] ( identifier[vkey] ): identifier[variant] [ identifier[vkey] ][ literal[string] ]= identifier[plots] identifier[out] . identifier[append] ([ identifier[data] ]) keyword[return] identifier[out]
def summarize_grading(samples, vkey='validate'): """Provide summaries of grading results across all samples. Handles both traditional pipelines (validation part of variants) and CWL pipelines (validation at top level) """ samples = list(utils.flatten(samples)) if not _has_grading_info(samples, vkey): return [[d] for d in samples] # depends on [control=['if'], data=[]] validate_dir = utils.safe_makedir(os.path.join(samples[0]['dirs']['work'], vkey)) header = ['sample', 'caller', 'variant.type', 'category', 'value'] _summarize_combined(samples, vkey) (validated, out) = _group_validate_samples(samples, vkey, (['metadata', 'validate_batch'], ['metadata', 'batch'], ['description'])) for (vname, vitems) in validated.items(): out_csv = os.path.join(validate_dir, 'grading-summary-%s.csv' % vname) with open(out_csv, 'w') as out_handle: writer = csv.writer(out_handle) writer.writerow(header) plot_data = [] plot_files = [] for data in sorted(vitems, key=lambda x: x.get('lane', dd.get_sample_name(x)) or ''): validations = [variant.get(vkey) for variant in data.get('variants', []) if isinstance(variant, dict)] validations = [v for v in validations if v] if len(validations) == 0 and vkey in data: validations = [data.get(vkey)] # depends on [control=['if'], data=[]] for validate in validations: if validate: validate['grading_summary'] = out_csv if validate.get('grading'): for row in _get_validate_plotdata_yaml(validate['grading'], data): writer.writerow(row) plot_data.append(row) # depends on [control=['for'], data=['row']] # depends on [control=['if'], data=[]] elif validate.get('summary') and (not validate.get('summary') == 'None'): if isinstance(validate['summary'], (list, tuple)): plot_files.extend(list(set(validate['summary']))) # depends on [control=['if'], data=[]] else: plot_files.append(validate['summary']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['validate']] # depends on [control=['for'], data=['data']] # depends on [control=['with'], data=['out_handle']] if plot_files: plots = validateplot.classifyplot_from_plotfiles(plot_files, out_csv) # depends on [control=['if'], data=[]] elif plot_data: plots = validateplot.create(plot_data, header, 0, data['config'], os.path.splitext(out_csv)[0]) # depends on [control=['if'], data=[]] else: plots = [] for data in vitems: if data.get(vkey): data[vkey]['grading_plots'] = plots # depends on [control=['if'], data=[]] for variant in data.get('variants', []): if isinstance(variant, dict) and variant.get(vkey): variant[vkey]['grading_plots'] = plots # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['variant']] out.append([data]) # depends on [control=['for'], data=['data']] # depends on [control=['for'], data=[]] return out
def entries(self, query=None): """Fetches all Entries from the Space (up to the set limit, can be modified in `query`). API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/entries/entries-collection/get-all-entries-of-a-space :param query: (optional) Dict with API options. :return: List of :class:`Entry <contentful.entry.Entry>` objects. :rtype: List of contentful.entry.Entry Usage: >>> entries = client.entries() [<Entry[cat] id='happycat'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='5ETMRzkl9KM4omyMwKAOki'>, <Entry[dog] id='6KntaYXaHSyIw8M6eo26OK'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='7qVBlCjpWE86Oseo40gAEY'>, <Entry[cat] id='garfield'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='4MU1s3potiUEM2G4okYOqw'>, <Entry[cat] id='nyancat'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='ge1xHyH3QOWucKWCCAgIG'>, <Entry[human] id='finn'>, <Entry[dog] id='jake'>] """ if query is None: query = {} self._normalize_select(query) return self._get( self.environment_url('/entries'), query )
def function[entries, parameter[self, query]]: constant[Fetches all Entries from the Space (up to the set limit, can be modified in `query`). API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/entries/entries-collection/get-all-entries-of-a-space :param query: (optional) Dict with API options. :return: List of :class:`Entry <contentful.entry.Entry>` objects. :rtype: List of contentful.entry.Entry Usage: >>> entries = client.entries() [<Entry[cat] id='happycat'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='5ETMRzkl9KM4omyMwKAOki'>, <Entry[dog] id='6KntaYXaHSyIw8M6eo26OK'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='7qVBlCjpWE86Oseo40gAEY'>, <Entry[cat] id='garfield'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='4MU1s3potiUEM2G4okYOqw'>, <Entry[cat] id='nyancat'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='ge1xHyH3QOWucKWCCAgIG'>, <Entry[human] id='finn'>, <Entry[dog] id='jake'>] ] if compare[name[query] is constant[None]] begin[:] variable[query] assign[=] dictionary[[], []] call[name[self]._normalize_select, parameter[name[query]]] return[call[name[self]._get, parameter[call[name[self].environment_url, parameter[constant[/entries]]], name[query]]]]
keyword[def] identifier[entries] ( identifier[self] , identifier[query] = keyword[None] ): literal[string] keyword[if] identifier[query] keyword[is] keyword[None] : identifier[query] ={} identifier[self] . identifier[_normalize_select] ( identifier[query] ) keyword[return] identifier[self] . identifier[_get] ( identifier[self] . identifier[environment_url] ( literal[string] ), identifier[query] )
def entries(self, query=None): """Fetches all Entries from the Space (up to the set limit, can be modified in `query`). API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/entries/entries-collection/get-all-entries-of-a-space :param query: (optional) Dict with API options. :return: List of :class:`Entry <contentful.entry.Entry>` objects. :rtype: List of contentful.entry.Entry Usage: >>> entries = client.entries() [<Entry[cat] id='happycat'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='5ETMRzkl9KM4omyMwKAOki'>, <Entry[dog] id='6KntaYXaHSyIw8M6eo26OK'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='7qVBlCjpWE86Oseo40gAEY'>, <Entry[cat] id='garfield'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='4MU1s3potiUEM2G4okYOqw'>, <Entry[cat] id='nyancat'>, <Entry[1t9IbcfdCk6m04uISSsaIK] id='ge1xHyH3QOWucKWCCAgIG'>, <Entry[human] id='finn'>, <Entry[dog] id='jake'>] """ if query is None: query = {} # depends on [control=['if'], data=['query']] self._normalize_select(query) return self._get(self.environment_url('/entries'), query)
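The query dict maps straight onto Content Delivery API parameters; a hedged filtering sketch against Contentful's documented example space (the credentials below are the public example ones, so swap in your own):

import contentful

# Contentful's public example space credentials, per their docs.
client = contentful.Client('cfexampleapi', 'b4c0n73n7fu1')

# Only 'cat' entries, newest first, first page of 10.
cats = client.entries({
    'content_type': 'cat',
    'order': '-sys.createdAt',
    'limit': 10,
})
for entry in cats:
    print(entry.id)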
def fetch_page(cls, index, max_=None): """ Return a query set which requests a specific page. :param index: Index of the first element of the page to fetch. :type index: :class:`int` :param max_: Maximum number of elements to fetch :type max_: :class:`int` or :data:`None` :rtype: :class:`ResultSetMetadata` :return: A new request set up to request a page starting with the element indexed by `index`. .. note:: This way of retrieving items may be approximate. See :xep:`59` and the embedding protocol for which RSM is used for specifics. """ result = cls() result.index = index result.max_ = max_ return result
def function[fetch_page, parameter[cls, index, max_]]: constant[ Return a query set which requests a specific page. :param index: Index of the first element of the page to fetch. :type index: :class:`int` :param max_: Maximum number of elements to fetch :type max_: :class:`int` or :data:`None` :rtype: :class:`ResultSetMetadata` :return: A new request set up to request a page starting with the element indexed by `index`. .. note:: This way of retrieving items may be approximate. See :xep:`59` and the embedding protocol for which RSM is used for specifics. ] variable[result] assign[=] call[name[cls], parameter[]] name[result].index assign[=] name[index] name[result].max_ assign[=] name[max_] return[name[result]]
keyword[def] identifier[fetch_page] ( identifier[cls] , identifier[index] , identifier[max_] = keyword[None] ): literal[string] identifier[result] = identifier[cls] () identifier[result] . identifier[index] = identifier[index] identifier[result] . identifier[max_] = identifier[max_] keyword[return] identifier[result]
def fetch_page(cls, index, max_=None): """ Return a query set which requests a specific page. :param index: Index of the first element of the page to fetch. :type index: :class:`int` :param max_: Maximum number of elements to fetch :type max_: :class:`int` or :data:`None` :rtype: :class:`ResultSetMetadata` :return: A new request set up to request a page starting with the element indexed by `index`. .. note:: This way of retrieving items may be approximate. See :xep:`59` and the embedding protocol for which RSM is used for specifics. """ result = cls() result.index = index result.max_ = max_ return result
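A usage sketch of the classmethod above, assuming `ResultSetMetadata` (the rtype named in the docstring) is default-constructible, as the `cls()` call implies:

# Request the page of up to 50 items starting at element 100.
page = ResultSetMetadata.fetch_page(100, max_=50)
assert page.index == 100 and page.max_ == 50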
def reference_pix_from_wcs(frames, pixref, origin=1):
    """Compute reference pixels between frames using WCS information.

    The sky world coordinates are computed on *pixref* using the WCS of
    the first frame in the sequence. Then, the pixel coordinates of the
    reference sky world-coordinates are computed for the rest of the
    frames.

    The result is a list with the position of the reference pixel
    in each image

    """
    result = []
    with frames[0].open() as hdulist:
        wcsh = wcs.WCS(hdulist[0].header)
        skyref = wcsh.wcs_pix2world([pixref], origin)

    result.append(pixref)

    for idx, frame in enumerate(frames[1:]):
        with frame.open() as hdulist:
            wcsh = wcs.WCS(hdulist[0].header)
            pixval = wcsh.wcs_world2pix(skyref, origin)
            result.append(tuple(pixval[0]))

    return result
def function[reference_pix_from_wcs, parameter[frames, pixref, origin]]:
    constant[Compute reference pixels between frames using WCS information. The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The result is a list with the position of the reference pixel in each image ]
    variable[result] assign[=] list[[]]
    with call[call[name[frames]][constant[0]].open, parameter[]] begin[:]
        variable[wcsh] assign[=] call[name[wcs].WCS, parameter[call[name[hdulist]][constant[0]].header]]
        variable[skyref] assign[=] call[name[wcsh].wcs_pix2world, parameter[list[[<ast.Name object at 0x7da18bccab30>]], name[origin]]]
    call[name[result].append, parameter[name[pixref]]]
    for taget[tuple[[<ast.Name object at 0x7da18bcc9a80>, <ast.Name object at 0x7da18bcc8160>]]] in starred[call[name[enumerate], parameter[call[name[frames]][<ast.Slice object at 0x7da18bccba60>]]]] begin[:]
        with call[name[frame].open, parameter[]] begin[:]
            variable[wcsh] assign[=] call[name[wcs].WCS, parameter[call[name[hdulist]][constant[0]].header]]
            variable[pixval] assign[=] call[name[wcsh].wcs_world2pix, parameter[name[skyref], name[origin]]]
            call[name[result].append, parameter[call[name[tuple], parameter[call[name[pixval]][constant[0]]]]]]
    return[name[result]]
keyword[def] identifier[reference_pix_from_wcs] ( identifier[frames] , identifier[pixref] , identifier[origin] = literal[int] ): literal[string] identifier[result] =[] keyword[with] identifier[frames] [ literal[int] ]. identifier[open] () keyword[as] identifier[hdulist] : identifier[wcsh] = identifier[wcs] . identifier[WCS] ( identifier[hdulist] [ literal[int] ]. identifier[header] ) identifier[skyref] = identifier[wcsh] . identifier[wcs_pix2world] ([ identifier[pixref] ], identifier[origin] ) identifier[result] . identifier[append] ( identifier[pixref] ) keyword[for] identifier[idx] , identifier[frame] keyword[in] identifier[enumerate] ( identifier[frames] [ literal[int] :]): keyword[with] identifier[frame] . identifier[open] () keyword[as] identifier[hdulist] : identifier[wcsh] = identifier[wcs] . identifier[WCS] ( identifier[hdulist] [ literal[int] ]. identifier[header] ) identifier[pixval] = identifier[wcsh] . identifier[wcs_world2pix] ( identifier[skyref] , identifier[origin] ) identifier[result] . identifier[append] ( identifier[tuple] ( identifier[pixval] [ literal[int] ])) keyword[return] identifier[result]
def reference_pix_from_wcs(frames, pixref, origin=1):
    """Compute reference pixels between frames using WCS information.

    The sky world coordinates are computed on *pixref* using the WCS of
    the first frame in the sequence. Then, the pixel coordinates of the
    reference sky world-coordinates are computed for the rest of the
    frames.

    The result is a list with the position of the reference pixel
    in each image

    """
    result = []
    with frames[0].open() as hdulist:
        wcsh = wcs.WCS(hdulist[0].header)
        skyref = wcsh.wcs_pix2world([pixref], origin)
        result.append(pixref) # depends on [control=['with'], data=['hdulist']]
    for (idx, frame) in enumerate(frames[1:]):
        with frame.open() as hdulist:
            wcsh = wcs.WCS(hdulist[0].header)
            pixval = wcsh.wcs_world2pix(skyref, origin)
            result.append(tuple(pixval[0])) # depends on [control=['with'], data=['hdulist']] # depends on [control=['for'], data=[]]
    return result
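A round-trip sketch of the `wcs_pix2world`/`wcs_world2pix` pair used above, built on `astropy.wcs`; the projection parameters are made up rather than read from any real FITS header:

import numpy as np
from astropy import wcs

# Build a toy TAN projection instead of reading a FITS header.
w = wcs.WCS(naxis=2)
w.wcs.crpix = [512.0, 512.0]
w.wcs.cdelt = [-0.0003, 0.0003]
w.wcs.crval = [150.0, 2.0]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']

pixref = (100.0, 200.0)
skyref = w.wcs_pix2world([pixref], 1)   # pixel -> sky, FITS (1-based) origin
pixval = w.wcs_world2pix(skyref, 1)     # sky -> pixel, should round-trip
assert np.allclose(pixval[0], pixref)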
def validate_quota_change(self, quota_deltas, raise_exception=False):
    """
    Get error messages about an object and its ancestor quotas that will be exceeded if quota_deltas is applied.

    raise_exception - if True QuotaExceededException will be raised if validation fails
    quota_deltas - dictionary of quotas deltas, example:
        {
            'ram': 1024,
            'storage': 2048,
            ...
        }
    Example of output:
        ['ram quota limit: 1024, requires: 2048(instance#1)', ...]

    """
    errors = []
    for name, delta in six.iteritems(quota_deltas):
        quota = self.quotas.get(name=name)
        if quota.is_exceeded(delta):
            errors.append('%s quota limit: %s, requires %s (%s)\n' % (
                quota.name, quota.limit, quota.usage + delta, quota.scope))
    if not raise_exception:
        return errors
    else:
        if errors:
            raise exceptions.QuotaExceededException(_('One or more quotas were exceeded: %s') % ';'.join(errors))
def function[validate_quota_change, parameter[self, quota_deltas, raise_exception]]:
    constant[ Get error messages about an object and its ancestor quotas that will be exceeded if quota_deltas is applied. raise_exception - if True QuotaExceededException will be raised if validation fails quota_deltas - dictionary of quotas deltas, example: { 'ram': 1024, 'storage': 2048, ... } Example of output: ['ram quota limit: 1024, requires: 2048(instance#1)', ...] ]
    variable[errors] assign[=] list[[]]
    for taget[tuple[[<ast.Name object at 0x7da1b0fe6fb0>, <ast.Name object at 0x7da1b0fe7550>]]] in starred[call[name[six].iteritems, parameter[name[quota_deltas]]]] begin[:]
        variable[quota] assign[=] call[name[self].quotas.get, parameter[]]
        if call[name[quota].is_exceeded, parameter[name[delta]]] begin[:]
            call[name[errors].append, parameter[binary_operation[constant[%s quota limit: %s, requires %s (%s)
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0f5abc0>, <ast.Attribute object at 0x7da1b0f5aa40>, <ast.BinOp object at 0x7da1b0f58d00>, <ast.Attribute object at 0x7da1b0f59ab0>]]]]]
    if <ast.UnaryOp object at 0x7da1b0f58640> begin[:]
        return[name[errors]]
keyword[def] identifier[validate_quota_change] ( identifier[self] , identifier[quota_deltas] , identifier[raise_exception] = keyword[False] ): literal[string] identifier[errors] =[] keyword[for] identifier[name] , identifier[delta] keyword[in] identifier[six] . identifier[iteritems] ( identifier[quota_deltas] ): identifier[quota] = identifier[self] . identifier[quotas] . identifier[get] ( identifier[name] = identifier[name] ) keyword[if] identifier[quota] . identifier[is_exceeded] ( identifier[delta] ): identifier[errors] . identifier[append] ( literal[string] %( identifier[quota] . identifier[name] , identifier[quota] . identifier[limit] , identifier[quota] . identifier[usage] + identifier[delta] , identifier[quota] . identifier[scope] )) keyword[if] keyword[not] identifier[raise_exception] : keyword[return] identifier[errors] keyword[else] : keyword[if] identifier[errors] : keyword[raise] identifier[exceptions] . identifier[QuotaExceededException] ( identifier[_] ( literal[string] )% literal[string] . identifier[join] ( identifier[errors] ))
def validate_quota_change(self, quota_deltas, raise_exception=False):
    """
    Get error messages about an object and its ancestor quotas that will be exceeded if quota_deltas is applied.

    raise_exception - if True QuotaExceededException will be raised if validation fails
    quota_deltas - dictionary of quotas deltas, example:
        {
            'ram': 1024,
            'storage': 2048,
            ...
        }
    Example of output:
        ['ram quota limit: 1024, requires: 2048(instance#1)', ...]

    """
    errors = []
    for (name, delta) in six.iteritems(quota_deltas):
        quota = self.quotas.get(name=name)
        if quota.is_exceeded(delta):
            errors.append('%s quota limit: %s, requires %s (%s)\n' % (quota.name, quota.limit, quota.usage + delta, quota.scope)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
    if not raise_exception:
        return errors # depends on [control=['if'], data=[]]
    elif errors:
        raise exceptions.QuotaExceededException(_('One or more quotas were exceeded: %s') % ';'.join(errors)) # depends on [control=['if'], data=[]]
def api_key(api_key): """Authenticate via an api key.""" none() _config.api_key_prefix["Authorization"] = "api-key" _config.api_key["Authorization"] = "key=" + b64encode(api_key.encode()).decode()
def function[api_key, parameter[api_key]]: constant[Authenticate via an api key.] call[name[none], parameter[]] call[name[_config].api_key_prefix][constant[Authorization]] assign[=] constant[api-key] call[name[_config].api_key][constant[Authorization]] assign[=] binary_operation[constant[key=] + call[call[name[b64encode], parameter[call[name[api_key].encode, parameter[]]]].decode, parameter[]]]
keyword[def] identifier[api_key] ( identifier[api_key] ): literal[string] identifier[none] () identifier[_config] . identifier[api_key_prefix] [ literal[string] ]= literal[string] identifier[_config] . identifier[api_key] [ literal[string] ]= literal[string] + identifier[b64encode] ( identifier[api_key] . identifier[encode] ()). identifier[decode] ()
def api_key(api_key): """Authenticate via an api key.""" none() _config.api_key_prefix['Authorization'] = 'api-key' _config.api_key['Authorization'] = 'key=' + b64encode(api_key.encode()).decode()
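What the two assignments above produce when the header is assembled, shown in isolation with the standard library only; the key value is made up:

from base64 import b64encode

api_key = 's3cret-key'  # hypothetical key
prefix = 'api-key'
value = 'key=' + b64encode(api_key.encode()).decode()
header = '{0} {1}'.format(prefix, value)
# header == 'api-key key=czNjcmV0LWtleQ=='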
def list_(pkg=None, dir=None, runas=None, env=None, depth=None): ''' List installed NPM packages. If no directory is specified, this will return the list of globally- installed packages. pkg Limit package listing by name dir The directory whose packages will be listed, or None for global installation runas The user to run NPM with .. versionadded:: 2014.7.0 env Environment variables to set when invoking npm. Uses the same ``env`` format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution function. .. versionadded:: 2014.7.0 depth Limit the depth of the packages listed .. versionadded:: 2016.11.6,2017.7.0 CLI Example: .. code-block:: bash salt '*' npm.list ''' env = env or {} if runas: uid = salt.utils.user.get_uid(runas) if uid: env.update({'SUDO_UID': uid, 'SUDO_USER': ''}) cmd = ['npm', 'list', '--json', '--silent'] if not dir: cmd.append('--global') if depth is not None: if not isinstance(depth, (int, float)): raise salt.exceptions.SaltInvocationError('Error: depth {0} must be a number'.format(depth)) cmd.append('--depth={0}'.format(int(depth))) if pkg: # Protect against injection pkg = _cmd_quote(pkg) cmd.append('"{0}"'.format(pkg)) cmd = ' '.join(cmd) result = __salt__['cmd.run_all']( cmd, cwd=dir, runas=runas, env=env, python_shell=True, ignore_retcode=True) # npm will return error code 1 for both no packages found and an actual # error. The only difference between the two cases are if stderr is empty if result['retcode'] != 0 and result['stderr']: raise CommandExecutionError(result['stderr']) return salt.utils.json.loads(result['stdout']).get('dependencies', {})
def function[list_, parameter[pkg, dir, runas, env, depth]]: constant[ List installed NPM packages. If no directory is specified, this will return the list of globally- installed packages. pkg Limit package listing by name dir The directory whose packages will be listed, or None for global installation runas The user to run NPM with .. versionadded:: 2014.7.0 env Environment variables to set when invoking npm. Uses the same ``env`` format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution function. .. versionadded:: 2014.7.0 depth Limit the depth of the packages listed .. versionadded:: 2016.11.6,2017.7.0 CLI Example: .. code-block:: bash salt '*' npm.list ] variable[env] assign[=] <ast.BoolOp object at 0x7da18ede7970> if name[runas] begin[:] variable[uid] assign[=] call[name[salt].utils.user.get_uid, parameter[name[runas]]] if name[uid] begin[:] call[name[env].update, parameter[dictionary[[<ast.Constant object at 0x7da18ede6e00>, <ast.Constant object at 0x7da18ede4040>], [<ast.Name object at 0x7da18ede5150>, <ast.Constant object at 0x7da18ede7c40>]]]] variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18ede6c80>, <ast.Constant object at 0x7da18ede4df0>, <ast.Constant object at 0x7da18ede75b0>, <ast.Constant object at 0x7da18ede46d0>]] if <ast.UnaryOp object at 0x7da18ede73a0> begin[:] call[name[cmd].append, parameter[constant[--global]]] if compare[name[depth] is_not constant[None]] begin[:] if <ast.UnaryOp object at 0x7da18ede70d0> begin[:] <ast.Raise object at 0x7da18ede4f40> call[name[cmd].append, parameter[call[constant[--depth={0}].format, parameter[call[name[int], parameter[name[depth]]]]]]] if name[pkg] begin[:] variable[pkg] assign[=] call[name[_cmd_quote], parameter[name[pkg]]] call[name[cmd].append, parameter[call[constant["{0}"].format, parameter[name[pkg]]]]] variable[cmd] assign[=] call[constant[ ].join, parameter[name[cmd]]] variable[result] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]] if <ast.BoolOp object at 0x7da18ede4550> begin[:] <ast.Raise object at 0x7da18ede6980> return[call[call[name[salt].utils.json.loads, parameter[call[name[result]][constant[stdout]]]].get, parameter[constant[dependencies], dictionary[[], []]]]]
keyword[def] identifier[list_] ( identifier[pkg] = keyword[None] , identifier[dir] = keyword[None] , identifier[runas] = keyword[None] , identifier[env] = keyword[None] , identifier[depth] = keyword[None] ): literal[string] identifier[env] = identifier[env] keyword[or] {} keyword[if] identifier[runas] : identifier[uid] = identifier[salt] . identifier[utils] . identifier[user] . identifier[get_uid] ( identifier[runas] ) keyword[if] identifier[uid] : identifier[env] . identifier[update] ({ literal[string] : identifier[uid] , literal[string] : literal[string] }) identifier[cmd] =[ literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] keyword[not] identifier[dir] : identifier[cmd] . identifier[append] ( literal[string] ) keyword[if] identifier[depth] keyword[is] keyword[not] keyword[None] : keyword[if] keyword[not] identifier[isinstance] ( identifier[depth] ,( identifier[int] , identifier[float] )): keyword[raise] identifier[salt] . identifier[exceptions] . identifier[SaltInvocationError] ( literal[string] . identifier[format] ( identifier[depth] )) identifier[cmd] . identifier[append] ( literal[string] . identifier[format] ( identifier[int] ( identifier[depth] ))) keyword[if] identifier[pkg] : identifier[pkg] = identifier[_cmd_quote] ( identifier[pkg] ) identifier[cmd] . identifier[append] ( literal[string] . identifier[format] ( identifier[pkg] )) identifier[cmd] = literal[string] . identifier[join] ( identifier[cmd] ) identifier[result] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[cwd] = identifier[dir] , identifier[runas] = identifier[runas] , identifier[env] = identifier[env] , identifier[python_shell] = keyword[True] , identifier[ignore_retcode] = keyword[True] ) keyword[if] identifier[result] [ literal[string] ]!= literal[int] keyword[and] identifier[result] [ literal[string] ]: keyword[raise] identifier[CommandExecutionError] ( identifier[result] [ literal[string] ]) keyword[return] identifier[salt] . identifier[utils] . identifier[json] . identifier[loads] ( identifier[result] [ literal[string] ]). identifier[get] ( literal[string] ,{})
def list_(pkg=None, dir=None, runas=None, env=None, depth=None): """ List installed NPM packages. If no directory is specified, this will return the list of globally- installed packages. pkg Limit package listing by name dir The directory whose packages will be listed, or None for global installation runas The user to run NPM with .. versionadded:: 2014.7.0 env Environment variables to set when invoking npm. Uses the same ``env`` format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution function. .. versionadded:: 2014.7.0 depth Limit the depth of the packages listed .. versionadded:: 2016.11.6,2017.7.0 CLI Example: .. code-block:: bash salt '*' npm.list """ env = env or {} if runas: uid = salt.utils.user.get_uid(runas) if uid: env.update({'SUDO_UID': uid, 'SUDO_USER': ''}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] cmd = ['npm', 'list', '--json', '--silent'] if not dir: cmd.append('--global') # depends on [control=['if'], data=[]] if depth is not None: if not isinstance(depth, (int, float)): raise salt.exceptions.SaltInvocationError('Error: depth {0} must be a number'.format(depth)) # depends on [control=['if'], data=[]] cmd.append('--depth={0}'.format(int(depth))) # depends on [control=['if'], data=['depth']] if pkg: # Protect against injection pkg = _cmd_quote(pkg) cmd.append('"{0}"'.format(pkg)) # depends on [control=['if'], data=[]] cmd = ' '.join(cmd) result = __salt__['cmd.run_all'](cmd, cwd=dir, runas=runas, env=env, python_shell=True, ignore_retcode=True) # npm will return error code 1 for both no packages found and an actual # error. The only difference between the two cases are if stderr is empty if result['retcode'] != 0 and result['stderr']: raise CommandExecutionError(result['stderr']) # depends on [control=['if'], data=[]] return salt.utils.json.loads(result['stdout']).get('dependencies', {})
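The injection guard in miniature. `_cmd_quote` is used here as a `shlex.quote`-style helper, which is an assumption about its behaviour, not a claim about Salt's exact import; the hostile package name is of course fabricated:

import shlex

pkg = 'left-pad; rm -rf /'      # shell metacharacters in user input
quoted = shlex.quote(pkg)       # rendered as one inert shell token
cmd = ' '.join(['npm', 'list', '--json', '--silent', quoted])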
def cancel(self): """Stops the current search and deletes the results cache. :return: The :class:`Job`. """ try: self.post("control", action="cancel") except HTTPError as he: if he.status == 404: # The job has already been cancelled, so # cancelling it twice is a nop. pass else: raise return self
def function[cancel, parameter[self]]: constant[Stops the current search and deletes the results cache. :return: The :class:`Job`. ] <ast.Try object at 0x7da1b1981090> return[name[self]]
keyword[def] identifier[cancel] ( identifier[self] ): literal[string] keyword[try] : identifier[self] . identifier[post] ( literal[string] , identifier[action] = literal[string] ) keyword[except] identifier[HTTPError] keyword[as] identifier[he] : keyword[if] identifier[he] . identifier[status] == literal[int] : keyword[pass] keyword[else] : keyword[raise] keyword[return] identifier[self]
def cancel(self): """Stops the current search and deletes the results cache. :return: The :class:`Job`. """ try: self.post('control', action='cancel') # depends on [control=['try'], data=[]] except HTTPError as he: if he.status == 404: # The job has already been cancelled, so # cancelling it twice is a nop. pass # depends on [control=['if'], data=[]] else: raise # depends on [control=['except'], data=['he']] return self
def do_edit(self, args: argparse.Namespace) -> None: """Edit a file in a text editor""" if not self.editor: raise EnvironmentError("Please use 'set editor' to specify your text editing program of choice.") command = utils.quote_string_if_needed(os.path.expanduser(self.editor)) if args.file_path: command += " " + utils.quote_string_if_needed(os.path.expanduser(args.file_path)) self.do_shell(command)
def function[do_edit, parameter[self, args]]: constant[Edit a file in a text editor] if <ast.UnaryOp object at 0x7da18fe91540> begin[:] <ast.Raise object at 0x7da2045673d0> variable[command] assign[=] call[name[utils].quote_string_if_needed, parameter[call[name[os].path.expanduser, parameter[name[self].editor]]]] if name[args].file_path begin[:] <ast.AugAssign object at 0x7da204565720> call[name[self].do_shell, parameter[name[command]]]
keyword[def] identifier[do_edit] ( identifier[self] , identifier[args] : identifier[argparse] . identifier[Namespace] )-> keyword[None] : literal[string] keyword[if] keyword[not] identifier[self] . identifier[editor] : keyword[raise] identifier[EnvironmentError] ( literal[string] ) identifier[command] = identifier[utils] . identifier[quote_string_if_needed] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[self] . identifier[editor] )) keyword[if] identifier[args] . identifier[file_path] : identifier[command] += literal[string] + identifier[utils] . identifier[quote_string_if_needed] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[args] . identifier[file_path] )) identifier[self] . identifier[do_shell] ( identifier[command] )
def do_edit(self, args: argparse.Namespace) -> None: """Edit a file in a text editor""" if not self.editor: raise EnvironmentError("Please use 'set editor' to specify your text editing program of choice.") # depends on [control=['if'], data=[]] command = utils.quote_string_if_needed(os.path.expanduser(self.editor)) if args.file_path: command += ' ' + utils.quote_string_if_needed(os.path.expanduser(args.file_path)) # depends on [control=['if'], data=[]] self.do_shell(command)
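A standard-library stand-in for the quoting behaviour above (`utils.quote_string_if_needed` is cmd2's own helper, approximated here with `shlex.quote`); the paths are hypothetical:

import os
import shlex

editor = '~/bin/my editor'   # deliberately contains a space
command = shlex.quote(os.path.expanduser(editor))
command += ' ' + shlex.quote(os.path.expanduser('~/notes/todo.txt'))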
def notify_txn_invalid(self, txn_id, message=None, extended_data=None):
    """Adds a batch id to the invalid cache along with the id of the
    transaction that was rejected and any error message or extended data.
    Removes that batch id from the pending set. The cache is only
    temporary, and the batch info will be purged after one hour.

    Args:
        txn_id (str): The id of the invalid transaction
        message (str, optional): Message explaining why batch is invalid
        extended_data (bytes, optional): Additional error data
    """
    invalid_txn_info = {'id': txn_id}
    if message is not None:
        invalid_txn_info['message'] = message
    if extended_data is not None:
        invalid_txn_info['extended_data'] = extended_data

    with self._lock:
        for batch_id, txn_ids in self._batch_info.items():
            if txn_id in txn_ids:
                if batch_id not in self._invalid:
                    self._invalid[batch_id] = [invalid_txn_info]
                else:
                    self._invalid[batch_id].append(invalid_txn_info)
                self._pending.discard(batch_id)
                self._update_observers(batch_id, ClientBatchStatus.INVALID)
                return
def function[notify_txn_invalid, parameter[self, txn_id, message, extended_data]]:
    constant[Adds a batch id to the invalid cache along with the id of the transaction that was rejected and any error message or extended data. Removes that batch id from the pending set. The cache is only temporary, and the batch info will be purged after one hour. Args: txn_id (str): The id of the invalid transaction message (str, optional): Message explaining why batch is invalid extended_data (bytes, optional): Additional error data ]
    variable[invalid_txn_info] assign[=] dictionary[[<ast.Constant object at 0x7da18f09c820>], [<ast.Name object at 0x7da18f09d300>]]
    if compare[name[message] is_not constant[None]] begin[:]
        call[name[invalid_txn_info]][constant[message]] assign[=] name[message]
    if compare[name[extended_data] is_not constant[None]] begin[:]
        call[name[invalid_txn_info]][constant[extended_data]] assign[=] name[extended_data]
    with name[self]._lock begin[:]
        for taget[tuple[[<ast.Name object at 0x7da18f09e9b0>, <ast.Name object at 0x7da18f09e230>]]] in starred[call[name[self]._batch_info.items, parameter[]]] begin[:]
            if compare[name[txn_id] in name[txn_ids]] begin[:]
                if compare[name[batch_id] <ast.NotIn object at 0x7da2590d7190> name[self]._invalid] begin[:]
                    call[name[self]._invalid][name[batch_id]] assign[=] list[[<ast.Name object at 0x7da18f09cbb0>]]
                call[name[self]._pending.discard, parameter[name[batch_id]]]
                call[name[self]._update_observers, parameter[name[batch_id], name[ClientBatchStatus].INVALID]]
                return[None]
keyword[def] identifier[notify_txn_invalid] ( identifier[self] , identifier[txn_id] , identifier[message] = keyword[None] , identifier[extended_data] = keyword[None] ): literal[string] identifier[invalid_txn_info] ={ literal[string] : identifier[txn_id] } keyword[if] identifier[message] keyword[is] keyword[not] keyword[None] : identifier[invalid_txn_info] [ literal[string] ]= identifier[message] keyword[if] identifier[extended_data] keyword[is] keyword[not] keyword[None] : identifier[invalid_txn_info] [ literal[string] ]= identifier[extended_data] keyword[with] identifier[self] . identifier[_lock] : keyword[for] identifier[batch_id] , identifier[txn_ids] keyword[in] identifier[self] . identifier[_batch_info] . identifier[items] (): keyword[if] identifier[txn_id] keyword[in] identifier[txn_ids] : keyword[if] identifier[batch_id] keyword[not] keyword[in] identifier[self] . identifier[_invalid] : identifier[self] . identifier[_invalid] [ identifier[batch_id] ]=[ identifier[invalid_txn_info] ] keyword[else] : identifier[self] . identifier[_invalid] [ identifier[batch_id] ]. identifier[append] ( identifier[invalid_txn_info] ) identifier[self] . identifier[_pending] . identifier[discard] ( identifier[batch_id] ) identifier[self] . identifier[_update_observers] ( identifier[batch_id] , identifier[ClientBatchStatus] . identifier[INVALID] ) keyword[return]
def notify_txn_invalid(self, txn_id, message=None, extended_data=None):
    """Adds a batch id to the invalid cache along with the id of the
    transaction that was rejected and any error message or extended data.
    Removes that batch id from the pending set. The cache is only
    temporary, and the batch info will be purged after one hour.

    Args:
        txn_id (str): The id of the invalid transaction
        message (str, optional): Message explaining why batch is invalid
        extended_data (bytes, optional): Additional error data
    """
    invalid_txn_info = {'id': txn_id}
    if message is not None:
        invalid_txn_info['message'] = message # depends on [control=['if'], data=['message']]
    if extended_data is not None:
        invalid_txn_info['extended_data'] = extended_data # depends on [control=['if'], data=['extended_data']]
    with self._lock:
        for (batch_id, txn_ids) in self._batch_info.items():
            if txn_id in txn_ids:
                if batch_id not in self._invalid:
                    self._invalid[batch_id] = [invalid_txn_info] # depends on [control=['if'], data=['batch_id']]
                else:
                    self._invalid[batch_id].append(invalid_txn_info)
                self._pending.discard(batch_id)
                self._update_observers(batch_id, ClientBatchStatus.INVALID)
                return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]]
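The branch that grows `self._invalid` above is the usual dict-of-lists accumulation; `dict.setdefault` expresses the same create-or-append in one call (ids here are made up):

invalid = {}
invalid.setdefault('batch-1', []).append({'id': 'txn-9', 'message': 'bad signature'})
invalid.setdefault('batch-1', []).append({'id': 'txn-10'})
# invalid == {'batch-1': [{'id': 'txn-9', 'message': 'bad signature'}, {'id': 'txn-10'}]}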
def get_image(self, cat, img): """ Loads an image from disk. """ filename = self.path(cat, img) data = [] if filename.endswith('mat'): data = loadmat(filename)['output'] else: data = imread(filename) if self.size is not None: return imresize(data, self.size) else: return data
def function[get_image, parameter[self, cat, img]]: constant[ Loads an image from disk. ] variable[filename] assign[=] call[name[self].path, parameter[name[cat], name[img]]] variable[data] assign[=] list[[]] if call[name[filename].endswith, parameter[constant[mat]]] begin[:] variable[data] assign[=] call[call[name[loadmat], parameter[name[filename]]]][constant[output]] if compare[name[self].size is_not constant[None]] begin[:] return[call[name[imresize], parameter[name[data], name[self].size]]]
keyword[def] identifier[get_image] ( identifier[self] , identifier[cat] , identifier[img] ): literal[string] identifier[filename] = identifier[self] . identifier[path] ( identifier[cat] , identifier[img] ) identifier[data] =[] keyword[if] identifier[filename] . identifier[endswith] ( literal[string] ): identifier[data] = identifier[loadmat] ( identifier[filename] )[ literal[string] ] keyword[else] : identifier[data] = identifier[imread] ( identifier[filename] ) keyword[if] identifier[self] . identifier[size] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[imresize] ( identifier[data] , identifier[self] . identifier[size] ) keyword[else] : keyword[return] identifier[data]
def get_image(self, cat, img): """ Loads an image from disk. """ filename = self.path(cat, img) data = [] if filename.endswith('mat'): data = loadmat(filename)['output'] # depends on [control=['if'], data=[]] else: data = imread(filename) if self.size is not None: return imresize(data, self.size) # depends on [control=['if'], data=[]] else: return data
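The `imread`/`imresize` helpers above come from the long-removed `scipy.misc` API. A rough modern stand-in under that assumption, using Pillow for reading and resizing (`load_image` is a hypothetical name, and float `.mat` data may need casting before `fromarray`):

import numpy as np
from PIL import Image
from scipy.io import loadmat

def load_image(filename, size=None):
    if filename.endswith('mat'):
        data = loadmat(filename)['output']      # same 'output' key as above
    else:
        data = np.asarray(Image.open(filename))
    if size is not None:
        # PIL wants (width, height); `size` here is assumed (rows, cols).
        data = np.asarray(Image.fromarray(data).resize(size[::-1]))
    return data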
def replace_parent(self, parent_simples): """If ``&`` (or the legacy xCSS equivalent ``self``) appears in this selector, replace it with the given iterable of parent selectors. Returns a tuple of simple selectors. """ assert parent_simples ancestors = parent_simples[:-1] parent = parent_simples[-1] did_replace = False new_tokens = [] for token in self.tokens: if not did_replace and token in ('&', 'self'): did_replace = True new_tokens.extend(parent.tokens) if token == 'self': warn(FutureWarning( "The xCSS 'self' selector is deprecated and will be " "removed in 2.0. Use & instead. ({0!r})" .format(self) )) else: new_tokens.append(token) if not did_replace: # This simple selector doesn't contain a parent reference so just # stick it on the end return parent_simples + (self,) # This simple selector was merged into the direct parent. merged_self = type(self)(parent.combinator, new_tokens) selector = ancestors + (merged_self,) # Our combinator goes on the first ancestor, i.e., substituting "foo # bar baz" into "+ &.quux" produces "+ foo bar baz.quux". This means a # potential conflict with the first ancestor's combinator! root = selector[0] if not _is_combinator_subset_of(self.combinator, root.combinator): raise ValueError( "Can't sub parent {0!r} into {1!r}: " "combinators {2!r} and {3!r} conflict!" .format( parent_simples, self, self.combinator, root.combinator)) root = type(self)(self.combinator, root.tokens) selector = (root,) + selector[1:] return tuple(selector)
def function[replace_parent, parameter[self, parent_simples]]: constant[If ``&`` (or the legacy xCSS equivalent ``self``) appears in this selector, replace it with the given iterable of parent selectors. Returns a tuple of simple selectors. ] assert[name[parent_simples]] variable[ancestors] assign[=] call[name[parent_simples]][<ast.Slice object at 0x7da1b0d013c0>] variable[parent] assign[=] call[name[parent_simples]][<ast.UnaryOp object at 0x7da1b0d01450>] variable[did_replace] assign[=] constant[False] variable[new_tokens] assign[=] list[[]] for taget[name[token]] in starred[name[self].tokens] begin[:] if <ast.BoolOp object at 0x7da1b0d016c0> begin[:] variable[did_replace] assign[=] constant[True] call[name[new_tokens].extend, parameter[name[parent].tokens]] if compare[name[token] equal[==] constant[self]] begin[:] call[name[warn], parameter[call[name[FutureWarning], parameter[call[constant[The xCSS 'self' selector is deprecated and will be removed in 2.0. Use & instead. ({0!r})].format, parameter[name[self]]]]]]] if <ast.UnaryOp object at 0x7da1b0d03d60> begin[:] return[binary_operation[name[parent_simples] + tuple[[<ast.Name object at 0x7da1b0d02260>]]]] variable[merged_self] assign[=] call[call[name[type], parameter[name[self]]], parameter[name[parent].combinator, name[new_tokens]]] variable[selector] assign[=] binary_operation[name[ancestors] + tuple[[<ast.Name object at 0x7da1b0d00700>]]] variable[root] assign[=] call[name[selector]][constant[0]] if <ast.UnaryOp object at 0x7da1b0d01510> begin[:] <ast.Raise object at 0x7da1b0d027d0> variable[root] assign[=] call[call[name[type], parameter[name[self]]], parameter[name[self].combinator, name[root].tokens]] variable[selector] assign[=] binary_operation[tuple[[<ast.Name object at 0x7da20c795120>]] + call[name[selector]][<ast.Slice object at 0x7da20c796140>]] return[call[name[tuple], parameter[name[selector]]]]
keyword[def] identifier[replace_parent] ( identifier[self] , identifier[parent_simples] ): literal[string] keyword[assert] identifier[parent_simples] identifier[ancestors] = identifier[parent_simples] [:- literal[int] ] identifier[parent] = identifier[parent_simples] [- literal[int] ] identifier[did_replace] = keyword[False] identifier[new_tokens] =[] keyword[for] identifier[token] keyword[in] identifier[self] . identifier[tokens] : keyword[if] keyword[not] identifier[did_replace] keyword[and] identifier[token] keyword[in] ( literal[string] , literal[string] ): identifier[did_replace] = keyword[True] identifier[new_tokens] . identifier[extend] ( identifier[parent] . identifier[tokens] ) keyword[if] identifier[token] == literal[string] : identifier[warn] ( identifier[FutureWarning] ( literal[string] literal[string] . identifier[format] ( identifier[self] ) )) keyword[else] : identifier[new_tokens] . identifier[append] ( identifier[token] ) keyword[if] keyword[not] identifier[did_replace] : keyword[return] identifier[parent_simples] +( identifier[self] ,) identifier[merged_self] = identifier[type] ( identifier[self] )( identifier[parent] . identifier[combinator] , identifier[new_tokens] ) identifier[selector] = identifier[ancestors] +( identifier[merged_self] ,) identifier[root] = identifier[selector] [ literal[int] ] keyword[if] keyword[not] identifier[_is_combinator_subset_of] ( identifier[self] . identifier[combinator] , identifier[root] . identifier[combinator] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[parent_simples] , identifier[self] , identifier[self] . identifier[combinator] , identifier[root] . identifier[combinator] )) identifier[root] = identifier[type] ( identifier[self] )( identifier[self] . identifier[combinator] , identifier[root] . identifier[tokens] ) identifier[selector] =( identifier[root] ,)+ identifier[selector] [ literal[int] :] keyword[return] identifier[tuple] ( identifier[selector] )
def replace_parent(self, parent_simples): """If ``&`` (or the legacy xCSS equivalent ``self``) appears in this selector, replace it with the given iterable of parent selectors. Returns a tuple of simple selectors. """ assert parent_simples ancestors = parent_simples[:-1] parent = parent_simples[-1] did_replace = False new_tokens = [] for token in self.tokens: if not did_replace and token in ('&', 'self'): did_replace = True new_tokens.extend(parent.tokens) if token == 'self': warn(FutureWarning("The xCSS 'self' selector is deprecated and will be removed in 2.0. Use & instead. ({0!r})".format(self))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: new_tokens.append(token) # depends on [control=['for'], data=['token']] if not did_replace: # This simple selector doesn't contain a parent reference so just # stick it on the end return parent_simples + (self,) # depends on [control=['if'], data=[]] # This simple selector was merged into the direct parent. merged_self = type(self)(parent.combinator, new_tokens) selector = ancestors + (merged_self,) # Our combinator goes on the first ancestor, i.e., substituting "foo # bar baz" into "+ &.quux" produces "+ foo bar baz.quux". This means a # potential conflict with the first ancestor's combinator! root = selector[0] if not _is_combinator_subset_of(self.combinator, root.combinator): raise ValueError("Can't sub parent {0!r} into {1!r}: combinators {2!r} and {3!r} conflict!".format(parent_simples, self, self.combinator, root.combinator)) # depends on [control=['if'], data=[]] root = type(self)(self.combinator, root.tokens) selector = (root,) + selector[1:] return tuple(selector)
def _close_last(self): """Close the resultset and reset collected meta data. """ if self._rs: self._rs.close() self._rs = None if self._prep: self._prep.close() self._prep = None self._meta = None self._description = None
def function[_close_last, parameter[self]]: constant[Close the resultset and reset collected meta data. ] if name[self]._rs begin[:] call[name[self]._rs.close, parameter[]] name[self]._rs assign[=] constant[None] if name[self]._prep begin[:] call[name[self]._prep.close, parameter[]] name[self]._prep assign[=] constant[None] name[self]._meta assign[=] constant[None] name[self]._description assign[=] constant[None]
keyword[def] identifier[_close_last] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_rs] : identifier[self] . identifier[_rs] . identifier[close] () identifier[self] . identifier[_rs] = keyword[None] keyword[if] identifier[self] . identifier[_prep] : identifier[self] . identifier[_prep] . identifier[close] () identifier[self] . identifier[_prep] = keyword[None] identifier[self] . identifier[_meta] = keyword[None] identifier[self] . identifier[_description] = keyword[None]
def _close_last(self): """Close the resultset and reset collected meta data. """ if self._rs: self._rs.close() # depends on [control=['if'], data=[]] self._rs = None if self._prep: self._prep.close() # depends on [control=['if'], data=[]] self._prep = None self._meta = None self._description = None
def complete_xml_element(self, xmlnode, _unused):
    """Complete the XML node with `self` content.

    Should be overridden in classes derived from `StanzaPayloadObject`.

    :Parameters:
        - `xmlnode`: XML node with the element being built. It already has
          the right name and namespace, but no attributes or content.
        - `_unused`: document to which the element belongs.
    :Types:
        - `xmlnode`: `libxml2.xmlNode`
        - `_unused`: `libxml2.xmlDoc`"""
    tm=self.timestamp.strftime("%Y%m%dT%H:%M:%S")
    xmlnode.setProp("stamp",tm)
    if self.delay_from:
        xmlnode.setProp("from",self.delay_from.as_utf8())
    if self.reason:
        xmlnode.setContent(to_utf8(self.reason))
def function[complete_xml_element, parameter[self, xmlnode, _unused]]:
    constant[Complete the XML node with `self` content. Should be overridden in classes derived from `StanzaPayloadObject`. :Parameters: - `xmlnode`: XML node with the element being built. It already has the right name and namespace, but no attributes or content. - `_unused`: document to which the element belongs. :Types: - `xmlnode`: `libxml2.xmlNode` - `_unused`: `libxml2.xmlDoc`]
    variable[tm] assign[=] call[name[self].timestamp.strftime, parameter[constant[%Y%m%dT%H:%M:%S]]]
    call[name[xmlnode].setProp, parameter[constant[stamp], name[tm]]]
    if name[self].delay_from begin[:]
        call[name[xmlnode].setProp, parameter[constant[from], call[name[self].delay_from.as_utf8, parameter[]]]]
    if name[self].reason begin[:]
        call[name[xmlnode].setContent, parameter[call[name[to_utf8], parameter[name[self].reason]]]]
keyword[def] identifier[complete_xml_element] ( identifier[self] , identifier[xmlnode] , identifier[_unused] ): literal[string] identifier[tm] = identifier[self] . identifier[timestamp] . identifier[strftime] ( literal[string] ) identifier[xmlnode] . identifier[setProp] ( literal[string] , identifier[tm] ) keyword[if] identifier[self] . identifier[delay_from] : identifier[xmlnode] . identifier[setProp] ( literal[string] , identifier[self] . identifier[delay_from] . identifier[as_utf8] ()) keyword[if] identifier[self] . identifier[reason] : identifier[xmlnode] . identifier[setContent] ( identifier[to_utf8] ( identifier[self] . identifier[reason] ))
def complete_xml_element(self, xmlnode, _unused):
    """Complete the XML node with `self` content.

    Should be overridden in classes derived from `StanzaPayloadObject`.

    :Parameters:
        - `xmlnode`: XML node with the element being built. It already has
          the right name and namespace, but no attributes or content.
        - `_unused`: document to which the element belongs.
    :Types:
        - `xmlnode`: `libxml2.xmlNode`
        - `_unused`: `libxml2.xmlDoc`"""
    tm = self.timestamp.strftime('%Y%m%dT%H:%M:%S')
    xmlnode.setProp('stamp', tm)
    if self.delay_from:
        xmlnode.setProp('from', self.delay_from.as_utf8()) # depends on [control=['if'], data=[]]
    if self.reason:
        xmlnode.setContent(to_utf8(self.reason)) # depends on [control=['if'], data=[]]
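The timestamp format in isolation; this looks like the legacy XEP-0091 `CCYYMMDDThh:mm:ss` stamp, and the example date is arbitrary:

from datetime import datetime

stamp = datetime(2024, 1, 31, 12, 5, 0).strftime('%Y%m%dT%H:%M:%S')
assert stamp == '20240131T12:05:00'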
def skip(self): """Skip this py-pdb command to avoid attaching within the same loop.""" line = self.line self.line = '' # 'line' is the statement line of the previous py-pdb command. if line in self.lines: if not self.skipping: self.skipping = True printflush('Skipping lines', end='') printflush('.', end='') return True elif line: self.lines.append(line) if len(self.lines) > 30: self.lines.popleft() return False
def function[skip, parameter[self]]: constant[Skip this py-pdb command to avoid attaching within the same loop.] variable[line] assign[=] name[self].line name[self].line assign[=] constant[] if compare[name[line] in name[self].lines] begin[:] if <ast.UnaryOp object at 0x7da1b0e70490> begin[:] name[self].skipping assign[=] constant[True] call[name[printflush], parameter[constant[Skipping lines]]] call[name[printflush], parameter[constant[.]]] return[constant[True]] return[constant[False]]
keyword[def] identifier[skip] ( identifier[self] ): literal[string] identifier[line] = identifier[self] . identifier[line] identifier[self] . identifier[line] = literal[string] keyword[if] identifier[line] keyword[in] identifier[self] . identifier[lines] : keyword[if] keyword[not] identifier[self] . identifier[skipping] : identifier[self] . identifier[skipping] = keyword[True] identifier[printflush] ( literal[string] , identifier[end] = literal[string] ) identifier[printflush] ( literal[string] , identifier[end] = literal[string] ) keyword[return] keyword[True] keyword[elif] identifier[line] : identifier[self] . identifier[lines] . identifier[append] ( identifier[line] ) keyword[if] identifier[len] ( identifier[self] . identifier[lines] )> literal[int] : identifier[self] . identifier[lines] . identifier[popleft] () keyword[return] keyword[False]
def skip(self): """Skip this py-pdb command to avoid attaching within the same loop.""" line = self.line self.line = '' # 'line' is the statement line of the previous py-pdb command. if line in self.lines: if not self.skipping: self.skipping = True printflush('Skipping lines', end='') # depends on [control=['if'], data=[]] printflush('.', end='') return True # depends on [control=['if'], data=[]] elif line: self.lines.append(line) if len(self.lines) > 30: self.lines.popleft() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return False
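The manual `popleft()` bookkeeping above can also be written with a bounded deque, which evicts the oldest entry automatically once `maxlen` is reached:

from collections import deque

lines = deque(maxlen=30)            # old entries fall off the left end
for stmt in ('a = 1', 'b = 2', 'a = 1'):
    if stmt in lines:
        print('skipping', stmt)     # repeated statement within the window
    else:
        lines.append(stmt)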
def retrieve_object(model, *args, **kwargs): """ Retrieves a specific object from a given model by primary-key lookup, and stores it in a context variable. Syntax:: {% retrieve_object [app_name].[model_name] [lookup kwargs] as [varname] %} Example:: {% retrieve_object flatpages.flatpage pk=12 as my_flat_page %} """ if len(args) == 1: kwargs.update({'pk': args[0]}) _model = _get_model(model) try: return _model._default_manager.get(**kwargs) except _model.DoesNotExist: return ''
def function[retrieve_object, parameter[model]]: constant[ Retrieves a specific object from a given model by primary-key lookup, and stores it in a context variable. Syntax:: {% retrieve_object [app_name].[model_name] [lookup kwargs] as [varname] %} Example:: {% retrieve_object flatpages.flatpage pk=12 as my_flat_page %} ] if compare[call[name[len], parameter[name[args]]] equal[==] constant[1]] begin[:] call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204621300>], [<ast.Subscript object at 0x7da2046239a0>]]]] variable[_model] assign[=] call[name[_get_model], parameter[name[model]]] <ast.Try object at 0x7da204621c60>
keyword[def] identifier[retrieve_object] ( identifier[model] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[len] ( identifier[args] )== literal[int] : identifier[kwargs] . identifier[update] ({ literal[string] : identifier[args] [ literal[int] ]}) identifier[_model] = identifier[_get_model] ( identifier[model] ) keyword[try] : keyword[return] identifier[_model] . identifier[_default_manager] . identifier[get] (** identifier[kwargs] ) keyword[except] identifier[_model] . identifier[DoesNotExist] : keyword[return] literal[string]
def retrieve_object(model, *args, **kwargs): """ Retrieves a specific object from a given model by primary-key lookup, and stores it in a context variable. Syntax:: {% retrieve_object [app_name].[model_name] [lookup kwargs] as [varname] %} Example:: {% retrieve_object flatpages.flatpage pk=12 as my_flat_page %} """ if len(args) == 1: kwargs.update({'pk': args[0]}) # depends on [control=['if'], data=[]] _model = _get_model(model) try: return _model._default_manager.get(**kwargs) # depends on [control=['try'], data=[]] except _model.DoesNotExist: return '' # depends on [control=['except'], data=[]]
def searchNsByHref(self, doc, href): """Search a Ns aliasing a given URI. Recurse on the parents until it finds the defined namespace or return None otherwise. """ if doc is None: doc__o = None else: doc__o = doc._o ret = libxml2mod.xmlSearchNsByHref(doc__o, self._o, href) if ret is None:raise treeError('xmlSearchNsByHref() failed') __tmp = xmlNs(_obj=ret) return __tmp
def function[searchNsByHref, parameter[self, doc, href]]: constant[Search a Ns aliasing a given URI. Recurse on the parents until it finds the defined namespace or return None otherwise. ] if compare[name[doc] is constant[None]] begin[:] variable[doc__o] assign[=] constant[None] variable[ret] assign[=] call[name[libxml2mod].xmlSearchNsByHref, parameter[name[doc__o], name[self]._o, name[href]]] if compare[name[ret] is constant[None]] begin[:] <ast.Raise object at 0x7da1b1fa4af0> variable[__tmp] assign[=] call[name[xmlNs], parameter[]] return[name[__tmp]]
keyword[def] identifier[searchNsByHref] ( identifier[self] , identifier[doc] , identifier[href] ): literal[string] keyword[if] identifier[doc] keyword[is] keyword[None] : identifier[doc__o] = keyword[None] keyword[else] : identifier[doc__o] = identifier[doc] . identifier[_o] identifier[ret] = identifier[libxml2mod] . identifier[xmlSearchNsByHref] ( identifier[doc__o] , identifier[self] . identifier[_o] , identifier[href] ) keyword[if] identifier[ret] keyword[is] keyword[None] : keyword[raise] identifier[treeError] ( literal[string] ) identifier[__tmp] = identifier[xmlNs] ( identifier[_obj] = identifier[ret] ) keyword[return] identifier[__tmp]
def searchNsByHref(self, doc, href): """Search a Ns aliasing a given URI. Recurse on the parents until it finds the defined namespace or return None otherwise. """ if doc is None: doc__o = None # depends on [control=['if'], data=[]] else: doc__o = doc._o ret = libxml2mod.xmlSearchNsByHref(doc__o, self._o, href) if ret is None: raise treeError('xmlSearchNsByHref() failed') # depends on [control=['if'], data=[]] __tmp = xmlNs(_obj=ret) return __tmp
def blit( self, dest: tcod.console.Console, fill_fore: bool = True, fill_back: bool = True, ) -> None: """Use libtcod's "fill" functions to write the buffer to a console. Args: dest (Console): Console object to modify. fill_fore (bool): If True, fill the foreground color and characters. fill_back (bool): If True, fill the background color. """ if not dest: dest = tcod.console.Console._from_cdata(ffi.NULL) if dest.width != self.width or dest.height != self.height: raise ValueError( "ConsoleBuffer.blit: " "Destination console has an incorrect size." ) if fill_back: bg = dest.bg.ravel() bg[0::3] = self.back_r bg[1::3] = self.back_g bg[2::3] = self.back_b if fill_fore: fg = dest.fg.ravel() fg[0::3] = self.fore_r fg[1::3] = self.fore_g fg[2::3] = self.fore_b dest.ch.ravel()[:] = self.char
def function[blit, parameter[self, dest, fill_fore, fill_back]]: constant[Use libtcod's "fill" functions to write the buffer to a console. Args: dest (Console): Console object to modify. fill_fore (bool): If True, fill the foreground color and characters. fill_back (bool): If True, fill the background color. ] if <ast.UnaryOp object at 0x7da18eb56410> begin[:] variable[dest] assign[=] call[name[tcod].console.Console._from_cdata, parameter[name[ffi].NULL]] if <ast.BoolOp object at 0x7da18eb555a0> begin[:] <ast.Raise object at 0x7da18eb54250> if name[fill_back] begin[:] variable[bg] assign[=] call[name[dest].bg.ravel, parameter[]] call[name[bg]][<ast.Slice object at 0x7da18eb54310>] assign[=] name[self].back_r call[name[bg]][<ast.Slice object at 0x7da18eb56500>] assign[=] name[self].back_g call[name[bg]][<ast.Slice object at 0x7da18eb568f0>] assign[=] name[self].back_b if name[fill_fore] begin[:] variable[fg] assign[=] call[name[dest].fg.ravel, parameter[]] call[name[fg]][<ast.Slice object at 0x7da18eb548b0>] assign[=] name[self].fore_r call[name[fg]][<ast.Slice object at 0x7da18bcc9270>] assign[=] name[self].fore_g call[name[fg]][<ast.Slice object at 0x7da18bcc9d50>] assign[=] name[self].fore_b call[call[name[dest].ch.ravel, parameter[]]][<ast.Slice object at 0x7da18bcc85b0>] assign[=] name[self].char
keyword[def] identifier[blit] ( identifier[self] , identifier[dest] : identifier[tcod] . identifier[console] . identifier[Console] , identifier[fill_fore] : identifier[bool] = keyword[True] , identifier[fill_back] : identifier[bool] = keyword[True] , )-> keyword[None] : literal[string] keyword[if] keyword[not] identifier[dest] : identifier[dest] = identifier[tcod] . identifier[console] . identifier[Console] . identifier[_from_cdata] ( identifier[ffi] . identifier[NULL] ) keyword[if] identifier[dest] . identifier[width] != identifier[self] . identifier[width] keyword[or] identifier[dest] . identifier[height] != identifier[self] . identifier[height] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[if] identifier[fill_back] : identifier[bg] = identifier[dest] . identifier[bg] . identifier[ravel] () identifier[bg] [ literal[int] :: literal[int] ]= identifier[self] . identifier[back_r] identifier[bg] [ literal[int] :: literal[int] ]= identifier[self] . identifier[back_g] identifier[bg] [ literal[int] :: literal[int] ]= identifier[self] . identifier[back_b] keyword[if] identifier[fill_fore] : identifier[fg] = identifier[dest] . identifier[fg] . identifier[ravel] () identifier[fg] [ literal[int] :: literal[int] ]= identifier[self] . identifier[fore_r] identifier[fg] [ literal[int] :: literal[int] ]= identifier[self] . identifier[fore_g] identifier[fg] [ literal[int] :: literal[int] ]= identifier[self] . identifier[fore_b] identifier[dest] . identifier[ch] . identifier[ravel] ()[:]= identifier[self] . identifier[char]
def blit(self, dest: tcod.console.Console, fill_fore: bool=True, fill_back: bool=True) -> None: """Use libtcod's "fill" functions to write the buffer to a console. Args: dest (Console): Console object to modify. fill_fore (bool): If True, fill the foreground color and characters. fill_back (bool): If True, fill the background color. """ if not dest: dest = tcod.console.Console._from_cdata(ffi.NULL) # depends on [control=['if'], data=[]] if dest.width != self.width or dest.height != self.height: raise ValueError('ConsoleBuffer.blit: Destination console has an incorrect size.') # depends on [control=['if'], data=[]] if fill_back: bg = dest.bg.ravel() bg[0::3] = self.back_r bg[1::3] = self.back_g bg[2::3] = self.back_b # depends on [control=['if'], data=[]] if fill_fore: fg = dest.fg.ravel() fg[0::3] = self.fore_r fg[1::3] = self.fore_g fg[2::3] = self.fore_b dest.ch.ravel()[:] = self.char # depends on [control=['if'], data=[]]
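The `bg[0::3]`/`bg[1::3]`/`bg[2::3]` writes above rely on `ravel()` returning a view of the C-contiguous RGB array, so every third element belongs to one channel. A self-contained numpy illustration:

import numpy as np

bg = np.zeros((2, 3, 3), dtype=np.uint8)   # height x width x RGB
flat = bg.ravel()                          # a view, not a copy, here
flat[0::3] = 255                           # red channel
flat[1::3] = 128                           # green channel
assert (bg[..., 0] == 255).all() and (bg[..., 1] == 128).all()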
def leave_transaction_management(self) -> None:
    """
    End a transaction.

    Must not be dirty when doing so, i.e. commit() or rollback() must
    be called if changes were made. If dirty, changes will be discarded.
    """
    if len(self._transactions) == 0:
        raise RuntimeError("leave_transaction_management called outside transaction")
    elif len(self._transactions[-1]) > 0:
        raise RuntimeError("leave_transaction_management called with uncommitted rollbacks")
    else:
        self._transactions.pop()
def function[leave_transaction_management, parameter[self]]:
    constant[ End a transaction. Must not be dirty when doing so, i.e. commit() or rollback() must be called if changes were made. If dirty, changes will be discarded. ]
    if compare[call[name[len], parameter[name[self]._transactions]] equal[==] constant[0]] begin[:]
        <ast.Raise object at 0x7da18f09c8e0>
keyword[def] identifier[leave_transaction_management] ( identifier[self] )-> keyword[None] : literal[string] keyword[if] identifier[len] ( identifier[self] . identifier[_transactions] )== literal[int] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[elif] identifier[len] ( identifier[self] . identifier[_transactions] [- literal[int] ])> literal[int] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[else] : identifier[self] . identifier[_transactions] . identifier[pop] ()
def leave_transaction_management(self) -> None:
    """
        End a transaction.

        Must not be dirty when doing so, i.e. commit() or rollback() must
        be called if changes were made. If dirty, changes will be discarded.
        """
    if len(self._transactions) == 0:
        raise RuntimeError('leave_transaction_management called outside transaction') # depends on [control=['if'], data=[]]
    elif len(self._transactions[-1]) > 0:
        raise RuntimeError('leave_transaction_management called with uncommitted rollbacks') # depends on [control=['if'], data=[]]
    else:
        self._transactions.pop()
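A minimal sketch of the stack discipline this method enforces (`TxnStack` is a hypothetical class): entering pushes an empty frame of pending rollbacks, commit/rollback must drain it, and leaving pops the now-empty frame:

class TxnStack:
    def __init__(self):
        self._transactions = []

    def enter(self):
        self._transactions.append([])   # fresh frame of pending rollbacks

    def leave(self):
        if not self._transactions:
            raise RuntimeError('leave called outside transaction')
        if self._transactions[-1]:
            raise RuntimeError('leave called with uncommitted rollbacks')
        self._transactions.pop()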
def get_layout_view(self, request): """ Return the metadata about a layout """ template_name = request.GET['name'] # Check if template is allowed, avoid parsing random templates templates = dict(appconfig.SIMPLECMS_TEMPLATE_CHOICES) if template_name not in templates: jsondata = {'success': False, 'error': 'Template not found'} status = 404 else: # Extract placeholders from the template, and pass to the client. template = get_template(template_name) placeholders = get_template_placeholder_data(template) jsondata = { 'placeholders': [p.as_dict() for p in placeholders], } status = 200 jsonstr = json.dumps(jsondata) return HttpResponse(jsonstr, content_type='application/json', status=status)
def function[get_layout_view, parameter[self, request]]: constant[ Return the metadata about a layout ] variable[template_name] assign[=] call[name[request].GET][constant[name]] variable[templates] assign[=] call[name[dict], parameter[name[appconfig].SIMPLECMS_TEMPLATE_CHOICES]] if compare[name[template_name] <ast.NotIn object at 0x7da2590d7190> name[templates]] begin[:] variable[jsondata] assign[=] dictionary[[<ast.Constant object at 0x7da1b11c2b30>, <ast.Constant object at 0x7da1b11c20e0>], [<ast.Constant object at 0x7da1b11c25f0>, <ast.Constant object at 0x7da1b11c3520>]] variable[status] assign[=] constant[404] variable[jsonstr] assign[=] call[name[json].dumps, parameter[name[jsondata]]] return[call[name[HttpResponse], parameter[name[jsonstr]]]]
keyword[def] identifier[get_layout_view] ( identifier[self] , identifier[request] ): literal[string] identifier[template_name] = identifier[request] . identifier[GET] [ literal[string] ] identifier[templates] = identifier[dict] ( identifier[appconfig] . identifier[SIMPLECMS_TEMPLATE_CHOICES] ) keyword[if] identifier[template_name] keyword[not] keyword[in] identifier[templates] : identifier[jsondata] ={ literal[string] : keyword[False] , literal[string] : literal[string] } identifier[status] = literal[int] keyword[else] : identifier[template] = identifier[get_template] ( identifier[template_name] ) identifier[placeholders] = identifier[get_template_placeholder_data] ( identifier[template] ) identifier[jsondata] ={ literal[string] :[ identifier[p] . identifier[as_dict] () keyword[for] identifier[p] keyword[in] identifier[placeholders] ], } identifier[status] = literal[int] identifier[jsonstr] = identifier[json] . identifier[dumps] ( identifier[jsondata] ) keyword[return] identifier[HttpResponse] ( identifier[jsonstr] , identifier[content_type] = literal[string] , identifier[status] = identifier[status] )
def get_layout_view(self, request): """ Return the metadata about a layout """ template_name = request.GET['name'] # Check if template is allowed, avoid parsing random templates templates = dict(appconfig.SIMPLECMS_TEMPLATE_CHOICES) if template_name not in templates: jsondata = {'success': False, 'error': 'Template not found'} status = 404 # depends on [control=['if'], data=[]] else: # Extract placeholders from the template, and pass to the client. template = get_template(template_name) placeholders = get_template_placeholder_data(template) jsondata = {'placeholders': [p.as_dict() for p in placeholders]} status = 200 jsonstr = json.dumps(jsondata) return HttpResponse(jsonstr, content_type='application/json', status=status)
def get_version(): """Extracts the version number from the version.py file.""" VERSION_FILE = '../malcolm/version.py' mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', open(VERSION_FILE, 'rt').read(), re.M) if mo: return mo.group(1) else: raise RuntimeError( 'Unable to find version string in {0}.'.format(VERSION_FILE))
def function[get_version, parameter[]]: constant[Extracts the version number from the version.py file.] variable[VERSION_FILE] assign[=] constant[../malcolm/version.py] variable[mo] assign[=] call[name[re].search, parameter[constant[^__version__ = [\'"]([^\'"]*)[\'"]], call[call[name[open], parameter[name[VERSION_FILE], constant[rt]]].read, parameter[]], name[re].M]] if name[mo] begin[:] return[call[name[mo].group, parameter[constant[1]]]]
keyword[def] identifier[get_version] (): literal[string] identifier[VERSION_FILE] = literal[string] identifier[mo] = identifier[re] . identifier[search] ( literal[string] , identifier[open] ( identifier[VERSION_FILE] , literal[string] ). identifier[read] (), identifier[re] . identifier[M] ) keyword[if] identifier[mo] : keyword[return] identifier[mo] . identifier[group] ( literal[int] ) keyword[else] : keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[VERSION_FILE] ))
def get_version(): """Extracts the version number from the version.py file.""" VERSION_FILE = '../malcolm/version.py' mo = re.search('^__version__ = [\\\'"]([^\\\'"]*)[\\\'"]', open(VERSION_FILE, 'rt').read(), re.M) if mo: return mo.group(1) # depends on [control=['if'], data=[]] else: raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
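The version-extraction regex can be exercised in isolation; the file contents below are hypothetical, standing in for `../malcolm/version.py`.

import re

# Hypothetical version.py contents, to show what the pattern extracts.
sample = "__version__ = '1.2.3'\n__all__ = []\n"
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', sample, re.M)
print(mo.group(1))  # -> 1.2.3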
def decompress(f): """Decompress a Plan 9 image file. Assumes f is already cued past the initial 'compressed\n' string. """ r = meta(f.read(60)) return r, decomprest(f, r[4])
def function[decompress, parameter[f]]: constant[Decompress a Plan 9 image file. Assumes f is already cued past the initial 'compressed ' string. ] variable[r] assign[=] call[name[meta], parameter[call[name[f].read, parameter[constant[60]]]]] return[tuple[[<ast.Name object at 0x7da1b0781870>, <ast.Call object at 0x7da1b0781270>]]]
keyword[def] identifier[decompress] ( identifier[f] ): literal[string] identifier[r] = identifier[meta] ( identifier[f] . identifier[read] ( literal[int] )) keyword[return] identifier[r] , identifier[decomprest] ( identifier[f] , identifier[r] [ literal[int] ])
def decompress(f):
    """Decompress a Plan 9 image file. Assumes f is
    already cued past the initial 'compressed\n'
    string.
    """
    r = meta(f.read(60))
    return (r, decomprest(f, r[4]))
def validate(schema, data, owner=None): """Validate input data with input schema. :param Schema schema: schema able to validate input data. :param data: data to validate. :param Schema owner: input schema parent schema. :raises: Exception if the data is not validated. """ schema._validate(data=data, owner=owner)
def function[validate, parameter[schema, data, owner]]: constant[Validate input data with input schema. :param Schema schema: schema able to validate input data. :param data: data to validate. :param Schema owner: input schema parent schema. :raises: Exception if the data is not validated. ] call[name[schema]._validate, parameter[]]
keyword[def] identifier[validate] ( identifier[schema] , identifier[data] , identifier[owner] = keyword[None] ): literal[string] identifier[schema] . identifier[_validate] ( identifier[data] = identifier[data] , identifier[owner] = identifier[owner] )
def validate(schema, data, owner=None): """Validate input data with input schema. :param Schema schema: schema able to validate input data. :param data: data to validate. :param Schema owner: input schema parent schema. :raises: Exception if the data is not validated. """ schema._validate(data=data, owner=owner)
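Since `validate` only delegates to the schema's `_validate` hook, any object with that method works; `IntSchema` below is a made-up example, not part of the real schema library.

# Made-up schema exposing the _validate hook that `validate` expects.
class IntSchema(object):
    def _validate(self, data, owner=None):
        if not isinstance(data, int):
            raise TypeError('expected int, got %r' % (data,))

validate(IntSchema(), 42)   # passes silently
try:
    validate(IntSchema(), 'nope')
except TypeError as exc:
    print(exc)              # expected int, got 'nope'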
def put_scheduled_update_group_action(AutoScalingGroupName=None, ScheduledActionName=None, Time=None, StartTime=None, EndTime=None, Recurrence=None, MinSize=None, MaxSize=None, DesiredCapacity=None): """ Creates or updates a scheduled scaling action for an Auto Scaling group. When updating a scheduled scaling action, if you leave a parameter unspecified, the corresponding value remains unchanged. For more information, see Scheduled Scaling in the Auto Scaling User Guide . See also: AWS API Documentation Examples This example adds the specified scheduled action to the specified Auto Scaling group. Expected Output: :example: response = client.put_scheduled_update_group_action( AutoScalingGroupName='string', ScheduledActionName='string', Time=datetime(2015, 1, 1), StartTime=datetime(2015, 1, 1), EndTime=datetime(2015, 1, 1), Recurrence='string', MinSize=123, MaxSize=123, DesiredCapacity=123 ) :type AutoScalingGroupName: string :param AutoScalingGroupName: [REQUIRED] The name or Amazon Resource Name (ARN) of the Auto Scaling group. :type ScheduledActionName: string :param ScheduledActionName: [REQUIRED] The name of this scaling action. :type Time: datetime :param Time: This parameter is deprecated. :type StartTime: datetime :param StartTime: The time for this action to start, in 'YYYY-MM-DDThh:mm:ssZ' format in UTC/GMT only (for example, 2014-06-01T00:00:00Z ). If you specify Recurrence and StartTime , Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence. If you try to schedule your action in the past, Auto Scaling returns an error message. :type EndTime: datetime :param EndTime: The time for the recurring schedule to end. Auto Scaling does not perform the action after this time. :type Recurrence: string :param Recurrence: The recurring schedule for this action, in Unix cron syntax format. For more information, see Cron in Wikipedia. :type MinSize: integer :param MinSize: The minimum size for the Auto Scaling group. :type MaxSize: integer :param MaxSize: The maximum size for the Auto Scaling group. :type DesiredCapacity: integer :param DesiredCapacity: The number of EC2 instances that should be running in the group. :return: response = client.put_scheduled_update_group_action( AutoScalingGroupName='my-auto-scaling-group', DesiredCapacity=4, EndTime=datetime(2014, 5, 12, 8, 0, 0, 0, 132, 0), MaxSize=6, MinSize=2, ScheduledActionName='my-scheduled-action', StartTime=datetime(2014, 5, 12, 8, 0, 0, 0, 132, 0), ) print(response) """ pass
def function[put_scheduled_update_group_action, parameter[AutoScalingGroupName, ScheduledActionName, Time, StartTime, EndTime, Recurrence, MinSize, MaxSize, DesiredCapacity]]: constant[ Creates or updates a scheduled scaling action for an Auto Scaling group. When updating a scheduled scaling action, if you leave a parameter unspecified, the corresponding value remains unchanged. For more information, see Scheduled Scaling in the Auto Scaling User Guide . See also: AWS API Documentation Examples This example adds the specified scheduled action to the specified Auto Scaling group. Expected Output: :example: response = client.put_scheduled_update_group_action( AutoScalingGroupName='string', ScheduledActionName='string', Time=datetime(2015, 1, 1), StartTime=datetime(2015, 1, 1), EndTime=datetime(2015, 1, 1), Recurrence='string', MinSize=123, MaxSize=123, DesiredCapacity=123 ) :type AutoScalingGroupName: string :param AutoScalingGroupName: [REQUIRED] The name or Amazon Resource Name (ARN) of the Auto Scaling group. :type ScheduledActionName: string :param ScheduledActionName: [REQUIRED] The name of this scaling action. :type Time: datetime :param Time: This parameter is deprecated. :type StartTime: datetime :param StartTime: The time for this action to start, in 'YYYY-MM-DDThh:mm:ssZ' format in UTC/GMT only (for example, 2014-06-01T00:00:00Z ). If you specify Recurrence and StartTime , Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence. If you try to schedule your action in the past, Auto Scaling returns an error message. :type EndTime: datetime :param EndTime: The time for the recurring schedule to end. Auto Scaling does not perform the action after this time. :type Recurrence: string :param Recurrence: The recurring schedule for this action, in Unix cron syntax format. For more information, see Cron in Wikipedia. :type MinSize: integer :param MinSize: The minimum size for the Auto Scaling group. :type MaxSize: integer :param MaxSize: The maximum size for the Auto Scaling group. :type DesiredCapacity: integer :param DesiredCapacity: The number of EC2 instances that should be running in the group. :return: response = client.put_scheduled_update_group_action( AutoScalingGroupName='my-auto-scaling-group', DesiredCapacity=4, EndTime=datetime(2014, 5, 12, 8, 0, 0, 0, 132, 0), MaxSize=6, MinSize=2, ScheduledActionName='my-scheduled-action', StartTime=datetime(2014, 5, 12, 8, 0, 0, 0, 132, 0), ) print(response) ] pass
keyword[def] identifier[put_scheduled_update_group_action] ( identifier[AutoScalingGroupName] = keyword[None] , identifier[ScheduledActionName] = keyword[None] , identifier[Time] = keyword[None] , identifier[StartTime] = keyword[None] , identifier[EndTime] = keyword[None] , identifier[Recurrence] = keyword[None] , identifier[MinSize] = keyword[None] , identifier[MaxSize] = keyword[None] , identifier[DesiredCapacity] = keyword[None] ): literal[string] keyword[pass]
def put_scheduled_update_group_action(AutoScalingGroupName=None, ScheduledActionName=None, Time=None, StartTime=None, EndTime=None, Recurrence=None, MinSize=None, MaxSize=None, DesiredCapacity=None): """ Creates or updates a scheduled scaling action for an Auto Scaling group. When updating a scheduled scaling action, if you leave a parameter unspecified, the corresponding value remains unchanged. For more information, see Scheduled Scaling in the Auto Scaling User Guide . See also: AWS API Documentation Examples This example adds the specified scheduled action to the specified Auto Scaling group. Expected Output: :example: response = client.put_scheduled_update_group_action( AutoScalingGroupName='string', ScheduledActionName='string', Time=datetime(2015, 1, 1), StartTime=datetime(2015, 1, 1), EndTime=datetime(2015, 1, 1), Recurrence='string', MinSize=123, MaxSize=123, DesiredCapacity=123 ) :type AutoScalingGroupName: string :param AutoScalingGroupName: [REQUIRED] The name or Amazon Resource Name (ARN) of the Auto Scaling group. :type ScheduledActionName: string :param ScheduledActionName: [REQUIRED] The name of this scaling action. :type Time: datetime :param Time: This parameter is deprecated. :type StartTime: datetime :param StartTime: The time for this action to start, in 'YYYY-MM-DDThh:mm:ssZ' format in UTC/GMT only (for example, 2014-06-01T00:00:00Z ). If you specify Recurrence and StartTime , Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence. If you try to schedule your action in the past, Auto Scaling returns an error message. :type EndTime: datetime :param EndTime: The time for the recurring schedule to end. Auto Scaling does not perform the action after this time. :type Recurrence: string :param Recurrence: The recurring schedule for this action, in Unix cron syntax format. For more information, see Cron in Wikipedia. :type MinSize: integer :param MinSize: The minimum size for the Auto Scaling group. :type MaxSize: integer :param MaxSize: The maximum size for the Auto Scaling group. :type DesiredCapacity: integer :param DesiredCapacity: The number of EC2 instances that should be running in the group. :return: response = client.put_scheduled_update_group_action( AutoScalingGroupName='my-auto-scaling-group', DesiredCapacity=4, EndTime=datetime(2014, 5, 12, 8, 0, 0, 0, 132, 0), MaxSize=6, MinSize=2, ScheduledActionName='my-scheduled-action', StartTime=datetime(2014, 5, 12, 8, 0, 0, 0, 132, 0), ) print(response) """ pass
def remove_pid_file(process_name): """ removes pid file """ pid_filename = get_pid_filename(process_name) try: os.remove(pid_filename) print('Removed pid file at: {0}'.format(pid_filename), file=sys.stdout) except Exception as e: print('Unable to remove pid file at: {0}, because of: {1}'.format(pid_filename, e), file=sys.stderr)
def function[remove_pid_file, parameter[process_name]]: constant[ removes pid file ] variable[pid_filename] assign[=] call[name[get_pid_filename], parameter[name[process_name]]] <ast.Try object at 0x7da1b2440c70>
keyword[def] identifier[remove_pid_file] ( identifier[process_name] ): literal[string] identifier[pid_filename] = identifier[get_pid_filename] ( identifier[process_name] ) keyword[try] : identifier[os] . identifier[remove] ( identifier[pid_filename] ) identifier[print] ( literal[string] . identifier[format] ( identifier[pid_filename] ), identifier[file] = identifier[sys] . identifier[stdout] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print] ( literal[string] . identifier[format] ( identifier[pid_filename] , identifier[e] ), identifier[file] = identifier[sys] . identifier[stderr] )
def remove_pid_file(process_name): """ removes pid file """ pid_filename = get_pid_filename(process_name) try: os.remove(pid_filename) print('Removed pid file at: {0}'.format(pid_filename), file=sys.stdout) # depends on [control=['try'], data=[]] except Exception as e: print('Unable to remove pid file at: {0}, because of: {1}'.format(pid_filename, e), file=sys.stderr) # depends on [control=['except'], data=['e']]
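`get_pid_filename` is defined elsewhere in the module; the stand-in below is an assumption that makes the function runnable end to end.

import os
import sys
import tempfile

# Stand-in for the real get_pid_filename, which is not shown in this entry.
def get_pid_filename(process_name):
    return os.path.join(tempfile.gettempdir(), '%s.pid' % process_name)

with open(get_pid_filename('worker'), 'w') as f:
    f.write(str(os.getpid()))
remove_pid_file('worker')   # prints "Removed pid file at: ..."
remove_pid_file('worker')   # second call prints the "Unable to remove" message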
def list_ignored():
    '''
    List all updates that have been ignored. Ignored updates are shown
    without the '-' and version number at the end; this is how the
    softwareupdate command works.

    :return: The list of ignored updates
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.list_ignored
    '''
    cmd = ['softwareupdate', '--list', '--ignore']
    out = salt.utils.mac_utils.execute_return_result(cmd)

    # rexp parses lines that look like the following:
    #     "Safari6.1.2MountainLion-6.1.2",
    # or:
    #     Safari6.1.2MountainLion-6.1.2
    rexp = re.compile('(?m)^ ["]?'
                      r'([^,|\s].*[^"|\n|,])[,|"]?')

    return rexp.findall(out)
def function[list_ignored, parameter[]]: constant[ List all updates that have been ignored. Ignored updates are shown without the '-' and version number at the end, this is how the softwareupdate command works. :return: The list of ignored updates :rtype: list CLI Example: .. code-block:: bash salt '*' softwareupdate.list_ignored ] variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18bc71570>, <ast.Constant object at 0x7da18bc72470>, <ast.Constant object at 0x7da18bc72c20>]] variable[out] assign[=] call[name[salt].utils.mac_utils.execute_return_result, parameter[name[cmd]]] variable[rexp] assign[=] call[name[re].compile, parameter[constant[(?m)^ ["]?([^,|\s].*[^"|\n|,])[,|"]?]]] return[call[name[rexp].findall, parameter[name[out]]]]
keyword[def] identifier[list_ignored] (): literal[string] identifier[cmd] =[ literal[string] , literal[string] , literal[string] ] identifier[out] = identifier[salt] . identifier[utils] . identifier[mac_utils] . identifier[execute_return_result] ( identifier[cmd] ) identifier[rexp] = identifier[re] . identifier[compile] ( literal[string] literal[string] ) keyword[return] identifier[rexp] . identifier[findall] ( identifier[out] )
def list_ignored():
    """
    List all updates that have been ignored. Ignored updates are shown
    without the '-' and version number at the end; this is how the
    softwareupdate command works.

    :return: The list of ignored updates
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' softwareupdate.list_ignored
    """
    cmd = ['softwareupdate', '--list', '--ignore']
    out = salt.utils.mac_utils.execute_return_result(cmd)
    # rexp parses lines that look like the following:
    #     "Safari6.1.2MountainLion-6.1.2",
    # or:
    #     Safari6.1.2MountainLion-6.1.2
    rexp = re.compile('(?m)^ ["]?([^,|\\s].*[^"|\\n|,])[,|"]?')
    return rexp.findall(out)
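The regex can be checked against a hand-written sample of `softwareupdate` output; the update names here are illustrative only.

import re

sample = ' "Safari6.1.2MountainLion-6.1.2",\n Safari6.1.2MountainLion-6.1.2\n'
rexp = re.compile('(?m)^ ["]?' r'([^,|\s].*[^"|\n|,])[,|"]?')
print(rexp.findall(sample))
# -> ['Safari6.1.2MountainLion-6.1.2', 'Safari6.1.2MountainLion-6.1.2']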
def creep_data(data_set='creep_rupture'): """Brun and Yoshida's metal creep rupture data.""" if not data_available(data_set): download_data(data_set) path = os.path.join(data_path, data_set) tar_file = os.path.join(path, 'creeprupt.tar') tar = tarfile.open(tar_file) print('Extracting file.') tar.extractall(path=path) tar.close() all_data = np.loadtxt(os.path.join(data_path, data_set, 'taka')) y = all_data[:, 1:2].copy() features = [0] features.extend(range(2, 31)) X = all_data[:, features].copy() return data_details_return({'X': X, 'y': y}, data_set)
def function[creep_data, parameter[data_set]]: constant[Brun and Yoshida's metal creep rupture data.] if <ast.UnaryOp object at 0x7da1b1c7e860> begin[:] call[name[download_data], parameter[name[data_set]]] variable[path] assign[=] call[name[os].path.join, parameter[name[data_path], name[data_set]]] variable[tar_file] assign[=] call[name[os].path.join, parameter[name[path], constant[creeprupt.tar]]] variable[tar] assign[=] call[name[tarfile].open, parameter[name[tar_file]]] call[name[print], parameter[constant[Extracting file.]]] call[name[tar].extractall, parameter[]] call[name[tar].close, parameter[]] variable[all_data] assign[=] call[name[np].loadtxt, parameter[call[name[os].path.join, parameter[name[data_path], name[data_set], constant[taka]]]]] variable[y] assign[=] call[call[name[all_data]][tuple[[<ast.Slice object at 0x7da1b1c7c4c0>, <ast.Slice object at 0x7da1b1c7ed70>]]].copy, parameter[]] variable[features] assign[=] list[[<ast.Constant object at 0x7da1b1c7da80>]] call[name[features].extend, parameter[call[name[range], parameter[constant[2], constant[31]]]]] variable[X] assign[=] call[call[name[all_data]][tuple[[<ast.Slice object at 0x7da1b1c7dab0>, <ast.Name object at 0x7da1b1c7ca60>]]].copy, parameter[]] return[call[name[data_details_return], parameter[dictionary[[<ast.Constant object at 0x7da1b1c7e740>, <ast.Constant object at 0x7da1b1c7c6d0>], [<ast.Name object at 0x7da1b1c7c4f0>, <ast.Name object at 0x7da1b1c7d750>]], name[data_set]]]]
keyword[def] identifier[creep_data] ( identifier[data_set] = literal[string] ): literal[string] keyword[if] keyword[not] identifier[data_available] ( identifier[data_set] ): identifier[download_data] ( identifier[data_set] ) identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[data_set] ) identifier[tar_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] ) identifier[tar] = identifier[tarfile] . identifier[open] ( identifier[tar_file] ) identifier[print] ( literal[string] ) identifier[tar] . identifier[extractall] ( identifier[path] = identifier[path] ) identifier[tar] . identifier[close] () identifier[all_data] = identifier[np] . identifier[loadtxt] ( identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[data_set] , literal[string] )) identifier[y] = identifier[all_data] [:, literal[int] : literal[int] ]. identifier[copy] () identifier[features] =[ literal[int] ] identifier[features] . identifier[extend] ( identifier[range] ( literal[int] , literal[int] )) identifier[X] = identifier[all_data] [:, identifier[features] ]. identifier[copy] () keyword[return] identifier[data_details_return] ({ literal[string] : identifier[X] , literal[string] : identifier[y] }, identifier[data_set] )
def creep_data(data_set='creep_rupture'): """Brun and Yoshida's metal creep rupture data.""" if not data_available(data_set): download_data(data_set) path = os.path.join(data_path, data_set) tar_file = os.path.join(path, 'creeprupt.tar') tar = tarfile.open(tar_file) print('Extracting file.') tar.extractall(path=path) tar.close() # depends on [control=['if'], data=[]] all_data = np.loadtxt(os.path.join(data_path, data_set, 'taka')) y = all_data[:, 1:2].copy() features = [0] features.extend(range(2, 31)) X = all_data[:, features].copy() return data_details_return({'X': X, 'y': y}, data_set)
def write_csv_header(mol, csv_writer):
    """
    Write the csv header
    """

    # build the list that will hold this line's elements
    line = []

    # ID
    line.append('id')

    # status
    line.append('status')

    # query labels
    queryList = mol.properties.keys()
    for queryLabel in queryList:
        line.append(queryLabel)

    # write line
    csv_writer.writerow(line)
def function[write_csv_header, parameter[mol, csv_writer]]: constant[ Write the csv header ] variable[line] assign[=] list[[]] call[name[line].append, parameter[constant[id]]] call[name[line].append, parameter[constant[status]]] variable[queryList] assign[=] call[name[mol].properties.keys, parameter[]] for taget[name[queryLabel]] in starred[name[queryList]] begin[:] call[name[line].append, parameter[name[queryLabel]]] call[name[csv_writer].writerow, parameter[name[line]]]
keyword[def] identifier[write_csv_header] ( identifier[mol] , identifier[csv_writer] ): literal[string] identifier[line] =[] identifier[line] . identifier[append] ( literal[string] ) identifier[line] . identifier[append] ( literal[string] ) identifier[queryList] = identifier[mol] . identifier[properties] . identifier[keys] () keyword[for] identifier[queryLabel] keyword[in] identifier[queryList] : identifier[line] . identifier[append] ( identifier[queryLabel] ) identifier[csv_writer] . identifier[writerow] ( identifier[line] )
def write_csv_header(mol, csv_writer):
    """
    Write the csv header
    """
    # build the list that will hold this line's elements
    line = []
    # ID
    line.append('id')
    # status
    line.append('status')
    # query labels
    queryList = mol.properties.keys()
    for queryLabel in queryList:
        line.append(queryLabel) # depends on [control=['for'], data=['queryLabel']]
    # write line
    csv_writer.writerow(line)
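A runnable check with an in-memory writer; `Mol` is a minimal stand-in for whatever molecule object the caller passes, and the header order follows the order of its `properties` keys.

import csv
import io

# Minimal stand-in exposing the `properties` mapping the function reads.
class Mol(object):
    properties = {'mw': 180.16, 'logp': -3.1}

buf = io.StringIO()
write_csv_header(Mol(), csv.writer(buf))
print(buf.getvalue().strip())  # -> id,status,mw,logp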
def from_array(array): """ Deserialize a new ChosenInlineResult from a given dictionary. :return: new ChosenInlineResult instance. :rtype: ChosenInlineResult """ if array is None or not array: return None # end if assert_type_or_raise(array, dict, parameter_name="array") from ..receivable.media import Location from ..receivable.peer import User data = {} data['result_id'] = u(array.get('result_id')) data['from_peer'] = User.from_array(array.get('from')) data['query'] = u(array.get('query')) data['location'] = Location.from_array(array.get('location')) if array.get('location') is not None else None data['inline_message_id'] = u(array.get('inline_message_id')) if array.get('inline_message_id') is not None else None data['_raw'] = array return ChosenInlineResult(**data)
def function[from_array, parameter[array]]: constant[ Deserialize a new ChosenInlineResult from a given dictionary. :return: new ChosenInlineResult instance. :rtype: ChosenInlineResult ] if <ast.BoolOp object at 0x7da18f58d120> begin[:] return[constant[None]] call[name[assert_type_or_raise], parameter[name[array], name[dict]]] from relative_module[receivable.media] import module[Location] from relative_module[receivable.peer] import module[User] variable[data] assign[=] dictionary[[], []] call[name[data]][constant[result_id]] assign[=] call[name[u], parameter[call[name[array].get, parameter[constant[result_id]]]]] call[name[data]][constant[from_peer]] assign[=] call[name[User].from_array, parameter[call[name[array].get, parameter[constant[from]]]]] call[name[data]][constant[query]] assign[=] call[name[u], parameter[call[name[array].get, parameter[constant[query]]]]] call[name[data]][constant[location]] assign[=] <ast.IfExp object at 0x7da18f58e890> call[name[data]][constant[inline_message_id]] assign[=] <ast.IfExp object at 0x7da1b04d6080> call[name[data]][constant[_raw]] assign[=] name[array] return[call[name[ChosenInlineResult], parameter[]]]
keyword[def] identifier[from_array] ( identifier[array] ): literal[string] keyword[if] identifier[array] keyword[is] keyword[None] keyword[or] keyword[not] identifier[array] : keyword[return] keyword[None] identifier[assert_type_or_raise] ( identifier[array] , identifier[dict] , identifier[parameter_name] = literal[string] ) keyword[from] .. identifier[receivable] . identifier[media] keyword[import] identifier[Location] keyword[from] .. identifier[receivable] . identifier[peer] keyword[import] identifier[User] identifier[data] ={} identifier[data] [ literal[string] ]= identifier[u] ( identifier[array] . identifier[get] ( literal[string] )) identifier[data] [ literal[string] ]= identifier[User] . identifier[from_array] ( identifier[array] . identifier[get] ( literal[string] )) identifier[data] [ literal[string] ]= identifier[u] ( identifier[array] . identifier[get] ( literal[string] )) identifier[data] [ literal[string] ]= identifier[Location] . identifier[from_array] ( identifier[array] . identifier[get] ( literal[string] )) keyword[if] identifier[array] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] identifier[data] [ literal[string] ]= identifier[u] ( identifier[array] . identifier[get] ( literal[string] )) keyword[if] identifier[array] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] identifier[data] [ literal[string] ]= identifier[array] keyword[return] identifier[ChosenInlineResult] (** identifier[data] )
def from_array(array): """ Deserialize a new ChosenInlineResult from a given dictionary. :return: new ChosenInlineResult instance. :rtype: ChosenInlineResult """ if array is None or not array: return None # depends on [control=['if'], data=[]] # end if assert_type_or_raise(array, dict, parameter_name='array') from ..receivable.media import Location from ..receivable.peer import User data = {} data['result_id'] = u(array.get('result_id')) data['from_peer'] = User.from_array(array.get('from')) data['query'] = u(array.get('query')) data['location'] = Location.from_array(array.get('location')) if array.get('location') is not None else None data['inline_message_id'] = u(array.get('inline_message_id')) if array.get('inline_message_id') is not None else None data['_raw'] = array return ChosenInlineResult(**data)
def shl(computation: BaseComputation) -> None: """ Bitwise left shift """ shift_length, value = computation.stack_pop(num_items=2, type_hint=constants.UINT256) if shift_length >= 256: result = 0 else: result = (value << shift_length) & constants.UINT_256_MAX computation.stack_push(result)
def function[shl, parameter[computation]]: constant[ Bitwise left shift ] <ast.Tuple object at 0x7da1b175dd20> assign[=] call[name[computation].stack_pop, parameter[]] if compare[name[shift_length] greater_or_equal[>=] constant[256]] begin[:] variable[result] assign[=] constant[0] call[name[computation].stack_push, parameter[name[result]]]
keyword[def] identifier[shl] ( identifier[computation] : identifier[BaseComputation] )-> keyword[None] : literal[string] identifier[shift_length] , identifier[value] = identifier[computation] . identifier[stack_pop] ( identifier[num_items] = literal[int] , identifier[type_hint] = identifier[constants] . identifier[UINT256] ) keyword[if] identifier[shift_length] >= literal[int] : identifier[result] = literal[int] keyword[else] : identifier[result] =( identifier[value] << identifier[shift_length] )& identifier[constants] . identifier[UINT_256_MAX] identifier[computation] . identifier[stack_push] ( identifier[result] )
def shl(computation: BaseComputation) -> None: """ Bitwise left shift """ (shift_length, value) = computation.stack_pop(num_items=2, type_hint=constants.UINT256) if shift_length >= 256: result = 0 # depends on [control=['if'], data=[]] else: result = value << shift_length & constants.UINT_256_MAX computation.stack_push(result)
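The opcode's arithmetic, separated from the stack plumbing; `UINT_256_MAX` is written out explicitly since the `constants` module is not shown in this entry.

# SHL semantics in isolation: shift left, then truncate to a 256-bit word.
UINT_256_MAX = 2 ** 256 - 1

def shl_value(shift_length, value):
    if shift_length >= 256:
        return 0
    return (value << shift_length) & UINT_256_MAX

print(hex(shl_value(4, 0xff)))   # 0xff0
print(shl_value(1, 2 ** 255))    # 0: the top bit is shifted out
print(shl_value(300, 1))         # 0: shifts of 256 or more clear the word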
def create(self, fullname, shortname, category_id, **kwargs):
    """
    Create a new course

    :param string fullname: The course's fullname
    :param string shortname: The course's shortname
    :param int category_id: The course's category

    :keyword string idnumber: (optional) Course ID number. \
        Yes, it's a string, blame Moodle.
    :keyword int summaryformat: (optional) Defaults to 1 (HTML). \
        Summary format options: (1 = HTML, 0 = Moodle, 2 = Plain, \
        or 4 = Markdown)
    :keyword string format: (optional) Defaults to "topics"
        Topic options: (weeks, topics, social, site)
    :keyword bool showgrades: (optional) Defaults to True. \
        Determines if grades are shown
    :keyword int newsitems: (optional) Defaults to 5. \
        Number of recent items appearing on the course page
    :keyword int startdate: (optional) Timestamp when the course starts
    :keyword int maxbytes: (optional) Defaults to 83886080. \
        Largest size of file that can be uploaded into the course
    :keyword bool showreports: Defaults to True. Are activity reports shown?
    :keyword bool visible: (optional) Determines if course is \
        visible to students
    :keyword int groupmode: (optional) Defaults to 2.
        options: (0 = no group, 1 = separate, 2 = visible)
    :keyword bool groupmodeforce: (optional) Defaults to False. \
        Force group mode
    :keyword int defaultgroupingid: (optional) Defaults to 0. \
        Default grouping id
    :keyword bool enablecompletion: (optional) Enable control via \
        completion in activity settings.
    :keyword bool completionstartonenrol: (optional) \
        Begin tracking a student's progress in course completion after
    :keyword bool completionnotify: (optional) Default? Dunno. \
        Presumably notifies course completion
    :keyword string lang: (optional) Force course language.
    :keyword string forcetheme: (optional) Name of the force theme

    Example Usage::

    >>> import muddle
    >>> muddle.course().create('a new course', 'new-course', 20)
    """

    allowed_options = ['idnumber', 'summaryformat', 'format',
                       'showgrades', 'newsitems', 'startdate',
                       'maxbytes', 'showreports', 'visible',
                       'groupmode', 'groupmodeforce',
                       'defaultgroupingid', 'enablecompletion',
                       'completionstartonenrol', 'completionnotify',
                       'lang', 'forcetheme']

    if valid_options(kwargs, allowed_options):
        option_params = {}
        for index, key in enumerate(kwargs):
            val = kwargs.get(key)
            if isinstance(val, bool):
                val = int(val)
            option_params.update({'courses[0][' + key + ']': val})

        params = {'wsfunction': 'core_course_create_courses',
                  'courses[0][fullname]': fullname,
                  'courses[0][shortname]': shortname,
                  'courses[0][categoryid]': category_id}
        params.update(option_params)
        params.update(self.request_params)
        return requests.post(self.api_url, params=params, verify=False)
def function[create, parameter[self, fullname, shortname, category_id]]: constant[ Create a new course :param string fullname: The course's fullname :param string shortname: The course's shortname :param int category_id: The course's category :keyword string idnumber: (optional) Course ID number. Yes, it's a string, blame Moodle. :keyword int summaryformat: (optional) Defaults to 1 (HTML). Summary format options: (1 = HTML, 0 = Moodle, 2 = Plain, or 4 = Markdown) :keyword string format: (optional) Defaults to "topics" Topic options: (weeks, topics, social, site) :keyword bool showgrades: (optional) Defaults to True. Determines if grades are shown :keyword int newsitems: (optional) Defaults to 5. Number of recent items appearing on the course page :keyword bool startdate: (optional) Timestamp when the course start :keyword int maxbytes: (optional) Defaults to 83886080. Largest size of file that can be uploaded into the course :keyword bool showreports: Default to True. Are activity report shown? :keyword bool visible: (optional) Determines if course is visible to students :keyword int groupmode: (optional) Defaults to 2. options: (0 = no group, 1 = separate, 2 = visible) :keyword bool groupmodeforce: (optional) Defaults to False. Force group mode :keyword int defaultgroupingid: (optional) Defaults to 0. Default grouping id :keyword bool enablecompletion: (optional) Enable control via completion in activity settings. :keyword bool completionstartonenrol: (optional) Begin tracking a student's progress in course completion after :keyword bool completionnotify: (optional) Default? Dunno. Presumably notifies course completion :keyword string lang: (optional) Force course language. :keyword string forcetheme: (optional) Name of the force theme Example Usage:: >>> import muddle >>> muddle.course().create('a new course', 'new-course', 20) ] variable[allowed_options] assign[=] list[[<ast.Constant object at 0x7da1b0aa6c20>, <ast.Constant object at 0x7da1b0aa6800>, <ast.Constant object at 0x7da1b0aa6b00>, <ast.Constant object at 0x7da1b0aa4dc0>, <ast.Constant object at 0x7da1b0aa4e50>, <ast.Constant object at 0x7da1b0aa7c10>, <ast.Constant object at 0x7da1b0aa40a0>, <ast.Constant object at 0x7da1b0aa52d0>, <ast.Constant object at 0x7da1b0aa6830>, <ast.Constant object at 0x7da1b0aa5360>, <ast.Constant object at 0x7da1b0aa4e20>, <ast.Constant object at 0x7da1b0aa6d70>, <ast.Constant object at 0x7da1b0aa7ee0>, <ast.Constant object at 0x7da1b0aa69e0>, <ast.Constant object at 0x7da1b0aa79d0>, <ast.Constant object at 0x7da1b0aa4520>, <ast.Constant object at 0x7da1b0aa5d20>]] if call[name[valid_options], parameter[name[kwargs], name[allowed_options]]] begin[:] variable[option_params] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b0aa7b50>, <ast.Name object at 0x7da1b0aa6650>]]] in starred[call[name[enumerate], parameter[name[kwargs]]]] begin[:] variable[val] assign[=] call[name[kwargs].get, parameter[name[key]]] if call[name[isinstance], parameter[name[val], name[bool]]] begin[:] variable[val] assign[=] call[name[int], parameter[name[val]]] call[name[option_params].update, parameter[dictionary[[<ast.BinOp object at 0x7da1b0aa5090>], [<ast.Name object at 0x7da1b0aa5060>]]]] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0aa7880>, <ast.Constant object at 0x7da1b0aa5ed0>, <ast.Constant object at 0x7da1b0aa4c70>, <ast.Constant object at 0x7da1b0aa69b0>], [<ast.Constant object at 0x7da1b0aa5ae0>, <ast.Name object at 0x7da1b0aa7b20>, <ast.Name object at 
0x7da1b0aa67a0>, <ast.Name object at 0x7da1b0aa6b30>]] call[name[params].update, parameter[name[option_params]]] call[name[params].update, parameter[name[self].request_params]] return[call[name[requests].post, parameter[name[self].api_url]]]
keyword[def] identifier[create] ( identifier[self] , identifier[fullname] , identifier[shortname] , identifier[category_id] ,** identifier[kwargs] ): literal[string] identifier[allowed_options] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] identifier[valid_options] ( identifier[kwargs] , identifier[allowed_options] ): identifier[option_params] ={} keyword[for] identifier[index] , identifier[key] keyword[in] identifier[enumerate] ( identifier[kwargs] ): identifier[val] = identifier[kwargs] . identifier[get] ( identifier[key] ) keyword[if] identifier[isinstance] ( identifier[val] , identifier[bool] ): identifier[val] = identifier[int] ( identifier[val] ) identifier[option_params] . identifier[update] ({ literal[string] + identifier[key] + literal[string] : identifier[val] }) identifier[params] ={ literal[string] : literal[string] , literal[string] : identifier[fullname] , literal[string] : identifier[shortname] , literal[string] : identifier[category_id] } identifier[params] . identifier[update] ( identifier[option_params] ) identifier[params] . identifier[update] ( identifier[self] . identifier[request_params] ) keyword[return] identifier[requests] . identifier[post] ( identifier[self] . identifier[api_url] , identifier[params] = identifier[params] , identifier[verify] = keyword[False] )
def create(self, fullname, shortname, category_id, **kwargs):
    """
    Create a new course

    :param string fullname: The course's fullname
    :param string shortname: The course's shortname
    :param int category_id: The course's category

    :keyword string idnumber: (optional) Course ID number. Yes, it's a string, blame Moodle.
    :keyword int summaryformat: (optional) Defaults to 1 (HTML). Summary format options: (1 = HTML, 0 = Moodle, 2 = Plain, or 4 = Markdown)
    :keyword string format: (optional) Defaults to "topics" Topic options: (weeks, topics, social, site)
    :keyword bool showgrades: (optional) Defaults to True. Determines if grades are shown
    :keyword int newsitems: (optional) Defaults to 5. Number of recent items appearing on the course page
    :keyword int startdate: (optional) Timestamp when the course starts
    :keyword int maxbytes: (optional) Defaults to 83886080. Largest size of file that can be uploaded into the course
    :keyword bool showreports: Defaults to True. Are activity reports shown?
    :keyword bool visible: (optional) Determines if course is visible to students
    :keyword int groupmode: (optional) Defaults to 2. options: (0 = no group, 1 = separate, 2 = visible)
    :keyword bool groupmodeforce: (optional) Defaults to False. Force group mode
    :keyword int defaultgroupingid: (optional) Defaults to 0. Default grouping id
    :keyword bool enablecompletion: (optional) Enable control via completion in activity settings.
    :keyword bool completionstartonenrol: (optional) Begin tracking a student's progress in course completion after
    :keyword bool completionnotify: (optional) Default? Dunno. Presumably notifies course completion
    :keyword string lang: (optional) Force course language.
    :keyword string forcetheme: (optional) Name of the force theme

    Example Usage::

    >>> import muddle
    >>> muddle.course().create('a new course', 'new-course', 20)
    """
    allowed_options = ['idnumber', 'summaryformat', 'format', 'showgrades', 'newsitems', 'startdate', 'maxbytes', 'showreports', 'visible', 'groupmode', 'groupmodeforce', 'defaultgroupingid', 'enablecompletion', 'completionstartonenrol', 'completionnotify', 'lang', 'forcetheme']
    if valid_options(kwargs, allowed_options):
        option_params = {}
        for (index, key) in enumerate(kwargs):
            val = kwargs.get(key)
            if isinstance(val, bool):
                val = int(val) # depends on [control=['if'], data=[]]
            option_params.update({'courses[0][' + key + ']': val}) # depends on [control=['for'], data=[]]
        params = {'wsfunction': 'core_course_create_courses', 'courses[0][fullname]': fullname, 'courses[0][shortname]': shortname, 'courses[0][categoryid]': category_id}
        params.update(option_params)
        params.update(self.request_params)
        return requests.post(self.api_url, params=params, verify=False) # depends on [control=['if'], data=[]]
def get_protocol(self,sweep): """ given a sweep, return the protocol as [Xs,Ys]. This is good for plotting/recreating the protocol trace. There may be duplicate numbers. """ self.setsweep(sweep) return list(self.protoX),list(self.protoY)
def function[get_protocol, parameter[self, sweep]]: constant[ given a sweep, return the protocol as [Xs,Ys]. This is good for plotting/recreating the protocol trace. There may be duplicate numbers. ] call[name[self].setsweep, parameter[name[sweep]]] return[tuple[[<ast.Call object at 0x7da1afe573a0>, <ast.Call object at 0x7da1afe57010>]]]
keyword[def] identifier[get_protocol] ( identifier[self] , identifier[sweep] ): literal[string] identifier[self] . identifier[setsweep] ( identifier[sweep] ) keyword[return] identifier[list] ( identifier[self] . identifier[protoX] ), identifier[list] ( identifier[self] . identifier[protoY] )
def get_protocol(self, sweep): """ given a sweep, return the protocol as [Xs,Ys]. This is good for plotting/recreating the protocol trace. There may be duplicate numbers. """ self.setsweep(sweep) return (list(self.protoX), list(self.protoY))
def randomize_nick(cls, base, suffix_length=3): """ Generates a pseudo-random nickname. :param base: prefix to use for the generated nickname. :type base: unicode :param suffix_length: amount of digits to append to `base` :type suffix_length: int :return: generated nickname. :rtype: unicode """ suffix = u''.join(choice(u'0123456789') for _ in range(suffix_length)) return u'{0}{1}'.format(base, suffix)
def function[randomize_nick, parameter[cls, base, suffix_length]]: constant[ Generates a pseudo-random nickname. :param base: prefix to use for the generated nickname. :type base: unicode :param suffix_length: amount of digits to append to `base` :type suffix_length: int :return: generated nickname. :rtype: unicode ] variable[suffix] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da207f9b4f0>]] return[call[constant[{0}{1}].format, parameter[name[base], name[suffix]]]]
keyword[def] identifier[randomize_nick] ( identifier[cls] , identifier[base] , identifier[suffix_length] = literal[int] ): literal[string] identifier[suffix] = literal[string] . identifier[join] ( identifier[choice] ( literal[string] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[suffix_length] )) keyword[return] literal[string] . identifier[format] ( identifier[base] , identifier[suffix] )
def randomize_nick(cls, base, suffix_length=3): """ Generates a pseudo-random nickname. :param base: prefix to use for the generated nickname. :type base: unicode :param suffix_length: amount of digits to append to `base` :type suffix_length: int :return: generated nickname. :rtype: unicode """ suffix = u''.join((choice(u'0123456789') for _ in range(suffix_length))) return u'{0}{1}'.format(base, suffix)
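A module-level version for a quick run, assuming the `choice` in scope is `random.choice`; the original is a classmethod on a class not shown in this entry.

from random import choice

def randomize_nick(base, suffix_length=3):
    suffix = u''.join(choice(u'0123456789') for _ in range(suffix_length))
    return u'{0}{1}'.format(base, suffix)

print(randomize_nick(u'guest'))      # e.g. guest482
print(randomize_nick(u'guest', 5))   # e.g. guest17305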
def postinit(self, elt=None, generators=None): """Do some setup after initialisation. :param elt: The element that forms the output of the expression. :type elt: NodeNG or None :param generators: The generators that are looped through. :type generators: list(Comprehension) or None """ self.elt = elt self.generators = generators
def function[postinit, parameter[self, elt, generators]]: constant[Do some setup after initialisation. :param elt: The element that forms the output of the expression. :type elt: NodeNG or None :param generators: The generators that are looped through. :type generators: list(Comprehension) or None ] name[self].elt assign[=] name[elt] name[self].generators assign[=] name[generators]
keyword[def] identifier[postinit] ( identifier[self] , identifier[elt] = keyword[None] , identifier[generators] = keyword[None] ): literal[string] identifier[self] . identifier[elt] = identifier[elt] identifier[self] . identifier[generators] = identifier[generators]
def postinit(self, elt=None, generators=None): """Do some setup after initialisation. :param elt: The element that forms the output of the expression. :type elt: NodeNG or None :param generators: The generators that are looped through. :type generators: list(Comprehension) or None """ self.elt = elt self.generators = generators
def get_region():
    """Gets the AWS Region ID for this system

    :return: (str) AWS Region ID where this system lives
    """
    log = logging.getLogger(mod_logger + '.get_region')

    # First get the availability zone
    availability_zone = get_availability_zone()

    if availability_zone is None:
        msg = 'Unable to determine the Availability Zone for this system, cannot determine the AWS Region'
        log.error(msg)
        return

    # Strip off the last character to get the region
    region = availability_zone[:-1]
    return region
def function[get_region, parameter[]]: constant[Gets the AWS Region ID for this system :return: (str) AWS Region ID where this system lives ] variable[log] assign[=] call[name[logging].getLogger, parameter[binary_operation[name[mod_logger] + constant[.get_region]]]] variable[availability_zone] assign[=] call[name[get_availability_zone], parameter[]] if compare[name[availability_zone] is constant[None]] begin[:] variable[msg] assign[=] constant[Unable to determine the Availability Zone for this system, cannot determine the AWS Region] call[name[log].error, parameter[name[msg]]] return[None] variable[region] assign[=] call[name[availability_zone]][<ast.Slice object at 0x7da1b10ecd00>] return[name[region]]
keyword[def] identifier[get_region] (): literal[string] identifier[log] = identifier[logging] . identifier[getLogger] ( identifier[mod_logger] + literal[string] ) identifier[availability_zone] = identifier[get_availability_zone] () keyword[if] identifier[availability_zone] keyword[is] keyword[None] : identifier[msg] = literal[string] identifier[log] . identifier[error] ( identifier[msg] ) keyword[return] identifier[region] = identifier[availability_zone] [:- literal[int] ] keyword[return] identifier[region]
def get_region():
    """Gets the AWS Region ID for this system

    :return: (str) AWS Region ID where this system lives
    """
    log = logging.getLogger(mod_logger + '.get_region')
    # First get the availability zone
    availability_zone = get_availability_zone()
    if availability_zone is None:
        msg = 'Unable to determine the Availability Zone for this system, cannot determine the AWS Region'
        log.error(msg)
        return # depends on [control=['if'], data=[]]
    # Strip off the last character to get the region
    region = availability_zone[:-1]
    return region
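The zone-to-region rule in isolation; the zone names below are examples.

# An AZ name is the region plus a single trailing zone letter.
for az in ('us-east-1a', 'eu-west-2c'):
    print(az, '->', az[:-1])
# us-east-1a -> us-east-1
# eu-west-2c -> eu-west-2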
def add_interrupt_callback(self, gpio_id, callback, edge='both',
        pull_up_down=_GPIO.PUD_OFF, threaded_callback=False,
        debounce_timeout_ms=None):
    """
    Add a callback to be executed when the value on 'gpio_id' changes to
    the edge specified via the 'edge' parameter (default='both').

    `pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
    `RPIO.PUD_OFF`.

    If `threaded_callback` is True, the callback will be started
    inside a Thread.
    """
    gpio_id = _GPIO.channel_to_gpio(gpio_id)
    debug("Adding callback for GPIO %s" % gpio_id)
    if not edge in ["falling", "rising", "both", "none"]:
        raise AttributeError("'%s' is not a valid edge." % edge)

    if not pull_up_down in [_GPIO.PUD_UP, _GPIO.PUD_DOWN, _GPIO.PUD_OFF]:
        raise AttributeError("'%s' is not a valid pull_up_down." % pull_up_down)

    # Make sure the gpio_id is valid
    if not gpio_id in set(chain(RPIO.GPIO_LIST_R1, RPIO.GPIO_LIST_R2, \
            RPIO.GPIO_LIST_R3)):
        raise AttributeError("GPIO %s is not a valid gpio-id." % gpio_id)

    # Require INPUT pin setup; and set the correct PULL_UPDN
    if RPIO.gpio_function(int(gpio_id)) == RPIO.IN:
        RPIO.set_pullupdn(gpio_id, pull_up_down)
    else:
        debug("- changing gpio function from %s to INPUT" % \
                (GPIO_FUNCTIONS[RPIO.gpio_function(int(gpio_id))]))
        RPIO.setup(gpio_id, RPIO.IN, pull_up_down)

    # Prepare the callback (wrap in Thread if needed)
    cb = callback if not threaded_callback else \
            partial(_threaded_callback, callback)

    # Prepare the /sys/class path of this gpio
    path_gpio = "%sgpio%s/" % (_SYS_GPIO_ROOT, gpio_id)

    # If initial callback for this GPIO then set everything up. Else make
    # sure the edge detection is the same.
    if gpio_id in self._map_gpioid_to_callbacks:
        with open(path_gpio + "edge", "r") as f:
            e = f.read().strip()
            # Check whether edge is the same, else throw Exception
            if e != edge:
                raise AttributeError(("Cannot add callback for gpio %s:"
                        " edge detection '%s' not compatible with existing"
                        " edge detection '%s'.") % (gpio_id, edge, e))

        debug("- kernel interface already set up for GPIO %s" % gpio_id)
        self._map_gpioid_to_callbacks[gpio_id].append(cb)

    else:
        # If the kernel interface already exists, unexport it first for a clean setup
        if os.path.exists(path_gpio):
            if self._show_warnings:
                warn("Kernel interface for GPIO %s already exists." % \
                        gpio_id)
            debug("- unexporting kernel interface for GPIO %s" % gpio_id)
            with open(_SYS_GPIO_ROOT + "unexport", "w") as f:
                f.write("%s" % gpio_id)
            time.sleep(0.1)

        # Export kernel interface /sys/class/gpio/gpioN
        with open(_SYS_GPIO_ROOT + "export", "w") as f:
            f.write("%s" % gpio_id)
        self._gpio_kernel_interfaces_created.append(gpio_id)
        debug("- kernel interface exported for GPIO %s" % gpio_id)

        # Configure gpio as input
        with open(path_gpio + "direction", "w") as f:
            f.write("in")

        # Configure gpio edge detection
        with open(path_gpio + "edge", "w") as f:
            f.write(edge)

        debug(("- kernel interface configured for GPIO %s "
                "(edge='%s', pullupdn=%s)") % (gpio_id, edge, \
                _PULL_UPDN[pull_up_down]))

        # Open the gpio value stream and read the initial value
        f = open(path_gpio + "value", 'r')
        val_initial = f.read().strip()
        debug("- initial gpio value: %s" % val_initial)
        f.seek(0)

        # Add callback info to the mapping dictionaries
        self._map_fileno_to_file[f.fileno()] = f
        self._map_fileno_to_gpioid[f.fileno()] = gpio_id
        self._map_fileno_to_options[f.fileno()] = {
                "debounce_timeout_s": debounce_timeout_ms / 1000.0 if \
                        debounce_timeout_ms else 0,
                "interrupt_last": 0,
                "edge": edge
                }
        self._map_gpioid_to_fileno[gpio_id] = f.fileno()
        self._map_gpioid_to_callbacks[gpio_id] = [cb]

        # Add to epoll
        self._epoll.register(f.fileno(), select.EPOLLPRI | select.EPOLLERR)
def function[add_interrupt_callback, parameter[self, gpio_id, callback, edge, pull_up_down, threaded_callback, debounce_timeout_ms]]: constant[ Add a callback to be executed when the value on 'gpio_id' changes to the edge specified via the 'edge' parameter (default='both'). `pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and `RPIO.PUD_OFF`. If `threaded_callback` is True, the callback will be started inside a Thread. ] variable[gpio_id] assign[=] call[name[_GPIO].channel_to_gpio, parameter[name[gpio_id]]] call[name[debug], parameter[binary_operation[constant[Adding callback for GPIO %s] <ast.Mod object at 0x7da2590d6920> name[gpio_id]]]] if <ast.UnaryOp object at 0x7da1b2347730> begin[:] <ast.Raise object at 0x7da1b2346e30> if <ast.UnaryOp object at 0x7da1b2346800> begin[:] <ast.Raise object at 0x7da1b2347bb0> if <ast.UnaryOp object at 0x7da1b2344220> begin[:] <ast.Raise object at 0x7da1b2344460> if compare[call[name[RPIO].gpio_function, parameter[call[name[int], parameter[name[gpio_id]]]]] equal[==] name[RPIO].IN] begin[:] call[name[RPIO].set_pullupdn, parameter[name[gpio_id], name[pull_up_down]]] variable[cb] assign[=] <ast.IfExp object at 0x7da18bcc9930> variable[path_gpio] assign[=] binary_operation[constant[%sgpio%s/] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bcca080>, <ast.Name object at 0x7da18bcc9d50>]]] if compare[name[gpio_id] in name[self]._map_gpioid_to_callbacks] begin[:] with call[name[open], parameter[binary_operation[name[path_gpio] + constant[edge]], constant[r]]] begin[:] variable[e] assign[=] call[call[name[f].read, parameter[]].strip, parameter[]] if compare[name[e] not_equal[!=] name[edge]] begin[:] <ast.Raise object at 0x7da18bccafe0> call[name[debug], parameter[binary_operation[constant[- kernel interface already setup for GPIO %s] <ast.Mod object at 0x7da2590d6920> name[gpio_id]]]] call[call[name[self]._map_gpioid_to_callbacks][name[gpio_id]].append, parameter[name[cb]]]
keyword[def] identifier[add_interrupt_callback] ( identifier[self] , identifier[gpio_id] , identifier[callback] , identifier[edge] = literal[string] , identifier[pull_up_down] = identifier[_GPIO] . identifier[PUD_OFF] , identifier[threaded_callback] = keyword[False] , identifier[debounce_timeout_ms] = keyword[None] ): literal[string] identifier[gpio_id] = identifier[_GPIO] . identifier[channel_to_gpio] ( identifier[gpio_id] ) identifier[debug] ( literal[string] % identifier[gpio_id] ) keyword[if] keyword[not] identifier[edge] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[raise] identifier[AttributeError] ( literal[string] % identifier[edge] ) keyword[if] keyword[not] identifier[pull_up_down] keyword[in] [ identifier[_GPIO] . identifier[PUD_UP] , identifier[_GPIO] . identifier[PUD_DOWN] , identifier[_GPIO] . identifier[PUD_OFF] ]: keyword[raise] identifier[AttributeError] ( literal[string] % identifier[edge] ) keyword[if] keyword[not] identifier[gpio_id] keyword[in] identifier[set] ( identifier[chain] ( identifier[RPIO] . identifier[GPIO_LIST_R1] , identifier[RPIO] . identifier[GPIO_LIST_R2] , identifier[RPIO] . identifier[GPIO_LIST_R3] )): keyword[raise] identifier[AttributeError] ( literal[string] % identifier[gpio_id] ) keyword[if] identifier[RPIO] . identifier[gpio_function] ( identifier[int] ( identifier[gpio_id] ))== identifier[RPIO] . identifier[IN] : identifier[RPIO] . identifier[set_pullupdn] ( identifier[gpio_id] , identifier[pull_up_down] ) keyword[else] : identifier[debug] ( literal[string] %( identifier[GPIO_FUNCTIONS] [ identifier[RPIO] . identifier[gpio_function] ( identifier[int] ( identifier[gpio_id] ))])) identifier[RPIO] . identifier[setup] ( identifier[gpio_id] , identifier[RPIO] . identifier[IN] , identifier[pull_up_down] ) identifier[cb] = identifier[callback] keyword[if] keyword[not] identifier[threaded_callback] keyword[else] identifier[partial] ( identifier[_threaded_callback] , identifier[callback] ) identifier[path_gpio] = literal[string] %( identifier[_SYS_GPIO_ROOT] , identifier[gpio_id] ) keyword[if] identifier[gpio_id] keyword[in] identifier[self] . identifier[_map_gpioid_to_callbacks] : keyword[with] identifier[open] ( identifier[path_gpio] + literal[string] , literal[string] ) keyword[as] identifier[f] : identifier[e] = identifier[f] . identifier[read] (). identifier[strip] () keyword[if] identifier[e] != identifier[edge] : keyword[raise] identifier[AttributeError] (( literal[string] literal[string] literal[string] )%( identifier[gpio_id] , identifier[edge] , identifier[e] )) identifier[debug] ( literal[string] % identifier[gpio_id] ) identifier[self] . identifier[_map_gpioid_to_callbacks] [ identifier[gpio_id] ]. identifier[append] ( identifier[cb] ) keyword[else] : keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path_gpio] ): keyword[if] identifier[self] . identifier[_show_warnings] : identifier[warn] ( literal[string] % identifier[gpio_id] ) identifier[debug] ( literal[string] % identifier[gpio_id] ) keyword[with] identifier[open] ( identifier[_SYS_GPIO_ROOT] + literal[string] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( literal[string] % identifier[gpio_id] ) identifier[time] . identifier[sleep] ( literal[int] ) keyword[with] identifier[open] ( identifier[_SYS_GPIO_ROOT] + literal[string] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( literal[string] % identifier[gpio_id] ) identifier[self] . 
identifier[_gpio_kernel_interfaces_created] . identifier[append] ( identifier[gpio_id] ) identifier[debug] ( literal[string] % identifier[gpio_id] ) keyword[with] identifier[open] ( identifier[path_gpio] + literal[string] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( literal[string] ) keyword[with] identifier[open] ( identifier[path_gpio] + literal[string] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[edge] ) identifier[debug] (( literal[string] literal[string] )%( identifier[gpio_id] , identifier[edge] , identifier[_PULL_UPDN] [ identifier[pull_up_down] ])) identifier[f] = identifier[open] ( identifier[path_gpio] + literal[string] , literal[string] ) identifier[val_initial] = identifier[f] . identifier[read] (). identifier[strip] () identifier[debug] ( literal[string] % identifier[val_initial] ) identifier[f] . identifier[seek] ( literal[int] ) identifier[self] . identifier[_map_fileno_to_file] [ identifier[f] . identifier[fileno] ()]= identifier[f] identifier[self] . identifier[_map_fileno_to_gpioid] [ identifier[f] . identifier[fileno] ()]= identifier[gpio_id] identifier[self] . identifier[_map_fileno_to_options] [ identifier[f] . identifier[fileno] ()]={ literal[string] : identifier[debounce_timeout_ms] / literal[int] keyword[if] identifier[debounce_timeout_ms] keyword[else] literal[int] , literal[string] : literal[int] , literal[string] : identifier[edge] } identifier[self] . identifier[_map_gpioid_to_fileno] [ identifier[gpio_id] ]= identifier[f] . identifier[fileno] () identifier[self] . identifier[_map_gpioid_to_callbacks] [ identifier[gpio_id] ]=[ identifier[cb] ] identifier[self] . identifier[_epoll] . identifier[register] ( identifier[f] . identifier[fileno] (), identifier[select] . identifier[EPOLLPRI] | identifier[select] . identifier[EPOLLERR] )
def add_interrupt_callback(self, gpio_id, callback, edge='both', pull_up_down=_GPIO.PUD_OFF, threaded_callback=False, debounce_timeout_ms=None):
    """
    Add a callback to be executed when the value on 'gpio_id' changes to
    the edge specified via the 'edge' parameter (default='both').

    `pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
    `RPIO.PUD_OFF`.

    If `threaded_callback` is True, the callback will be started
    inside a Thread.
    """
    gpio_id = _GPIO.channel_to_gpio(gpio_id)
    debug('Adding callback for GPIO %s' % gpio_id)
    if not edge in ['falling', 'rising', 'both', 'none']:
        raise AttributeError("'%s' is not a valid edge." % edge) # depends on [control=['if'], data=[]]
    if not pull_up_down in [_GPIO.PUD_UP, _GPIO.PUD_DOWN, _GPIO.PUD_OFF]:
        raise AttributeError("'%s' is not a valid pull_up_down." % pull_up_down) # depends on [control=['if'], data=[]]
    # Make sure the gpio_id is valid
    if not gpio_id in set(chain(RPIO.GPIO_LIST_R1, RPIO.GPIO_LIST_R2, RPIO.GPIO_LIST_R3)):
        raise AttributeError('GPIO %s is not a valid gpio-id.' % gpio_id) # depends on [control=['if'], data=[]]
    # Require INPUT pin setup; and set the correct PULL_UPDN
    if RPIO.gpio_function(int(gpio_id)) == RPIO.IN:
        RPIO.set_pullupdn(gpio_id, pull_up_down) # depends on [control=['if'], data=[]]
    else:
        debug('- changing gpio function from %s to INPUT' % GPIO_FUNCTIONS[RPIO.gpio_function(int(gpio_id))])
        RPIO.setup(gpio_id, RPIO.IN, pull_up_down)
    # Prepare the callback (wrap in Thread if needed)
    cb = callback if not threaded_callback else partial(_threaded_callback, callback)
    # Prepare the /sys/class path of this gpio
    path_gpio = '%sgpio%s/' % (_SYS_GPIO_ROOT, gpio_id)
    # If initial callback for this GPIO then set everything up. Else make
    # sure the edge detection is the same.
    if gpio_id in self._map_gpioid_to_callbacks:
        with open(path_gpio + 'edge', 'r') as f:
            e = f.read().strip()
            # Check whether edge is the same, else throw Exception
            if e != edge:
                raise AttributeError("Cannot add callback for gpio %s: edge detection '%s' not compatible with existing edge detection '%s'." % (gpio_id, edge, e)) # depends on [control=['if'], data=['e', 'edge']] # depends on [control=['with'], data=['f']]
        debug('- kernel interface already set up for GPIO %s' % gpio_id)
        self._map_gpioid_to_callbacks[gpio_id].append(cb) # depends on [control=['if'], data=['gpio_id']]
    else:
        # If the kernel interface already exists, unexport it first for a clean setup
        if os.path.exists(path_gpio):
            if self._show_warnings:
                warn('Kernel interface for GPIO %s already exists.' % gpio_id) # depends on [control=['if'], data=[]]
            debug('- unexporting kernel interface for GPIO %s' % gpio_id)
            with open(_SYS_GPIO_ROOT + 'unexport', 'w') as f:
                f.write('%s' % gpio_id) # depends on [control=['with'], data=['f']]
            time.sleep(0.1) # depends on [control=['if'], data=[]]
        # Export kernel interface /sys/class/gpio/gpioN
        with open(_SYS_GPIO_ROOT + 'export', 'w') as f:
            f.write('%s' % gpio_id) # depends on [control=['with'], data=['f']]
        self._gpio_kernel_interfaces_created.append(gpio_id)
        debug('- kernel interface exported for GPIO %s' % gpio_id)
        # Configure gpio as input
        with open(path_gpio + 'direction', 'w') as f:
            f.write('in') # depends on [control=['with'], data=['f']]
        # Configure gpio edge detection
        with open(path_gpio + 'edge', 'w') as f:
            f.write(edge) # depends on [control=['with'], data=['f']]
        debug("- kernel interface configured for GPIO %s (edge='%s', pullupdn=%s)" % (gpio_id, edge, _PULL_UPDN[pull_up_down]))
        # Open the gpio value stream and read the initial value
        f = open(path_gpio + 'value', 'r')
        val_initial = f.read().strip()
        debug('- initial gpio value: %s' % val_initial)
        f.seek(0)
        # Add callback info to the mapping dictionaries
        self._map_fileno_to_file[f.fileno()] = f
        self._map_fileno_to_gpioid[f.fileno()] = gpio_id
        self._map_fileno_to_options[f.fileno()] = {'debounce_timeout_s': debounce_timeout_ms / 1000.0 if debounce_timeout_ms else 0, 'interrupt_last': 0, 'edge': edge}
        self._map_gpioid_to_fileno[gpio_id] = f.fileno()
        self._map_gpioid_to_callbacks[gpio_id] = [cb]
        # Add to epoll
        self._epoll.register(f.fileno(), select.EPOLLPRI | select.EPOLLERR)
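The dispatch loop that consumes these per-fileno options is not part of this entry; the sketch below shows how the `debounce_timeout_s` and `interrupt_last` fields are presumably used, with made-up timings.

import time

opts = {'debounce_timeout_s': 100 / 1000.0, 'interrupt_last': 0, 'edge': 'both'}

def should_fire(opts, now):
    # Deliver an event only if the debounce window has elapsed.
    if now - opts['interrupt_last'] < opts['debounce_timeout_s']:
        return False   # still inside the window: drop the event
    opts['interrupt_last'] = now
    return True

t0 = time.time()
print(should_fire(opts, t0))          # True: first event always fires
print(should_fire(opts, t0 + 0.05))   # False: 50 ms later, debounced
print(should_fire(opts, t0 + 0.20))   # True: past the 100 ms window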