content: string (lengths 22 to 815k)
id: int64 (values 0 to 4.91M)
def data_coded_table(request, project_pk):
    """This returns the labeled data.

    Args:
        request: The POST request
        project_pk: Primary key of the project
    Returns:
        data: a list of data information
    """
    project = Project.objects.get(pk=project_pk)
    data_objs = DataLabel.objects.filter(data__project=project, data__irr_ind=False)

    data = []
    for d in data_objs:
        temp = {
            "Text": escape(d.data.text),
            "Label": d.label.name,
            "Coder": d.profile.__str__(),
        }
        data.append(temp)

    return Response({"data": data})
4,200
def when(name, converters=None):
    """When step decorator.

    :param name: Step name.
    :param converters: Optional `dict` of the argument or parameter converters in form
                       {<param_name>: <converter function>}.
    :param parser: name of the step parser to use
    :param parser_args: optional `dict` of arguments to pass to step parser

    :raises: StepError in case of wrong configuration.
    """
    return _step_decorator(WHEN, name, converters=converters)
4,201
def plot_roc_curve(y_true, predictions, title='Dementia'):
    """
    Plots the ROC curve of many models together

    Parameters
    ----------
    y_true : True labels
    predictions : Dictionary with one (key, value) pair for each model's predictions.

    Returns
    -------
    """
    plt.figure(figsize=(8, 6))
    for key, value in predictions.items():
        fpr, tpr, _ = roc_curve(y_true, value)
        auc = roc_auc_score(y_true, value)
        plt.plot(fpr, tpr, label=f'{key} AUC: {auc:.3f}')
    plt.plot([0, 1], [0, 1], color='orange', linestyle='--')
    plt.xticks(np.arange(0.0, 1.1, step=0.1))
    plt.xlabel('False positive rate', fontsize=15)
    plt.yticks(np.arange(0.0, 1.1, step=0.1))
    plt.ylabel('True positive rate', fontsize=15)
    plt.title(f'ROC Curve {title}', fontweight='bold', fontsize=15)
    plt.legend(prop={'size': 13}, loc='lower right')
    plt.show()
4,202
def displacement(current: np.ndarray, previous: np.ndarray) -> np.array:
    """Computes the displacement vector between the centroids of two storms.

    :param current: the intensity-weighted centroid of the storm in the current time slice, given as a tuple.
    :param previous: the intensity-weighted centroid of the storm in the previous time slice, given as a tuple.
    :return: the displacement vector, as an array.
    """
    return np.array([current[0] - previous[0], current[1] - previous[1]])
4,203
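A minimal usage sketch for the displacement helper above, with hypothetical centroid coordinates:

import numpy as np

# Hypothetical intensity-weighted centroids (row, col) of a storm in two frames
prev_centroid = np.array([120.5, 87.2])
curr_centroid = np.array([123.0, 90.1])

# Displacement vector between the two time slices: [2.5 2.9]
print(displacement(curr_centroid, prev_centroid))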
def update_datapackage(datapackage, mappings): """Update the field names and delete the `maps_to` properties.""" for i, resource in enumerate(datapackage['resources']): fields = [] for field in resource['schema']['fields']: fiscal_key = mappings[i][field['name']] if fiscal_key not in ('_unknown', '_ignored'): field.update({'name': fiscal_key}) del field['maps_to'] if 'translates_to' in field: del field['translates_to'] fields.append(field) resource['schema']['fields'] = fields return datapackage
4,204
def get_molpro_mol(logfile):
    """
    Returns xyz file from molpro logfile.
    """
    import pybel
    # next() works on both Python 2 and 3 iterators; the original .next() call is Python 2 only.
    return next(pybel.readfile('mpo', logfile))
4,205
def tbody(content, accesskey:str ="", class_: str ="", contenteditable: str ="", data_key: str="", data_value: str="", dir_: str="", draggable: str="", hidden: str="", id_: str="", lang: str="", spellcheck: str="", style: str="", tabindex: str="", title: str="", translate: str=""): """ Returns a table body.\n `content`: Contents of the table body.\n """ g_args = global_args(accesskey, class_, contenteditable, data_key, data_value, dir_, draggable, hidden, id_, lang, spellcheck, style, tabindex, title, translate) return f"<tbody {g_args}>{content}</tbody>\n"
4,206
def create_controller(): """ 1. Check the token 2. Call the worker method 3. Show results """ minimum_buffer_min = 3 token_ok = views.ds_token_ok(minimum_buffer_min) if token_ok and 'envelope_id' in session: # 2. Call the worker method args = { 'account_id': session['ds_account_id'], 'envelope_id': session['envelope_id'], 'base_path': session['ds_base_path'], 'ds_access_token': session['ds_access_token'], } try: results = worker(args) except ApiException as err: error_body_json = err and hasattr(err, 'body') and err.body # we can pull the DocuSign error code and message from the response body error_body = json.loads(error_body_json) error_code = error_body and 'errorCode' in error_body and error_body['errorCode'] error_message = error_body and 'message' in error_body and error_body['message'] # In production, may want to provide customized error messages and # remediation advice to the user. return render_template('error.html', err=err, error_code=error_code, error_message=error_message ) return render_template("example_done.html", title="Get envelope status results", h1="Get envelope status results", message="Results from the Envelopes::get method:", json=json.dumps(json.dumps(results.to_dict())) ) elif not token_ok: flash('Sorry, you need to re-authenticate.') # We could store the parameters of the requested operation # so it could be restarted automatically. # But since it should be rare to have a token issue here, # we'll make the user re-enter the form data after # authentication. session['eg'] = url_for(eg) return redirect(url_for('ds_must_authenticate')) elif not 'envelope_id' in session: return render_template("eg004_envelope_info.html", title="Envelope information", envelope_ok=False, source_file=path.basename(__file__), source_url=ds_config.DS_CONFIG['github_example_url'] + path.basename(__file__), documentation=ds_config.DS_CONFIG['documentation'] + eg, show_doc=ds_config.DS_CONFIG['documentation'], )
4,207
def pack(number, word_size = None, endianness = None, sign = None, **kwargs): """pack(number, word_size = None, endianness = None, sign = None, **kwargs) -> str Packs arbitrary-sized integer. Word-size, endianness and signedness is done according to context. `word_size` can be any positive number or the string "all". Choosing the string "all" will output a string long enough to contain all the significant bits and thus be decodable by :func:`unpack`. `word_size` can be any positive number. The output will contain word_size/8 rounded up number of bytes. If word_size is not a multiple of 8, it will be padded with zeroes up to a byte boundary. Arguments: number (int): Number to convert word_size (int): Word size of the converted integer or the string 'all' (in bits). endianness (str): Endianness of the converted integer ("little"/"big") sign (str): Signedness of the converted integer (False/True) kwargs: Anything that can be passed to context.local Returns: The packed number as a string. Examples: >>> pack(0x414243, 24, 'big', True) b'ABC' >>> pack(0x414243, 24, 'little', True) b'CBA' >>> pack(0x814243, 24, 'big', False) b'\\x81BC' >>> pack(0x814243, 24, 'big', True) Traceback (most recent call last): ... ValueError: pack(): number does not fit within word_size >>> pack(0x814243, 25, 'big', True) b'\\x00\\x81BC' >>> pack(-1, 'all', 'little', True) b'\\xff' >>> pack(-256, 'all', 'big', True) b'\\xff\\x00' >>> pack(0x0102030405, 'all', 'little', True) b'\\x05\\x04\\x03\\x02\\x01' >>> pack(-1) b'\\xff\\xff\\xff\\xff' >>> pack(0x80000000, 'all', 'big', True) b'\\x00\\x80\\x00\\x00\\x00' """ if sign is None and number < 0: sign = True if word_size != 'all': kwargs.setdefault('word_size', word_size) kwargs.setdefault('endianness', endianness) kwargs.setdefault('sign', sign) with context.local(**kwargs): # Lookup in context if not found word_size = 'all' if word_size == 'all' else context.word_size endianness = context.endianness sign = context.sign if not isinstance(number, six.integer_types): raise ValueError("pack(): number must be of type (int,long) (got %r)" % type(number)) if sign not in [True, False]: raise ValueError("pack(): sign must be either True or False (got %r)" % sign) if endianness not in ['little', 'big']: raise ValueError("pack(): endianness must be either 'little' or 'big' (got %r)" % endianness) # Verify that word_size make sense if word_size == 'all': if number == 0: word_size = 8 elif number > 0: if sign == False: word_size = ((number.bit_length() - 1) | 7) + 1 else: word_size = (number.bit_length() | 7) + 1 else: if sign == False: raise ValueError("pack(): number does not fit within word_size") word_size = ((number + 1).bit_length() | 7) + 1 elif not isinstance(word_size, six.integer_types) or word_size <= 0: raise ValueError("pack(): word_size must be a positive integer or the string 'all'") if sign == True: limit = 1 << (word_size-1) if not -limit <= number < limit: raise ValueError("pack(): number does not fit within word_size") else: limit = 1 << word_size if not 0 <= number < limit: raise ValueError("pack(): number does not fit within word_size [%i, %r, %r]" % (0, number, limit)) # Normalize number and size now that we have verified them # From now on we can treat positive and negative numbers the same number = number & ((1 << word_size) - 1) byte_size = (word_size + 7) // 8 out = [] for _ in range(byte_size): out.append(_p8lu(number & 0xff)) number = number >> 8 if endianness == 'little': return b''.join(out) else: return b''.join(reversed(out))
4,208
def getSourceUrls(db):
    """Get the source URLs of articles that have not been crawled yet."""
    sql = """
        SELECT DISTINCT re_article_source.url_source
        FROM re_article_source LEFT JOIN source ON re_article_source.url_source = source.url
        WHERE source.url IS NULL
    """
    # sql = 'SELECT DISTINCT re_article_source.url_source FROM re_article_source LEFT JOIN source ON re_article_source.url_article=source.url WHERE source.url is NULL'
    curr = db.cursor()
    curr.execute(sql)
    urls = []
    for data in curr.fetchall():
        url = data[0]
        urls.append(url)
    return urls
4,209
def ML_bump(x,v=None,logger=None): """ ML fit of the bump function Parameters ---------- x : (n,d) ndarray coML estimatearaites v : (n,) ndarray weight for each sample Returns ------- mu : (n,d) ndarray bump mean parameter (for each dimension) sigma : (n,d) ndarray bump std parameter (for each dimension) """ def ML_bump_1d(x,v,logger=None): def fit_f(param,x,v): mu,sigma = param inv_sigma = 1/sigma Z = sp.stats.norm.cdf(1,loc=mu,scale=sigma)-sp.stats.norm.cdf(0,loc=mu,scale=sigma) inv_Z = 1/Z phi_alpha = 1/np.sqrt(2*np.pi)*np.exp(-mu**2/2/sigma**2) phi_beta = 1/np.sqrt(2*np.pi)*np.exp(-(1-mu)**2/2/sigma**2) # Average likelihood if v is None: t1 = np.mean(x-mu) t2 = np.mean((x-mu)**2) else: t1 = np.sum((x-mu)*v) / np.sum(v) t2 = np.sum((x-mu)**2*v) / np.sum(v) l = -np.log(Z) - np.log(sigma) - t2/2/sigma**2 # Gradient d_c_mu = inv_sigma * (phi_alpha-phi_beta) d_c_sig = inv_sigma * (-mu*inv_sigma*phi_alpha - (1-mu)*inv_sigma*phi_beta) d_l_mu = -d_c_mu*inv_Z + t1*inv_sigma**2 d_l_sig = -d_c_sig*inv_Z - inv_sigma + t2*inv_sigma**3 grad = np.array([d_l_mu,d_l_sig],dtype=float) return l,grad ## gradient check #_,grad_ = fit_f([0.2,0.1],x,v) #num_dmu = (fit_f([0.2+1e-8,0.1],x,v)[0]-fit_f([0.2,0.1],x,v)[0]) / 1e-8 #num_dsigma = (fit_f([0.2,0.1+1e-8],x,v)[0]-fit_f([0.2,0.1],x,v)[0]) / 1e-8 #print('## Gradient check ##') #print('# param value: mu=%0.6f, sigma=%0.6f'%(0.2,0.1)) #print('# Theoretical grad: dmu=%0.8f, dsigma=%0.8f'%(grad_[0],grad_[1])) #print('# Numerical grad: dmu=%0.8f, dsigma=%0.8f\n'%(num_dmu,num_dsigma)) # If the variance is small and the mean is at center, # directly output the empirical mean and variance. if v is None: mu = np.mean(x) sigma = np.std(x) else: mu = np.sum(x*v)/np.sum(v) sigma = np.sqrt(np.sum((x-mu)**2*v)/np.sum(v)) if sigma<0.075 and np.min([1-mu,mu])>0.15: return mu,sigma param = np.array([mu,sigma]) lr = 0.01 max_step = 0.025 max_itr = 100 i_itr = 0 l_old = -10 while i_itr<max_itr: l,grad = fit_f(param,x,v) if np.absolute(l-l_old)<0.001: break else: l_old=l update = (grad*lr).clip(min=-max_step,max=max_step) param += update i_itr +=1 if np.isnan(param).any() or np.min([param[0],1-param[0],param[1]])<0: return np.mean(x),np.std(x) mu,sigma = param if sigma>0.25: sigma=1 return mu,sigma mu = np.zeros(x.shape[1],dtype=float) sigma = np.zeros(x.shape[1],dtype=float) for i in range(x.shape[1]): mu[i],sigma[i] = ML_bump_1d(x[:,i],v,logger=logger) return mu,sigma
4,210
def read_message_handler(stream): """ Send message to user if the opponent has read the message """ while True: packet = yield from stream.get() session_id = packet.get('session_key') user_opponent = packet.get('username') message_id = packet.get('message_id') if session_id and user_opponent and message_id is not None: user_owner = get_user_from_session(session_id) if user_owner: message = models.Message.objects.filter(id=message_id).first() if message: message.read = True message.save() logger.debug('Message ' + str(message_id) + ' is now read') opponent_socket = ws_connections.get( (user_opponent, user_owner.username)) if opponent_socket: yield from target_message(opponent_socket, {'type': 'opponent-read-message', 'username': user_opponent, 'message_id': message_id}) else: pass # message not found else: pass # invalid session id else: pass
4,211
def parse_arguments(args_to_parse):
    """ Parse the command line arguments. """
    description = "Find targets which contain a None reference"
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument(
        '-d', '--directory-to-search', type=str, required=True,
        help='Directory to search for anomalous target files'
    )
    parser.add_argument(
        '-o', '--output-file', type=str, required=True,
        help='File to save the names of all occurrences'
    )
    args = parser.parse_args(args_to_parse)

    return args
4,212
def user_with_some_privileges(self, table_type, node=None): """Check that user with any permutation of ALTER INDEX subprivileges is able to alter the table for privileges granted, and not for privileges not granted. """ if node is None: node = self.context.node table_name = f"merge_tree_{getuid()}" user_name = f"user_{getuid()}" for permutation in permutations(table_type): privileges = alter_index_privileges(permutation) with When(f"granted={privileges}"): with table(node, table_name, table_type), user(node, user_name): with Given("I first grant the privileges needed"): node.query(f"GRANT {privileges} ON {table_name} TO {user_name}") with Then(f"I try to ALTER INDEX with given privileges"): alter_index_privilege_handler(permutation, table_name, user_name, node)
4,213
def function_f1a(x):
    """Function with one argument, returning one value.

    :type x: types.IntType
    :rtype: types.StringType
    """
    return '{}'.format(x)
4,214
def is_port_in_use(hostname: str, port: Union[int, str]) -> bool:
    """ Check if TCP/IP `port` on `hostname` is in use """
    with socket() as sock:
        try:
            sock.bind((hostname, int(port)))
            return False
        except OSError as err:
            if "Address already in use" in repr(err):
                return True
            raise err
4,215
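A short usage sketch for is_port_in_use above; the host and port values are arbitrary:

# Check whether anything is already bound to localhost:8080
if is_port_in_use("127.0.0.1", 8080):
    print("Port 8080 is busy, pick another one")
else:
    print("Port 8080 is free")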
def _pos_from_before_after(
    before: int,
    after: int,
    length: int,
    base0: bool
) -> int:
    """Get the position to insert from before and after"""
    if before is not None and after is not None:
        raise ValueError("Can't specify both `_before` and `_after`.")

    if before is None and after is None:
        return length

    if after is not None:
        return position_after(after, length, base0)

    return position_at(before, length, base0)
4,216
def prep_incorporation_correction_filing(session, business, original_filing_id, payment_id, option, name_change_with_new_nr): """Return a new incorporation correction filing prepped for email notification.""" filing_template = copy.deepcopy(CORRECTION_INCORPORATION) filing_template['filing']['business'] = {'identifier': business.identifier} for party in filing_template['filing']['incorporationApplication']['parties']: for role in party['roles']: if role['roleType'] == 'Completing Party': party['officer']['email'] = '[email protected]' filing_template['filing']['incorporationApplication']['contactPoint'] = {} filing_template['filing']['incorporationApplication']['contactPoint']['email'] = '[email protected]' filing_template['filing']['correction']['correctedFilingId'] = original_filing_id if not name_change_with_new_nr: del filing_template['filing']['incorporationApplication']['nameRequest']['legalName'] else: filing_template['filing']['incorporationApplication']['nameRequest']['nrNumber'] = 'NR 1234567' filing = create_filing(token=payment_id, filing_json=filing_template, business_id=business.id) filing.payment_completion_date = filing.filing_date filing.save() if option in ['COMPLETED', 'bn']: uow = versioning_manager.unit_of_work(session) transaction = uow.create_transaction(session) filing.transaction_id = transaction.id filing.save() return filing
4,217
def get_logger(): """ Return the custom showyourwork logger. Sets up the logging if needed. """ logger = logging.getLogger("showyourwork") # Add showyourwork stream & file handlers if not logger.handlers: # Root level logger.setLevel(logging.DEBUG) # Terminal: all messages stream_handler = ColorizingStreamHandler() stream_handler.setLevel(logging.INFO) logger.addHandler(stream_handler) try: LOGS = paths.user().logs except: # Can't resolve path to logs; assume we're not # in a showyourwork/git repo and fail silently. pass else: # File: all showyourwork messages msg_file = LOGS / "showyourwork.log" file_handler = logging.FileHandler(msg_file) file_handler.setLevel(logging.DEBUG) logger.addHandler(file_handler) return logger
4,218
def web_videos_random_archived(channel): """Play random archived video. Chooses random archived video from selected channel and redirects to its detail page view. Args: channel (str): YouTube channel ID. Returns: flask.Response: Selected video detail view. """ try: choice = random.choice([ video['snippet']['resourceId']['videoId'] for video in yt_get_channel_videos(channel) if video['archived'] is not None ]) except IndexError: return flask.redirect(flask.url_for('videos', channel = channel )) return flask.redirect(flask.url_for('videos', channel = channel, video = choice) )
4,219
def p_function_stmt(p):
    """function_stmt : FUNCTION subroutine_name
                     | prefix_spec_list FUNCTION subroutine_name
    """
    if len(p) > 3:
        # p[1] holds the prefix_spec_list; the original referenced p[0], which is still unset here.
        p[0] = (p[3], p[1])
    else:
        p[0] = (p[2], None)
4,220
def fundamental_mode_mfd_marcuse(wl, r, na):
    """Calculates the mode field diameter of the fundamental mode with vacuum wavelength wl using Marcuse's equation.

    :param wl: Wavelength of the mode
    :type wl: float
    :param r: Core radius
    :type r: float
    :param na: Core numerical aperture
    :type na: float
    :returns: Mode field diameter of the fundamental mode
    :rtype: float
    """
    v = fiber_v_parameter(wl, r, na)
    return 2 * r * (0.65 + 1.619*v**(-3/2) + 2.879*v**(-6))
4,221
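A worked numeric sketch for the Marcuse MFD function above. The helper fiber_v_parameter is not shown in this record, so the standard definition V = 2*pi*r*NA/wl is assumed here:

import numpy as np

def fiber_v_parameter(wl, r, na):
    # Assumed standard definition of the normalized frequency V
    return 2 * np.pi * r * na / wl

# Typical single-mode fiber values: wl = 1550 nm, core radius 4.1 um, NA 0.13
wl, r, na = 1.55e-6, 4.1e-6, 0.13
v = fiber_v_parameter(wl, r, na)                 # ~2.16
mfd = fundamental_mode_mfd_marcuse(wl, r, na)
print(f"V = {v:.2f}, MFD = {mfd*1e6:.2f} um")    # roughly 10 um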
def create_pdf(source_image_file, ta_pages, config, output_filename ): """透明なテキストと画像入りのPDFを作成するメソッド""" print("processing pdf: {0}".format(output_filename)) is_normalized = False # PDFまたは画像をページ分割 if re.search(r'\.pdf$', source_image_file ) : images = convert_pdf_to_img(source_image_file, dpi=config.image_resolution) is_normalized = True elif re.search(r'\.tiff$', source_image_file) : images = divide_tiff_image(source_image_file) else: print("Non-support file type. Existed!", file=sys.stderr) sys.exit(-1) newPdfPage = canvas.Canvas(output_filename) newPdfPage.setPageSize(A4) newPdfPage.saveState() # 念の為 newPdfPage.setAuthor(config.author) newPdfPage.setTitle(config.title) newPdfPage.setSubject(config.subject) # 日本語用のフォントの登録(language packに含まれるもの) pdfmetrics.registerFont(UnicodeCIDFont('HeiseiMin-W3')) pdfmetrics.registerFont(UnicodeCIDFont('HeiseiKakuGo-W5')) # tiff file, PDF for i, image in enumerate(images): print(f"start page: {i}") print("image size: {}".format(image.size)) image_width , image_height = image.size ratio = image_width / image_height landscape_mode = False page_size = {} if ratio > 1.0 : landscape_mode = True newPdfPage.setPageSize(landscape(A4)) page_size['width'], page_size['height'] = landscape(A4) else: newPdfPage.setPageSize(A4) page_size['width'], page_size['height'] = A4 offset_y = 2.0 offset_x = -1.0 image_offset_x = 0 image_offset_y = 0 print("page size: {0}, {1}".format(page_size['width'], page_size['height'])) with tempfile.NamedTemporaryFile(mode='w+b',suffix='.jpg') as fp: image.save(fp.name,format='jpeg', quality=config.jpeg_quality) if config.image_embeded : newPdfPage.drawImage(fp.name, 0+image_offset_x, 0+image_offset_y, width=page_size['width'], height=page_size['height'], preserveAspectRatio=True, anchor='s') newPdfPage.setFont(config.default_fontname, 10) # 文字色と透明度の設定 newPdfPage.setFillColor(red, alpha=0.0) page = ta_pages[i] scale = 1.0 if landscape_mode : scale = min(page_size['height'] / image_height, page_size['width'] / image_width) else: scale = min(page_size['height'] / image_height, page_size['width'] / image_width) if is_normalized : scale = 1.0 for block in page.blocks : for p in block.paragraphs : for word in p.words : text = ''.join([t.text for t in word.symbols]) anchor_y = int(page_size['height'] * ( 1.0 - float(word.bounding_box.normalized_vertices[3].y) )) + offset_y anchor_x = int(page_size['width'] * float(word.bounding_box.normalized_vertices[3].x)) + offset_x text_height = int( page_size['height'] * (word.bounding_box.normalized_vertices[3].y - word.bounding_box.normalized_vertices[0].y)) font_size = text_height newPdfPage.setFont(config.default_fontname, font_size) newPdfPage.drawString(anchor_x, anchor_y, text) newPdfPage.showPage() else: for block in page.blocks : for p in block.paragraphs : for w in p.words : for node in w.symbols : #print(node) anchor_y = image_height - int(node.bounding_box.vertices[3].y) anchor_x = int(node.bounding_box.vertices[3].x) text_height = int(node.bounding_box.vertices[3].y) - int(node.bounding_box.vertices[0].y) font_size = config.font_adjustment * math.floor( text_height / (config.image_resolution / 72 ) ) newPdfPage.setFont(config.default_fontname, font_size) newPdfPage.drawString(scale * anchor_x, scale * anchor_y, node.text) newPdfPage.showPage() newPdfPage.save()
4,222
def generate_ansible_coverage_config(): # type: () -> str """Generate code coverage configuration for Ansible tests.""" coverage_config = ''' [run] branch = True concurrency = multiprocessing parallel = True omit = */python*/dist-packages/* */python*/site-packages/* */python*/distutils/* */pyshared/* */pytest */AnsiballZ_*.py */test/results/* ''' return coverage_config
4,223
def gauss_smooth_shift(input, shift, stddev, scale=1.0):
    """
    Smooths the input with Gaussian smoothing with the given standard deviation and shifts its delay positions.

    :param input: The input array
    :param shift: the amount of indices to shift the result
    :param stddev: the stddev for the gaussian smoothing (in index count)
    :param scale: scale the input array first with scale
    :return: the smoothed and shifted array
    """
    forcescale = False
    if isinstance(scale, np.ndarray):
        forcescale = True
    if (forcescale or np.abs(scale-1) > 1e-5):
        input = input*scale

    result = input
    if (stddev > 0.0):
        result = gaussian_filter1d(input, stddev, mode='nearest')

    result = np.roll(result, int(shift))
    if (shift > 0):
        result[: int(shift)] = 0
    #else:
    # backward roll can simply use the trailing values
    return result
4,224
def mel_to_hz(mel):
    """From Young et al. "The HTK book", Chapter 5.4."""
    return 700.0 * (10.0**(mel / 2595.0) - 1.0)
4,225
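A quick numeric check for mel_to_hz above, together with a matching forward transform; the hz_to_mel function is the inverse under the same HTK-style convention and is added here only for illustration:

import math

def hz_to_mel(hz):
    # Inverse of mel_to_hz under the same HTK-style convention
    return 2595.0 * math.log10(1.0 + hz / 700.0)

print(round(mel_to_hz(1000.0), 1))            # 1000.0, i.e. 1000 mel sits near 1 kHz
print(round(hz_to_mel(mel_to_hz(500.0)), 6))  # 500.0, round-trip consistency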
def create_app(path=None, user_content=False, context=None, username=None, password=None, render_offline=False, render_wide=False, render_inline=False, api_url=None, title=None, text=None, autorefresh=None, quiet=None, grip_class=None): """ Creates an Grip application with the specified overrides. """ # Customize the app if grip_class is None: grip_class = Grip # Customize the reader if text is not None: display_filename = DirectoryReader(path, True).filename_for(None) source = TextReader(text, display_filename) elif path == '-': source = StdinReader() else: source = DirectoryReader(path) # Customize the renderer if render_offline: renderer = OfflineRenderer(user_content, context) elif user_content or context or api_url: renderer = GitHubRenderer(user_content, context, api_url) else: renderer = None # Optional basic auth auth = (username, password) if username or password else None # Create the customized app with default asset manager return grip_class(source, auth, renderer, None, render_wide, render_inline, title, autorefresh, quiet)
4,226
def get_simulator_version():
    """ Get the installed version of XPP

    Returns:
        :obj:`str`: version
    """
    result = subprocess.run(["xppaut", "-version"],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            check=False)
    if result.returncode != 0:
        raise RuntimeError('XPP failed: {}'.format(result.stdout.decode("utf-8")))
    return re.search(r"(\d+\.\d*|\d*\.\d+)", result.stdout.decode("utf-8")).group(0)
4,227
def enumerate_joint(variables, e, P):
    """Return the sum of those entries in P consistent with e,
    provided variables is P's remaining variables (the ones not in e)."""
    if not variables:
        return P[e]
    Y, rest = variables[0], variables[1:]
    return sum([enumerate_joint(rest, extend(e, Y, y), P)
                for y in P.values(Y)])
4,228
def fetch_gene_id(gene_id, ENSEMBL_REST_SERVER = GRCH37_ENSEMBL_REST_SERVER): """ Get gene details from name * string Returntype: Gene """ server = ENSEMBL_REST_SERVER ext = "/lookup/id/%s?content-type=application/json" % (gene_id) try: hash = postgap.REST.get(server, ext) return Gene( name = hash['display_name'], id = hash['id'], chrom = hash['seq_region_name'], tss = int(hash['start']) if hash['strand'] > 0 else int(hash['end']), biotype = hash['biotype'] ) except: return None
4,229
def allCategoriesJSON():
    """ Generates JSON for all categories """
    categories = db_session.query(Category).all()
    return jsonify(categories=[c.serialize for c in categories])
4,230
def get_rrule(rule, since, until): """ Compute an RRULE for the execution scheduler. :param rule: A dictionary representing a scheduling rule. Rules are of the following possible formats (e.g.): {'recurrence': '2 weeks', 'count': 5, 'weekdays': ['SU', 'MO', 'TH']} = run every 2 weeks, 5 times totally, only on sun. mon. or thu. {'count': 1'} = run exactly once, at the `since` time {'rrule': 'RRULE:FREQ=DAILY;INTERVAL=3'} = pass RRULE directly :param since: A datetime string representing the earliest time to schedule :param until: A datetime string representing the latest time to schedule :return: an iCalendar RRULE object """ since = _get_timestamp(since) until = _get_timestamp(until) if rule.get('rrule'): parsed_rule = rrule.rrulestr(rule['rrule'], dtstart=since, cache=True) if not parsed_rule._until: parsed_rule._until = until return parsed_rule if not rule.get('recurrence'): if rule.get('count') == 1: frequency = rrule.DAILY interval = 0 else: return else: interval, recurrence = parse_recurrence(rule['recurrence']) if not recurrence: return freqs = {'sec': rrule.SECONDLY, 'second': rrule.SECONDLY, 'min': rrule.MINUTELY, 'minute': rrule.MINUTELY, 'h': rrule.HOURLY, 'hour': rrule.HOURLY, 'd': rrule.DAILY, 'day': rrule.DAILY, 'w': rrule.WEEKLY, 'week': rrule.WEEKLY, 'mo': rrule.MONTHLY, 'month': rrule.MONTHLY, 'y': rrule.YEARLY, 'year': rrule.YEARLY} frequency = freqs[recurrence] weekdays = None if rule.get('weekdays'): weekdays = _get_weekdays(rule['weekdays']) if not weekdays: return rrule.rrule(freq=frequency, interval=interval, dtstart=since, until=until, count=rule.get('count'), cache=True) count = rule.get('count') rule_set = _get_rule_set_by_weekdays( frequency, interval, since, until, weekdays) return _cap_rule_set_by_occurrence_count(rule_set, count)
4,231
def site_id(request):
    """Site id of the site to test."""
    return request.param if hasattr(request, 'param') else None
4,232
def raise_if(cond: bool, cls: type, msg: str):
    """Raise exception if cond is true."""
    if cond:
        raise cls(msg)
4,233
def foo():
    """Example function"""
    # TO-DO
    raise NotImplementedError
4,234
def parse(limit_string):
    """
    parses a single rate limit in string notation (e.g. '1/second' or '1 per second')

    :param string limit_string: rate limit string using :ref:`ratelimit-string`
    :raise ValueError: if the string notation is invalid.
    :return: an instance of :class:`RateLimitItem`
    """
    return list(parse_many(limit_string))[0]
4,235
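A short usage sketch for parse above, exercising both string notations its docstring mentions; it assumes the surrounding rate-limiting library (this helper looks like it comes from the `limits` package, an assumption) is importable so that parse_many and RateLimitItem exist:

# Both notations from the docstring are accepted
per_second = parse("1/second")
per_minute = parse("10 per minute")
print(per_second, per_minute)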
def change_balance(email): """Change a user's balance.""" if not isinstance(request.json.get('change'), int): abort(400, {'message': 'The change in innopoints must be specified as an integer.'}) user = Account.query.get_or_404(email) if request.json['change'] != 0: new_transaction = Transaction(account=user, change=request.json['change']) db.session.add(new_transaction) try: db.session.commit() except IntegrityError as err: db.session.rollback() log.exception(err) abort(400, {'message': 'Data integrity violated.'}) notify(user.email, NotificationType.manual_transaction, { 'transaction_id': new_transaction.id, }) return NO_PAYLOAD
4,236
def install():
    """ Install the package (in development mode). """
    click.echo("Installing mitoviz...")
    subprocess.check_call(["pip", "install", "-e", "."])
    click.echo("Done.")
4,237
def contact_infectivity_asymptomatic_40x70():
    """
    Real Name: b'contact infectivity asymptomatic 40x70'
    Original Eqn: b'contacts per person normal 40x70*infectivity per contact'
    Units: b'1/Day'
    Limits: (None, None)
    Type: component

    b''
    """
    return contacts_per_person_normal_40x70() * infectivity_per_contact()
4,238
def generate_resource_link(pid, resource_path, static=False, title=None):
    """
    Returns a valid html link to a public resource within an autogenerated instance.

    Args:
        pid: the problem id
        resource_path: the resource path
        static: boolean whether or not it is a static resource
        title: the displayed text. Defaults to the path
    Returns:
        The html link to the resource.
    """
    return '<a target=_blank href="/api/autogen/serve/{}?static={}&pid={}">{}</a>'.format(
        resource_path,
        "true" if static else "false",
        pid,
        resource_path if not title else title
    )
4,239
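A quick call to generate_resource_link above, with a made-up problem id and path, showing the string it builds:

link = generate_resource_link(42, "files/flag.txt", static=True, title="Download")
print(link)
# <a target=_blank href="/api/autogen/serve/files/flag.txt?static=true&pid=42">Download</a>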
def test_df_upload(mock_put, la_uploader):
    """Check DataFrame upload."""
    response = Response()
    response.status_code = 200
    mock_put.return_value = response
    data_path = Path(_TEST_DATA).joinpath("syslog_data.csv")
    data = pd.read_csv(data_path)
    la_uploader.upload_df(data, "test")
4,240
def make_cointegrated(seed, n_samples, gamma):
    """
    cointegrated pair:
        - x0_t = x0_t-1 + gauss[:, 0]
        - x1_t = gamma * x0_t + gauss[:, 1]
    for various gamma.

    cf: Hamilton [19.11.1, 19.11.2]
    """
    np.random.seed(seed)
    x0 = np.random.randn(n_samples).cumsum()
    x1 = gamma * x0 + np.random.randn(n_samples)
    return np.stack([x0, x1], axis=1)
4,241
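A small usage sketch for make_cointegrated above, just to show the output shape and the linear relation between the two series:

import numpy as np

pair = make_cointegrated(seed=42, n_samples=500, gamma=2.0)
print(pair.shape)                       # (500, 2)

# The spread x1 - gamma * x0 should look like plain white noise
spread = pair[:, 1] - 2.0 * pair[:, 0]
print(round(float(np.std(spread)), 1))  # close to 1.0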
def test_process_data_path(tmp_raster_fixture): """ Checks the raster processing for multiple images. """ in_path, _ = tmp_raster_fixture img_file_list = [in_path] feature_list: List[Feature] = [] for img_path in img_file_list: bbox = [2.5, 1.0, 4.0, 5.0] geom = box(*bbox) in_properties = { "up42.data_path": str(Path(*img_path.parts[-2:])), "acquisitionDate": "2018-10-16T10:39:43.431Z", } feature_list.append(Feature(geometry=geom, bbox=bbox, properties=in_properties)) input_fc = FeatureCollection(feature_list) output_fc = RasterSharpener().process(input_fc) # Check that all features are derived assert len(output_fc["features"]) == 1 for feature in output_fc.features: # Check that file paths in metadata are relative feature_file = feature["properties"]["up42.data_path"] assert feature["properties"]["up42.data_path"] assert Path(feature_file).root == "" # Check that metadata is propagated assert feature["properties"]["acquisitionDate"] == "2018-10-16T10:39:43.431Z" # Check that feature outputs exist feature_path = Path("/tmp/output").joinpath(feature_file) assert feature_path.is_file() # Cleanup feature_path.unlink()
4,242
def web_index():
    """Home page."""
    news = db.session.query(HotHomeNews).to_dicts
    home = list()
    hot = list()
    temp = 30
    for index, i in enumerate(news):
        temp -= random.randint(0, 2)
        i['date'] = '2021-04' + '-' + str(temp)
        if i['hot'] == 1:
            hot.append(i)
        else:
            home.append(i)
    return render_template('index.html', hot=hot, home=home)
4,243
def test_parametrize(): """Tests parametrizing a function""" @arg.parametrize(val=arg.val('vals')) def double(val): return val * 2 assert double(vals=[1, 2, 3]) == [2, 4, 6] # This should result in a lazy bind error with pytest.raises(arg.BindError): double(val=1) # Partial runs should be able to ignore parametrization assert double.partial(val=1) == 2
4,244
def F_to_C(Tf):
    """Convert a temperature from Fahrenheit to Celsius."""
    Tc = (Tf-32)*5/9
    return Tc
4,245
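Two quick sanity checks for F_to_C above:

print(F_to_C(32))    # 0.0, freezing point of water
print(F_to_C(212))   # 100.0, boiling point of water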
def generate_mdn_sample_from_ouput(output, test_size,distribution = 'Normal', params = None): """ Using the output layer from the prediction on a fitted mdn model generate test_size number of samples. (Note output corresponds to a one-dimensional output). Parameters ---------- output : array layer of neural network ordered mixture weights (unscaled), variance (unscaled) and means test_size : int number of samples to draw from fitted mdn. deprecated. distribution: string distribution of output. Can be Normal, Gamma or Beta. Returns ---------- result : array sample from mixture distribution. """ ec.check_distribution(distribution) num_components = int(output.shape[1]/3) out_mu = output[:,:num_components] out_sigma = output[:,num_components:2*num_components] out_pi = output[:,2*num_components:] result = np.zeros(output.shape[0]) mu = 0 std = 0 idx = 0 for i,_ in enumerate(result): idx = np.random.choice(num_components, 1, p=out_pi[i]) if(distribution is 'Normal'): mu = out_mu[i,idx] std = np.sqrt(out_sigma[i,idx]) result[i] = mu + np.random.randn()*std elif(distribution is 'Gamma'): alpha = out_mu[i,idx] beta = out_sigma[i,idx] result[i] = np.random.gamma(alpha,1/beta) elif(distribution is 'Beta'): alpha = out_mu[i,idx] beta = out_sigma[i,idx] result[i] = np.random.beta(alpha,beta) elif(distribution is 'Poisson'): rate = out_mu[i,idx] result[i] = np.random.poisson(rate) elif(distribution is 'Binomial'): p = out_mu[i,idx] n = out_sigma[i,idx] result[i] = np.random.binomial(params['binomial_n'],p) else: raise NameError('{} not a distribution'.format(distribution)) return result
4,246
def infer_folding_rates(clusters, activation_energies, prefactors, G, temperatures): """ Takes Arrenius parameters and uses detailed balance to compute folding rates """ print('Inferring unknown folding rates from detailed balance...') Nclusters = len(clusters) folding_rates=np.nan*np.zeros((Nclusters, Nclusters, len(temperatures) )) unfolding_rates = np.nan*np.zeros((Nclusters, Nclusters, len(temperatures))) for b in range(Nclusters): for a in range(Nclusters): unfolding_rates[a, b,:] = prefactors[a,b]*np.exp(-activation_energies[a,b]/temperatures) for t, temp in enumerate(temperatures): if -np.log(unfolding_rates[a,b,t]) < (G[t,b] - G[t,a]): #barrier height is lower than free energy difference...typically this implies Arrhenius approximation is failing unfolding_rates[a,b,t] = np.exp(-( G[t,b] - G[t,a]) ) #Then we use the barrier height folding_rates[b,a,:]= unfolding_rates[a,b,:] * np.exp(G[:,b] - G[:,a]) #detailed balance! return folding_rates, unfolding_rates, temperatures
4,247
def create_cry_nqubit(qc: qiskit.QuantumCircuit, thetas: np.ndarray):
    """Create control Control-RY state

    Args:
        - qc (qiskit.QuantumCircuit): init circuit
        - thetas (np.ndarray): parameters

    Returns:
        - qiskit.QuantumCircuit
    """
    for i in range(0, qc.num_qubits - 1, 2):
        qc.cry(thetas[i], i, i + 1)
    for i in range(1, qc.num_qubits - 1, 2):
        qc.cry(thetas[i], i, i + 1)
    qc.cry(thetas[qc.num_qubits - 1], qc.num_qubits - 1, 0)
    return qc
4,248
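A usage sketch for create_cry_nqubit above, assuming qiskit and numpy are installed; the angle values are arbitrary:

import numpy as np
import qiskit

qc = qiskit.QuantumCircuit(4)
thetas = np.random.uniform(0, 2 * np.pi, 4)  # one angle per qubit

qc = create_cry_nqubit(qc, thetas)
print(qc.draw())  # CRY ladder over pairs (0,1), (2,3), then (1,2), closed by (3,0)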
def get_region(h5_dset, reg_ref_name): """ Gets the region in a dataset specified by a region reference Parameters ---------- h5_dset : h5py.Dataset Dataset containing the region reference reg_ref_name : str / unicode Name of the region reference Returns ------- value : np.ndarray Data specified by the region reference. Note that a squeeze is applied by default. """ warn('pyUSID.io.reg.ref.get_region will be moved to pyNSID in the next ' 'pyUSID version.', FutureWarning) if not isinstance(reg_ref_name, (str, unicode)): raise TypeError('reg_ref_name should be a string') if not isinstance(h5_dset, h5py.Dataset): raise TypeError('h5_dset should be of type h5py.Dataset') # this may raise KeyErrors. Let it reg_ref = h5_dset.attrs[reg_ref_name] return np.squeeze(h5_dset[reg_ref])
4,249
def terminate_simulation_when(reqID, req, line):
    """Function implementing the 'terminate simulation when' statement."""
    makeRequirement(RequirementType.terminateSimulationWhen, reqID, req, line)
4,250
def agent(game, n_ep, n_mcts, max_ep_len, lr, c, gamma, data_size, batch_size, temp, n_hidden_layers, n_hidden_units): """ Outer training loop """ seed_best = None a_best = None episode_returns = [] # storage timepoints = [] # environments env = make_game(game) is_atari = is_atari_game(env) mcts_env = make_game(game) if is_atari else None database = Database(max_size=data_size, batch_size=batch_size) model = Model(env=env, lr=lr, n_hidden_layers=n_hidden_layers, n_hidden_units=n_hidden_units) t_total = 0 # total steps r_best = -np.Inf for ep in range(n_ep): start = time.time() s = env.reset() r2 = 0.0 # Total return counter a_store = [] seed = np.random.randint(1e7) # draw some env seed env.seed(seed) if is_atari: mcts_env.reset() mcts_env.seed(seed) mcts = MCTS(root_index=s, model=model, na=model.action_dim, gamma=gamma) # the object responsible for MCTS searches for t in range(max_ep_len): # MCTS step mcts.search(n_mcts=n_mcts, c=c, env=env, mcts_env=mcts_env) # perform a forward search state, pi, v = mcts.return_results( temp) # extract the root output database.store((state, v, pi)) # Make the true step a = np.random.choice(len(pi), p=pi) a_store.append(a) s1, r, terminal, _ = env.step(a) r2 += r # total number of environment steps (counts the mcts steps) t_total += n_mcts if terminal: break else: mcts.forward(a, s1) # Finished episode episode_returns.append(r2) # store the total episode return timepoints.append( t_total) # store the timestep count of the episode return store_safely(os.getcwd(), 'result', {'r': episode_returns, 't': timepoints}) if r2 > r_best: a_best = a_store seed_best = seed r_best = r2 print( 'Finished episode {}, total return: {}, total time: {} sec'.format( ep, np.round(r2, 2), np.round((time.time() - start), 1))) # Train database.reshuffle() for epoch in range(1): for sb, v_batch, pi_batch in database: model.train(sb, v_batch, pi_batch) # return results return episode_returns, timepoints, a_best, seed_best, r_best
4,251
def butter_bandpass_filter(data, lowcut, highcut, sample_rate, order): """ Bandpass filter the data using Butterworth IIR filters. Two digital Butterworth IIR filters with the specified order are created, one highpass filter for the lower critical frequency and one lowpass filter for the higher critical frequency. Both filters use second-order sections (SOS). Then first the highpass filter is applied on the given data and on its result the lowpass filter is applied. Both filters are applied as forward-backward digital filters to correct the non-linear phase. Parameters ---------- data : ndarray The data to be filtered; format (n_samples,) lowcut : float The lower critical frequency highcut : float The higher critical frequency sample_rate : float The sampling rate of the given data order : int The order of the used filters Returns ------- data : ndarray the bandpass filtered data; format (n_samples,) """ sos_high = butter(order, lowcut, btype='hp', fs=sample_rate, output='sos') sos_low = butter(order, highcut, btype='lp', fs=sample_rate, output='sos') return sosfiltfilt(sos_low, sosfiltfilt(sos_high, data, padlen=3 * order), padlen=3 * order)
4,252
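A short usage sketch for butter_bandpass_filter above (its body relies on scipy.signal.butter and sosfiltfilt); the signal here is synthetic:

import numpy as np
from scipy.signal import butter, sosfiltfilt  # used inside butter_bandpass_filter

fs = 250.0                                  # sampling rate in Hz
t = np.arange(0, 10, 1 / fs)
# 1 Hz drift + 10 Hz signal of interest + 60 Hz interference
raw = np.sin(2 * np.pi * 1 * t) + np.sin(2 * np.pi * 10 * t) + 0.5 * np.sin(2 * np.pi * 60 * t)

clean = butter_bandpass_filter(raw, lowcut=5.0, highcut=30.0, sample_rate=fs, order=4)
print(raw.shape, clean.shape)               # both (2500,)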
def _check_tensor_info(*tensors, size, dtype, device): """Check if sizes, dtypes, and devices of input tensors all match prescribed values.""" tensors = list(filter(torch.is_tensor, tensors)) if dtype is None and len(tensors) == 0: dtype = torch.get_default_dtype() if device is None and len(tensors) == 0: device = torch.device("cpu") sizes = [] if size is None else [size] sizes += [t.shape for t in tensors] dtypes = [] if dtype is None else [dtype] dtypes += [t.dtype for t in tensors] devices = [] if device is None else [device] devices += [t.device for t in tensors] if len(sizes) == 0: raise ValueError(f"Must either specify `size` or pass in `W` or `H` to implicitly define the size.") if not all(i == sizes[0] for i in sizes): raise ValueError(f"Multiple sizes found. Make sure `size` and `W` or `H` are consistent.") if not all(i == dtypes[0] for i in dtypes): raise ValueError(f"Multiple dtypes found. Make sure `dtype` and `W` or `H` are consistent.") if not all(i == devices[0] for i in devices): raise ValueError(f"Multiple devices found. Make sure `device` and `W` or `H` are consistent.") # Make sure size is a tuple (not a torch.Size) for neat repr-printing purposes. return tuple(sizes[0]), dtypes[0], devices[0]
4,253
def generate_dataset(df, n_past, n_future): """ df : Dataframe n_past: Number of past observations n_future: Number of future observations Returns: X: Past steps Y: Future steps (Sequence target) Z: Sequence category""" # Split the dataframe with respect to IDs series_ids = dict(tuple(df.groupby('ID'))) # Dict of ids as keys and x,y,id as values train_data, target_data, target_category = list(), list(), list() for id in series_ids.keys(): X, Y, Z= list(), list(), list() # Drop the column ids and convert the pandas into arrays series = series_ids[id].drop(columns = ['ID']).to_numpy() for window_start in range(len(series)): past_end = window_start + n_past future_end = past_end + n_future if not future_end > len(series): # slicing the past and future parts of the window past, future = series[window_start:past_end, :], series[past_end:future_end, :] X.append(past) Y.append(future) # For each sequence length set target category Z.append(int(id)) train_data.extend(np.array(X)) target_data.extend(np.array(Y)) target_category.extend(np.array(Z)) return train_data, target_data, target_category
4,254
def test_build_feedstock_default(mocker): """ Tests that the default arguments for 'build_feedstock' generate the correct 'conda_build.api.build' input args. """ mocker.patch( 'os.getcwd', return_value="/test/test_recipe" ) mocker.patch( 'os.path.exists', return_value=False ) expect_recipe = os.path.join(os.getcwd(),'recipe') expect_config = {'variant_config_files' : [utils.DEFAULT_CONDA_BUILD_CONFIG], 'output_folder' : utils.DEFAULT_OUTPUT_FOLDER} mocker.patch( 'conda_build.api.build', side_effect=(lambda x, **kwargs: helpers.validate_conda_build_args(x, expect_recipe=expect_recipe, expect_config=expect_config, **kwargs)) ) arg_input = [] assert build_feedstock.build_feedstock(arg_input) == 0
4,255
def safe_mkdir(path):
    """
    Create a directory if there isn't one already

    Deprecated in favor of os.makedirs(path, exist_ok=True)
    """
    try:
        os.mkdir(path)
    except OSError:
        pass
4,256
def setup_exps_rllib(flow_params, n_cpus, n_rollouts, reward_specification=None, policy_graphs=None, policy_mapping_fn=None, policies_to_train=None): """Return the relevant components of an RLlib experiment. Parameters ---------- flow_params : dict flow-specific parameters (see flow/utils/registry.py) n_cpus : int number of CPUs to run the experiment over n_rollouts : int number of rollouts per training iteration policy_graphs : dict, optional TODO policy_mapping_fn : function, optional TODO policies_to_train : list of str, optional TODO Returns ------- str name of the training algorithm str name of the gym environment to be trained dict training configuration parameters """ from ray import tune from ray.tune.registry import register_env try: from ray.rllib.agents.agent import get_agent_class except ImportError: from ray.rllib.agents.registry import get_agent_class horizon = flow_params['env'].horizon alg_run = "PPO" agent_cls = get_agent_class(alg_run) config = deepcopy(agent_cls._default_config) config["seed"] = 17 config["num_workers"] = 7 #n_cpus - 1 config["train_batch_size"] = horizon * n_rollouts config["sgd_minibatch_size"] = min(16 * 1024, config["train_batch_size"]) config["gamma"] = GAMMA # discount rate #fcnet_hiddens = [int(sys.argv[5])] * int(sys.argv[6]) config["model"].update({"fcnet_hiddens": tune.grid_search([[], [4, 4], [16, 16], [64, 64], [256, 256]])}) #config["model"].update({"fcnet_hiddens": tune.grid_search([[4], [8], [8, 8], [16, 16], [64, 64]])}) #[32, 32, 32] config["use_gae"] = True config["lambda"] = 0.97 config["kl_target"] = 0.02 config["vf_clip_param"] = 10000 config["num_sgd_iter"] = 10 config["horizon"] = horizon config["framework"] = "torch" config["callbacks"] = RewardCallback config["log_level"] = "ERROR" # save the flow params for replay flow_json = json.dumps( flow_params, cls=FlowParamsEncoder, sort_keys=True, indent=4) config['env_config']['flow_params'] = flow_json config['env_config']['run'] = alg_run # multiagent configuration if policy_graphs is not None: config['multiagent'].update({'policies': policy_graphs}) if policy_mapping_fn is not None: config['multiagent'].update( {'policy_mapping_fn': tune.function(policy_mapping_fn)}) if policies_to_train is not None: config['multiagent'].update({'policies_to_train': policies_to_train}) create_env, gym_name = make_create_env(params=flow_params, reward_specification=reward_specification) # Register as rllib env register_env(gym_name, create_env) return alg_run, gym_name, config
4,257
def upsampling2D(
    inputs: Optional[tf.Tensor] = None,
    size: Tuple[int, int] = (2, 2),
    mode: Literal['pad', 'nearest', 'bilinear'] = 'nearest',
    name: Optional[str] = None,
) -> Union[tf.Tensor, Resampling2D]:
    """ Upsampling"""
    layer = Resampling2D(size, mode, name=name)
    if inputs is None:
        return layer
    return layer(inputs)
4,258
def sns_msg_body_user_notify_topic(message, autoscale_group, instance_id, details=None): """ Purpose: To prepare dict with correct values for user topic Parameters: message, group name, instance_id, details Returns: dict Raises: """ # Constructing a JSON object as per AWS SNS requirement sns_message = { "description": message, "autoscale_group": autoscale_group, "instance_id": instance_id, "details": details } logger.debug("Prepared message body: " + json.dumps(sns_message, separators=(',', ':'))) return sns_message
4,259
def moderator_name():
    """Return the name of the test game moderator."""
    return 'Hannah'
4,260
def sync_model(sender, app_config, **kwargs):
    """Push django model data to google fusion table."""
    if django.apps.apps.ready:
        for model_string in app_settings.MODELS_TO_SYNC:
            model_class = django.apps.apps.get_model(model_string)
            signals.post_save.connect(push_data, sender=model_class)
4,261
def course(x=0, y=0): """ Faire avancer le cavalier autant que possible. """ HISTORIQUE.append((x, y)) last_move = (0, 0) afficher(last_move) while True: (x, y) = HISTORIQUE[-1] poss = proposer(x, y) if poss == []: input("BLOQUE ! Seul choix possible : arrière." + "\n" * 13) (dx, dy) = (0, 0) # on est coincé, donc : retour en arrière else: presenter(poss, last_move) try: (dx, dy) = choisir(poss, last_move) except StopIteration: break if (dx, dy) == (0, 0): # Retour en arrière if len(HISTORIQUE) > 1: # Seulement si c'est possible ! rem_x, rem_y = HISTORIQUE.pop() new_x = rem_x - HISTORIQUE[-1][0] new_y = rem_y - HISTORIQUE[-1][1] last_move = (new_x, new_y) else: HISTORIQUE.append((x + dx, y + dy)) last_move = (0, 0) afficher(last_move) print("Fin")
4,262
def delete_downloads():
    """Delete all downloaded examples to free space or update the files."""
    shutil.rmtree(geoist.EXAMPLES_PATH)
    os.makedirs(geoist.EXAMPLES_PATH)
    return True
4,263
def test_raises_when_not_on_correct_branch(git_server, qisrc_action, record_messages): """ Test Raises When Not On Correct Branch """ git_server.create_repo("foo") git_server.switch_manifest_branch("devel") git_server.change_branch("foo", "devel") qisrc_action("init", git_server.manifest_url, "--branch", "devel") git_worktree = TestGitWorkTree() foo_proj = git_worktree.get_git_project("foo") git = TestGit(foo_proj.path) git.checkout("-B", "perso") error = qisrc_action("rebase", "--branch", "master", "--all", raises=True) assert " * foo" in error assert record_messages.find("skipped")
4,264
def reversebits2(max_bits, num):
    """ Like reversebits1, plus small optimization regarding bit index calculation. """
    rev_num = 0
    high_shift = max_bits - 1
    low_shift = 0
    for _ in range(0, (max_bits + 1) // 2):
        low_bit = (num & (1 << low_shift)) >> low_shift
        high_bit = (num & (1 << high_shift)) >> high_shift
        rev_num |= low_bit << high_shift
        rev_num |= high_bit << low_shift
        high_shift -= 1
        low_shift += 1
    return rev_num
4,265
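Two quick checks for reversebits2 above:

print(bin(reversebits2(8, 0b00000001)))  # 0b10000000 (128)
print(bin(reversebits2(8, 0b11010010)))  # 0b1001011  (75; bin() drops the leading zero)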
def _get_builder_cls( ds_to_build: str, ) -> Tuple[Type[tfds.core.DatasetBuilder], Dict[str, str]]: """Infer the builder class to build. Args: ds_to_build: Dataset argument. Returns: builder_cls: The dataset class to download and prepare kwargs: """ # 1st case: Requested dataset is a path to `.py` script path = _search_script_path(ds_to_build) if path is not None: # Dynamically load user dataset script with tfds.core.utils.add_sys_path(path.parent): builder_cls = tfds.core.community.builder_cls_from_module(path.stem) return builder_cls, {} # 2nd case: Dataset is registered through imports. # Extract `name/config:version` extract_name_and_kwargs = tfds.core.naming.dataset_name_and_kwargs_from_name_str builder_name, builder_kwargs = extract_name_and_kwargs(ds_to_build) builder_cls = tfds.builder_cls(builder_name) builder_kwargs = typing.cast(Dict[str, str], builder_kwargs) return builder_cls, builder_kwargs
4,266
def stop_logging() -> None:
    """
    Stop logging output to file

    This function will clear the `atomica_file_handler` and close the last-opened log file.
    If file logging has not started, this function will return normally without raising an error
    """
    for handler in logger.handlers:
        if handler.name == "atomica_file_handler":
            handler.close()
            logger.removeHandler(handler)
            # Don't terminate the loop; if by some chance there is more than one handler
            # (not supposed to happen though) then we would want to close them all
4,267
def quick_sort(data, start, end, draw_data, time_delay): """Quicksort function with a modification that allows for drawing of the sorted data onto the canvas Color information: - rectangles that are swapped are light green, - to the left and to the right of the pivot in the partitioned list, rectangles are yellow, - rectangle that represents the pivot is red. Args: data (list): list of random data to sort start (int): index of the first item from which we want to sort end (int): index of the last item we want to sort draw_data (function): function that allows for data drawing onto the canvas time_delay (float): an amount of time that the app will wait for between the next sort Returns: int: index of the number by which the list will be split in quicksort """ if start < end: pivot_position = partition(data, start, end, draw_data, time_delay) draw_data( data, [ col.YELLOW if x >= start and x < pivot_position else col.RED if x == pivot_position else col.YELLOW if x > pivot_position and x <= end else col.BLUE for x in range(len(data)) ], ) sleep(time_delay) quick_sort(data, start, pivot_position - 1, draw_data, time_delay) draw_data( data, [ col.YELLOW if x >= pivot_position + 1 and x < end else col.RED if x == pivot_position else col.YELLOW if x < pivot_position else col.BLUE for x in range(len(data)) ], ) sleep(time_delay) quick_sort(data, pivot_position + 1, end, draw_data, time_delay) # draw_data(data, [col.BLUE for x in range(len(data))]) draw_data(data, list(map(lambda x: col.BLUE, data)))
4,268
def keypoints_to_bbox(keypoints_list, image): """Prepare bboxes from keypoints for object tracking. args: keypoints_list (np.ndarray): trtpose keypoints list return: bboxes (np.ndarray): bbox of (xmin, ymin, width, height) """ bboxes = [] img_h, img_w = image.shape[:2] for idx, keypoints in enumerate(keypoints_list): keypoints = np.where(keypoints[:, 1:] !=0, keypoints[:, 1:], np.nan) keypoints[:, 0] *= img_w keypoints[:, 1] *= img_h xmin = np.nanmin(keypoints[:,0]) ymin = np.nanmin(keypoints[:,1]) xmax = np.nanmax(keypoints[:,0]) ymax = np.nanmax(keypoints[:,1]) bbox = expand_bbox(xmin, xmax, ymin, ymax, img_w, img_h) # discard bbox with width and height == 0 if bbox[2] < 1 or bbox[3] < 1: continue bboxes.append(bbox) return np.asarray(bboxes)
4,269
def view(): """ WIP: View admins. """ if current_user.is_admin(): admins = UserMetadata.select().where(UserMetadata.key == 'admin') postcount = SubPost.select(SubPost.uid, fn.Count(SubPost.pid).alias('post_count')).group_by(SubPost.uid).alias( 'post_count') commcount = SubPostComment.select(SubPostComment.uid, fn.Count(SubPostComment.cid).alias('comment_count')).group_by( SubPostComment.uid).alias('j2') users = User.select(User.name, User.status, User.uid, User.joindate, postcount.c.post_count.alias('post_count'), commcount.c.comment_count) users = users.join(postcount, JOIN.LEFT_OUTER, on=User.uid == postcount.c.uid) users = users.join(commcount, JOIN.LEFT_OUTER, on=User.uid == commcount.c.uid) users = users.where(User.uid << [x.uid for x in admins]).order_by(User.joindate.asc()).dicts() return render_template('admin/users.html', users=users, admin_route='admin.view') else: abort(404)
4,270
def seamAnalysis(analysis, img1, img2, mask=None, linktype=None, arguments=dict(), directory='.'):
    """
    Perform SIFT regardless of the global change status.
    If a neighbor mask is constructed, indicating the seams can be calculated, then mark as not Global.
    :param analysis:
    :param img1:
    :param img2:
    :param mask:
    :param linktype:
    :param arguments:
    :param directory:
    :return:
    """
    forcedSiftAnalysis(analysis, img1, img2, mask=mask, linktype=linktype,
                       arguments=arguments, directory=directory)
    if 'neighbor mask' in arguments:
        analysis['global'] = 'no'
4,271
def get_market_offers(session, ids, base_market_url=BASE_MARKET_URL): """\nMain function for interaction with this library. \nProvided a sequence of Character Ids, returns a dictionary of offers for each. \ Requires a session which has already authenticated with Urban Rivals. \nOptional: provide a base market URL for proxy. Must end with a "?" \ Ex: "http://example.com?" >>>get_market_offers(session, [1400, 1423, 1764]) {1400: Offer, 1423: Offer, 1764: Offer} >>>get_market_offers(session, ["1301", "1543"]) {"1301": Offer, "1543": Offer} """ if len(ids) < 1: raise ValueError("Ids cannot be empty") if not base_market_url.endswith("?"): raise ValueError("URL must end with a question mark") market = { char_id: _html_to_soup( session.get( _get_offer_list(char_id, base_market_url) )) for char_id in map(_clean_input, ids) } return {char_id :_find_offers(market[char_id]) for char_id in map(_clean_input, ids) }
4,272
def band_listing(request):
    """A view of all bands."""
    bands = Band.objects.all()
    return render(request, 'bands/band_listing.html', {'bands': bands})
4,273
def test_noisy():
    """
    tests function to add noise to a colour image.
    """
    timage = np.zeros((10, 10, 1), np.uint8)
    image = utilities.noisy_image(timage)
    assert image.shape == (10, 10, 1)
4,274
def check_mismatched_bracket_type(path: str) -> Optional[BracketErrorType]: """ Check for miss matched brackets :param path: path to file :return: Type of miss match or None if there is none """ file_as_string = utils.read_file(path) brackets_count = utils.count_brackets(file_as_string) normal_brackets_are_even = brackets_count[0] % 2 == 0 square_brackets_are_even = brackets_count[1] % 2 == 0 curly_brackets_are_even = brackets_count[2] % 2 == 0 if not normal_brackets_are_even and not square_brackets_are_even: return BracketErrorType.NORMAL_SQUARE elif not normal_brackets_are_even and not curly_brackets_are_even: return BracketErrorType.NORMAL_CURLY elif not curly_brackets_are_even and not square_brackets_are_even: return BracketErrorType.CURLY_SQUARE return None
4,275
def create_initiator(cluster: str, headers_inc: str) -> None: """Create a Initiator Group""" print() show_svm(cluster, headers_inc) print() svm_name = input( "Enter the name of the SVM on which the volume needs to be created:- ") igroup_name = input( "Enter the name of the Igroup that you would like to create : ") initiator_name = input( "Enter the name of the Initiator that you would like to add in the InitiatorGroup :") os_type2 = input("Enter the OS-TYPE :") payload2 = { "initiators": [ { "name": initiator_name } ], "name": igroup_name, "os_type": os_type2, "svm": { "name": svm_name } } url = "https://{}/api/protocols/san/igroups".format(cluster) try: response = requests.post( url, headers=headers_inc, json=payload2, verify=False) except requests.exceptions.HTTPError as err: print(str(err)) sys.exit(1) except requests.exceptions.RequestException as err: print(str(err)) sys.exit(1) url_text = response.json() if 'error' in url_text: print(url_text) sys.exit(1) print("Inititor group created successfully...")
4,276
def update(ctx, ticket_id, target, amount, account): """ Update staking time of a voting ballot. Changes the stake-lock duration of a voting ticket. Can update full amount of a ticket or a partial amount. If partial, result is two separate tickets, putting optional AMOUNT on the new ticket/time-target while the old ticket retains the remainder. This command can also be used to "free" tickets by updating the time target to "liquid" (except for fully-charged lock_forever tickets). EXAMPLE 1: If ticket 1.18.xxx has 1000 KES locked for 180 days, you could upgrade the entire ticket to a lock_forever ticket with: uptick ticket update 1.18.xxx lock_forever EXAMPLE 2: If ticket 1.18.yyy has 1500 KES locked for 180 days, you could free a third of it with: uptick ticket update --amount 500 KES 1.18.yyy liquid The release of KES will follow a power-down schedule. """ amount = Amount(*amount) if amount[0] is not None else None ctx.blockchain.blocking = True tx = ctx.blockchain.update_voting_ticket( ticket_id, target, amount, account ) tx.pop("trx", None) print_tx(tx) results = tx.get("operation_results", {}) if results: results = results[0][1] updates = results['updated_objects'] creates = results['new_objects'] removes = results['removed_objects'] monitor = updates + creates ticketword={True:"voting tickets", False:"voting ticket"} if updates: print("Updated existing %s: "%ticketword[len(updates)>1], end='') print(*updates, sep=', ') if creates: print("Created new %s: "%ticketword[len(creates)>1], end='') print(*creates, sep=', ') if removes: print("Removed %s: "%ticketword[len(removes)>1], end='') print(*removes, sep=', ') if monitor: print("Monitor your %s with: uptick info "%ticketword[len(monitor)>1], end='') print(*monitor, sep=' ')
4,277
def get_rank(
        day: int = day_idx,
        year: int = year
) -> Union[None, "RankInfo"]:
    """
    Returns the rank info for the specified day and year.

    Arguments
    ---------
    day -- The day to get the rank for.
    year -- The year to get the rank for.

    Returns
    -------
    The completion time and rank for each part of the specified day,
    or None if no entry exists for that day.
    """
    # Get the leaderboard ranking
    r = requests.get(
        f'https://adventofcode.com/{year}/leaderboard/self',
        headers=headers,
        cookies=cookies
    )
    data = r.text

    # Parse for the time/rank
    data = data.replace('&gt;', '>')
    ranks = re.findall(
        r'(\d+) +(\d\d:\d\d:\d\d|>24h) +(\d+) +(\d+)( +(\d\d:\d\d:\d\d|>24h) +(\d+) +(\d+))?',
        data
    )
    rank_info = [t for t in ranks if t[0] == str(day)]
    if rank_info:
        rank_info = rank_info[0]
    else:
        return None

    # Reformat and grab the results
    time_1, rank_1 = rank_info[1:3]
    time_2, rank_2 = rank_info[5:7]
    if rank_1:
        rank_1 = int(rank_1)
    if rank_2:
        rank_2 = int(rank_2)
    return RankInfo(time_1, rank_1, time_2, rank_2)
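# Hedged sketch (not from the original project): the leaderboard row below is
# made up, shown only to illustrate what the regular expression in get_rank
# extracts once '&gt;' has been replaced by '>'.
def _example_rank_parse() -> None:
    sample_row = "  7   00:12:34  123   45   01:02:03   67   89"
    matches = re.findall(
        r'(\d+) +(\d\d:\d\d:\d\d|>24h) +(\d+) +(\d+)( +(\d\d:\d\d:\d\d|>24h) +(\d+) +(\d+))?',
        sample_row
    )
    # matches[0][0] is the day, [1:3] are part-one time/rank, [5:7] part-two time/rank.
    print(matches[0][0], matches[0][1:3], matches[0][5:7])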
4,278
def _check_flags(sorted_seq_to_filepath):
  """Ensure regional indicators are only in sequences of one or two, and never mixed."""
  for seq, fp in sorted_seq_to_filepath.items():
    have_reg = None
    for cp in seq:
      is_reg = _is_regional_indicator(cp)
      if have_reg is None:
        have_reg = is_reg
      elif have_reg != is_reg:
        print('mix of regional and non-regional in %s' % fp, file=sys.stderr)
    if have_reg and len(seq) > 2:
      # We provide dummy glyphs for regional indicators, so there are sequences
      # with single regional indicator symbols.
      print('regional indicator sequence length != 2 in %s' % fp, file=sys.stderr)
4,279
def get_taste(dm): """ Get the classification of a matrix defining a tangent vector field of the form: | R | t | | - - - | | 0 | 0 | :param dm: input tangent matrix :return: number from 1 to 6 corresponding to taste. see randomgen_linear_by_taste. """ rot = dm[:2, :2] v, w = np.linalg.eig(rot) if v[0].imag < np.spacing(0) and v[1].imag < np.spacing(0): # Eigenvalues both real: l1 = v[0].real l2 = v[1].real if l1 > 0 and l2 > 0: # Taste 1 return 1 elif l1 < 0 and l2 < 0: # Taste 2 return 2 else: # Taste 3 return 3 else: # Complex conjugate eigenvalues if v[0].real > np.spacing(0): # Taste 4 return 4 elif v[0].real < np.spacing(0): # Taste 5 return 5 else: # Taste 6 - never get there in practice. return 6
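# Hedged example (not part of the original module): build two tangent matrices of
# the documented block form |R|t; 0|0| and classify them with get_taste.
def _example_get_taste() -> None:
    # Real positive eigenvalues (2 and 3) -> taste 1.
    dm_node = np.array([[2.0, 0.0, 1.0],
                        [0.0, 3.0, -1.0],
                        [0.0, 0.0, 0.0]])
    # Complex-conjugate eigenvalues 1 +/- 1j with positive real part -> taste 4.
    dm_spiral = np.array([[1.0, -1.0, 0.5],
                          [1.0, 1.0, 0.0],
                          [0.0, 0.0, 0.0]])
    print(get_taste(dm_node), get_taste(dm_spiral))  # expected: 1 4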
4,280
def wait_for_sge_jobs(jids, wait_timeout=None, run_timeout=None):
    """
    Wait for all sge job ids {jids} to complete before exiting.
    Return sge job ids that have been killed by qdel.

    If wait_timeout is set, qdel all jobs regardless of job status after
    {wait_timeout} seconds have passed.
    If wait_timeout is None, jobs can stay queued (qw) or held for a long
    time when the cluster is busy. If SGE died and was restarted, jobs will
    no longer be active and wait_for_sge_jobs should be OK to exit;
    however, in this case, upstream calls may not be aware that jobs have
    not completed.

    If run_timeout is set, qdel a job after it has been running for
    {run_timeout} seconds.
    If run_timeout is None, jobs can run forever unless wait_timeout is set.

    Note that if both wait_timeout and run_timeout are set, a job is
    qdel-ed when the earliest timeout is reached.

    Parameters:
      jids - sge job ids that we are waiting for
      wait_timeout - maximum time in seconds to wait for sge jobs,
      regardless of their statuses; they are qdel-ed otherwise.
      If None, no cap.
      run_timeout - maximum time in seconds that an sge job can be running,
      not counting qw or hold time; it is qdel-ed otherwise.
      If None, no cap.
    """
    count = 0
    check_sge_every_n_seconds = 10  # check sge every n seconds.
    time_passed = 0
    runtime_passed = {jid: 0 for jid in jids}
    killed_jobs = []  # jobs that have been killed.
    while True:
        active_d = get_active_sge_jobs()
        not_done_jids = list(set(jids).intersection(set(active_d.keys())))
        if len(not_done_jids) != 0:
            # some sge jobs are still running or qw, or held
            time.sleep(check_sge_every_n_seconds)
            time_passed += check_sge_every_n_seconds
            count += 1
            if count % 100 == 0:
                logging.debug("Waiting for sge job to complete: %s.",
                              ",".join(not_done_jids))

            if wait_timeout is not None and time_passed >= wait_timeout:
                kill_sge_jobs(jids=not_done_jids)
                killed_jobs.extend(not_done_jids)
                break

            if run_timeout is not None:
                # update runtime_passed
                for jid in not_done_jids:
                    if active_d[jid].startswith('r'):
                        runtime_passed[jid] += check_sge_every_n_seconds

                to_kill_jids = [jid for jid in not_done_jids
                                if runtime_passed[jid] >= run_timeout]
                kill_sge_jobs(jids=to_kill_jids)
                killed_jobs.extend(to_kill_jids)
        else:
            break

    return list(set(killed_jobs))
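# Hedged usage sketch (the job ids are made up and a real SGE cluster backing
# get_active_sge_jobs/kill_sge_jobs is assumed): cap total waiting at one hour
# and per-job run time at ten minutes, then report anything that was qdel-ed.
def _example_wait_for_sge_jobs() -> None:
    killed = wait_for_sge_jobs(["1234567", "1234568"],
                               wait_timeout=3600, run_timeout=600)
    if killed:
        logging.warning("qdel-ed jobs: %s", ",".join(killed))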
4,281
def _check_removal_required(submission: Submission, cfg: Config) -> Tuple[bool, bool]: """ Check whether the submission has to be removed and whether this is reported. Note that this function returns a Tuple of booleans, where the first is to signify whether the submission is to be removed and the latter whether a relevant report was issued for this decision. """ for item in submission.user_reports: if item[0] and any( reason in item[0] for reason in ( reports.original_post_deleted_or_locked, reports.post_violates_rules, ) ): return True, True linked_submission = cfg.r.submission(submission.id_from_url(submission.url)) if is_removed(linked_submission): return True, False return False, False
4,282
def subjects(request, unique_id, form=None):
    """
    Lists all the subjects of a classroom; subjects can be added by admins
    """
    classroom = get_object_or_404(Classroom, unique_id=unique_id)

    # querysets
    members = classroom.members.all()
    subjects = Subject.objects.filter(classroom=classroom)
    admin_check = classroom.special_permissions.filter(username=request.user.username).exists()

    # Admins can add a subject and assign a teacher to it
    if admin_check and request.method == "POST":
        form = SubjectForm(request.POST)
        teacher = get_object_or_404(User, username=request.POST.get('teacher'))
        if form.is_valid():
            subject = form.save(commit=False)
            subject.classroom = classroom
            subject.teacher = teacher
            subject.save()
            subject.upload_permission.add(teacher)
            recipients = User.objects.filter(
                username__in=classroom.members.values_list('username', flat=True))
            url = reverse('subjects', kwargs={'unique_id': classroom.unique_id})
            notify.send(sender=request.user,
                        verb=f"subject {subject.subject_name} added in {classroom.class_name}",
                        recipient=recipients, url=url)
            messages.add_message(request, messages.INFO,
                                 f"A new Subject {subject.subject_name} added")
            classroom.teacher.add(teacher)
            return redirect(url)
    else:
        form = SubjectForm()

    params = {
        'subjects': subjects,
        'form': form,
        'classroom': classroom,
        'is_admin': admin_check,
        'members': members
    }
    return render(request, 'subjects_list.html', params)
4,283
def retrieve_settings(skill_id: str) -> JSONStructure:
    """Retrieves a skill's settings by leveraging the mycroft-api skill

    Send a `mycroft.api.skill_settings` message and wait for the
    `mycroft.api.skill_settings.answer` message to appear on the bus.

    :param skill_id: Skill ID to retrieve the settings
    :type skill_id: str
    :return: Return the sanitized skill settings
    :rtype: JSONStructure
    """
    status_code: int = status.HTTP_400_BAD_REQUEST
    msg: str = "unable to retrieve skill settings"
    try:
        skills: Skills = retrieve_list()
        for key in skills["results"]:
            if skills["results"][key]['id'] == skill_id:
                payload: Dict = {
                    "type": "mycroft.api.skill_settings",
                    "data": {
                        "app_key": settings.app_key,
                        "skill": skill_id
                    }
                }
                info: JSONStructure = ws_send(
                    payload, "mycroft.api.skill_settings.answer")
                if requirements():
                    if info["context"]["authenticated"]:
                        return sanitize({"results": info["data"]})
                    status_code = status.HTTP_401_UNAUTHORIZED
                    msg = "unable to authenticate with mycroft-api skill"
                    raise Exception
                status_code = status.HTTP_401_UNAUTHORIZED
                msg = "mycroft-api skill is not installed on mycroft core"
                raise Exception
        status_code = status.HTTP_404_NOT_FOUND
        msg = f"skill {skill_id} not found"
        raise Exception
    except Exception as err:
        raise HTTPException(
            status_code=status_code,
            detail=msg) from err
4,284
def make_project(alias='project', root=None, **kwargs): """Initialize a project for testing purposes The initialized project has a few operations and a few jobs that are in various points in the workflow defined by the project. """ init(alias=alias, root=root, template='testing') project = signac.init_project(name=alias, root=root) signac.testing.init_jobs(project, **kwargs) return project
4,285
def _get_sensors_data(driver_info): """Get sensors data. :param driver_info: node's driver info :raises: FailedToGetSensorData when getting the sensor data fails. :returns: returns a dict of sensor data group by sensor type. """ try: ipmicmd = ipmi_command.Command(bmc=driver_info['address'], userid=driver_info['username'], password=driver_info['password']) ret = ipmicmd.get_sensor_data() except Exception as e: LOG.error(_LE("IPMI get sensor data failed for node %(node_id)s " "with the following error: %(error)s"), {'node_id': driver_info['uuid'], 'error': e}) raise exception.FailedToGetSensorData( node=driver_info['uuid'], error=e) if not ret: return {} sensors_data = {} for reading in ret: # ignore the sensor data which has no sensor reading value if not reading.value: continue sensors_data.setdefault( reading.type, {})[reading.name] = { 'Sensor Reading': '%s %s' % (reading.value, reading.units), 'Sensor ID': reading.name, 'States': str(reading.states), 'Units': reading.units, 'Health': str(reading.health)} return sensors_data
4,286
def dominates(lhs, rhs): """Weak strict domination relation: lhs =] rhs and lhs [!= rhs.""" lhs_rhs = try_decide_less(lhs, rhs) rhs_lhs = try_decide_less(rhs, lhs) return rhs_lhs is True and lhs_rhs is False
4,287
def complex_to_xy(complex_point):
    """Turn a complex point (x + yj) into a Cartesian point [x, y]."""
    xy_point = [complex_point.real, complex_point.imag]
    return xy_point
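# Quick sanity check (not part of the original module): the real and imaginary
# parts of 3+4j become the x and y coordinates.
assert complex_to_xy(3 + 4j) == [3.0, 4.0]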
4,288
def test_ap_interworking_element_update(dev, apdev): """Dynamic Interworking element update""" bssid = apdev[0]['bssid'] params = hs20_ap_params() params['hessid'] = bssid hapd = hostapd.add_ap(apdev[0], params) dev[0].hs20_enable() dev[0].scan_for_bss(bssid, freq="2412") bss = dev[0].get_bss(bssid) logger.info("Before update: " + str(bss)) if '6b091e0701020000000300' not in bss['ie']: raise Exception("Expected Interworking element not seen before update") # Update configuration parameters related to Interworking element hapd.set('access_network_type', '2') hapd.set('asra', '1') hapd.set('esr', '1') hapd.set('uesa', '1') hapd.set('venue_group', '2') hapd.set('venue_type', '8') if "OK" not in hapd.request("UPDATE_BEACON"): raise Exception("UPDATE_BEACON failed") dev[0].request("BSS_FLUSH 0") dev[0].scan_for_bss(bssid, freq="2412", force_scan=True) bss = dev[0].get_bss(bssid) logger.info("After update: " + str(bss)) if '6b09f20208020000000300' not in bss['ie']: raise Exception("Expected Interworking element not seen after update")
4,289
def setup_mock_accessory(controller): """Add a bridge accessory to a test controller.""" bridge = Accessories() accessory = Accessory.create_with_info( name="Koogeek-LS1-20833F", manufacturer="Koogeek", model="LS1", serial_number="12345", firmware_revision="1.1", ) accessory.aid = 1 service = accessory.add_service(ServicesTypes.LIGHTBULB) on_char = service.add_char(CharacteristicsTypes.ON) on_char.value = 0 bridge.add_accessory(accessory) return controller.add_device(bridge)
4,290
def match_array_placeholder(loc, term, element): """Determine if the JSPEC array placeholder matches the JSON element. Args: loc (str): The current location in the JSON term (JSPECArrayPlaceholder): The JSPEC array placeholder. element (obj): The Python native object representing a JSON element Returns: Result: The result of whether the JSPEC array placeholder matches the JSON element """ if isinstance(element, list): return GoodMatch() return BadMatch(loc, "expected an array")
4,291
def get_mask_index(timeDict, mask='Spine', use_B=False, noise_th=None):
    """

    :param timeDict: timeDict to use
    :param mask: options are 'Spine' and 'Dendrite'
    :param use_B: if True, use the 'B' variants (MasksB, excludeIndexB, TCNoiseB)
    :param noise_th: if None, return all mask indexes;
                     if a float, return only masks whose mean noise is below the threshold
    :return: index of masks
    """
    if use_B:
        b = 'B'
    else:
        b = ''
    masks = timeDict['Masks' + b]
    exclude = timeDict['excludeIndex' + b]
    indexs_all = np.where(masks.MaskType == mask)[0]
    indexs_good = np.setdiff1d(indexs_all, exclude)
    if noise_th is not None:
        noise = np.nanmean(timeDict['TCNoise' + b], axis=1)
        good_noise = np.where(noise < noise_th)[0]
        return np.intersect1d(indexs_good, good_noise)
    else:
        return indexs_good
4,292
def coverageSection(*coverItems): """Combine multiple coverage items into a single decorator. Args: *coverItems ((multiple) :class:`CoverItem`): coverage primitives to be combined. Example: >>> my_coverage = coverage.coverageSection( ... coverage.CoverPoint("x", ...), ... coverage.CoverPoint("y", ...), ... coverage.CoverCross("z", ...), ... ... ... ) >>> >>> @my_coverage >>> def decorated_fun(self, arg): ... ... """ def _nested(*decorators): def _decorator(f): for dec in reversed(*decorators): f = dec(f) return f return _decorator return _nested(coverItems)
4,293
def test_masks(dtype_device): """test if masks are applied from boundary conditions""" dtype, device = dtype_device lattice = Lattice(D2Q9, dtype=dtype, device=device) flow = Obstacle2D(10, 5, 100, 0.1, lattice, 2) flow.mask[1,1] = 1 streaming = StandardStreaming(lattice) simulation = Simulation(flow, lattice, None, streaming) assert simulation.streaming.no_stream_mask.any() assert simulation.no_collision_mask.any()
4,294
def get_operator_metatypes() -> List[Type[OperatorMetatype]]:
    """
    Returns a list of the operator metatypes.

    :return: List of operator metatypes.
    """
    return list(PT_OPERATOR_METATYPES.registry_dict.values())
4,295
def gsutil_downloader( cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs ) -> Generator[Dict[str, Any], Dict[str, Any], None]: """ Built-in downloader plugin for public gs:// URIs; registered by setup.cfg entry_points section TODO: adopt security credentials from runtime environment """ if uri == "gs://8675309": # hook for test coverage of exception handler raise RuntimeError("don't change your number") wdl = r""" task gsutil_cp { input { String uri String docker } command <<< set -euxo pipefail mkdir __out/ gsutil -q cp "~{uri}" __out/ >>> output { File file = glob("__out/*")[0] } runtime { cpu: 2 memory: "1G" docker: docker } } """ yield ( # pyre-ignore yield {"task_wdl": wdl, "inputs": {"uri": uri, "docker": cfg["download_gsutil"]["docker"]}} )
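# Hedged driver sketch (not from miniwdl itself): the plugin is a generator that
# first yields a dict describing a WDL task to run, then re-yields whatever result
# dict the host sends back. The cfg/logger objects and the payload sent back below
# are assumptions for illustration only.
def _example_drive_downloader(cfg, logger) -> None:
    gen = gsutil_downloader(cfg, logger, uri="gs://bucket/object.txt")
    task_request = next(gen)  # {"task_wdl": ..., "inputs": {...}}
    print(task_request["inputs"])
    # A real host would run the task and send its outputs back in:
    # final = gen.send({"outputs": {"file": "/path/to/object.txt"}})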
4,296
def fnl_fix_first_line(preprocessor: Preprocessor, string: str) -> str:
    """final action to ensure the file starts with a non-empty,
    non-whitespace line (if it is not empty)"""
    while string != "":
        pos = string.find("\n")
        if pos == -1:
            if string.isspace():
                return preprocessor.replace_string(0, len(string), string, "", [])
            return string
        if string[:pos+1].isspace():
            string = preprocessor.replace_string(0, pos+1, string, "", [])
        else:
            break
    return string
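# Hedged illustration (not the real Preprocessor API): a minimal stand-in whose
# replace_string(start, end, string, repl, _) simply splices `repl` into
# string[start:end], which is all fnl_fix_first_line relies on here.
class _FakePreprocessor:
    def replace_string(self, start, end, string, repl, _):
        return string[:start] + repl + string[end:]

# Leading blank/whitespace-only lines are stripped, the rest is untouched.
print(repr(fnl_fix_first_line(_FakePreprocessor(), "\n   \nhello\nworld\n")))
# expected (under the stand-in above): 'hello\nworld\n'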
4,297
def rf_rasterize(geometry_col, bounds_col, value_col, num_cols_col, num_rows_col): """Create a tile where cells in the grid defined by cols, rows, and bounds are filled with the given value.""" jfcn = RFContext.active().lookup('rf_rasterize') return Column(jfcn(_to_java_column(geometry_col), _to_java_column(bounds_col), _to_java_column(value_col), _to_java_column(num_cols_col), _to_java_column(num_rows_col)))
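# Hedged usage sketch (the DataFrame and its geometry/bounds/value column names
# are assumptions, not from the RasterFrames docs): rasterize each geometry into
# a 256 x 256 tile over its bounds, burning in the value column.
def _example_rasterize(geom_df):
    from pyspark.sql.functions import lit  # constant tile dimensions
    return geom_df.select(
        rf_rasterize(geom_df.geometry, geom_df.bounds, geom_df.value,
                     lit(256), lit(256))
    )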
4,298
def make_heatmap(ax, gs, is_sh=False, make_cbar=False): """Helper to make a heatmap.""" results = pd.DataFrame.from_dict(gs.cv_results_) results["params_str"] = results.params.apply(str) if is_sh: # SH dataframe: get mean_test_score values for the highest iter scores_matrix = results.sort_values("iter").pivot_table( index="param_gamma", columns="param_C", values="mean_test_score", aggfunc="last", ) else: scores_matrix = results.pivot( index="param_gamma", columns="param_C", values="mean_test_score" ) im = ax.imshow(scores_matrix) ax.set_xticks(np.arange(len(Cs))) ax.set_xticklabels(["{:.0E}".format(x) for x in Cs]) ax.set_xlabel("C", fontsize=15) ax.set_yticks(np.arange(len(gammas))) ax.set_yticklabels(["{:.0E}".format(x) for x in gammas]) ax.set_ylabel("gamma", fontsize=15) # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") if is_sh: iterations = results.pivot_table( index="param_gamma", columns="param_C", values="iter", aggfunc="max" ).values for i in range(len(gammas)): for j in range(len(Cs)): ax.text( j, i, iterations[i, j], ha="center", va="center", color="w", fontsize=20, ) if make_cbar: fig.subplots_adjust(right=0.8) cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7]) fig.colorbar(im, cax=cbar_ax) cbar_ax.set_ylabel("mean_test_score", rotation=-90, va="bottom", fontsize=15)
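# Hedged driver sketch (assumes module-level Cs, gammas and fig, as in the
# scikit-learn grid-search example this helper appears to be adapted from):
# run a small SVC grid search and draw its mean_test_score heatmap.
def _example_heatmap() -> None:
    global Cs, gammas, fig
    from sklearn.datasets import make_classification
    from sklearn.model_selection import GridSearchCV
    from sklearn.svm import SVC

    Cs = [1, 10, 100]
    gammas = [1e-3, 1e-2, 1e-1]
    X, y = make_classification(n_samples=200, random_state=0)
    gs = GridSearchCV(SVC(), {"C": Cs, "gamma": gammas}).fit(X, y)

    fig, ax = plt.subplots(figsize=(6, 5))
    make_heatmap(ax, gs, make_cbar=True)
    plt.show()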
4,299