content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def absModuleToDist(magApp, magAbs): """ Convert apparent and absolute magnitude into distance. Parameters ---------- magApp : float Apparent magnitude of object. magAbs : float Absolute magnitude of object. Returns ------- Distance : float The distance resulting from the difference in apparent and absolute magnitude [pc]. """ d = 10.0**(-(magAbs - magApp) / 5.0 + 1.0) return d
3,100
def decoded_anycli(**kwargs): """ Return the decoded return from AnyCLI request - Do not print anything :param kwargs: keyword value: value to display :return: return the result of AnyCLI in UTF-8 :Example: result = cli(url=base_url, auth=s, command="show vlan") decoded_anycli(result) """ value = kwargs.get('value', None) return base64.b64decode(value['result_base64_encoded']).decode('utf-8')
3,101
def job_results_html(request): """ Used for testing the update with debug toolbar. """ response = job_results(request) return render(request, 'ci/ajax_test.html', {'content': response.content})
3,102
def open_mcrae_nature_cohort(): """ get proband details for McRae et al., Nature 2017 McRae et al Nature 2017 542:433-438 doi: 10.1038/nature21062 Supplementary table S1. """ data = pandas.read_excel(url, sheet_name='Supplementary Table 1') data['Individual ID'] += '|DDD' phenotype = ['HP:0001249'] study = ['10.1038/nature21062'] persons = set() for i, row in data.iterrows(): person = Person(row['Individual ID'], row.Sex, phenotype, study) persons.add(person) persons = add_mock_probands(persons, 4293, 'ddd', 'DDD', phenotype, study) return persons
3,103
def copia_coords_alineadas(align1,align2,coords_molde,PDBname): """ Returns: 1) a list with the coordinates from coords_molde that can be copied according to the alignment align1,align2. 2) an estimate of the RMSD according to the curve RMSD(A) = 0.40 e^{1.87(1-ID)} of Chothia & Lesk (1986) """ aanames = { "A":"ALA","C":"CYS","D":"ASP","E":"GLU","F":"PHE","G":"GLY", "H":"HIS","I":"ILE","K":"LYS","L":"LEU","M":"MET","N":"ASN","P":"PRO", "Q":"GLN","R":"ARG","S":"SER","T":"THR","V":"VAL","W":"TRP","Y":"TYR" } rmsd,identical = 0,0 total1,total2,total_model = -1,-1,0 length = len(align1) if(length != len(align2)): print "# copia_coords_alineadas: alignments have different lengths", return [] pdbfile = open(PDBname, 'w') print >> pdbfile, "HEADER comparative model\nREMARK alignment:\n", print >> pdbfile, "REMARK query : %s\n" % (align1), print >> pdbfile, "REMARK template: %s\n" % (align2), for r in range(0, length): conserved = False res1 = align1[r:r+1] res2 = align2[r:r+1] if(res1 != '-'): total1+=1 if(res2 != '-'): total2+=1 if(res1 == '-' or res2 == '-'): continue # skip the gaps total_model += 1.0; if(res1 == res2): conserved = True identical += 1.0 for atomo in coords_molde[total2].split("\n"): if(atomo == ''): break if(atomo[12:16] == ' CA ' or atomo[12:16] == ' C ' or \ atomo[12:16] == ' N ' or atomo[12:16] == ' O ' \ or conserved): print >> pdbfile, "%s%s%s%4d%s" % \ (atomo[0:17],aanames[res1],atomo[20:22],total1+1,atomo[26:]) print >> pdbfile, "TER\n", pdbfile.close() rmsd = 0.40 * exp(1.87*(1-(identical/total_model))) identical = (identical/total_model) return (total_model,identical,rmsd)
3,104
def get_amati_relationship(value='o'): """ Return the Amati relationship and its 1 sigma dispersion as given by Tsutsui et al. (2009). :param value: a string that can be 'o', '+', or '-'. The default is set to 'o' for the actual Amati relationship. '+' gives the upper bound of uncertainty and '-' gives the lower bound of uncertainty. :return: returns arrays of the x and y values of the Amati relation / error in the relation """ #plot the amati relation given by: #http://iopscience.iop.org/article/10.1088/1475-7516/2009/08/015/pdf x=np.linspace(-3,3,100) #log(E_iso/10**52), for calculation of E_p, add 52 to x @ end to get back normal values if value=='o': y=(1/2.01)*(x+3.87) #y is log(E_p/1keV) elif value=='+': y=(1/(2.01))*(x+(3.87+0.33)) elif value=='-': y=(1/(2.01))*(x+(3.87-0.33)) else: print('This is not a correct option for value\n') return 1e52*10**x,10**y
3,105
def load(name, final=False, torch=False, prune_dist=None): """ Returns the requested dataset. :param name: One of the available datasets :param final: Loads the test/train split instead of the validation train split. In this case the training data consists of both training and validation. :return: A pair (triples, meta). `triples` is a numpy 2d array of datatype uint32 containing integer-encoded triples. `meta` is an object of metadata containing the following fields: * e: The number of entities * r: The number of relations * i2r: """ if name == 'micro': return micro(final, torch) # -- a miniature dataset for unit testing if name in ['aifb', 'am1k', 'amplus', 'dblp', 'mdgenre', 'mdgender', 'dmgfull', 'dmg777k']: tic() data = Data(here(f'../datasets/{name}'), final=final, use_torch=torch) print(f'loaded data {name} ({toc():.4}s).') else: raise Exception(f'Dataset {name} not recognized.') if prune_dist is not None: tic() data = prune(data, n=prune_dist) print(f'pruned ({toc():.4}s).') return data
3,106
def is_paragraph_debian_packaging(paragraph): """ Return True if the `paragraph` is a CopyrightFilesParagraph that applies only to the Debian packaging """ return isinstance( paragraph, CopyrightFilesParagraph ) and paragraph.files.values == ['debian/*']
3,107
def update_b(b, action_prob, yr_val, predict_mode): """Update new shape parameters b using the regression and classification output. Args: b: current shape parameters values. [num_examples, num_shape_params]. action_prob: classification output. [num_actions]=[num_examples, 2*num_shape_params] yr_val: values of db to regress. yr=b-b_gt. [num_examples, num_shape_params] predict_mode: 0: Hard classification. Move regressed distance only in the direction with maximum probability. 1: Soft classification. Multiply classification probabilities with regressed distances. 2: Regression only. 3: Classification only. Returns: b_new: new b after update. [num_examples, num_shape_params] """ if predict_mode == 0: # Hard classification. Move regressed distance only in the direction with maximum probability. ind = np.argmax(np.amax(np.reshape(action_prob, (b.shape[0], b.shape[1], 2)), axis=2), axis=1) # ind = [num_examples] row_ind = np.arange(b.shape[0]) b[row_ind, ind] = b[row_ind, ind] - yr_val[row_ind, ind] elif predict_mode == 1: # Soft classification. Multiply classification probabilities with regressed distances. b = b - yr_val * np.amax(np.reshape(action_prob, (b.shape[0], b.shape[1], 2)), axis=2) elif predict_mode == 2: # Regression only. b = b - yr_val elif predict_mode == 3: # Classification only step = 1 action_prob_reshape = np.reshape(action_prob, (b.shape[0], b.shape[1], 2)) ind = np.argmax(np.amax(action_prob_reshape, axis=2), axis=1) # ind=[num_examples] row_ind = np.arange(b.shape[0]) is_negative = np.argmax(action_prob_reshape[row_ind, ind], axis=1) # is_negative=[num_examples] # Move b in either positive or negative direction b[row_ind[is_negative], ind[is_negative]] = b[row_ind[is_negative], ind[is_negative]] + step b[row_ind[np.logical_not(is_negative)], ind[np.logical_not(is_negative)]] = b[row_ind[np.logical_not(is_negative)], ind[np.logical_not(is_negative)]] - step return b
3,108
def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() add_config(args, cfg) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.merge_from_list(['MODEL.BUA.EXTRACT_FEATS',True]) cfg.merge_from_list(switch_extract_mode(args.extract_mode)) cfg.merge_from_list(set_min_max_boxes(args.min_max_boxes, args.mode)) cfg.freeze() default_setup(cfg, args) return cfg
3,109
def _earth_distance(time='now'): """ Return the distance between the Sun and the Earth at a specified time. Parameters ---------- time : {parse_time_types} Time to use in a parse_time-compatible format Returns ------- out : `~astropy.coordinates.Distance` The Sun-Earth distance """ return get_earth(time).radius
3,110
def test_pixel_sum_2D(model_class, mode): """ Test if the sum of all pixels corresponds nearly to the integral. """ if model_class == Box2D and mode == "center": pytest.skip("Non integrating mode. Skip integral test.") parameters = models_2D[model_class] model = create_model(model_class, parameters) values = discretize_model(model, models_2D[model_class]['x_lim'], models_2D[model_class]['y_lim'], mode=mode) assert_allclose(values.sum(), models_2D[model_class]['integral'], atol=0.0001)
3,111
async def DELETE_Link(request): """HTTP method to delete a link""" log.request(request) app = request.app group_id = request.match_info.get('id') if not group_id: msg = "Missing group id" log.warn(msg) raise HTTPBadRequest(reason=msg) if not isValidUuid(group_id, obj_class="Group"): msg = f"Invalid group id: {group_id}" log.warn(msg) raise HTTPBadRequest(reason=msg) link_title = request.match_info.get('title') validateLinkName(link_title) username, pswd = getUserPasswordFromRequest(request) await validateUserPassword(app, username, pswd) domain = getDomainFromRequest(request) if not isValidDomain(domain): msg = f"domain: {domain}" log.warn(msg) raise HTTPBadRequest(reason=msg) bucket = getBucketForDomain(domain) await validateAction(app, domain, group_id, username, "delete") req = getDataNodeUrl(app, group_id) req += "/groups/" + group_id + "/links/" + link_title params = {} if bucket: params["bucket"] = bucket rsp_json = await http_delete(app, req, params=params) resp = await jsonResponse(request, rsp_json) log.response(request, resp=resp) return resp
3,112
def addBookRec(title, author, releaseDate, releasePlace, pages, ISBN): """ creates a new record in the database """ con = sqlite3.connect("library.db") cur = con.cursor() cur.execute("INSERT INTO book VALUES (NULL, ?, ?, ?, ?, ?, ?)", (title, author, releaseDate, releasePlace, pages, ISBN)) con.commit() con.close()
3,113
def test__additional_sign_plan__create_with_content_id(admin_user): """ Test that AdditionalSignPlan API endpoint POST request raises an error if any of the content instances have a id defined. Pre-existing content instances can not be assigned for newly created additional signs. """ client = get_api_client(user=get_user(admin=admin_user)) tsp = get_traffic_sign_plan() dt = get_traffic_control_device_type() ascp = get_additional_sign_content_plan(device_type=dt) data = { "parent": tsp.pk, "location": str(tsp.location), "owner": get_owner().pk, "content": [ { "id": str(ascp.pk), "text": "Test content", "order": 1, "device_type": str(dt.pk), } ], } response = client.post(reverse("v1:additionalsignplan-list"), data=data) response_data = response.json() asp = AdditionalSignPlan.objects.exclude(pk=ascp.parent.pk).first() if admin_user: assert response.status_code == status.HTTP_400_BAD_REQUEST assert response_data == { "content": [ { "id": [ ( "Creating new additional sign with pre-existing " "content instance is not allowed. Content objects " 'must not have "id" defined.' ) ] } ] } assert not asp else: assert response.status_code == status.HTTP_403_FORBIDDEN assert not asp assert AdditionalSignContentPlan.objects.count() == 1
3,114
def format_image(image): """ Function to format frame """ if len(image.shape) > 2 and image.shape[2] == 3: # determine whether the image is color image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) else: # Image read from buffer image = cv2.imdecode(image, cv2.CV_LOAD_IMAGE_GRAYSCALE) cascade_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') faces = cascade_classifier.detectMultiScale(image,scaleFactor = 1.3 ,minNeighbors = 5) if not len(faces) > 0: return None # initialize the first face as having maximum area, then find the one with max_area max_area_face = faces[0] for face in faces: if face[2] * face[3] > max_area_face[2] * max_area_face[3]: max_area_face = face face = max_area_face # extract ROI of face (rows y:y+h, cols x:x+w) image = image[face[1]:(face[1] + face[3]), face[0]:(face[0] + face[2])] try: # resize the image so that it can be passed to the neural network image = cv2.resize(image, (48,48), interpolation = cv2.INTER_CUBIC) / 255. except Exception: print("----->Problem during resize") return None return image
3,115
def cpu_stats(): """Return various CPU stats as a named tuple.""" ctx_switches, interrupts, syscalls, traps = cext.cpu_stats() soft_interrupts = 0 return _common.scpustats(ctx_switches, interrupts, soft_interrupts, syscalls)
3,116
def chi_squared(source_frequency, target_frequency): """Calculate the Chi Squared statistic by comparing ``source_frequency`` with ``target_frequency``. Example: >>> chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2}) 0.1 Args: source_frequency (dict): Frequency map of the text you are analyzing target_frequency (dict): Frequency map of the target language to compare with Returns: Decimal value of the chi-squared statistic """ # Ignore any symbols from source that are not in target. # TODO: raise Error if source_len is 0? target_prob = frequency_to_probability(target_frequency) source_len = sum(v for k, v in source_frequency.items() if k in target_frequency) result = 0 for symbol, prob in target_prob.items(): symbol_frequency = source_frequency.get(symbol, 0) # Frequency is 0 if it doesn't appear in source result += _calculate_chi_squared(symbol_frequency, prob, source_len) return result
3,117
def has_reacted(comment, user, reaction): """ Returns whether a user has reacted with a particular reaction on a comment or not. """ if user.is_authenticated: reaction_type = getattr(ReactionInstance.ReactionType, reaction.upper(), None) if not reaction_type: raise template.TemplateSyntaxError(ReactionError.TYPE_INVALID.format(reaction_type=reaction)) return ReactionInstance.objects.filter( user=user, reaction_type=reaction_type.value, reaction__comment=comment ).exists() return False
3,118
def u(debug_print: str, debug_from: str = None, end: bool = False): """ Updates the colourised string of the terminal with debug_print. If end is True the line is ended Parameters ---------- debug_print: str Text used to update the terminal with. debug_from: str, optional To specify where debug text is being called from. end: bool, optional If True the line is ended. """ if debug_from is None: print_text = termcolor("[DEBUG]\t\t", 'blue') + debug_print else: print_text = termcolor("[DEBUG]\t\t", 'blue') + termcolor(debug_from + ":\t", 'green') + debug_print if not end: print('\r' + print_text, end='', flush=True) else: print('\r' + print_text, flush=True) # sys.stdout.write('\r' + print_text) # sys.stdout.flush()
3,119
def structure_query(compound, label='pyclassyfire'): """Submit compound information to the ClassyFire service for evaluation and receive an id which can be used to collect results :param compound: The compound structures as line delimited inchikey or smiles. Optionally a tab-separated id may be prepended for each structure. :type compound: str :param label: A label for the query :type label: str :return: A query ID number :rtype: int >>> structure_query('CCC', 'smiles_test') >>> structure_query('InChI=1S/C3H4O3/c1-2(4)3(5)6/h1H3,(H,5,6)') """ r = requests.post(url + '/queries.json', data='{"label": "%s", ' '"query_input": "%s", "query_type": "STRUCTURE"}' % (label, compound), headers={"Content-Type": "application/json"}) r.raise_for_status() return r.json()['id']
3,120
def test_colored_svg_cache(qtbot): """Make sure we're not recreating icons.""" icon1 = QColoredSVGIcon.from_resources('new_points') icon2 = QColoredSVGIcon.from_resources('new_points') assert icon1 is icon2 assert icon1.colored('red') is icon2.colored('red')
3,121
def ParseExistingMessageIntoMessage(message, existing_message, method): """Sets fields in message based on an existing message. This function is used for get-modify-update pattern. The request type of update requests would be either the same as the response type of get requests or one field inside the request would be the same as the get response. For example: 1) update.request_type_name = ServiceAccount get.response_type_name = ServiceAccount 2) update.request_type_name = updateInstanceRequest updateInstanceRequest.instance = Instance get.response_type_name = Instance If the existing message has the same type as the message to be sent for the request, then return the existing message instead. If they are different, find the field in the message which has the same type as existing_message, then assign the existing message to that field. Args: message: the apitools message to construct a new request. existing_message: the existing apitools message returned from the server. method: APIMethod, the method to generate request for. Returns: A modified apitools message to be sent to the method. """ if type(existing_message) == type(message): # pylint: disable=unidiomatic-typecheck return existing_message # For read-modify-update api calls, the field name would be the same level # or the next level of the request. # TODO(b/111069150): refactor this part, don't hard code. existing_message_name = type(existing_message).__name__ field_name = existing_message_name[0].lower() + existing_message_name[1:] field_path = '' if method.request_field != field_name: field_path += method.request_field field_path += '.' field_path += field_name SetFieldInMessage(message, field_path, existing_message) return message
3,122
def disable_logger(name: str): """Disable the logger with the given name.""" null = logging.NullHandler() null.setLevel(logging.DEBUG) logger = logging.getLogger(name) logger.addHandler(null) logger.propagate = False
3,123
def create(*, db_session, ticket_in: TicketCreate) -> Ticket: """Creates a new ticket.""" ticket = Ticket(**ticket_in.dict()) db_session.add(ticket) db_session.commit() return ticket
3,124
def error(msg: str) -> None: """Equivalent to ``log(msg, level=logging.ERROR)``. Args: msg: A message to log. """ log(msg, level=logging.ERROR)
3,125
def ls_volume(path): """Lists the files in a volume. Example: \b roro volume:ls <volume_name> lists all files in volume "volume_name" \b roro volume:ls <volume_name:dir> lists all files in directory "dir" in volume "volume" """ path = path+':' if ':' not in path else path path = Path(path) project = projects.current_project() stat = project.ls(path) rows = [[item['mode'], item['size'], item['name']] for item in stat] click.echo(tabulate(rows, tablefmt='plain'))
3,126
def blur(img): """ :param img: SimpleImage, an original image. :return: img: SimpleImage, image with blurred effect. """ blank_img = SimpleImage.blank(img.width, img.height) for y in range(img.height): for x in range(img.width): blurred = blank_img.get_pixel(x, y) if x == 0 and y == 0: """ For 4 corners. The new RGB values of original pixel is the average RGB values of the original pixel and the other pixels around it. """ avg_red1 = (img.get_pixel(x, y).red + img.get_pixel(x + 1, y).red + img.get_pixel(x, y + 1).red + img.get_pixel(x + 1, y + 1).red) / 4 avg_green1 = (img.get_pixel(x, y).green + img.get_pixel(x + 1, y).green + img.get_pixel(x, y + 1).green + img.get_pixel(x + 1, y + 1).green) / 4 avg_blue1 = (img.get_pixel(x, y).blue + img.get_pixel(x + 1, y).blue + img.get_pixel(x, y + 1).blue + img.get_pixel(x + 1, y + 1).blue) / 4 blurred.red = avg_red1 blurred.green = avg_green1 blurred.blue = avg_blue1 elif x == 0 and y == blank_img.height - 1: avg_red2 = (img.get_pixel(x, y).red + img.get_pixel(x, y - 1).red + img.get_pixel(x + 1, y - 1).red + img.get_pixel(x + 1, y).red) / 4 avg_green2 = (img.get_pixel(x, y).green + img.get_pixel(x, y - 1).green + img.get_pixel(x + 1, y - 1).green + img.get_pixel(x + 1, y).green) / 4 avg_blue2 = (img.get_pixel(x, y).blue + img.get_pixel(x, y - 1).blue + img.get_pixel(x + 1, y - 1).blue + img.get_pixel(x + 1, y).blue) / 4 blurred.red = avg_red2 blurred.green = avg_green2 blurred.blue = avg_blue2 elif x == blank_img.width - 1 and y == 0: avg_red3 = (img.get_pixel(x, y).red + img.get_pixel(x - 1, y).red + img.get_pixel(x - 1, y + 1).red + img.get_pixel(x, y + 1).red) / 4 avg_green3 = (img.get_pixel(x, y).green + img.get_pixel(x - 1, y).green + img.get_pixel(x - 1, y + 1).green + img.get_pixel(x, y + 1).green) / 4 avg_blue3 = (img.get_pixel(x, y).blue + img.get_pixel(x - 1, y).blue + img.get_pixel(x - 1, y + 1).blue + img.get_pixel(x, y + 1).blue) / 4 blurred.red = avg_red3 blurred.green = avg_green3 blurred.blue = avg_blue3 elif x == blank_img.width - 1 and y == blank_img.height - 1: avg_red4 = (img.get_pixel(x, y).red + img.get_pixel(x, y - 1).red + img.get_pixel(x - 1, y - 1).red + img.get_pixel(x - 1, y).red) / 4 avg_green4 = (img.get_pixel(x, y).green + img.get_pixel(x, y - 1).green + img.get_pixel(x - 1, y - 1).green + img.get_pixel(x - 1, y).green) / 4 avg_blue4 = (img.get_pixel(x, y).blue + img.get_pixel(x, y - 1).blue + img.get_pixel(x - 1, y - 1).blue + img.get_pixel(x - 1, y).blue) / 4 blurred.red = avg_red4 blurred.green = avg_green4 blurred.blue = avg_blue4 elif x == 0 and 0 < y < blank_img.height - 1: """ For 4 edges. The new RGB values of original pixel is the average RGB values of the original pixel and the other pixels around it. 
""" avg_red5 = (img.get_pixel(x, y).red + img.get_pixel(x, y - 1).red + img.get_pixel(x + 1, y - 1).red + img.get_pixel(x + 1, y).red + img.get_pixel(x + 1, y + 1).red + img.get_pixel(x, y + 1).red) / 5 avg_green5 = (img.get_pixel(x, y).green + img.get_pixel(x, y - 1).green + img.get_pixel(x + 1, y - 1).green + img.get_pixel(x + 1, y).green + img.get_pixel(x + 1, y + 1).green + img.get_pixel(x, y + 1).green) / 5 avg_blue5 = (img.get_pixel(x, y).blue + img.get_pixel(x, y - 1).blue + img.get_pixel(x + 1, y - 1).blue + img.get_pixel(x + 1, y).blue + img.get_pixel(x + 1, y + 1).blue + img.get_pixel(x, y + 1).blue) / 5 blurred.red = avg_red5 blurred.green = avg_green5 blurred.blue = avg_blue5 elif x == blank_img.width - 1 and 0 < y < blank_img.height - 1: avg_red6 = (img.get_pixel(x, y).red + img.get_pixel(x, y - 1).red + img.get_pixel(x - 1, y - 1).red + img.get_pixel(x - 1, y).red + img.get_pixel(x - 1, y + 1).red + img.get_pixel(x, y + 1).red) / 6 avg_green6 = (img.get_pixel(x, y).green + img.get_pixel(x, y - 1).green + img.get_pixel(x - 1, y - 1).green + img.get_pixel(x - 1, y).green + img.get_pixel(x - 1, y + 1).green + img.get_pixel(x, y + 1).green) / 6 avg_blue6 = (img.get_pixel(x, y).blue + img.get_pixel(x, y - 1).blue + img.get_pixel(x - 1, y - 1).blue + img.get_pixel(x - 1, y).blue + img.get_pixel(x - 1, y + 1).blue + img.get_pixel(x, y + 1).blue) / 6 blurred.red = avg_red6 blurred.green = avg_green6 blurred.blue = avg_blue6 elif y == 0 and 0 < x < blank_img.width - 1: avg_red7 = (img.get_pixel(x, y).red + img.get_pixel(x - 1, y).red + img.get_pixel(x - 1, y + 1).red + img.get_pixel(x, y + 1).red + img.get_pixel(x + 1, y + 1).red + img.get_pixel(x + 1, y).red) / 6 avg_green7 = (img.get_pixel(x, y).green + img.get_pixel(x - 1, y).green + img.get_pixel(x - 1, y + 1).green + img.get_pixel(x, y + 1).green + img.get_pixel(x + 1, y + 1).green + img.get_pixel(x + 1, y).green) / 6 avg_blue7 = (img.get_pixel(x, y).blue + img.get_pixel(x - 1, y).blue + img.get_pixel(x - 1, y + 1).blue + img.get_pixel(x, y + 1).blue + img.get_pixel(x + 1, y + 1).blue + img.get_pixel(x + 1, y).blue) / 6 blurred.red = avg_red7 blurred.green = avg_green7 blurred.blue = avg_blue7 elif y == blank_img.height - 1 and 0 < x < blank_img.width - 1: avg_red8 = (img.get_pixel(x, y).red + img.get_pixel(x - 1, y).red + img.get_pixel(x - 1, y - 1).red + img.get_pixel(x, y - 1).red + img.get_pixel(x + 1, y - 1).red + img.get_pixel(x + 1, y).red) / 6 avg_green8 = (img.get_pixel(x, y).green + img.get_pixel(x - 1, y).green + img.get_pixel(x - 1, y - 1).green + img.get_pixel(x, y - 1).green + img.get_pixel(x + 1, y - 1).green + img.get_pixel(x + 1, y).green) / 6 avg_blue8 = (img.get_pixel(x, y).blue + img.get_pixel(x - 1, y).blue + img.get_pixel(x - 1, y - 1).blue + img.get_pixel(x, y - 1).blue + img.get_pixel(x + 1, y - 1).blue + img.get_pixel(x + 1, y).blue) / 6 blurred.red = avg_red8 blurred.green = avg_green8 blurred.blue = avg_blue8 else: """ For other area except the corners and edges. The new RGB values of original pixel is the average RGB values of the other pixels around it. 
""" avg_red9 = (img.get_pixel(x, y).red + img.get_pixel(x - 1, y).red + img.get_pixel(x + 1, y).red + img.get_pixel(x - 1, y - 1).red + img.get_pixel(x, y - 1).red + img.get_pixel(x + 1, y - 1).red + img.get_pixel(x - 1, y + 1).red + img.get_pixel(x, y + 1).red + img.get_pixel(x + 1, y + 1).red) / 9 avg_green9 = (img.get_pixel(x, y).green + img.get_pixel(x - 1, y).green + img.get_pixel(x + 1, y).green + img.get_pixel(x - 1, y - 1).green + img.get_pixel(x, y - 1).green + img.get_pixel(x + 1, y - 1).green + img.get_pixel(x - 1, y + 1).green + img.get_pixel(x, y + 1).green + img.get_pixel(x + 1, y + 1).red) / 9 avg_blue9 = (img.get_pixel(x, y).blue + img.get_pixel(x - 1, y).blue + img.get_pixel(x + 1, y).blue + img.get_pixel(x - 1, y - 1).blue + img.get_pixel(x, y - 1).blue + img.get_pixel(x + 1, y - 1).blue + img.get_pixel(x - 1, y + 1).blue + img.get_pixel(x, y + 1).blue + img.get_pixel(x + 1, y + 1).blue) / 9 blurred.red = avg_red9 blurred.green = avg_green9 blurred.blue = avg_blue9 return blank_img
3,127
def trans_pressure(src, dest="bar"): """ >>> """ return trans_basic_unit(src, dest, "pressure")
3,128
def run_cmd(command: list) -> None: """Run `command` using `subprocess.Popen()`.""" show_info(f"Command: {' '.join(command)}") if DRY_RUN: show_info("Dry run mode enabled - won't run") else: try: proc = subprocess.Popen(command, stdout=subprocess.PIPE) stdout = proc.communicate()[0] except Exception as exc: show_error(exc, exit=1) finally: return stdout.decode("utf-8").rstrip("\n")
3,129
def test_accelerated_bypass_method_against_old(c_ctrl_rr): """Confirm that my changes to the bypass method maintain the same result as the old method""" OLD_HTCONSTS = dassh.region_rodded.calculate_ht_constants(c_ctrl_rr) def _calc_coolant_byp_temp_old(self, dz): """Calculate the coolant temperatures in the assembly bypass channels at the axial level j+1 Parameters ---------- self : DASSH RoddedRegion object dz : float Axial step size (m) Notes ----- The coolant in the bypass channels is assumed to get no power from neutron/gamma heating (that contribution to coolant in the assembly interior is already small enough). """ # Calculate the change in temperature in each subchannel dT = np.zeros((self.n_bypass, self.subchannel.n_sc['bypass']['total'])) # self._update_coolant_byp_params(self.avg_coolant_byp_temp) for i in range(self.n_bypass): # This factor is in many terms; technically, the mass flow # rate is already accounted for in constants defined earlier # mCp = self.coolant.heat_capacity # starting index to lookup type is after all interior # coolant channels and all preceding duct and bypass # channels start = (self.subchannel.n_sc['coolant']['total'] + self.subchannel.n_sc['duct']['total'] + i * self.subchannel.n_sc['bypass']['total'] + i * self.subchannel.n_sc['duct']['total']) # end = start + self.subchannel.n_sc['bypass']['total'] for sci in range(0, self.subchannel.n_sc['bypass']['total']): # The value of sci is the PYTHON indexing # type_i = self.subchannel.type[sci + start] - 1 type_i = self.subchannel.type[sci + start] # Heat transfer to/from adjacent subchannels for adj in self.subchannel.sc_adj[sci + start]: # if adj == 0: if adj == -1: continue # type_a = self.subchannel.type[adj - 1] - 1 type_a = self.subchannel.type[adj] # Convection to/from duct wall # if type_a in [3, 4]: if 3 <= type_a <= 4: if sci + start > adj: # INTERIOR adjacent duct wall byp_conv_const = \ OLD_HTCONSTS[type_i][type_a][i][0] byp_conv_dT = \ (self.temp['duct_surf'][i, 1, sci] - self.temp['coolant_byp'][i, sci]) else: # EXTERIOR adjacent duct wall byp_conv_const = \ OLD_HTCONSTS[type_i][type_a][i][1] byp_conv_dT = \ (self.temp['duct_surf'][i + 1, 0, sci] - self.temp['coolant_byp'][i, sci]) dT[i, sci] += \ (self.coolant_byp_params['htc'][i, type_i - 5] * dz * byp_conv_const * byp_conv_dT / self.coolant.heat_capacity) # Conduction to/from adjacent coolant subchannels else: # sc_adj = adj - start - 1 sc_adj = adj - start dT[i, sci] += \ (self.coolant.thermal_conductivity * dz * OLD_HTCONSTS[type_i][type_a][i] * (self.temp['coolant_byp'][i, sc_adj] - self.temp['coolant_byp'][i, sci]) / self.coolant.heat_capacity) return dT dT = np.zeros(c_ctrl_rr.temp['coolant_byp'].shape) dT_old = dT.copy() dz = 0.01 start_temp = 623.15 for i in range(50): duct_surf_temp = \ (np.random.random(c_ctrl_rr.temp['duct_surf'].shape) + (start_temp + i * 1.0)) c_ctrl_rr.temp['duct_surf'] = duct_surf_temp dT_old += _calc_coolant_byp_temp_old(c_ctrl_rr, dz) dT += c_ctrl_rr._calc_coolant_byp_temp(dz) print(np.average(dT)) print(np.average(dT_old)) print('max abs diff: ', np.max(np.abs(dT - dT_old))) assert np.allclose(dT, dT_old)
3,130
def retrieve_config(): # TODO: is this being used? """Retrieve configuration data. Args: None Returns: dict: The dictionary with configuration settings """ config = {} # go 2 layer up util_path = Path(__file__).parents[3] config_path = util_path / 'configuration' / 'configuration-aml.variables.yml' config = read_config_file(config_path) return config['variables']
3,131
def vraec18(pretrained=False, **kwargs): """Constructs a _ResAE-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = _VRAEC(_VariationalBasicBlock, [2, 2, 2, 2], **kwargs) if pretrained: try: model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False) except Exception as exp: logging.warning(exp) return model
3,132
def encode_zip(data): """Zip-compress data. Implies base64 encoding of zip data.""" zipped = zlib.compress(data) return encode_b64(zipped)
3,133
def create_class_mask(img, color_map, is_normalized_img=True, is_normalized_map=False, show_masks=False): """ Function to create C matrices from the segmented image, where each of the C matrices is for one class with all ones at the pixel positions where that class is present img = The segmented image color_map = A list with tuples that contains all the RGB values for each color that represents some class in that image is_normalized_img = Boolean - Whether the image is normalized or not If normalized, then the image is multiplied with 255 is_normalized_map = Boolean - Represents whether the color map is normalized or not, if so then the color map values are multiplied with 255 show_masks = Whether to show the created masks or not """ if is_normalized_img and (not is_normalized_map): img *= 255 if is_normalized_map and (not is_normalized_img): img = img / 255 mask = [] hw_tuple = img.shape[:-1] for color in color_map: color_img = [] for idx in range(3): color_img.append(np.ones(hw_tuple) * color[idx]) color_img = np.array(color_img, dtype=np.uint8).transpose(1, 2, 0) mask.append(np.uint8((color_img == img).sum(axis = -1) == 3)) return np.array(mask)
3,134
def reconstruct_grid(mask, ds_dl): """ Reconstruction of 2d grid. Args: mask (ndarray): land mask used. ds_dl (ndarray): trained model prediction. """ landmask = np.argwhere(np.isnan(mask)) empty = np.zeros((ds_dl.shape[0], mask.shape[0], mask.shape[1])) counter = 0 for i, j in itertools.product(list(range(mask.shape[0])),list(range(mask.shape[1]))): if np.argwhere(np.logical_and(np.isin(landmask[:,0], i), np.isin(landmask[:,1], j))).shape[0] > 0: empty[:, i, j] = np.nan else: empty[:, i, j] = ds_dl[:, counter] counter += 1 return empty
3,135
def conv_kernel_initializer(shape, dtype=None): """Convolution kernel initializer. The main difference from tf.variance_scaling_initializer is that tf.variance_scaling_initializer uses a truncated normal, but with an uncorrected standard deviation, whereas here a normal distribution is used. Similarly, tf.initializers.variance_scaling uses a corrected standard deviation. Args: shape: shape of the convolution kernel dtype: dtype of the convolution kernel Returns: the initialized convolution kernel """ kernel_height, kernel_width, input_filters, out_filters = shape fan_out = int(kernel_height * kernel_width * out_filters) return tf.random.normal(shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype)
3,136
def pytest_addoption(parser): """Add support for the RP-related options. :param parser: Object of the Parser class """ group = parser.getgroup('reporting') group.addoption( '--rp-launch', action='store', dest='rp_launch', help='Launch name (overrides rp_launch config option)') group.addoption( '--rp-launch-id', action='store', dest='rp_launch_id', help='Use already existing launch-id. The plugin won\'t control the ' 'Launch status (overrides rp_launch_id config option)') group.addoption( '--rp-launch-description', action='store', dest='rp_launch_description', help='Launch description (overrides ' 'rp_launch_description config option)') group.addoption( '--rp-rerun', action='store_true', dest='rp_rerun', help='Marks the launch as the rerun') group.addoption( '--rp-rerun-of', action='store', dest='rp_rerun_of', help='ID of the launch to be marked as a rerun ' '(use only with rp_rerun=True)') group.addoption( '--rp-parent-item-id', action='store', dest='rp_parent_item_id', help='Create all test item as child items of the given ' '(already existing) item.') group.addoption( '--rp-project', action='store', dest='rp_project', help='Sets rp_project from command line' ) group.addoption( '--reportportal', action='store_true', dest='rp_enabled', default=False, help='Enable ReportPortal plugin' ) group.addoption( '--rp-log-level', dest='rp_log_level', default=None, help='Logging level for automated log records reporting' ) parser.addini( 'rp_log_level', default=None, help='Logging level for automated log records reporting' ) parser.addini( 'rp_uuid', help='UUID') parser.addini( 'rp_endpoint', help='Server endpoint') parser.addini( 'rp_project', help='Project name') parser.addini( 'rp_launch', default='Pytest Launch', help='Launch name') parser.addini( 'rp_launch_id', default=None, help='Use already existing launch-id. The plugin won\'t control ' 'the Launch status') parser.addini( 'rp_launch_attributes', type='args', help='Launch attributes, i.e Performance Regression') parser.addini( 'rp_tests_attributes', type='args', help='Attributes for all tests items, e.g. Smoke') parser.addini( 'rp_launch_description', default='', help='Launch description') parser.addini( 'rp_log_batch_size', default='20', help='Size of batch log requests in async mode') parser.addini( 'rp_ignore_errors', default=False, type='bool', help='Ignore Report Portal errors (exit otherwise)') parser.addini( 'rp_ignore_attributes', type='args', help='Ignore specified pytest markers, i.e parametrize') parser.addini( 'rp_hierarchy_dirs_level', default=0, help='Directory starting hierarchy level') parser.addini( 'rp_hierarchy_dirs', default=False, type='bool', help='Enables hierarchy for directories') parser.addini( 'rp_hierarchy_module', default=True, type='bool', help='Enables hierarchy for module') parser.addini( 'rp_hierarchy_class', default=True, type='bool', help='Enables hierarchy for class') parser.addini( 'rp_hierarchy_parametrize', default=False, type='bool', help='Enables hierarchy for parametrized tests') parser.addini( 'rp_issue_marks', type='args', default='', help='Pytest marks to get issue information') parser.addini( 'rp_issue_system_url', default='', help='URL to get issue description. 
Issue id ' 'from pytest mark will be added to this URL') parser.addini( 'rp_verify_ssl', default=True, type='bool', help='Verify HTTPS calls') parser.addini( 'rp_display_suite_test_file', default=True, type='bool', help="In case of True, include the suite's relative" " file path in the launch name as a convention of " "'<RELATIVE_FILE_PATH>::<SUITE_NAME>'. " "In case of False, set the launch name to be the suite name " "only - this flag is relevant only when" " 'rp_hierarchy_module' flag is set to False") parser.addini( 'rp_issue_id_marks', type='bool', default=True, help='Adding tag with issue id to the test') parser.addini( 'rp_parent_item_id', default=None, help="Create all test item as child items of the given " "(already existing) item.") parser.addini( 'retries', default='0', help='Amount of retries for performing REST calls to RP server') parser.addini( 'rp_rerun', default=False, help='Marks the launch as the rerun') parser.addini( 'rp_rerun_of', default='', help='ID of the launch to be marked as a rerun ' '(use only with rp_rerun=True)')
3,137
def prediction_func(data, g_data, grid_search, param_list): """Function for using the dataset to train a model and predicting prices for generated data. Parameter search is done using RandomizedSearchCV since it is computationally more efficient compared to GridSearchCV. In param_list, learning_rate, subsample and max_depth, min_child_weight, gamma and colsample_bytree can be included. Args: | data (pd.Dataframe): the dataset including house features and prices | g_data (pd.Dataframe): randomly generated house features for prediction purposes | grid_search (bool): indicates whether the model is trained with parameter search (True) or uses default values (False) | param_list (list): the list of parameters to be included in parameter search Returns: the predicted prices for houses in g_data (np.array) """ # Base Model xgb_reg = xgb.XGBRegressor(n_treads=-1) if grid_search: # Search for best parameters in model params = { "learning_rate": [i / 20 for i in range(1, 11)], "min_child_weight": [i for i in range(3, 12)], "gamma": [i / 10.0 for i in range(3, 8)], "subsample": [i / 10.0 for i in range(7, 11)], "colsample_bytree": [i / 10.0 for i in range(6, 11)], "max_depth": [i for i in range(3, 8)], } # Only includes selected parameters params = {key: params[key] for key in param_list} xgb_reg = RandomizedSearchCV( estimator=xgb_reg, param_distributions=params, n_iter=5, cv=3, random_state=23, iid=False, ) xgb_reg.fit(data.drop("price", axis=1), data.price) return xgb_reg.predict(g_data)
3,138
def unfreeze_params(module, frozen_params): """Unfreeze params Args: module (torch.nn.Module): frozen_params: a list/tuple of strings, which define all the patterns of interests """ for name, params in module.named_parameters(): for pattern in frozen_params: assert isinstance(pattern, str) if re.search(pattern, name): params.requires_grad = True # print('Params %s is unfrozen.' % name)
3,139
def thv_to_zxy(theta, h): """Convert coordinates from (theta, h, v) to (z, x, y) space.""" cos_p = np.cos(theta) sin_p = np.sin(theta) srcx = +RADIUS * cos_p - h * sin_p srcy = +RADIUS * sin_p + h * cos_p detx = -RADIUS * cos_p - h * sin_p dety = -RADIUS * sin_p + h * cos_p return srcx, srcy, detx, dety
3,140
def get_most_stale_file(logpath=DEFAULT_PATH): """ returns the filename of the file in the fileset that was least recently backed up and the time of the last backup """ oldest_name = "" oldest_date = datetime.max for fstat in get_fileset_statlist(): last_backup = datetime.strptime( get_last_upload_times(fstat[STAT_KEYS.SOURCE], n_times=1)[0], TIME_FORMAT ) if last_backup < oldest_date: oldest_date = last_backup oldest_name = fstat[STAT_KEYS.SOURCE] return oldest_name, oldest_date
3,141
def add_user_tweets(username,tweets_qty=LIGHT_QTY): """Add a new user and their Tweets, or else error""" try: twitter_user=TWITTER.get_user(username) db_user= User(id=twitter_user.id, name=username) DB.session.add(db_user) tweets = twitter_user.timeline(count=tweets_qty,include_rts=False, tweet_mode='extended') if tweets: db_user.newest_tweet_id = tweets[0].id db_user.followers = twitter_user.followers_count db_user.following = twitter_user.friends_count db_user.available_tweets = len(tweets) db_user.oldest_tweet_id = tweets[-1].id db_user.oldest_tweet = tweets[-1].created_at db_user.last_tweeted = tweets[0].created_at for tweet in tweets: #Calculate embedding on the full tweet embedding = BASILICA.embed_sentence(tweet.full_text, model='twitter') db_tweet = Tweet(id=tweet.id, text=tweet.full_text[:300], embedding=embedding,tweet_time=tweet.created_at) db_user.tweets.append(db_tweet) DB.session.add(db_tweet) else: raise Exception("Player doesn't exist in Twitter world") except Exception as e: print('Error processing {}: {}'.format(username,e)) raise e else: DB.session.commit()
3,142
def get_instances(context: models.Context) -> Mapping[str, Instance]: """Get a list of Instance matching the given context, indexed by instance id.""" instances: Dict[str, Instance] = {} if not apis.is_enabled(context.project_id, 'compute'): return instances gce_api = apis.get_api('compute', 'v1', context.project_id) requests = [ gce_api.instances().list(project=context.project_id, zone=zone) for zone in get_gce_zones(context.project_id) ] items = apis_utils.batch_list_all( api=gce_api, requests=requests, next_function=gce_api.instances().list_next, log_text=f'listing gce instances of project {context.project_id}') for i in items: result = re.match( r'https://www.googleapis.com/compute/v1/projects/[^/]+/zones/([^/]+)/', i['selfLink']) if not result: logging.error('instance %s selfLink didn\'t match regexp: %s', i['id'], i['selfLink']) continue zone = result.group(1) labels = i.get('labels', {}) if not context.match_project_resource(location=zone, labels=labels): continue instances[i['id']] = Instance(project_id=context.project_id, resource_data=i) return instances
3,143
def UploadChanges(): """Upload changes, don't prompt.""" # TODO(jfb) Using the commit queue and avoiding git try + manual commit # would be much nicer. See '--use-commit-queue' return ExecCommand(['git', 'cl', 'upload', '--send-mail', '-f'])
3,144
def svn_fs_delete_fs(*args): """svn_fs_delete_fs(char const * path, apr_pool_t pool) -> svn_error_t""" return _fs.svn_fs_delete_fs(*args)
3,145
def translate_output(_output, n_classes, is_binary_classification=False): """ Gets a matrix with one-hot encoding where the 1 represents the index of the class. Parameters ---------- _output : theano.tensor.matrix Output sample. n_classes : int Number of classes (or size of one hot encoding rows) is_binary_classification : bool This flag means that the model is for binary classification. Returns ------- theano.tensor.matrix Returns one hot encoding. """ if is_binary_classification: return T.sgn(_output) else: return to_one_hot(T.argmax(_output, axis=-1), n_classes)
3,146
def get_agent_config_vars(): """ Read and parse config.ini """ if os.path.exists(os.path.abspath(os.path.join(__file__, os.pardir, 'config.ini'))): config_parser = ConfigParser.SafeConfigParser() config_parser.read(os.path.abspath(os.path.join(__file__, os.pardir, 'config.ini'))) try: file_path = config_parser.get('agent', 'file_path') # filters filters_include = config_parser.get('agent', 'filters_include') filters_exclude = config_parser.get('agent', 'filters_exclude') # message parsing json_top_level = config_parser.get('agent', 'json_top_level') # 'Event' #project_field = config_parser.get('agent', 'project_field') instance_field = config_parser.get('agent', 'instance_field') # 'System.Computer' device_field = config_parser.get('agent', 'device_field') # 'System.Provider.@Name' timestamp_field = config_parser.get('agent', 'timestamp_field') or 'timestamp' # 'System.TimeCreated.@SystemTime' timestamp_format = config_parser.get('agent', 'timestamp_format', raw=True) or 'epoch' data_fields = config_parser.get('agent', 'data_fields') except ConfigParser.NoOptionError: logger.error('Agent not correctly configured. Check config file.') sys.exit(1) if len(file_path) != 0: file_regex = r".*\.evtx$|.*\.evt$" files = file_path.split(',') if len(files) > 1: # get evtx files and files within directories logger.debug(files) files = [ i for j in map(lambda k: get_file_list_for_directory(k, file_regex), files) for i in j if i] else: files = get_file_list_for_directory(files[0], file_regex) else: logger.warning('Agent not correctly configured (file_path). Check config file.') sys.exit(1) # filters if len(filters_include) != 0: filters_include = filters_include.split('|') if len(filters_exclude) != 0: filters_exclude = filters_exclude.split('|') if len(data_fields) != 0: data_fields = data_fields.split(',') # timestamp format timestamp_format = timestamp_format.partition('.')[0] if '%z' in timestamp_format or '%Z' in timestamp_format: ts_format_info = strip_tz_info(timestamp_format) else: ts_format_info = {'strip_tz': False, 'strip_tz_fmt': '', 'timestamp_format': timestamp_format} # add parsed variables to a global config_vars = { 'files': files, 'filters_include': filters_include, 'filters_exclude': filters_exclude, 'data_format': 'JSON', 'json_top_level': json_top_level, 'project_field': '', 'instance_field': instance_field, 'device_field': device_field, 'data_fields': data_fields, 'timestamp_field': timestamp_field, 'timestamp_format': ts_format_info['timestamp_format'], 'strip_tz': ts_format_info['strip_tz'], 'strip_tz_fmt': ts_format_info['strip_tz_fmt'] } return config_vars else: logger.warning('No config file found. Exiting...') exit()
3,147
def test_product_weight(self): """Test new product weight being 20.""" prod = Product('Test Product') self.assertEqual(prod.weight, 20)
3,148
def _sphere_point_to_uv(point: Point) -> Vec2d: """Convert a 3D point on the surface of the unit sphere into a (u, v) 2D point""" u = atan2(point.y, point.x) / (2.0 * pi) return Vec2d( u=u if u >= 0.0 else u + 1.0, v=acos(point.z) / pi, )
3,149
def generate_formula_dict(materials_store, query=None): """ Function that generates a nested dictionary of structures keyed first by formula and then by task_id using mongo aggregation pipelines Args: materials_store (Store): store of materials Returns: Nested dictionary keyed by formula-mp_id with structure values. """ props = ["pretty_formula", "structure", "task_id", "magnetic_type"] results = list(materials_store.groupby("pretty_formula", properties=props, criteria=query)) formula_dict = {} for result in tqdm.tqdm(results): formula = result['_id']['pretty_formula'] task_ids = [d['task_id'] for d in result['docs']] structures = [d['structure'] for d in result['docs']] formula_dict[formula] = dict(zip(task_ids, structures)) return formula_dict
3,150
def default_config(): """Provides a default configuration file location.""" return os.path.expanduser('~/.config/discogstagger/discogs_tagger.conf')
3,151
def truncate(wirevector_or_integer, bitwidth): """ Returns a wirevector or integer truncated to the specified bitwidth :param wirevector_or_integer: Either a wirevector or an integer to be truncated :param bitwidth: The length to which the first argument should be truncated. :return: Returns a truncated wirevector or integer as appropriate This function truncates the most significant bits of the input, leaving a result that is only "bitwidth" bits wide. For integers this is performed with a simple bitmask of size "bitwidth". For wirevectors the function calls WireVector.truncate and returns a wirevector of the specified bitwidth. Examples: :: truncate(9,3) # returns 1 (0b1001 truncates to 0b001) truncate(5,3) # returns 5 (0b101 truncates to 0b101) truncate(-1,3) # returns 7 (-0b1 truncates to 0b111) y = truncate(x+1, x.bitwidth) # y.bitwidth will equal x.bitwidth """ if bitwidth < 1: raise PyrtlError('bitwidth must be a positive integer') x = wirevector_or_integer try: return x.truncate(bitwidth) except AttributeError: return x & ((1 << bitwidth)-1)
3,152
def async_add_entities_config(hass, config, async_add_entities): """Set up light for KNX platform configured within platform.""" import xknx group_address_tunable_white = None group_address_tunable_white_state = None group_address_color_temp = None group_address_color_temp_state = None if config[CONF_COLOR_TEMP_MODE] == ColorTempModes.absolute: group_address_color_temp = config.get(CONF_COLOR_TEMP_ADDRESS) group_address_color_temp_state = config.get(CONF_COLOR_TEMP_STATE_ADDRESS) elif config[CONF_COLOR_TEMP_MODE] == ColorTempModes.relative: group_address_tunable_white = config.get(CONF_COLOR_TEMP_ADDRESS) group_address_tunable_white_state = config.get(CONF_COLOR_TEMP_STATE_ADDRESS) light = xknx.devices.Light( hass.data[DATA_XKNX].xknx, name=config[CONF_NAME], group_address_switch=config[CONF_ADDRESS], group_address_switch_state=config.get(CONF_STATE_ADDRESS), group_address_brightness=config.get(CONF_BRIGHTNESS_ADDRESS), group_address_brightness_state=config.get(CONF_BRIGHTNESS_STATE_ADDRESS), group_address_color=config.get(CONF_COLOR_ADDRESS), group_address_color_state=config.get(CONF_COLOR_STATE_ADDRESS), group_address_rgbw=config.get(CONF_RGBW_ADDRESS), group_address_rgbw_state=config.get(CONF_RGBW_STATE_ADDRESS), group_address_tunable_white=group_address_tunable_white, group_address_tunable_white_state=group_address_tunable_white_state, group_address_color_temperature=group_address_color_temp, group_address_color_temperature_state=group_address_color_temp_state, min_kelvin=config[CONF_MIN_KELVIN], max_kelvin=config[CONF_MAX_KELVIN], ) hass.data[DATA_XKNX].xknx.devices.add(light) async_add_entities([KNXLight(light)])
3,153
def save_spectra_model(spectra_model,element = None,name = None,filepath = None): """Save a SpectraModel object to .hdf5 Parameters ---------- spectra_model : SpectraModel instance SpectraModel to be saved. element: str Element that is being modeled. For example: 'C' or 'Nb' or 'O'. This should be in line with what the instrument spits out. For instance, don't name a model for Nb, Niobium because then the model will be saved in the Niobium3d folder, and other functionality will not work since the folder names rely on the way in which the instrument exports the data. Perhaps it would be good to add a list of possible element names, but that is not included yet name : str Name of the model. This can be anything and will be saved in the appropriate folder. filepath: str The location of the folder can be specified. However, this is not recommended because to fully use the package and the GUI add-ons the models should all be saved in the saved_models folder in the appropriately named subfolders. """ if (name is None and filepath is None): raise NameError('You must either name the model or specify the filepath with the name as the last entry in the path') if (not name is None and element is None): raise NameError('You must specify the element. Ex: "C", "Nb" and "O"') if (not name is None and not filepath is None): raise NameError('You must choose either a name or a filepath') if filepath is None: model_folder = element+spectra_model.orbital fpath = os.path.join(cfg.package_location,'XPyS/saved_models',model_folder,name) else: fpath = filepath # model_to_hdf5(fpath,model,pars,pairlist,spectra_model.element_ctrl) _spectramodel_to_hdf5(spectra_model,fpath) print('Saved model to: ',fpath)
3,154
def pcoef(xte, yte, rle, x_cre, y_cre, d2ydx2_cre, th_cre, surface): # Docstrings """evaluate the PARSEC coefficients""" # Initialize coefficients coef = np.zeros(6) # 1st coefficient depends on surface (pressure or suction) if surface.startswith('p'): coef[0] = -sqrt(2*rle) else: coef[0] = sqrt(2*rle) # Form system of equations A = np.array([ [xte**1.5, xte**2.5, xte**3.5, xte**4.5, xte**5.5], [x_cre**1.5, x_cre**2.5, x_cre**3.5, x_cre**4.5, x_cre**5.5], [1.5*sqrt(xte), 2.5*xte**1.5, 3.5*xte**2.5, 4.5*xte**3.5, 5.5*xte**4.5], [1.5*sqrt(x_cre), 2.5*x_cre**1.5, 3.5*x_cre**2.5, 4.5*x_cre**3.5, 5.5*x_cre**4.5], [0.75*(1/sqrt(x_cre)), 3.75*sqrt(x_cre), 8.75*x_cre**1.5, 15.75*x_cre**2.5, 24.75*x_cre**3.5] ]) B = np.array([ [yte - coef[0]*sqrt(xte)], [y_cre - coef[0]*sqrt(x_cre)], [tan(th_cre*pi/180) - 0.5*coef[0]*(1/sqrt(xte))], [-0.5*coef[0]*(1/sqrt(x_cre))], [d2ydx2_cre + 0.25*coef[0]*x_cre**(-1.5)] ]) # Solve system of linear equations # X = np.linalg.solve(A,B) X = np.linalg.lstsq(A,B)[0] # Gather all coefficients coef[1:6] = X[0:5,0] # Return coefficients return coef
3,155
def adjust_learning_rate(optimizer, org_lr, epoch, schedule=[20, 40], decay_rate=0.1): """Decay the learning rate based on schedule""" lr = org_lr for milestone in schedule: lr *= decay_rate if epoch >= milestone else 1. print("---> learning rate is set to {}".format(lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr
3,156
def cpu_usage(self, max_cpu_percentage=80): """Limit max cpu usage """ if psutil.cpu_percent() < max_cpu_percentage: hevlog.logging.debug('[cpu usage] {}%'.format(psutil.cpu_percent())) return True else: hevlog.logging.debug('[cpu usage] {}%'.format(psutil.cpu_percent())) return False
3,157
def search_fromCSV(Data,holy_array, bookmark = 0): """ holy array: array of words that is going to be compared with Twitter's text data """ print("Initializing crawler") WINDOW_SIZE = "1920,1080" chrome_options = Options() chrome_options.add_argument("--headless") chrome_options.add_argument("--window-size=%s" % WINDOW_SIZE) chrome_options.add_argument('--no-sandbox') driver = webdriver.Chrome(chrome_options=chrome_options) try: for l in range(bookmark,len(Data)): # Here we loop the automated query cycle term = Data["Company"].iloc[l] foundation = datetime.datetime.strptime(Data["FundDate"].iloc[l],"%Y-%m-%d").date() - datetime.timedelta(17+30*(month_before_funding-1)) bracket = foundation - datetime.timedelta(30) bracko = bracket.strftime("%Y-%m-%d") # Replacing special characters that will be in query if "&" in term: term = term.replace("&","%26") if "#" in term: term = term.replace("#","%23") if "\\" in term: term = term.replace("\\","/") driver.get( 'https://twitter.com/search?q=\"' + term+'\"' + '%20until%3A{}%20since%3A{}'.format(foundation,bracko) +'&src=typed_query&f=live' ) # There must be a second between queries, at least. try: WebDriverWait(driver, 2.5).until( trinity_condition() ) # What's this, it just continues?? except TimeoutException: #Add case when no internet driver.quit() return False #Tie to database to mark a problem try: ak = time.perf_counter() WebDriverWait(driver, 5).until( EC.presence_of_element_located((By.XPATH,"//div[@data-testid='emptyState']")) ) u = driver.find_element_by_xpath("//div[@data-testid='emptyState']") um = pd.Series(Data["tweets"]) um[l] = 0 Data["tweets"] = um links = {("EMPTY_RESULT",Data["Company"].iloc[l],None,None,None,bracket)} print("ID: {}".format(Data["ID"].iloc[l])) print(links) except TimeoutException: # Put except links=set() u = True # This will need tweaking too count_scrap = time.perf_counter() while u == True: # Reference /html/body/div/div/div/div[2]/main/div/div/div/div[1]/div/div[2]/div/div/section/div/div/div[x]/div/div/article time.sleep(0.2) if math.trunc(driver.execute_script("return document.body.scrollHeight;") - driver.execute_script("return document.documentElement.scrollTop;") - driver.execute_script("return window.innerHeight;")) == 0: try: WebDriverWait(driver, 1.5).until( EC.presence_of_element_located((By.XPATH,"//div[@role='progressbar']")) ) # Add here what happens when Twitter loads eternally except TimeoutException: u = False try: WebDriverWait(driver, 5).until_not( EC.presence_of_element_located((By.XPATH,"//div[@role='progressbar']")) ) except TimeoutException: um = pd.Series(Data["tweets"]) um[l] = 0 Data["tweets"] = um u = False links = links.union(extract(driver)) # u=False or break?? # Why is this here? 
Relocate to extract if 1756 + driver.execute_script("return document.documentElement.scrollTop;") >= driver.execute_script("return document.body.scrollHeight;"): driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") else: driver.execute_script("window.scrollTo(0, document.documentElement.scrollTop + 1756*2);") try: u = driver.find_element_by_xpath("/html/body/div/div/div/div[2]/main/div/div/div/div/div/div[2]/div/div/div/div[2]/div/span/span") driver.quit() return False except: print("{} links found, word: {} (ID: {}) in {} seconds".format(len(links),term,Data["ID"].iloc[l],time.perf_counter()-count_scrap)) print(links) uk = time.perf_counter() print("Search of {} items takes {} seconds".format(len(links),uk-ak)) lengths = pd.Series(Data["tweets"]) lengths[l] = len(links) Data["tweets"] = lengths print(Data[["Company","tweets"]]) acceptable = [t>14 for t in Data["tweets"]] ouaga = acceptable.count(True) if l +1 - bookmark== 0: perc = 0 else: perc = ouaga*100/(l+1- bookmark) print("Valid companies: {} out of {}. ({}%), with an average amount of Tweets of {}".format(ouaga, l+1-bookmark, perc, np.mean(Data["tweets"].iloc[acceptable]))) # Don't connect to database if no tweets gathered stops = nltk.corpus.stopwords.words("english") def clean(tokens,stops): tokens = pd.Series(x for x in tokens if not x in stops) l = nltk.wordnet.WordNetLemmatizer() lemmatized = [] for word, tag in nltk.pos_tag(tokens): if tag.startswith('NN'): pos = 'n' elif tag.startswith('VB'): pos = 'v' else: pos = 'a' lemmatized.append(l.lemmatize(word, pos)) return pd.Series(lemmatized) def select(lista, holy_array): if (list(lista.values) == list(clean(nltk.tokenize.word_tokenize("TweetsNotFound", "english"),stops).values) or list(lista.values) == list(clean(nltk.tokenize.word_tokenize("EMPTY_RESULT", "english"),stops).values)): return [j for j in lista] else: return [j for j in lista if j in holy_array.values] storage.add_tweets([(str(" ".join(select(clean(nltk.tokenize.word_tokenize(z[0]),stops),holy_array))),fiki.polarity_scores(z[0])["compound"],fiki.polarity_scores(z[0])["pos"],fiki.polarity_scores(z[0])["neg"],fiki.polarity_scores(z[0])["neu"], Data["Company"].iloc[l], z[3], z[2], z[1],bracket) for z in links]) reportime = (datetime.timedelta(hours=2) + datetime.datetime.now()).strftime("%H:%M:%S-%Y/%m/%d") k = open("data/errorLog.txt", "a") k.write("Checkpoint saved {} at time {} at company {} and company ID {} with {} tweets".format(bracko, reportime, Data["Company"].iloc[l], Data["ID"].iloc[l], len(links) ) + os.linesep) k.close() return True finally: pass print("Final Dataset") print(Data[["Company","tweets"]]) driver.quit()
3,158
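A minimal driver sketch for search_fromCSV. The DataFrame columns are inferred from the accesses made inside the function; the module-level dependencies (selenium setup, trinity_condition, extract, storage, fiki, month_before_funding) are assumed to be configured elsewhere in the original project, so this is illustrative rather than standalone.

import pandas as pd

# Hypothetical input; only the columns the crawler actually reads are included.
companies = pd.DataFrame({
    "ID": [1, 2],
    "Company": ["Acme AI", "Beta & Co"],
    "FundDate": ["2020-05-01", "2019-11-15"],
    "tweets": [0, 0],
})
keywords = pd.Series(["growth", "funding", "launch", "product"])

ok = search_fromCSV(companies, keywords, bookmark=0)
if not ok:
    print("Crawl stopped early; rerun with bookmark set to the last company index in data/errorLog.txt")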
def iscircular(linked_list):
    """
    Determine whether the Linked List is circular or not

    Args:
       linked_list(obj): Linked List to be checked
    Returns:
       bool: Return True if the linked list is circular, return False otherwise
    """
    # Floyd's cycle detection: the runners only meet if there is a cycle.
    slow_runner = linked_list.head
    fast_runner = linked_list.head
    while fast_runner is not None and fast_runner.next is not None:
        slow_runner = slow_runner.next
        fast_runner = fast_runner.next.next
        if slow_runner == fast_runner:
            return True
    return False
3,159
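A self-contained check of iscircular using a minimal, hypothetical Node/LinkedList pair; the project's real classes may differ, but only a .head attribute and .next pointers are required.

class Node:
    def __init__(self, value):
        self.value = value
        self.next = None

class LinkedList:
    def __init__(self):
        self.head = None

# Build 1 -> 2 -> 3, then close the loop back to the head.
ll = LinkedList()
ll.head = Node(1)
ll.head.next = Node(2)
ll.head.next.next = Node(3)
print(iscircular(ll))             # False
ll.head.next.next.next = ll.head
print(iscircular(ll))             # True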
def show_board(grid): """ Prints the whole board. Joins rows together with dashed lines before printing. """ bar = ["-" * (len(grid) * 4 - 1)] board = concat(interleave(bar, [show_row(row) for row in grid])) print("\n".join(board))
3,160
def shape_extent_to_header(shape, extent, nan_value=-9999): """ Create a header dict with shape and extent of an array """ ncols = shape[1] nrows = shape[0] xllcorner = extent[0] yllcorner = extent[2] cellsize_x = (extent[1]-extent[0])/ncols cellsize_y = (extent[3]-extent[2])/nrows if cellsize_x != cellsize_y: raise ValueError('extent produces different cellsize in x and y') cellsize = cellsize_x header = {'ncols':ncols, 'nrows':nrows, 'xllcorner':xllcorner, 'yllcorner':yllcorner, 'cellsize':cellsize, 'NODATA_value':nan_value} return header
3,161
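A quick worked example of shape_extent_to_header: a 4 x 5 grid covering x in [0, 50] and y in [0, 40] gives a common cell size of 10, while a mismatched extent raises the ValueError.

header = shape_extent_to_header(shape=(4, 5), extent=(0, 50, 0, 40))
# {'ncols': 5, 'nrows': 4, 'xllcorner': 0, 'yllcorner': 0,
#  'cellsize': 10.0, 'NODATA_value': -9999}
print(header)

# Different x/y cell sizes are rejected:
# shape_extent_to_header(shape=(4, 5), extent=(0, 50, 0, 30))  -> ValueError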
def build_encoder(opt, embeddings): """ Various encoder dispatcher function. Args: opt: the option in current environment. embeddings (Embeddings): vocab embeddings for this encoder. """ if opt.encoder_type == "transformer": return TransformerEncoder(opt.enc_layers, opt.rnn_size, opt.heads, opt.transformer_ff, opt.dropout, embeddings) elif opt.encoder_type == "cnn": return CNNEncoder(opt.enc_layers, opt.rnn_size, opt.cnn_kernel_width, opt.dropout, embeddings) elif opt.encoder_type == "mean": return MeanEncoder(opt.enc_layers, embeddings) else: # "rnn" or "brnn" return RNNEncoder(opt.wals_model, opt.rnn_type, opt.brnn, opt.enc_layers, opt.rnn_size, opt.wals_size, opt.dropout, embeddings, opt.bridge)
3,162
def load_rendered_images_object_type(resources_path, n_channels, mode="render"): """ Import images from the resources dir with certain number of channels :param resources_path: Dir path from were images are fetched :param n_channels: Number of colors for the images :return: """ path_list = list(glob.glob(resources_path + '/*.png')) file_list = [os.path.basename(x) for x in path_list] object_list = [] render_numbers = np.array([int(x.split("_")[-2]) for x in file_list]) x_train = np.array([imageio.imread(x)[:, :, :n_channels] for x in path_list]) if mode == "angles": labels = 2 * np.pi * render_numbers / np.amax(render_numbers) elif mode == "circle": angles = 2 * np.pi * render_numbers / np.amax(render_numbers) labels = np.zeros((len(angles), 2)) labels[:, 0] = np.cos(angles) labels[:, 1] = np.sin(angles) else: labels = render_numbers return x_train, labels
3,163
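A usage sketch for load_rendered_images_object_type, assuming a folder of PNG renders whose file names end in ..._<render_number>_<suffix>.png, as implied by the split("_")[-2] parsing; the folder path and naming pattern here are hypothetical.

# e.g. resources/renders contains files like "chair_012_0.png"
x_train, labels = load_rendered_images_object_type("resources/renders", n_channels=3, mode="angles")
print(x_train.shape)                 # (n_images, height, width, 3)
print(labels.min(), labels.max())    # render numbers rescaled into [0, 2*pi]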
def efficientnet_b3b(in_size=(300, 300), **kwargs): """ EfficientNet-B3-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946. Parameters: ---------- in_size : tuple of two ints, default (300, 300) Spatial size of the expected input image. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.keras/models' Location for keeping the model parameters. """ return get_efficientnet(version="b3", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b3b", **kwargs)
3,164
def logged(class_):
    """Class-level decorator to insert logging.

    This assures that a class has a ``.log`` member.

    ::

        @logged
        class Something:
            def __init__(self, args):
                self.log.info(f"init with {args}")

    """
    class_.log = logging.getLogger(class_.__qualname__)
    return class_
3,165
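The decorator in action: with basic logging configured, every instance (and the class itself) shares a logger named after the class's qualified name.

import logging

logging.basicConfig(level=logging.INFO)

@logged
class Worker:
    def __init__(self, job):
        self.log.info("starting %s", job)

Worker("resize-images")   # emits: INFO:Worker:starting resize-images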
def makeDBTable (filepath, title, fields, values, kvpairs=None): """Create a spreadsheet containing the supplied data, rather as it would be stored in a database table. However, there is also a title line and there can be key-value lines at the head of the table, before the line with the field names. Normal table lines may not have empty first columns: such lines are ignored. The key-value lines are marked by '#' in the first column. The table data (<values>) is supplied as a list of line data. Each line is a list/tuple of the field values (in the correct order). The field names are provided as a separate list, <fields>. Also a title should be provided, as <title>. A number of key-value pairs may also, optionally, be provided as a list (<kvpairs>: [(key, value), ... ] """ sheet = NewSpreadsheet (None) sheet.setCell (0, 0, None) sheet.setCell (0, 1, title) row = 2 if kvpairs: for k, v in kvpairs: sheet.setCell (row, 0, '#') sheet.setCell (row, 1, k) sheet.setCell (row, 2, v) row += 1 row += 1 col = 0 for f in fields: sheet.setCell (row, col, f) col += 1 row += 2 for vrow in values: if vrow == None: # Empty line row += 1 continue col = 0 for v in vrow: sheet.setCell (row, col, v) col += 1 row += 1 sheet.save (filepath)
3,166
def TableInFirstNSStart(builder): """This method is deprecated. Please switch to Start.""" return Start(builder)
3,167
def dicom_to_nifti(dicom_input, output_file=None):
    """
    This is the main dicom to nifti conversion function for siemens images.
    As input siemens images are required. It will then determine the type of images and do the correct conversion

    :param output_file: filepath to the output nifti
    :param dicom_input: directory with dicom files for 1 scan
    """
    assert common.is_siemens(dicom_input)

    # remove duplicate slices based on position and data
    dicom_input = convert_generic.remove_duplicate_slices(dicom_input)

    # remove localizers based on image type
    dicom_input = convert_generic.remove_localizers_by_imagetype(dicom_input)

    # remove localizers based on image orientation (only valid if slicecount is validated)
    dicom_input = convert_generic.remove_localizers_by_orientation(dicom_input)

    if _is_4d(dicom_input):
        logger.info('Found sequence type: MOSAIC 4D')
        return _mosaic_4d_to_nifti(dicom_input, output_file)

    grouped_dicoms = _classic_get_grouped_dicoms(dicom_input)
    if _is_classic_4d(grouped_dicoms):
        logger.info('Found sequence type: CLASSIC 4D')
        return _classic_4d_to_nifti(grouped_dicoms, output_file)

    logger.info('Assuming anatomical data')
    return convert_generic.dicom_to_nifti(dicom_input, output_file)
3,168
def testd3(): """Test method""" primitives = get_d3m_primitives() planner = LevelOnePlannerOld(primitives=primitives) pipelines = planner.generate_pipelines(20) for pipeline in pipelines: print(pipeline)
3,169
def remove_php_pool(domain): """This function removes the php pool of the domain""" filename = '/etc/php/7.0/fpm/pool.d/'+domain+'.conf' if os.path.isfile(filename): os.unlink(filename)
3,170
def lot_vectors_dense_internal( sample_vectors, sample_distributions, reference_vectors, reference_distribution, metric=cosine, max_distribution_size=256, chunk_size=256, spherical_vectors=True, ): """Efficiently compute linear optimal transport vectors for a block of data provided as a list of distributions and a corresponding list of arrays of vectors. Parameters ---------- sample_vectors: numba.typed.List of ndarrays A set of vectors for each distribution. sample_distributions: numba.typed.List of ndarrays A set of distributions (1d arrays that sum to one). The ith element of a given distribution is the probability mass on the ith row of the corresponding entry in the ``sample_vectors`` list. reference_vectors: ndarray The reference vector set for LOT reference_distribution: ndarray The reference distribution over the set of reference vectors metric: function(ndarray, ndarray) -> float The distance function to use for distance computation max_distribution_size: int (optional, default=256) The maximum size of a distribution to consider; larger distributions over more vectors will be truncated back to this value for faster performance. chunk_size: int (optional, default=256) Operations will be parallelised over chunks of the input. This specifies the chunk size. spherical_vectors: bool (optional, default=True) Whether the vectors live on an n-sphere instead of euclidean space and thus require some degree of spherical correction. Returns ------- lot_vectors: ndarray The raw linear optimal transport vectors correpsonding to the input. """ n_rows = len(sample_vectors) result = np.zeros((n_rows, reference_vectors.size), dtype=np.float64) n_chunks = (n_rows // chunk_size) + 1 for n in range(n_chunks): chunk_start = n * chunk_size chunk_end = min(chunk_start + chunk_size, n_rows) for i in range(chunk_start, chunk_end): row_vectors = sample_vectors[i].astype(np.float64) row_distribution = sample_distributions[i] if row_vectors.shape[0] > max_distribution_size: best_indices = np.argsort(-row_distribution)[:max_distribution_size] row_vectors = row_vectors[best_indices] row_distribution = row_distribution[best_indices] row_sum = row_distribution.sum() if row_sum > 0.0: row_distribution /= row_sum if row_vectors.shape[0] > reference_vectors.shape[0]: cost = chunked_pairwise_distance( row_vectors, reference_vectors, dist=metric ) else: cost = chunked_pairwise_distance( reference_vectors, row_vectors, dist=metric ).T current_transport_plan = transport_plan( row_distribution, reference_distribution, cost ) transport_images = ( current_transport_plan * (1.0 / reference_distribution) ).T @ row_vectors if spherical_vectors: l2_normalize(transport_images) transport_vectors = transport_images - reference_vectors if spherical_vectors: tangent_vectors = project_to_sphere_tangent_space( transport_vectors, reference_vectors ) l2_normalize(tangent_vectors) scaling = tangent_vectors_scales( transport_images, reference_vectors ) transport_vectors = tangent_vectors * scaling result[i] = transport_vectors.flatten() # Help the SVD preserve spherical data by sqrt entries if spherical_vectors: for i in range(result.shape[0]): for j in range(result.shape[1]): result[i, j] = np.sign(result[i, j]) * np.sqrt(np.abs(result[i, j])) return result
3,171
def _make_message(request, level, msg): """ Just add the message once """ if msg not in [m.message for m in get_messages(request)]: messages.add_message(request, level, msg)
3,172
def load(
    filename,
    rsc_file=None,
    rows=None,
    cols=None,
    band=1,
    **kwargs,
):
    """Load a file, either using numpy or rasterio"""
    if rsc_file:
        rsc_data = load_rsc(rsc_file)
        return load_stacked_img(filename, rsc_data=rsc_data, rows=rows, cols=cols)
    else:
        try:
            import rasterio as rio
        except ImportError:
            raise ValueError("Need to `conda install rasterio` to load gdal-readable files")
        with rio.open(filename) as src:
            return src.read(band)
3,173
def simple_scan_network(): """ Do a simple network scan, which only works if your network configuration is 192.168.1.x """ base_ip = "192.168.1." addresses = ['127.0.0.1'] for index in range(1, 255): addresses.extend([base_ip + str(index)]) return addresses
3,174
def check_valid_file_or_folder(value):
    """verifies the path exists (as a file or folder) and isn't a link"""
    if value is not None:
        if not os.path.isfile(value) and not os.path.isdir(value):
            raise argparse.ArgumentTypeError("{} does not exist or is not a file/folder.".
                                             format(value))
        check_for_link(value)
    return value
3,175
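This validator is meant to be plugged into argparse as a type= callable. A hedged sketch, assuming "/etc" exists on the machine (as on a typical Unix system) and that the external check_for_link helper accepts it.

import argparse

parser = argparse.ArgumentParser(description="Process an input path")
parser.add_argument("--path", type=check_valid_file_or_folder,
                    help="existing file or directory (links are rejected)")
args = parser.parse_args(["--path", "/etc"])
print(args.path)   # '/etc' if it passed validation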
def fake_message_source(empty_zodb):
    """Fake the message source so that tests do not depend on the current user."""
    UserSpecificRAMMessageSource = (
        icemac.addressbook.browser.messages.messages
        .UserSpecificRAMMessageSource)
    with patch.object(UserSpecificRAMMessageSource, '_get_storage') as storage:
        storage.return_value = []
        yield
3,176
def channel_lvlv_2jet(): """ Mostly based on table 8 of the combination paper for the uncertainties and table 9 for the event counts. """ channel = ROOT.RooStats.HistFactory.Channel( "HWWlvlv2Jet" ) container.append(channel) channel.SetData(55) background = ROOT.RooStats.HistFactory.Sample("background") background.SetValue(36*1.1) # background.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) # background.AddOverallSys("JES", 0.93, 1.07) channel.AddSample(background) container.append(background) signalGGFttH = ROOT.RooStats.HistFactory.Sample("signalGGFttH") signalGGFttH.SetValue(10.9*1.00*0.19) # increase by a factor for better agreement with ATLAS contour signalGGFttH.AddNormFactor("mu", 1, 0, 6) signalGGFttH.AddNormFactor("mu_XS8_ggF", 1, -5, 10) signalGGFttH.AddNormFactor("muT_lvlv", 1, -5, 10) signalGGFttH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH2in", 0.96, 1.04) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH3in", 0.96, 1.04) signalGGFttH.AddOverallSys("QCDscale_Higgs_acceptance_2jet", 0.97, 1.03) signalGGFttH.AddOverallSys("UE_2jet", 0.95, 1.05) signalGGFttH.AddOverallSys("JES", 0.94, 1.06) channel.AddSample(signalGGFttH) container.append(signalGGFttH) signalVBFVH = ROOT.RooStats.HistFactory.Sample("signalVBFVH") signalVBFVH.SetValue(10.9*1.000*0.81) # increase by a factor for better agreement with ATLAS contour signalVBFVH.AddNormFactor("mu", 1, 0, 6) signalVBFVH.AddNormFactor("mu_XS8_VBF", 1, -5, 10) signalVBFVH.AddNormFactor("muW_lvlv", 1, -5, 10) signalVBFVH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalVBFVH.AddOverallSys("UE_2jet", 0.95, 1.05) signalVBFVH.AddOverallSys("JES", 0.94, 1.06) channel.AddSample(signalVBFVH) container.append(signalVBFVH) return channel
3,177
def vtkVariantStrictEquality(s1, s2): """ Check two variants for strict equality of type and value. """ s1 = vtk.vtkVariant(s1) s2 = vtk.vtkVariant(s2) t1 = s1.GetType() t2 = s2.GetType() # check based on type if t1 != t2: return False v1 = s1.IsValid() v2 = s2.IsValid() # check based on validity if (not v1) and (not v2): return True elif v1 != v2: return False # extract and compare the values r1 = getattr(s1, _variant_method_map[t1])() r2 = getattr(s2, _variant_method_map[t2])() return (r1 == r2)
3,178
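A few illustrative comparisons for vtkVariantStrictEquality; this assumes the VTK Python bindings are installed and that the module-level _variant_method_map from the original file is available.

import vtk

print(vtkVariantStrictEquality(1, 1))       # True: same type, same value
print(vtkVariantStrictEquality(1, 1.0))     # False: int vs double variant
print(vtkVariantStrictEquality("1", 1))     # False: string vs int
print(vtkVariantStrictEquality(vtk.vtkVariant(), vtk.vtkVariant()))  # True: two invalid variants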
def is_network_failure(error): """Returns True when error is a network failure.""" return ((isinstance(error, RETRY_URLLIB_EXCEPTIONS) and error.code in RETRY_HTTP_CODES) or isinstance(error, RETRY_HTTPLIB_EXCEPTIONS) or isinstance(error, RETRY_SOCKET_EXCEPTIONS) or isinstance(error, RETRY_REQUESTS_EXCEPTIONS) or is_retriable_requests_httperror(error))
3,179
def predict(model, X, threshold=0.5):
    """Generate NumPy output predictions on a dataset using a given model.

    Args:
        model (torch model): A PyTorch model
        X (DataFrame): A dataframe-based gene dataset to predict on
        threshold (float): Probability cutoff at or above which a prediction is labelled 1
    """
    X_tensor, _ = convert_dataframe_to_tensor(X, [])

    model.eval()
    with torch.no_grad():
        y_pred = (model(X_tensor) >= threshold).int().numpy()
    return y_pred
3,180
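A toy sketch of predict with a stand-in model. It assumes the project's convert_dataframe_to_tensor helper turns the numeric columns of X into a float tensor of matching width, so it is not standalone.

import pandas as pd
import torch
import torch.nn as nn

# Tiny stand-in model producing probabilities in [0, 1].
model = nn.Sequential(nn.Linear(4, 1), nn.Sigmoid())

X = pd.DataFrame(torch.rand(8, 4).numpy(), columns=["g1", "g2", "g3", "g4"])
y_pred = predict(model, X, threshold=0.5)
print(y_pred.shape)   # (8, 1) array of 0/1 ints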
def test_series_info(): """test generation of infos DataFrame.""" series.load_info('External_File_Info.txt') info = series.info assert round(info.at[4, 'time (unix)']) == 1599832405
3,181
def view_mphn_pkl(infile, N=10, viruses=False):
    """
    Read in a metaphlan database pkl file and print the first N entries.
    # Sequence formatting: (NCBI_taxid)(UniRef90_cluster)(CDS_name)
    # Note that the UniRef90 clusterID does NOT include "Uniref90_"
    """
    logging.info('Reading in: {}'.format(infile))
    db = pickle.load(bz2.open(infile, 'r'))
    m = '-- taxonomy of each genome: [taxonomy, tax-ids_per-tax-level, genome_length] --'
    logging.info(m)
    cnt = 0
    for tax in db['taxonomy'].keys():
        # skip viral/viroid genomes unless explicitly requested
        if viruses is False and ('k__Viruses' in tax or 'k__Viroids' in tax):
            continue
        cnt += 1
        if cnt > N:
            break
        print([tax, db['taxonomy'][tax]])
    m = '-- marker gene: [gene_id : {ext: [], score: float, clade: str, len: gene_len, taxon: str}] --'
    logging.info(m)
    logging.info('  "gene_id" matches the sequence headers of the marker fasta')
    logging.info('  ext: GCA-IDs of non-species genomes that have markers')
    cnt = 0
    for mkr in db['markers'].keys():
        if viruses is False and 'k__Viruses' in db['markers'][mkr]['taxon']:
            continue
        cnt += 1
        if cnt > N:
            break
        print([mkr, db['markers'][mkr]])
    exit(0)
3,182
def color_image( img: np.ndarray, unique_colors=True, threshold=100, approximation_accuracy=150 ) -> np.ndarray: """ This function detects simple shapes in the image and colors them. Detected figures will be also subscribed in the final image. The function can detect triangles, quadrilateral, and circles; any other figure will be marked "UNEXPECTED". The algorithm uses OpenCV to find contours on a grayscale version of the image. Then it uses a polygon approximation algorithm to reduce the number of vertices in contours. The resulted polygons are used to identify and color figures in the image. parameters: img - image with figures to color unique_colors - flag to color all figures in unique colores independent of the number of vertices. The default behavior is coloring all the figures of the same type in one color threshold - background threshold for a grayscale image, using that the algo will separate figures from the background approximation_accuracy - accuracy of polygon approximation for detected contours output: the image with colored and subscribed figures """ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # apply threshold thresholded_im = np.zeros(img.shape[:2], dtype=np.uint8) thresholded_im[gray > threshold] = 255 contours, _ = cv2.findContours( thresholded_im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE ) if unique_colors: colors = gen_colors(len(contours)) for i, contour in enumerate(contours): # find positions of vertices to count them # we need some value to estimate approximation accuracy - let it be perimeter object_perimeter = cv2.arcLength(contour, closed=True) approx = cv2.approxPolyDP( contour, epsilon=object_perimeter / approximation_accuracy, closed=True ) n_vertices = len(approx) # find object centers # M = cv2.moments(contour) x, y = approx.squeeze().mean(axis=0).astype(int) # offset to the left for x x = (x + 2 * approx[:, 0, 0].min()) // 3 # COLORING PART # highlight contours cv2.drawContours(img, [contour], 0, (255, 255, 255), 4) # fill the object if unique_colors: color = colors[i].tolist() else: color = get_color_for_figure(n_vertices) cv2.fillPoly(img, pts=[contour], color=color) # subscribe the figure print_figure_name(img, n_vertices, (x, y)) return img
3,183
def restore_model(pb_path): """Restore the latest model from the given path.""" subdirs = [x for x in Path(pb_path).iterdir() if x.is_dir() and 'temp' not in str(x)] latest_model = str(sorted(subdirs)[-1]) predict_fn = predictor.from_saved_model(latest_model) return predict_fn
3,184
def _generate_resolution_shells(low, high): """Generate 9 evenly spaced in reciprocal space resolution shells from low to high resolution, e.g. in 1/d^2.""" dmin = (1.0 / high) * (1.0 / high) dmax = (1.0 / low) * (1.0 / low) diff = (dmin - dmax) / 8.0 shells = [1.0 / math.sqrt(dmax)] for j in range(8): shells.append(1.0 / math.sqrt(dmax + diff * (j + 1))) return shells
3,185
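For example, data between 50 Å and 1.5 Å yields nine shell boundaries evenly spaced in 1/d²; the first and last values are the two resolution limits by construction.

shells = _generate_resolution_shells(50.0, 1.5)
print(len(shells))            # 9
print(shells[0])              # 50.0, the low-resolution limit
print(round(shells[-1], 3))   # 1.5, the high-resolution limit (up to float rounding)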
def add_ports_from_markers_square( component: Component, pin_layer: Layer = (69, 0), port_layer: Optional[Layer] = None, orientation: Optional[int] = 90, min_pin_area_um2: float = 0, max_pin_area_um2: float = 150 * 150, pin_extra_width: float = 0.0, port_names: Optional[Tuple[str, ...]] = None, port_name_prefix: str = "o", ) -> Component: """add ports from markers center in port_layer squared Args: component: to read polygons from and to write ports to pin_layer: for port markers port_layer: for the new created port orientation: in degrees 90: north, 0: east, 180: west, 270: south min_pin_area_um2: ignores pins with area smaller than min_pin_area_um2 max_pin_area_um2: ignore pins for area above certain size pin_extra_width: 2*offset from pin to straight port_names: names of the ports (defaults to {i}) """ port_markers = read_port_markers(component, [pin_layer]) port_names = port_names or [ f"{port_name_prefix}{i+1}" for i in range(len(port_markers.polygons)) ] layer = port_layer or pin_layer for port_name, p in zip(port_names, port_markers.polygons): dy = snap_to_grid(p.ymax - p.ymin) dx = snap_to_grid(p.xmax - p.xmin) x = p.x y = p.y if dx == dy and max_pin_area_um2 > dx * dy > min_pin_area_um2: component.add_port( port_name, midpoint=(x, y), width=dx - pin_extra_width, orientation=orientation, layer=layer, ) return component
3,186
def P(Document, *fields, **kw): """Generate a MongoDB projection dictionary using the Django ORM style.""" __always__ = kw.pop('__always__', set()) projected = set() omitted = set() for field in fields: if field[0] in ('-', '!'): omitted.add(field[1:]) elif field[0] == '+': projected.add(field[1:]) else: projected.add(field) if not projected: # We only have exclusions from the default projection. names = set(getattr(Document, '__projection__', Document.__fields__) or Document.__fields__) projected = {name for name in (names - omitted)} projected |= __always__ if not projected: projected = {'_id'} return {unicode(traverse(Document, name, name)): True for name in projected}
3,187
def test_decision_tree(): """Check the interface to the Decisiontree class.""" model = "decision_tree" run_test(model) run_prediction(model)
3,188
def generate_txt(url, split_, number=None, version="3.0.0"): """ generate txt file of cnn_dailymail dataset Args: url (str): directory of dataset txt file. split_ (str): test or train. number (int): top-n number of samples from dataset version (str): "3.0.0" by default """ cnn = load_dataset("cnn_dailymail", version, split=split_) if number == -1: number = len(cnn) f = open(url + split_ + '.txt', 'w') for idx in range(number): article = cnn[idx]['article'] article = article.replace('\n', ' ') highlights = cnn[idx]['highlights'] highlights = highlights.replace('\n', ' ') f.write(article + "\t" + highlights + '\n') f.close()
3,189
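Calling generate_txt for a small sample; note that this downloads the CNN/DailyMail corpus through the HuggingFace datasets hub on first use, and the output directory is assumed to exist already.

generate_txt("./data/", "test", number=5)   # writes ./data/test.txt
with open("./data/test.txt") as f:
    article, highlights = f.readline().rstrip("\n").split("\t")
print(highlights[:80])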
async def new_scope( *, deadline: Optional[float] = None, timeout: Optional[float] = None ) -> AsyncIterator["Scope"]: """Creates a scope in which asynchronous tasks can be launched. This is inspired by the concept of "nurseries" in trio: https://trio.readthedocs.io/en/latest/reference-core.html#nurseries-and-spawning We define the lifetime of a scope using an `async with` statement. Inside this block we can then spawn new asynchronous tasks which will run in the background, and the block will only exit when all spawned tasks are done. If an error is raised by the code in the block itself or by any of the spawned tasks, all other background tasks will be interrupted and the block will raise an error. Args: deadline: Absolute time in epoch seconds when the scope should exit. timeout: Time in seconds from now when the scope should exit. If both deadline and timeout are given, the actual deadline will be whichever one will elapse first. """ main_task = impl.current_task() scheduler = main_task.scheduler tasks: Set[impl.Task] = set() async def finish_tasks(): while True: await impl.any_ready(tasks) tasks.intersection_update(scheduler.active_tasks) if not tasks: break if timeout is not None: if deadline is None: deadline = scheduler.time() + timeout else: deadline = min(deadline, scheduler.time() + timeout) if deadline is not None: main_task.push_deadline(deadline) try: yield Scope(main_task, scheduler, tasks) await finish_tasks() except (impl.Interrupt, Exception) as exc: # Interrupt remaining tasks. for task in tasks: if not task.done: task.interrupt(main_task, RuntimeError("scope exited")) # Finish remaining tasks while ignoring further interrupts. main_task.interruptible = False await finish_tasks() main_task.interruptible = True # If interrupted, raise the underlying error but suppress the context # (the Interrupt itself) when displaying the traceback. if isinstance(exc, impl.Interrupt): exc = exc.error exc.__suppress_context__ = True raise exc finally: if deadline is not None: main_task.pop_deadline()
3,190
def get_page_url(skin_name, page_mappings, page_id): """ Returns the page_url for the given page_id and skin_name """ fallback = '/' if page_id is not None: return page_mappings[page_id].get('path', '/') return fallback
3,191
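A small example of get_page_url; the skin_name argument is accepted but not used by the current implementation, and a missing page_id falls back to '/'.

page_mappings = {
    "home": {"path": "/", "title": "Home"},
    "about": {"path": "/about-us", "title": "About"},
}

print(get_page_url("classic", page_mappings, "about"))  # '/about-us'
print(get_page_url("classic", page_mappings, None))     # '/' fallback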
def get_paragraph_head(source, maxlength, bullet_num=-1, bullet=False):
    """Return the paragraph text of specific length, optionally prefixed with a bullet.

    Args:
        source(str, PreProcessed, etree._Element)
        maxlength(int)
    Kwargs:
        bullet(bool): False by default, otherwise prefix paragraph text with
            either '* )' or '##)' where # corresponds to a zero padded integer.
        bullet_num(int): By default, the bullet is unnumbered, otherwise it
            will take the bullet number.
    """
    if bullet_num > -1:
        bullet = True
    if not bullet:
        bullet_s = ""
    else:
        if bullet_num < 0:
            bullet_s = "* ) "
        else:
            bullet_s = f"{bullet_num:02d}) "

    if isinstance(source, PreProcessed):
        string = str(source.pre_italic)
    elif isinstance(source, etree._Element):
        string = source.xpath("string()")
    # TODO PostProcessed condition
    else:
        string = str(source)
    string = f"{bullet_s}{string}"
    short = textwrap.shorten(string, width=maxlength, placeholder=" ...")
    return short
3,192
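Plain strings exercise the final branch of get_paragraph_head; PreProcessed and etree sources behave the same way once converted to text.

text = ("The committee reviewed the proposal at length and concluded that further "
        "consultation with the affected departments would be required.")

print(get_paragraph_head(text, 40))                # truncated and suffixed with ' ...'
print(get_paragraph_head(text, 40, bullet=True))   # '* ) The committee ...'
print(get_paragraph_head(text, 40, bullet_num=3))  # '03) The committee ...'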
def manual_edit(filepath, xy, find = (0,0,0)): """ Offers a manual method through which sections of input images can be silenced. Parameters ---------- filepath : string A filepath for images to be selected from. Must be a path to a file, not a directory or other ``glob`` parseable structure. xy : tuple A tuple of restraint tuples for the polygon to be silenced. This can be either generated by setting the output of ``md.manual_find`` to a list or developing your own algorithm. find : RGB tuple, default: (0,0,0) A value that indicates silenced noise. Usually is considered the background color of the input image, often ``(0,0,0)``. Notes ----- This allows users to silence polygon coordinates after then pass them through their own package scripts, or predeveloped scripts like ``md.manual_merge`` or ``md.manual_find``. Examples -------- >>> restraints = [(473,91),(214,601),(764,626)] >>> md.manual_edit("/example/directory/file.png", xy = restraints) #removing a triangle from input image md.manual_edit - Image 1 Save:0:00:01 .. figure:: edited.png :scale: 50 % :align: center ``md.manual_edit`` output image """ files = glob.glob(filepath) restraints = xy for index,item in enumerate(files): with Image.open(files[index]) as im: startTime = datetime.datetime.now().replace(microsecond=0) name = ntpath.basename(files[index]) size = len(name) mod_string = name[:size - 4] print(mod_string) draw = ImageDraw.Draw(im) draw.polygon(restraints, fill=find, outline=find) im.save(mod_string + "_clean" + ".PNG") endTime = datetime.datetime.now().replace(microsecond=0) durationTime = endTime - startTime print("md.manual_edit - Image " + str(index+1) + " Save:" + str(durationTime))
3,193
def create_form(erroneous_form=None): """Show a form to create a guest server.""" party_id = _get_current_party_id_or_404() setting = guest_server_service.get_setting_for_party(party_id) form = erroneous_form if erroneous_form else CreateForm() return { 'form': form, 'domain': setting.domain, }
3,194
async def record_responses(cassette, vcr_request, response): """Because aiohttp follows redirects by default, we must support them by default. This method is used to write individual request-response chains that were implicitly followed to get to the final destination. """ for i, past_response in enumerate(response.history): aiohttp_request = past_response.request_info # No data because it's following a redirect. past_request = Request( aiohttp_request.method, str(aiohttp_request.url), # Record body of first request, rest are following a redirect. None if i else vcr_request.body, _serialize_headers(aiohttp_request.headers), ) await record_response(cassette, past_request, past_response) # If we're following redirects, then the last request-response # we record is the one attached to the `response`. if response.history: aiohttp_request = response.request_info vcr_request = Request( aiohttp_request.method, str(aiohttp_request.url), None, _serialize_headers(aiohttp_request.headers), ) await record_response(cassette, vcr_request, response)
3,195
def apply(task, args, kwargs, **options): """Apply the task locally. This will block until the task completes, and returns a :class:`celery.result.EagerResult` instance. """ args = args or [] kwargs = kwargs or {} task_id = options.get("task_id", gen_unique_id()) retries = options.get("retries", 0) task = tasks[task.name] # Make sure we get the instance, not class. default_kwargs = {"task_name": task.name, "task_id": task_id, "task_retries": retries, "task_is_eager": True, "logfile": None, "delivery_info": {"is_eager": True}, "loglevel": 0} supported_keys = fun_takes_kwargs(task.run, default_kwargs) extend_with = dict((key, val) for key, val in default_kwargs.items() if key in supported_keys) kwargs.update(extend_with) trace = TaskTrace(task.name, task_id, args, kwargs, task=task) retval = trace.execute() return EagerResult(task_id, retval, trace.status, traceback=trace.strtb)
3,196
def exp_post_expansion_function(expansion: Expansion) -> Optional[Callable]: """Return the specified post-expansion function, or None if unspecified""" return exp_opt(expansion, 'post')
3,197
def test_allclose_perm(_):
    """Test that allclose_perm accurately detects permutations"""
    one, two = np.random.random((2, 10, 4))
    three = one.copy()
    np.random.shuffle(three)
    assert utils.allclose_perm(one, three)
    assert not utils.allclose_perm(one, two)
3,198
def return_(x): """Implement `return_`.""" return x
3,199