Columns: code (string, length 4 to 4.48k), docstring (string, length 1 to 6.45k), _id (string, length 24)
def fetchall_datatypes(self, **kwargs): <NEW_LINE> <INDENT> sql = "SELECT * FROM datatype" <NEW_LINE> return self.fetchall_astropy(sql)
Fetch all existing datatypes from the database
625941b6be7bc26dc91cd429
def save_service(self, service): <NEW_LINE> <INDENT> if not self.initialized: <NEW_LINE> <INDENT> raise NotInitializedError <NEW_LINE> <DEDENT> if not isinstance(service, SSSDService): <NEW_LINE> <INDENT> raise TypeError <NEW_LINE> <DEDENT> name = service.get_name() <NEW_LINE> index = self.delete_option('section', name) <NEW_LINE> addkw = [] <NEW_LINE> for option,value in service.get_all_options().items(): <NEW_LINE> <INDENT> if (type(value) == list): <NEW_LINE> <INDENT> value = ', '.join(value) <NEW_LINE> <DEDENT> addkw.append( { 'type' : 'option', 'name' : option, 'value' : str(value) } ) <NEW_LINE> <DEDENT> self.add_section(name, addkw, index)
Save the changes made to the service object back to the SSSDConfig object. service_object: The SSSDService object to save to the configuration. === Returns === No return value === Errors === NotInitializedError: This SSSDConfig object has not had import_config() or new_config() run on it yet. TypeError: service_object was not of the type SSSDService
625941b631939e2706e4cc94
@app.route('/gdisconnect') <NEW_LINE> def gdisconnect(): <NEW_LINE> <INDENT> access_token = login_session.get('access_token') <NEW_LINE> if access_token is None: <NEW_LINE> <INDENT> response = make_response( json.dumps('Current user not connected.'), 401) <NEW_LINE> response.headers['Content-Type'] = 'application/json' <NEW_LINE> return response <NEW_LINE> <DEDENT> url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token <NEW_LINE> h = httplib2.Http() <NEW_LINE> result = h.request(url, 'GET')[0] <NEW_LINE> if result['status'] == '200': <NEW_LINE> <INDENT> del login_session['access_token'] <NEW_LINE> del login_session['gplus_id'] <NEW_LINE> del login_session['username'] <NEW_LINE> del login_session['email'] <NEW_LINE> del login_session['picture'] <NEW_LINE> del login_session['user_id'] <NEW_LINE> response = make_response(json.dumps('Successfully disconnected.'), 200) <NEW_LINE> response.headers['Content-Type'] = 'application/json' <NEW_LINE> flash('Successfully disconnected.') <NEW_LINE> return redirect(url_for('show_catalog')) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> response = make_response(json.dumps('Failed to revoke token for given user.'), 400) <NEW_LINE> response.headers['Content-Type'] = 'application/json' <NEW_LINE> flash('Failed to revoke token for given user.') <NEW_LINE> return redirect(url_for('show_catalog'))
Disconnects the client from the server. Args: None Returns: Redirect to catalog.html through show_catalog()
625941b6009cb60464c631e0
@pytest.mark.django_db(transaction=True) <NEW_LINE> def test_cached_db_backend_log_session_access_not_recently_used(api_session): <NEW_LINE> <INDENT> backend = CachedDBSessionAccessBackend() <NEW_LINE> with pytest.raises(SessionExpiredError): <NEW_LINE> <INDENT> backend.log_session_access(api_session, INACTIVITY_TIMEOUT, False) <NEW_LINE> <DEDENT> cache_key = backend._get_key(api_session) <NEW_LINE> assert cache.get(cache_key) is None <NEW_LINE> assert api_session.accesses.count() == 0
Test that logging session access raises SessionExpiredError when the session has not been recently used, and that no cache entry or access record is created.
625941b6377c676e91271fcd
def accept(address): <NEW_LINE> <INDENT> listener = None <NEW_LINE> while True: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> listener = multiprocessing.connection.Listener(address, authkey=USERPREF['authkey']) <NEW_LINE> conn = listener.accept() <NEW_LINE> break <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> if listener is not None: <NEW_LINE> <INDENT> listener.close() <NEW_LINE> <DEDENT> raise Exception(sys.exc_info()[1]) <NEW_LINE> <DEDENT> <DEDENT> client = listener.last_accepted <NEW_LINE> return Connection(conn), client[0]
Accept a connection and return a Connection object.
625941b68a43f66fc4b53e8d
def get_numeric_cardinality(factor, exclude=None): <NEW_LINE> <INDENT> if exclude is not None: <NEW_LINE> <INDENT> fscope = list(factor.scope()) <NEW_LINE> for var in exclude: <NEW_LINE> <INDENT> fscope.remove(var) <NEW_LINE> <DEDENT> card = factor.get_cardinality(fscope) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> card = factor.get_cardinality(factor.scope()) <NEW_LINE> <DEDENT> num_cardinality = 1 <NEW_LINE> for i in card: <NEW_LINE> <INDENT> num_cardinality *= card[i] <NEW_LINE> <DEDENT> return num_cardinality
Input: factor, and an optional list of variables to exclude. Get the numeric cardinality (the product of the cardinalities of the in-scope variables) of a CPD, subject to the variables to exclude.
625941b64a966d76dd550e2f
def test_input_4(): <NEW_LINE> <INDENT> helper.cleanup() <NEW_LINE> shutil.copyfile(process.TESTING_DIRECTORY + '/test_history_3', process.HISTORY_FILE) <NEW_LINE> file_path = process.TESTING_DIRECTORY + '/test_input_4' <NEW_LINE> assert (helper.run_processing_engine(file_path) == 0) <NEW_LINE> helper.cleanup()
Test with a valid input file: - Single tweet - 2 companies (AMD and GM) - Negative sentiment - History file: base
625941b6507cdc57c6306af6
def get_all_data(self): <NEW_LINE> <INDENT> self.check_validity() <NEW_LINE> return GetAllData(*self.ipcon.send_request(self, BrickIMU.FUNCTION_GET_ALL_DATA, (), '', 28, 'h h h h h h h h h h'))
Returns the data from :func:`Get Acceleration`, :func:`Get Magnetic Field` and :func:`Get Angular Velocity` as well as the temperature of the IMU Brick. If you want to get the data periodically, it is recommended to use the :cb:`All Data` callback and set the period with :func:`Set All Data Period`.
625941b6046cf37aa974cb6e
def weights_given_SR_diff( SR_diff, avg_correlation, confidence_interval, years_of_data, avg_SR=0.5, std=0.15, how_many_assets=2, ): <NEW_LINE> <INDENT> average_mean = avg_SR * std <NEW_LINE> asset1_mean = (SR_diff + avg_SR) * std <NEW_LINE> mean_difference = asset1_mean - average_mean <NEW_LINE> confident_mean_difference = calculate_confident_mean_difference( std, years_of_data, mean_difference, confidence_interval, avg_correlation ) <NEW_LINE> confident_asset1_mean = confident_mean_difference + average_mean <NEW_LINE> mean_list = [confident_asset1_mean] + [average_mean] * (how_many_assets - 1) <NEW_LINE> weights = optimise_using_correlation(mean_list, avg_correlation, std) <NEW_LINE> return list(weights)
Return the ratio of weight to 1/N weight for an asset with unusual SR :param SR_diff: Difference between the SR and the average SR. 0.0 indicates same as average :param avg_correlation: Average correlation amongst assets :param years_of_data: How long has this been going on :param avg_SR: Average SR to use for other asset :param confidence_interval: How confident are we about our mean estimate (i.e. cdf point) :param how_many_assets: .... are we optimising over (I only consider 2, but let's keep it general) :param std: Standard deviation to use :return: Ratio of weight, where 1.0 means no difference
625941b621a7993f00bc7b0c
def testClientStatsCollectionAlwaysHappensAfterHandleMessage(self): <NEW_LINE> <INDENT> now = 1000000 <NEW_LINE> self.client_communicator.client_worker.last_stats_sent_time = ( rdfvalue.RDFDatetime().FromSecondsFromEpoch(now)) <NEW_LINE> with test_lib.FakeTime(now): <NEW_LINE> <INDENT> self.client_communicator.client_worker.CheckStats() <NEW_LINE> <DEDENT> runs = [] <NEW_LINE> action_cls = actions.ActionPlugin.classes.get("GetClientStatsAuto") <NEW_LINE> with utils.Stubber(action_cls, "Run", lambda cls, _: runs.append(1)): <NEW_LINE> <INDENT> with test_lib.FakeTime(now + 30): <NEW_LINE> <INDENT> self.client_communicator.client_worker.CheckStats() <NEW_LINE> self.assertEqual(len(runs), 0) <NEW_LINE> <DEDENT> self.client_communicator.client_worker.HandleMessage( rdf_flows.GrrMessage()) <NEW_LINE> with test_lib.FakeTime(now + 59): <NEW_LINE> <INDENT> self.client_communicator.client_worker.CheckStats() <NEW_LINE> self.assertEqual(len(runs), 0) <NEW_LINE> <DEDENT> with test_lib.FakeTime(now + 61): <NEW_LINE> <INDENT> self.client_communicator.client_worker.CheckStats() <NEW_LINE> self.assertEqual(len(runs), 1)
Tests that client stats are collected more often when client is busy.
625941b69b70327d1c4e0bf7
def greedy_nearest_neighbour_heuristic(data): <NEW_LINE> <INDENT> stars, matrix, n, limit = data['star_list'], data['matrix'], len(data['matrix']), data['c_limit'] <NEW_LINE> tour = [0] <NEW_LINE> stars_in_tour = stars[0] <NEW_LINE> length_already_without_back = 0 <NEW_LINE> was_node_added = True <NEW_LINE> while was_node_added: <NEW_LINE> <INDENT> possible_points = zip(range(n), matrix[tour[-1]], stars) <NEW_LINE> possible_points = (point for point in possible_points if point[0] not in tour) <NEW_LINE> ratios = ((i, stars*1000/(dist+0.1)) for i, dist, stars in possible_points) <NEW_LINE> ratios = sorted(ratios, key=lambda k: k[1], reverse=True) <NEW_LINE> was_node_added = False <NEW_LINE> for node_nr, _ in ratios: <NEW_LINE> <INDENT> new_length = length_already_without_back + matrix[tour[-1]][node_nr] + matrix[node_nr][0] <NEW_LINE> if new_length <= limit: <NEW_LINE> <INDENT> tour.append(node_nr) <NEW_LINE> was_node_added = True <NEW_LINE> length_already_without_back = new_length - matrix[node_nr][0] <NEW_LINE> stars_in_tour += stars[node_nr] <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> tour_length = length_already_without_back + matrix[tour[-1]][0] <NEW_LINE> tour.append(0) <NEW_LINE> return {'tour' : tour, 'stars': stars_in_tour, 'length': tour_length}
This function tries to solve the Orienteering Problem starting at index 0. At each step, it calculates stars/kilometer for each neighbour and picks the neighbour with the best stars/kilometer into its route. Returns: A solution dictionary having keys: tour: like [0, 1, 2, 4, 0] stars: the amount of stars of the route length: length of the tour in cumulative distance
625941b6c432627299f04a67
def is_unnecessary(self, operator): <NEW_LINE> <INDENT> if operator in self.operator_flags: <NEW_LINE> <INDENT> return self.operator_flags[operator] & self.UNNECESSARY != 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
Return True if the operator has become unnecessary due to a restart or exit_all().
625941b691af0d3eaac9b837
def create_user(self, email, password=None, **extra_fields): <NEW_LINE> <INDENT> if not email: <NEW_LINE> <INDENT> raise ValueError('Users must have an email address') <NEW_LINE> <DEDENT> user = self.model(email=self.normalize_email(email), **extra_fields) <NEW_LINE> user.set_password(password) <NEW_LINE> user.save(using=self.db) <NEW_LINE> return user
creates and saves a new user
625941b632920d7e50b27fef
def list_activity_types(domain=None, name=None, registrationStatus=None, nextPageToken=None, maximumPageSize=None, reverseOrder=None): <NEW_LINE> <INDENT> pass
Returns information about all activities registered in the specified domain that match the specified name and registration status. The result includes information like creation date, current status of the activity, etc. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call. Access Control You can use IAM policies to control this action's access to Amazon SWF resources as follows: If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows . See also: AWS API Documentation :example: response = client.list_activity_types( domain='string', name='string', registrationStatus='REGISTERED'|'DEPRECATED', nextPageToken='string', maximumPageSize=123, reverseOrder=True|False ) :type domain: string :param domain: [REQUIRED] The name of the domain in which the activity types have been registered. :type name: string :param name: If specified, only lists the activity types that have this name. :type registrationStatus: string :param registrationStatus: [REQUIRED] Specifies the registration status of the activity types to list. :type nextPageToken: string :param nextPageToken: If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged. The configured maximumPageSize determines how many results can be returned in a single call. :type maximumPageSize: integer :param maximumPageSize: The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum. This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum. :type reverseOrder: boolean :param reverseOrder: When set to true , returns the results in reverse order. By default, the results are returned in ascending alphabetical order by name of the activity types. :rtype: dict :return: { 'typeInfos': [ { 'activityType': { 'name': 'string', 'version': 'string' }, 'status': 'REGISTERED'|'DEPRECATED', 'description': 'string', 'creationDate': datetime(2015, 1, 1), 'deprecationDate': datetime(2015, 1, 1) }, ], 'nextPageToken': 'string' } :returns: domain (string) -- [REQUIRED] The name of the domain in which the activity types have been registered. name (string) -- If specified, only lists the activity types that have this name. registrationStatus (string) -- [REQUIRED] Specifies the registration status of the activity types to list. nextPageToken (string) -- If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged. The configured maximumPageSize determines how many results can be returned in a single call. maximumPageSize (integer) -- The maximum number of results that will be returned per call. nextPageToken can be used to obtain further pages of results. The default is 1000, which is the maximum allowed page size. 
You can, however, specify a page size smaller than the maximum. This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum. reverseOrder (boolean) -- When set to true , returns the results in reverse order. By default, the results are returned in ascending alphabetical order by name of the activity types.
625941b68e71fb1e9831d5d1
def get_random_emoji(): <NEW_LINE> <INDENT> emoji_choices = [x for x in os.listdir('./emoji') if not x.startswith('.')] <NEW_LINE> rand_choice = random.choice(emoji_choices) <NEW_LINE> new_name = str(random.randint(0, 99999999)) + '.png' <NEW_LINE> new_dest = '{}/{}'.format(TMP_FOLDER, new_name) <NEW_LINE> copyfile('./emoji/{}'.format(rand_choice), new_dest) <NEW_LINE> return new_dest
Return a random emoji file from './emoji'. To avoid errors when using the same file more than once, each file is given a random name and copied into './tmp'
625941b68c3a8732951581e1
def test_td04ad_case1(self): <NEW_LINE> <INDENT> n = 2 <NEW_LINE> m = 2 <NEW_LINE> p = 3 <NEW_LINE> num = np.array([ [ [0.0, 0.0, 1.0 ], [ 1.0, 0.0, 0.0 ] ], [ [3.0, -1.0, 1.0 ], [ 0.0, 1.0, 0.0 ] ], [ [0.0, 0.0, 1.0], [ 0.0, 2.0, 0.0 ] ] ]) <NEW_LINE> p, m, d = num.shape <NEW_LINE> numc = np.zeros((max(1, m, p), max(1, m, p), d), dtype=float) <NEW_LINE> numc[:p,:m,:] = num <NEW_LINE> denc = np.array( [ [1.0, 0.4, 3.0], [ 1.0, 1.0, 0.0 ] ]) <NEW_LINE> indc = np.array( [ 2, 1 ], dtype=int) <NEW_LINE> denr = np.array( [ [1.0, 0.4, 3.0], [ 1.0, 1.0, 0.0 ], [1.0, 0.0, 0.0] ]) <NEW_LINE> indr = np.array( [ 2, 1, 0 ], dtype=int) <NEW_LINE> n, A, B, C, D = transform.td04ad('C', 2, 3, indc, denc, numc) <NEW_LINE> Ac = [ [-1, 0, 0], [ 0, -0.4, -0.3], [ 0, 10, 0]] <NEW_LINE> Bc = [ [0, -1] ,[ 1 , 0], [ 0, 0]] <NEW_LINE> Cc = [ [1, 0, 0.1], [-1, -2.2, -0.8], [ -2, 0, 0.1] ] <NEW_LINE> Dc = [ [0, 1], [ 3, 0], [ 0, 0]] <NEW_LINE> np.testing.assert_array_almost_equal(A, Ac) <NEW_LINE> np.testing.assert_array_almost_equal(B, Bc) <NEW_LINE> np.testing.assert_array_almost_equal(C, Cc) <NEW_LINE> np.testing.assert_array_almost_equal(D, Dc) <NEW_LINE> resr = transform.td04ad('R', 2, 3, indr, denr, num)
td04ad: Convert with both 'C' and 'R' options
625941b645492302aab5e0e3
def get_refactor(self): <NEW_LINE> <INDENT> if self.assertion: <NEW_LINE> <INDENT> return self.assertion.get_line(self.line_nr - self.assertion.line_nr) <NEW_LINE> <DEDENT> return self.get_original()
return the line in pytest formatting if available, else it returns the original line :return: line (str)
625941b6dc8b845886cb5358
def check_groups_exist(ctx, param, value): <NEW_LINE> <INDENT> groups = ctx.obj.api.relay_groups(*value) <NEW_LINE> raise_if_not_all_present(value, groups, "The following relay groups do not exist: {}") <NEW_LINE> return groups
Ensure that all given names refer to existing relay groups.
625941b6d53ae8145f87a09a
@all_parameters_as_numpy_arrays <NEW_LINE> def minimum(aabb): <NEW_LINE> <INDENT> return aabb[0].copy()
Returns the minimum point of the AABB.
625941b64c3428357757c14e
def tank_respawns(self) -> list: <NEW_LINE> <INDENT> return [obj for obj in self.map_objs if obj.tank_respawn]
Return a list of map_objs that includes all tank respawns currently in the match. Returns: List of map_objs of found tank respawns
625941b694891a1f4081b8cb
def __init__(self, luthor, tokenfile): <NEW_LINE> <INDENT> self.luth = luthor
luthor: luthor file (on disk, not LuthorFile instance) to create a lexer from. tokenfile: file to import in the generated lexer
625941b676d4e153a657e953
def InertiaMatrix(x, y, z): <NEW_LINE> <INDENT> q = zeros((3,3), float) <NEW_LINE> q[0,0] = y**2 + z**2 <NEW_LINE> q[0,1] = -x * y <NEW_LINE> q[0,2] = -x * z <NEW_LINE> q[1,0] = -y * x <NEW_LINE> q[1,1] = x**2 + z**2 <NEW_LINE> q[1,2] = -y * z <NEW_LINE> q[2,0] = -z * x <NEW_LINE> q[2,1] = -z * y <NEW_LINE> q[2,2] = x**2 + y**2 <NEW_LINE> return q
Calculate the inertia matrix for a single mass sans mass.
625941b6d18da76e235322f5
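A quick sanity check for the row above; this is a sketch that assumes InertiaMatrix and numpy are importable in the same session. It verifies the entries against the closed form (r.r)I - outer(r, r) of the point-mass inertia tensor (sans mass).

import numpy as np

r = np.array([1.0, 2.0, 3.0])
q = InertiaMatrix(*r)                                  # function from the row above
q_closed = np.dot(r, r) * np.eye(3) - np.outer(r, r)   # same tensor in closed form
assert np.allclose(q, q_closed)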
def extensions(self): <NEW_LINE> <INDENT> open_position = [-1,-1] <NEW_LINE> y = 0 <NEW_LINE> grid_list = [] <NEW_LINE> for row in self.from_grid: <NEW_LINE> <INDENT> grid_list.append(list(row)) <NEW_LINE> <DEDENT> while open_position[0] == -1: <NEW_LINE> <INDENT> x = 0 <NEW_LINE> while open_position[1] == -1 and x < len(self.from_grid[y]): <NEW_LINE> <INDENT> if self.from_grid[y][x] == '*': <NEW_LINE> <INDENT> open_position = [y, x] <NEW_LINE> <DEDENT> x += 1 <NEW_LINE> <DEDENT> y += 1 <NEW_LINE> <DEDENT> possible_moves = [] <NEW_LINE> def list_to_tuple(grid): <NEW_LINE> <INDENT> result = [] <NEW_LINE> for rows in grid: <NEW_LINE> <INDENT> result.append(tuple(rows)) <NEW_LINE> <DEDENT> return tuple(result) <NEW_LINE> <DEDENT> def move_left(grid,open_position): <NEW_LINE> <INDENT> left = [a[:] for a in grid] <NEW_LINE> y = open_position[0] <NEW_LINE> x = open_position[1] <NEW_LINE> left[y][x] = left[y][x + 1] <NEW_LINE> left[y][x + 1] = "*" <NEW_LINE> return list_to_tuple(left) <NEW_LINE> <DEDENT> def move_right(grid,open_position): <NEW_LINE> <INDENT> right = [a[:] for a in grid] <NEW_LINE> y = open_position[0] <NEW_LINE> x = open_position[1] <NEW_LINE> right[y][x] = right[y][x - 1] <NEW_LINE> right[y][x - 1] = "*" <NEW_LINE> return list_to_tuple(right) <NEW_LINE> <DEDENT> def move_up(grid,open_position): <NEW_LINE> <INDENT> up = [a[:] for a in grid] <NEW_LINE> y = open_position[0] <NEW_LINE> x = open_position[1] <NEW_LINE> up[y][x] = up[y + 1][x] <NEW_LINE> up[y + 1][x] = "*" <NEW_LINE> return list_to_tuple(up) <NEW_LINE> <DEDENT> def move_down(grid,open_position): <NEW_LINE> <INDENT> down = [a[:] for a in grid] <NEW_LINE> y = open_position[0] <NEW_LINE> x = open_position[1] <NEW_LINE> down[y][x] = down[y - 1][x] <NEW_LINE> down[y - 1][x] = "*" <NEW_LINE> return list_to_tuple(down) <NEW_LINE> <DEDENT> if open_position[0] != 0: <NEW_LINE> <INDENT> possible_moves.append(MNPuzzle(move_down(grid_list,open_position),self.to_grid)) <NEW_LINE> <DEDENT> if open_position[0] != len(self.from_grid) - 1: <NEW_LINE> <INDENT> possible_moves.append(MNPuzzle(move_up(grid_list,open_position),self.to_grid)) <NEW_LINE> <DEDENT> if open_position[1] != 0: <NEW_LINE> <INDENT> possible_moves.append(MNPuzzle(move_right(grid_list,open_position),self.to_grid)) <NEW_LINE> <DEDENT> if open_position[1] != len(self.from_grid[0]) - 1: <NEW_LINE> <INDENT> possible_moves.append(MNPuzzle(move_left(grid_list,open_position),self.to_grid)) <NEW_LINE> <DEDENT> return possible_moves
Returns a list of the puzzle states reachable by moving the empty square ('*') one step. @rtype: list[MNPuzzle] For example, extending a puzzle whose grid is [1, 2, 3, 4, '*'] yields a puzzle whose grid is [1, 2, 3, '*', 4].
625941b663d6d428bbe44313
def generate_hosts(addr): <NEW_LINE> <INDENT> if "/" in addr: <NEW_LINE> <INDENT> hosts = netaddr.IPNetwork(addr) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> hosts = netaddr.IPGlob(addr) <NEW_LINE> <DEDENT> return hosts
Generate a list of hosts. :param addr: address :type addr: str :return: list of hosts :rtype: netaddr.ip.glob.IPGlob
625941b621a7993f00bc7b0d
def patch_quotas_groups( self, file_systems=None, groups=None, references=None, file_system_names=None, file_system_ids=None, gids=None, group_names=None, names=None, quota=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): <NEW_LINE> <INDENT> kwargs = dict( file_system_names=file_system_names, file_system_ids=file_system_ids, gids=gids, group_names=group_names, names=names, quota=quota, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, ) <NEW_LINE> kwargs = {k: v for k, v in kwargs.items() if v is not None} <NEW_LINE> endpoint = self._quotas_api.api20_quotas_groups_patch_with_http_info <NEW_LINE> _process_references(file_systems, ['file_system_names', 'file_system_ids'], kwargs) <NEW_LINE> _process_references(groups, ['group_names'], kwargs) <NEW_LINE> _process_references(references, ['names'], kwargs) <NEW_LINE> return self._call_api(endpoint, kwargs)
Modify a quota for a group. Note that if you modify a group's quota to a lower value and that group's usage has already exceeded the new value, writes will automatically halt until usage decreases below the new quota setting. Args: file_systems (list[FixedReference], optional): A list of file_systems to query for. Overrides file_system_names and file_system_ids keyword arguments. groups (list[FixedReference], optional): A list of groups to query for. Overrides group_names keyword arguments. references (list[FixedReference], optional): A list of references to query for. Overrides names keyword arguments. file_system_names (list[str], optional): A list of file system names. If there is not at least one resource that matches each of the elements of `file_system_names`, then an error is returned. file_system_ids (list[str], optional): A list of file system IDs. If after filtering, there is not at least one resource that matches each of the elements of `file_system_ids`, then an error is returned. This cannot be provided together with the `file_system_names` query parameter. gids (list[int], optional): A list of group IDs. If there is not at least one resource that matches each of the elements of `gids`, then an error is returned. This cannot be provided together with `group_names` query parameter. group_names (list[str], optional): A list of group names. If there is not at least one resource that matches each of the elements of `group_names`, then an error is returned. This cannot be provided together with `gids` query parameter. names (list[str], optional): A list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned. async_req (bool, optional): Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. _return_http_data_only (bool, optional): Returns only data field. _preload_content (bool, optional): Response is converted into objects. _request_timeout (int, optional): Total request timeout in seconds. Returns: ValidResponse: If the call was successful. ErrorResponse: If the call was not successful. Raises: PureError: If calling the API fails. ValueError: If a parameter is of an invalid type. TypeError: If invalid or missing parameters are used.
625941b6a8370b77170526c5
def add_file_handler(self): <NEW_LINE> <INDENT> file_handler = RotatingFileHandler(config.LOGGER_PATH,maxBytes=1024*1024*500,delay=False,backupCount=20) <NEW_LINE> file_handler.setLevel(logging.INFO) <NEW_LINE> file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s [%(pathname)s:%(lineno)d]')) <NEW_LINE> logging.getLogger(self.logger_name).addHandler(file_handler)
Send log output to a file (via a rotating file handler). :return:
625941b650812a4eaa59c149
def __init__(self, rating): <NEW_LINE> <INDENT> self._rating = rating
Wrap trueskill.Rating object.
625941b6925a0f43d2549c97
def simple_equality(cls: Type) -> Type: <NEW_LINE> <INDENT> eq_attr = '__eq__' <NEW_LINE> ne_attr = '__ne__' <NEW_LINE> has_eq = getattr(cls, eq_attr, None) is not getattr(object, eq_attr, None) <NEW_LINE> has_ne = getattr(cls, ne_attr, None) is not getattr(object, ne_attr, None) <NEW_LINE> if has_eq == has_ne: <NEW_LINE> <INDENT> raise ValueError('must define exactly one equality operation: == !=') <NEW_LINE> <DEDENT> elif has_eq: <NEW_LINE> <INDENT> def __ne__(a: object, b: object) -> bool: <NEW_LINE> <INDENT> return not a.__eq__(b) <NEW_LINE> <DEDENT> setattr(cls, ne_attr, __ne__) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> def __eq__(a: object, b: object) -> bool: <NEW_LINE> <INDENT> return not a.__ne__(b) <NEW_LINE> <DEDENT> setattr(cls, eq_attr, __eq__) <NEW_LINE> <DEDENT> return cls
Decorator that fills in the missing equality operation for a class. The missing equality operation (__eq__ or __ne__) is defined as the logical inverse of whichever operation is defined for the decorated class cls.
625941b682261d6c526ab2c7
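A usage sketch for the decorator in the row above: define only __eq__ and let the decorator derive __ne__ as its logical inverse (the class name Point is illustrative).

@simple_equality
class Point:
    def __init__(self, x: int, y: int) -> None:
        self.x, self.y = x, y

    def __eq__(self, other: object) -> bool:
        return isinstance(other, Point) and (self.x, self.y) == (other.x, other.y)

assert Point(1, 2) == Point(1, 2)
assert Point(1, 2) != Point(3, 4)   # __ne__ supplied by the decorator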
def unpack_dosdate(self, offset): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> o = self._offset + offset <NEW_LINE> return dosdate(self._buf[o:o + 2], self._buf[o + 2:o + 4]) <NEW_LINE> <DEDENT> except struct.error: <NEW_LINE> <INDENT> raise OverrunBufferException(o, len(self._buf))
Returns a datetime from the DOSDATE and DOSTIME starting at the relative offset. Arguments: - `offset`: The relative offset from the start of the block. Throws: - `OverrunBufferException`
625941b621bff66bcd684779
def read_cash_flow_statements(self, format_value=False): <NEW_LINE> <INDENT> self._serial_printer.print_begin('Reading cash_flow_statements') <NEW_LINE> df = pd.read_csv(self.ys.cash_flow_statements.path) <NEW_LINE> self._serial_printer.print_end('Complete.') <NEW_LINE> if format_value: <NEW_LINE> <INDENT> df = self.format(df) <NEW_LINE> <DEDENT> self.ys.cash_flow_statements = df <NEW_LINE> return self.ys
Method for reading the data from the file. :param format_value: Whether or not to format the data.
625941b6fb3f5b602dac34b2
def connect(self): <NEW_LINE> <INDENT> import sys <NEW_LINE> self.ftp = MyFtp() <NEW_LINE> self.ftp.connect() <NEW_LINE> self.ftp.login() <NEW_LINE> self.ftp.__class__.encoding = sys.getfilesystemencoding()
Method for connecting to the FTP server.
625941b650812a4eaa59c14a
def parse(self, response): <NEW_LINE> <INDENT> all_urls = response.css("a::attr(href)").extract() <NEW_LINE> all_urls = [urljoin(response.url, url) for url in all_urls] <NEW_LINE> all_urls = filter(lambda x: True if x.startswith("https") else False, all_urls) <NEW_LINE> for url in all_urls: <NEW_LINE> <INDENT> match_obj = re.match("(.*zhihu.com/question/(\d+))(/|$).*?", url) <NEW_LINE> if match_obj: <NEW_LINE> <INDENT> request_url = match_obj.group(1) <NEW_LINE> yield scrapy.Request(request_url, headers=self.headers, callback=self.parse_question) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> yield scrapy.Request(url, headers=self.headers, callback=self.parse)
Extract all URLs from the HTML page and follow them for further crawling. If an extracted URL has the form /question/xxx, download it and hand it to the question-parsing callback. :param response: :return:
625941b6be7bc26dc91cd42a
def __init__(self, debugger): <NEW_LINE> <INDENT> super(ThreadManager, self).__init__(debugger) <NEW_LINE> self.parser = Parser()
@type debugger: debugger.Debugger
625941b68e05c05ec3eea195
def xor(a,b): <NEW_LINE> <INDENT> assert len(a) == len(b) <NEW_LINE> x = [] <NEW_LINE> for i in range(len(a)): <NEW_LINE> <INDENT> x.append( chr(ord(a[i])^ord(b[i]))) <NEW_LINE> <DEDENT> return ''.join(x)
XOR two strings of same length
625941b655399d3f055884d7
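Two worked examples for the helper in the row above: XOR with 0x20 (the ASCII case bit, i.e. a space) flips letter case, and XOR of a string with itself yields NUL bytes.

assert xor('abc', '   ') == 'ABC'            # ' ' is 0x20, the ASCII case bit
assert xor('abc', 'abc') == '\x00\x00\x00'   # x ^ x == 0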
def SetDirty(self, name): <NEW_LINE> <INDENT> self.__dict__['__dirty'].append(name)
Mark a flag as dirty so that accessing it will cause a reparse.
625941b6293b9510aa2c30bd
def write(self, data_set, io_manager, location, write_metadata=True): <NEW_LINE> <INDENT> raise NotImplementedError
Write the DataSet to storage. Subclasses must override this method. It is up to the Formatter to decide when to overwrite completely, and when to just append or otherwise update the file(s). Args: data_set (DataSet): the data we are writing. io_manager (io_manager): base physical location to write to. location (str): the file location within the io_manager. write_metadata (bool): if True, then the metadata is written to disk
625941b6b57a9660fec336a4
def build(self,l,r,arr): <NEW_LINE> <INDENT> if(l>r): <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> if(l==r): <NEW_LINE> <INDENT> return TreeNode(arr[l],l,r) <NEW_LINE> <DEDENT> m = (l+r)//2 <NEW_LINE> temp = TreeNode(None,l,r) <NEW_LINE> temp.left = self.build(l,m,arr) <NEW_LINE> temp.right = self.build(m+1,r,arr) <NEW_LINE> temp.summ = temp.left.summ + temp.right.summ <NEW_LINE> return temp
Build a sum segment tree over arr[l..r]. Time complexity: O(n).
625941b6c4546d3d9de72854
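The builder above presupposes a TreeNode type that is not shown in this row; a plausible minimal definition, inferred from the usage (val/l/r in the constructor, left/right children, a cached subtree sum in summ), might look like this sketch:

class TreeNode:
    def __init__(self, val, l, r):
        self.val = val                              # leaf value (None for internal nodes)
        self.l, self.r = l, r                       # index range covered by this node
        self.left = None
        self.right = None
        self.summ = val if val is not None else 0   # subtree sum; build() fills internal nodes

# With this node type, build(0, len(arr) - 1, arr) on [2, 1, 5, 3]
# returns a root whose summ is 11, the sum of the whole array.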
def __init__(self, email, principal=None): <NEW_LINE> <INDENT> body = M.GetDelegate( {'IncludePermissions': 'true'}, M.Mailbox(T.EmailAddress(email)) ) <NEW_LINE> kwargs = {} <NEW_LINE> if principal: <NEW_LINE> <INDENT> kwargs['impersonation'] = principal <NEW_LINE> <DEDENT> super().__init__(body, **kwargs)
Initialize the request. :param email: Email address of the user to query.
625941b6046cf37aa974cb6f
def experiment_info(observer): <NEW_LINE> <INDENT> now = datetime.datetime.now() <NEW_LINE> date = now.strftime("%Y-%m-%d %H:%M") <NEW_LINE> my_dlg = gui.Dlg(title="Go No-Go") <NEW_LINE> my_dlg.addText('Subject info') <NEW_LINE> my_dlg.addField('ID:') <NEW_LINE> my_dlg.addField('Age:') <NEW_LINE> my_dlg.addField('Sex:', choices=['MALE', "FEMALE"]) <NEW_LINE> my_dlg.addText('Observer info') <NEW_LINE> my_dlg.addField('Observer:', observer) <NEW_LINE> my_dlg.show() <NEW_LINE> if not my_dlg.OK: <NEW_LINE> <INDENT> exit(1) <NEW_LINE> <DEDENT> return my_dlg.data[0], my_dlg.data[2], my_dlg.data[1], my_dlg.data[3], date
Dialog window shown when the procedure starts. :param observer: observer_id :return: part_id, sex, age, observer_id, date
625941b69c8ee82313fbb599
def get_delta_fmt(delta): <NEW_LINE> <INDENT> col = (0, 0, 0, 255) <NEW_LINE> n = abs(delta) <NEW_LINE> s = delta <NEW_LINE> if delta < 0: <NEW_LINE> <INDENT> sat = min(n/200 + 0.2, 1) <NEW_LINE> r, g, b = hsv_to_rgb(0, sat, 1) <NEW_LINE> col = (r, g, b, 1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> s = "+"+str(n) <NEW_LINE> sat = min(n/100 + 0.2, 1) <NEW_LINE> r, g, b = hsv_to_rgb(1/3, sat, 1) <NEW_LINE> col = (r, g, b, 1) <NEW_LINE> <DEDENT> return "(" + str(s) + ")", col
Arbitrary colour formatting of a rank delta: more red for bigger losses, more green for bigger gains.
625941b6dd821e528d63afd0
def route_count_by_ipv(self, ipv): <NEW_LINE> <INDENT> return len(self.dyn_routes_by_ipv[ipv])
Return route table count for specified IP version on this VLAN.
625941b67d847024c06be0e4
def test_concurrent_count_udp_vanilla(self): <NEW_LINE> <INDENT> if self.socketType != socket.SOCK_DGRAM: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> X = [] <NEW_LINE> for i in self.ephemeral_ports(): <NEW_LINE> <INDENT> sd = self.socket() <NEW_LINE> sd.connect(self.remoteA) <NEW_LINE> self.assertEcho(sd) <NEW_LINE> X.append(sd) <NEW_LINE> <DEDENT> self.assertEqual(len(X), len(self.ephemeral_ports())) <NEW_LINE> sd = self.socket() <NEW_LINE> with self.assertRaises(OSError) as x: <NEW_LINE> <INDENT> sd.connect(self.remoteB) <NEW_LINE> <DEDENT> self.assertEqual(x.exception.errno, errno.EAGAIN)
Every ephemeral port can connect to the first host; one further connection, to a second host, then fails with EAGAIN.
625941b6cb5e8a47e48b78d4
@pytest.mark.parametrize( "user", [dummy_user_john, dummy_user_jane, dummy_user_alice, dummy_user_bob], ) <NEW_LINE> def test_list_permissions(user, acl_fixture): <NEW_LINE> <INDENT> from fastapi_permissions import list_permissions <NEW_LINE> result = list_permissions(user.principals, acl_fixture) <NEW_LINE> assert result == permission_results[user]
tests the list_permissions function
625941b6f8510a7c17cf9529
def write_config(self): <NEW_LINE> <INDENT> self.filename = "motors.ini" <NEW_LINE> config = SafeConfigParser() <NEW_LINE> if os.path.isfile(self.filename): <NEW_LINE> <INDENT> config.read(self.filename) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> config.add_section('motors') <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> print("Failed to add, could already exist") <NEW_LINE> <DEDENT> for item in self.menu_list.items(): <NEW_LINE> <INDENT> item_enum_str = str(item[0]).split('.') <NEW_LINE> item_min_str = "{}_MIN".format(item_enum_str[0]) <NEW_LINE> item_mid_str = "{}_MID".format(item_enum_str[0]) <NEW_LINE> item_max_str = "{}_MAX".format(item_enum_str[0]) <NEW_LINE> config.set( 'motors', item_min_str, str(self.core.servos[item[0]][0].servo_min)) <NEW_LINE> config.set( 'motors', item_mid_str, str(self.core.servos[item[0]][0].servo_mid)) <NEW_LINE> config.set( 'motors', item_max_str, str(self.core.servos[item[0]][0].servo_max))
Write the motor servo min/mid/max values to the config file.
625941b6d99f1b3c44c673bc
def codeComplete(self, path, line, column, unsaved_files=None, include_macros=False, include_code_patterns=False, include_brief_comments=False): <NEW_LINE> <INDENT> options = 0 <NEW_LINE> if include_macros: <NEW_LINE> <INDENT> options += 1 <NEW_LINE> <DEDENT> if include_code_patterns: <NEW_LINE> <INDENT> options += 2 <NEW_LINE> <DEDENT> if include_brief_comments: <NEW_LINE> <INDENT> options += 4 <NEW_LINE> <DEDENT> if unsaved_files is None: <NEW_LINE> <INDENT> unsaved_files = [] <NEW_LINE> <DEDENT> unsaved_files_array = 0 <NEW_LINE> if len(unsaved_files): <NEW_LINE> <INDENT> unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))() <NEW_LINE> for i,(name,value) in enumerate(unsaved_files): <NEW_LINE> <INDENT> if hasattr(value, "read"): <NEW_LINE> <INDENT> value = value.read() <NEW_LINE> <DEDENT> unsaved_files_array[i].name = name <NEW_LINE> unsaved_files_array[i].contents = value <NEW_LINE> unsaved_files_array[i].length = len(value) <NEW_LINE> <DEDENT> <DEDENT> ptr = conf.lib.clang_codeCompleteAt(self, path, line, column, unsaved_files_array, len(unsaved_files), options) <NEW_LINE> if ptr: <NEW_LINE> <INDENT> return CodeCompletionResults(ptr) <NEW_LINE> <DEDENT> return None
Code complete in this translation unit. In-memory contents for files can be provided by passing a list of pairs as unsaved_files; the first item of each pair should be the filename to be mapped and the second should be the contents to be substituted for the file. The contents may be passed as strings or file objects.
625941b6ab23a570cc24ffa4
def test_basic_reduction_completes(self): <NEW_LINE> <INDENT> wks = ISISIndirectDiffractionReduction(InputFiles=['IRS26176.RAW'], Instrument='IRIS', Mode='diffspec', SpectraRange=[105, 112]) <NEW_LINE> self.assertTrue(isinstance(wks, WorkspaceGroup), 'Result workspace should be a workspace group.') <NEW_LINE> self.assertEqual(len(wks), 1) <NEW_LINE> self.assertEqual(wks.getNames()[0], 'iris26176_diffspec_red') <NEW_LINE> red_ws = wks[0] <NEW_LINE> self.assertEqual(red_ws.getAxis(0).getUnit().unitID(), 'dSpacing') <NEW_LINE> self.assertEqual(red_ws.getNumberHistograms(), 1)
Sanity test to ensure the most basic reduction actually completes.
625941b63c8af77a43ae35c3
def linearInterpolate(self, other, t): <NEW_LINE> <INDENT> return Vec4(self._rtval.linearInterpolate('Vec4', ks.rtVal('Scalar', t)))
Linearly interpolates this vector with another one based on a scalar blend value (0.0 to 1.0). Args: other (Vec4): vector to blend to. t (float): Blend value. Returns: Vec4: New vector blended between this and the input vector.
625941b66e29344779a6243a
@active_list_required <NEW_LINE> def add_remove_item(request): <NEW_LINE> <INDENT> list_name = lists.get_list_name(request) <NEW_LINE> action = request.POST.get('action', 'add') <NEW_LINE> content_type = request.POST.get('content_type', None) <NEW_LINE> object_id = request.POST.get('object_id', None) <NEW_LINE> ct = ContentType.objects.get(name=content_type) <NEW_LINE> co = ct.get_object_for_this_type(pk=object_id) <NEW_LINE> item_key = lists.make_item_key(co, ct) <NEW_LINE> if action == "add": <NEW_LINE> <INDENT> object_hash = lists.make_object_hash(co) <NEW_LINE> lists.add_item(list_name, item_key, object_hash) <NEW_LINE> <DEDENT> if action == "remove": <NEW_LINE> <INDENT> lists.remove_item(list_name, item_key) <NEW_LINE> <DEDENT> list_total = None <NEW_LINE> if hasattr(co, 'list_total_field'): <NEW_LINE> <INDENT> total_field = getattr(co, co.list_total_field) <NEW_LINE> list_total = lists.make_total(list_name, action, total_field) <NEW_LINE> <DEDENT> t = select_template(["blocks/ahah_list.html",]) <NEW_LINE> c = RequestContext(request) <NEW_LINE> html = t.render(c) <NEW_LINE> if request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest': <NEW_LINE> <INDENT> return HttpResponse(json.dumps( { 'total' : list_total, 'html' : html, 'action' : action, 'list_item_id' : item_key, })) <NEW_LINE> <DEDENT> res = HttpResponseRedirect(request.META['HTTP_REFERER']) <NEW_LINE> return res
Expects POST data with the following values: * content_type * object_id Only content types (models) with a valid `LIST_ENABLED` attribute will be allowed
625941b64527f215b584c280
def set_background(self, background=None): <NEW_LINE> <INDENT> if background is not None: <NEW_LINE> <INDENT> self.background.update(background) <NEW_LINE> keys = self.background.keys() <NEW_LINE> if not background: <NEW_LINE> <INDENT> self.background = {} <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> background = self.background <NEW_LINE> keys = self.background.keys() <NEW_LINE> <DEDENT> for channelidx in keys: <NEW_LINE> <INDENT> bgcolor = background.get(channelidx, None) <NEW_LINE> groupidx = self.get_groupidx(channelidx) <NEW_LINE> if groupidx is None: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> group = self.get_group(groupidx) <NEW_LINE> channel = self.get_channel(channelidx) <NEW_LINE> index = self.index(channel.row(), 0, parent=group.index) <NEW_LINE> index1 = self.index(channel.row(), 1, parent=group.index) <NEW_LINE> if index.isValid(): <NEW_LINE> <INDENT> item = index.internalPointer() <NEW_LINE> item.bgcolor = bgcolor <NEW_LINE> self.dataChanged.emit(index, index1)
Set the background of some channels. The argument is a dictionary channelidx ==> color index.
625941b624f1403a9260098e
def remove_group_user(self, u_name, c_name): <NEW_LINE> <INDENT> return self.remove_group(u_name, 'user', c_name)
Args: u_name: user to remove the group from c_name: group to remove Return: ``True`` if the group was removed from the profile ``False`` otherwise
625941b6d486a94d0b98df73
def test_properties(self): <NEW_LINE> <INDENT> for mode in ADDRESSING_MODE_TABLE: <NEW_LINE> <INDENT> name, syntax, formatter = mode <NEW_LINE> addrmode = get_addressing_mode(name) <NEW_LINE> self.assertIsNotNone(addrmode) <NEW_LINE> self.assertEqual(addrmode.name, name.strip()) <NEW_LINE> self.assertEqual(addrmode.syntax, syntax.strip()) <NEW_LINE> if formatter: <NEW_LINE> <INDENT> self.assertEqual(addrmode.formatter.__name__, formatter.__func__.__name__) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.assertIsNone(addrmode.formatter)
Test AbstractAddressingMode for its properties.
625941b60383005118ecf409
def open(self): <NEW_LINE> <INDENT> console.output("The doors seem to be sealed shut. " "Hopefully you'll be able to open them when you leave.") <NEW_LINE> return False
The doors will refuse to open.
625941b6d7e4931a7ee9dd40
def _closest_file(self, file_name='settings.py', path='.', prev_path=None): <NEW_LINE> <INDENT> if path == prev_path: <NEW_LINE> <INDENT> return '' <NEW_LINE> <DEDENT> path = os.path.abspath(path) <NEW_LINE> settings_file = os.path.join(path, file_name) <NEW_LINE> if os.path.exists(settings_file): <NEW_LINE> <INDENT> return settings_file <NEW_LINE> <DEDENT> return self._closest_file(file_name=file_name, path=os.path.dirname(path), prev_path=path)
Return the path of the closest settings.py file, searching upward from path. :param file_name: name of the file to look for (defaults to settings.py) :param path: directory to start searching from :param prev_path: previous directory, used internally to detect the filesystem root :return: path to the file, or an empty string if not found
625941b6baa26c4b54cb0f48
def __init__(self, **kwargs): <NEW_LINE> <INDENT> self.etatRequisCibleDirect = kwargs.get('etat_requis', "").split("|") <NEW_LINE> if self.etatRequisCibleDirect[-1] == "": <NEW_LINE> <INDENT> self.etatRequisCibleDirect = [] <NEW_LINE> <DEDENT> self.etatRequisCibles = kwargs.get('etat_requis_cibles', "").split("|") <NEW_LINE> if self.etatRequisCibles[-1] == "": <NEW_LINE> <INDENT> self.etatRequisCibles = [] <NEW_LINE> <DEDENT> self.etatRequisLanceur = kwargs.get('etat_requis_lanceur', "").split("|") <NEW_LINE> if self.etatRequisLanceur[-1] == "": <NEW_LINE> <INDENT> self.etatRequisLanceur = [] <NEW_LINE> <DEDENT> self.consommeEtat = kwargs.get('consomme_etat', False) <NEW_LINE> self.ciblesPossibles = kwargs.get( 'cibles_possibles', "Allies|Ennemis|Lanceur").split("|") <NEW_LINE> self.ciblesExclues = kwargs.get('cibles_exclues', "").split("|") <NEW_LINE> self.ciblesPossiblesDirect = kwargs.get('cibles_possibles_direct', "|".join(self.ciblesPossibles)).split("|") <NEW_LINE> self.cibleNonRequise = kwargs.get('cible_non_requise', False) <NEW_LINE> if kwargs.get('zone', None) is not None: <NEW_LINE> <INDENT> self.typeZone = kwargs.get('zone', Zones.TypeZoneCercle(0)) <NEW_LINE> del kwargs["zone"] <NEW_LINE> kwargs["typeZone"] = self.typeZone.__class__.__name__.replace("TypeZone", "") <NEW_LINE> kwargs["tailleZone"] = int(self.typeZone.zonePO) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.typeZone = Zones.TypeZone.getZoneFromName(kwargs.get("typeZone", "Cercle"), kwargs.get("tailleZone", 0)) <NEW_LINE> <DEDENT> self.pile = kwargs.get("pile", True) <NEW_LINE> self.kwargs = kwargs
@summary: Initializes an Effect. @kwargs: Effect options; possible keys: etat_requis (string separated by |, none by default), etat_requis_cibles (string separated by |, none by default), etat_requis_lanceur (string separated by |, none by default), consomme_etat (boolean, False by default), cibles_possibles (string, "Allies|Ennemis|Lanceur" by default), cibles_exclues (string, none by default), cible_non_requise (boolean, False by default; indicates whether the effect can be cast without a direct target, i.e. when the spell is cast on an empty cell), zone (Zone, Zones.TypeZoneCercle(0) by default = single-target spell). @type: **kwargs
625941b69b70327d1c4e0bf8
def calc_statistics(self): <NEW_LINE> <INDENT> self._ydata = [] <NEW_LINE> for patt_ind in range(len(self._rb_pattern)): <NEW_LINE> <INDENT> self._ydata.append({}) <NEW_LINE> self._ydata[-1]['mean'] = np.mean(self._raw_data[patt_ind], 0) <NEW_LINE> if len(self._raw_data[patt_ind]) == 1: <NEW_LINE> <INDENT> self._ydata[-1]['std'] = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._ydata[-1]['std'] = np.std(self._raw_data[patt_ind], 0)
Extract averages and std dev from the raw data (self._raw_data). Assumes that self._calc_data has been run. Output into internal _ydata variable. ydata is a list of dictionaries (length number of patterns). Dictionary ydata[i]: * ydata[i]['mean'] is a numpy_array of length n; entry j of this array contains the mean probability of success over seeds, for vector length self._cliff_lengths[i][j]. * ydata[i]['std'] is a numpy_array of length n; entry j of this array contains the std of the probability of success over seeds, for vector length self._cliff_lengths[i][j].
625941b6c4546d3d9de72855
def _get_processor(v): <NEW_LINE> <INDENT> if context.executing_eagerly(): <NEW_LINE> <INDENT> if isinstance(v, ops.Tensor): <NEW_LINE> <INDENT> return _TensorProcessor(v) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return _DenseResourceVariableProcessor(v) <NEW_LINE> <DEDENT> <DEDENT> if resource_variable_ops.is_resource_variable(v) and not v._in_graph_mode: <NEW_LINE> <INDENT> return _DenseResourceVariableProcessor(v) <NEW_LINE> <DEDENT> if v.op.type == "VarHandleOp": <NEW_LINE> <INDENT> return _DenseResourceVariableProcessor(v) <NEW_LINE> <DEDENT> if isinstance(v, variables.Variable): <NEW_LINE> <INDENT> return _RefVariableProcessor(v) <NEW_LINE> <DEDENT> if isinstance(v, ops.Tensor): <NEW_LINE> <INDENT> return _TensorProcessor(v) <NEW_LINE> <DEDENT> raise NotImplementedError("Trying to optimize unsupported type ", v)
The processor of v.
625941b66aa9bd52df036bc7
def is_unique(self) -> bool: <NEW_LINE> <INDENT> return (self.get_civ_id() != 0 and len(self.civ_lines) == 0 and self.get_enabling_research_id() > -1)
Groups are unique if they belong to a specific civ. :returns: True if the civ id is not Gaia's and no alternative lines for this unit line exist.
625941b6b545ff76a8913c44
def getCardTestStatesDates(cards, tests, attempts): <NEW_LINE> <INDENT> numTests = len(tests) <NEW_LINE> testsToInd = {} <NEW_LINE> for i in xrange(numTests): <NEW_LINE> <INDENT> testsToInd[tests[i].pk] = i <NEW_LINE> <DEDENT> state = {} <NEW_LINE> for card in cards: <NEW_LINE> <INDENT> state[card.pk] = [(0, 0)] * numTests <NEW_LINE> <DEDENT> for attempt in attempts: <NEW_LINE> <INDENT> if not attempt.revoked: <NEW_LINE> <INDENT> testInd = testsToInd[attempt.test_type_id]; <NEW_LINE> if not attempt.num_failed == 0: <NEW_LINE> <INDENT> if state[attempt.card_id][testInd][1] == 0: <NEW_LINE> <INDENT> state[attempt.card_id][testInd] = (2, attempt.date_tested) <NEW_LINE> <DEDENT> elif state[attempt.card_id][testInd][1] < attempt.date_tested: <NEW_LINE> <INDENT> state[attempt.card_id][testInd] = (2, attempt.date_tested) <NEW_LINE> <DEDENT> <DEDENT> elif not attempt.num_passed == 0 and state[attempt.card_id][testInd][0] == 0: <NEW_LINE> <INDENT> if state[attempt.card_id][testInd][1] == 0: <NEW_LINE> <INDENT> state[attempt.card_id][testInd] = (1, attempt.date_tested) <NEW_LINE> <DEDENT> elif state[attempt.card_id][testInd][1] < attempt.date_tested: <NEW_LINE> <INDENT> state[attempt.card_id][testInd] = (1, attempt.date_tested) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> cardStat = [] <NEW_LINE> for i in xrange(len(cards)): <NEW_LINE> <INDENT> card = cards[i] <NEW_LINE> curFail = [] <NEW_LINE> curPass = [] <NEW_LINE> curRem = [] <NEW_LINE> tempDict = {} <NEW_LINE> curState = state[card.pk] <NEW_LINE> for i in xrange(numTests): <NEW_LINE> <INDENT> if curState[i][0] == 0: <NEW_LINE> <INDENT> curRem.append((tests[i].name, curState[i][1])) <NEW_LINE> <DEDENT> elif curState[i][0] == 1: <NEW_LINE> <INDENT> curPass.append((tests[i].name, curState[i][1])) <NEW_LINE> <DEDENT> elif curState[i][0] == 2: <NEW_LINE> <INDENT> curFail.append((tests[i].name, curState[i][1])) <NEW_LINE> <DEDENT> <DEDENT> tempDict['barcode'] = card.barcode <NEW_LINE> tempDict['failed'] = curFail <NEW_LINE> tempDict['passed'] = curPass <NEW_LINE> tempDict['remaining'] = curRem <NEW_LINE> cardStat.append(tempDict) <NEW_LINE> <DEDENT> return cardStat
This function returns, for each card, which tests have failed, passed, or remain, together with the date of the latest relevant attempt; the result is a list of dicts with keys 'barcode', 'failed', 'passed' and 'remaining'.
625941b630dc7b766590178f
def test_options(self): <NEW_LINE> <INDENT> class api: <NEW_LINE> <INDENT> @staticmethod <NEW_LINE> def is_production_mode(): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> o = self.cls(api) <NEW_LINE> o.finalize() <NEW_LINE> assert type(o.options) is NameSpace <NEW_LINE> assert len(o.options) == 1 <NEW_LINE> options = ('target', 'files*') <NEW_LINE> ns = self.get_instance(options=options).options <NEW_LINE> assert type(ns) is NameSpace <NEW_LINE> assert len(ns) == len(options) + 1 <NEW_LINE> assert list(ns) == ['target', 'files', 'version'] <NEW_LINE> assert type(ns.target) is parameters.Str <NEW_LINE> assert type(ns.files) is parameters.Str <NEW_LINE> assert ns.target.required is True <NEW_LINE> assert ns.target.multivalue is False <NEW_LINE> assert ns.files.required is False <NEW_LINE> assert ns.files.multivalue is True
Test the ``ipalib.frontend.Command.options`` instance attribute.
625941b6627d3e7fe0d68c73
def update(self, volume_type, specs, **kwargs): <NEW_LINE> <INDENT> raise NotImplementedError()
Update the encryption type information for the specified volume type. :param volume_type: the volume type whose encryption type information must be updated :param specs: the encryption type specifications to update :return: an instance of :class: VolumeEncryptionType
625941b68e7ae83300e4adf0
def _find_component(graph, vertex, component): <NEW_LINE> <INDENT> for n in graph.neighbors(vertex): <NEW_LINE> <INDENT> if n not in component: <NEW_LINE> <INDENT> component.add(n) <NEW_LINE> _find_component(graph, n, component)
Function to aid in finding all vertices in the same component as vertex. Args: graph: Graph made up of vertices and edges. vertex: Single vertex in graph. component: Component containing vertex.
625941b6711fe17d825421a1
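A usage sketch for the helper above, assuming a networkx-style graph object exposing neighbors(). Note the caller must seed the component set with the start vertex, since the helper only ever adds neighbours:

import networkx as nx

g = nx.Graph([(1, 2), (2, 3), (4, 5)])
component = {1}                  # seed with the starting vertex
_find_component(g, 1, component)
assert component == {1, 2, 3}    # 4 and 5 are not reachable from 1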
def list(self, bucket, prefix=None, marker=None, limit=None, delimiter=None): <NEW_LINE> <INDENT> options = { 'bucket': bucket, } <NEW_LINE> if marker is not None: <NEW_LINE> <INDENT> options['marker'] = marker <NEW_LINE> <DEDENT> if limit is not None: <NEW_LINE> <INDENT> options['limit'] = limit <NEW_LINE> <DEDENT> if prefix is not None: <NEW_LINE> <INDENT> options['prefix'] = prefix <NEW_LINE> <DEDENT> if delimiter is not None: <NEW_LINE> <INDENT> options['delimiter'] = delimiter <NEW_LINE> <DEDENT> url = 'http://{0}/list'.format(config.get_default('default_rsf_host')) <NEW_LINE> ret, info = self.__get(url, options) <NEW_LINE> eof = False <NEW_LINE> if ret and not ret.get('marker'): <NEW_LINE> <INDENT> eof = True <NEW_LINE> <DEDENT> return ret, eof, info
Prefix listing: 1. On the first request, set marker = None. 2. Whatever the err value is, first check whether ret.get('items') has content. 3. When no more data follows, err returns EOF and marker returns None (but do not rely on this to decide whether listing has finished). For the full spec see: http://developer.qiniu.com/docs/v6/api/reference/rs/list.html Args: bucket: bucket name prefix: listing prefix marker: listing marker limit: maximum number of entries per request delimiter: directory delimiter Returns: a dict like {"hash": "<Hash string>", "key": "<Key string>"} a ResponseInfo object an EOF flag
625941b663f4b57ef0000f47
def itkScalarImageKmeansImageFilterIUS2IUS2_cast(*args): <NEW_LINE> <INDENT> return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS2IUS2_cast(*args)
itkScalarImageKmeansImageFilterIUS2IUS2_cast(itkLightObject obj) -> itkScalarImageKmeansImageFilterIUS2IUS2
625941b656ac1b37e6264005
def length(data_hex): <NEW_LINE> <INDENT> n = len(bytes.fromhex(data_hex)) <NEW_LINE> if n >= 0 and n <= 127: <NEW_LINE> <INDENT> return '0' + '{0:07b}'.format(n) <NEW_LINE> <DEDENT> elif n >= 128 and n <= 255: <NEW_LINE> <INDENT> return '10000001' + '{0:08b}'.format(n) <NEW_LINE> <DEDENT> elif n >= 256 and n <= 65535: <NEW_LINE> <INDENT> return '10000010' + '{0:016b}'.format(n) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("ERROR: Data too long.") <NEW_LINE> sys.exit(1)
Return the length, in bytes, of a hexadecimal string. The length (n) is returned in binary format (b), such that: - if 0 <= n <= 127, then b = 0[bin(n,7)] - if 128 <= n <= 255, then b = 10000001 [bin(n,8)] - if 256 <= n <= 65535, then b = 10000010 [bin(n,16)] - otherwise, an exception is raised Note that [bin(x,k)] denotes the value of x in binary, using k bits. Parameters ---------- data_hex : str string of data, in hexadecimal format, whose length in bytes is wanted Returns ------- length_bin : str length of 'data_hex' in bytes, in binary format Raises ------ Exception if the data length exceeds 65535
625941b62ae34c7f2600cf57
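Two worked examples of the encoding implemented in the row above (short form for n <= 127, long form 0x81 plus one length byte for 128 <= n <= 255):

assert length('ff') == '00000001'                 # 1 byte  -> '0' + bin(1, 7 bits)
assert length('ab' * 200) == '1000000111001000'   # 200 bytes -> '10000001' + bin(200, 8 bits)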
def deserialize(self, data): <NEW_LINE> <INDENT> self.index = 0 <NEW_LINE> return self.parse_deserialize(data)
Decodes your encoded data to tree. :type data: str :rtype: TreeNode
625941b6e1aae11d1e749ad8
def get_summoners_by_name(names): <NEW_LINE> <INDENT> summoners = cassiopeia.core.requests.data_store.get(cassiopeia.type.core.summoner.Summoner, names, "name") <NEW_LINE> missing = [] <NEW_LINE> loc = [] <NEW_LINE> for i in range(len(names)): <NEW_LINE> <INDENT> if not summoners[i]: <NEW_LINE> <INDENT> missing.append(names[i]) <NEW_LINE> loc.append(i) <NEW_LINE> <DEDENT> <DEDENT> if not missing: <NEW_LINE> <INDENT> return summoners <NEW_LINE> <DEDENT> new = cassiopeia.core.requests.call_with_ensured_size(cassiopeia.dto.summonerapi.get_summoners_by_name, 40, missing) <NEW_LINE> to_store = [] <NEW_LINE> for i in range(len(missing)): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> summoner = cassiopeia.type.core.summoner.Summoner(new[__standardize(missing[i])]) <NEW_LINE> to_store.append(summoner) <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> summoner = None <NEW_LINE> <DEDENT> summoners[loc[i]] = summoner <NEW_LINE> <DEDENT> cassiopeia.core.requests.data_store.store(to_store, [summoner.id for summoner in to_store]) <NEW_LINE> cassiopeia.core.requests.data_store.store(to_store, [summoner.name for summoner in to_store]) <NEW_LINE> return summoners
Gets a bunch of summoners by name names list<str> the names of the summoners return list<Summoner> the summoners
625941b607d97122c41786b0
def _ratio_enum(anchor, ratios): <NEW_LINE> <INDENT> w, h, x_ctr, y_ctr = _whctrs(anchor) <NEW_LINE> size = w * h <NEW_LINE> size_ratios = size / ratios <NEW_LINE> hs = np.round(np.sqrt(size_ratios)) <NEW_LINE> ws = np.round(hs * ratios) <NEW_LINE> anchors = _mkanchors(ws, hs, x_ctr, y_ctr) <NEW_LINE> return anchors
Enumerate a set of anchors for each aspect ratio wrt an anchor.
625941b663d6d428bbe44314
def __init__(self, *nested_rules): <NEW_LINE> <INDENT> def _onerror(): <NEW_LINE> <INDENT> return abort(500) <NEW_LINE> <DEDENT> Rule.__init__(self, vfunc=is_request(), errfunc=_onerror, nested_rules=list(nested_rules))
Constructs a new Rule for validating requests. Any nested rules needed for validating parts of the request (such as headers, query string params, etc) should also be passed in. :param nested_rules: Any sub rules that also should be used for validation
625941b6fff4ab517eb2f25e
def transform(self, X): <NEW_LINE> <INDENT> return self.onehot.transform([[x] for x in self.label.transform(X)])
Transform using fitted label encoder and onehot encoder
625941b657b8e32f524832c5
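A plausible reconstruction of the two encoders the transform() above wraps; sklearn is assumed, and the fitting code is not shown in this row, so treat this as a sketch rather than the original implementation.

from sklearn.preprocessing import LabelEncoder, OneHotEncoder

label = LabelEncoder().fit(['cat', 'dog', 'fish'])
onehot = OneHotEncoder().fit([[x] for x in label.transform(['cat', 'dog', 'fish'])])

# Mirrors transform(): label-encode the strings, then one-hot the integer codes.
encoded = onehot.transform([[x] for x in label.transform(['dog', 'cat'])])
print(encoded.toarray())   # [[0. 1. 0.] [1. 0. 0.]]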
def testGroups(self): <NEW_LINE> <INDENT> pass
Test Groups
625941b6507cdc57c6306af8
def get_service_class(service_name): <NEW_LINE> <INDENT> name = service_name.capitalize() <NEW_LINE> if name not in __services: <NEW_LINE> <INDENT> service_module = get_module_for_service(service_name) <NEW_LINE> cls = _get_service_class(service_module) <NEW_LINE> if cls is None or cls[0][0:-len(SERVICE)] != name: <NEW_LINE> <INDENT> raise ImportError(ERR_UNEXPECTED_SERVICE_CLASS_IN_MODULE.format(name, service_module, cls[0] if cls else "None")) <NEW_LINE> <DEDENT> __services[name] = cls <NEW_LINE> <DEDENT> return __services[name][1]
Gets the class that implements a service :param service_name: Name of the service :return: Class that implements the service
625941b6097d151d1a222c81
def random(self): <NEW_LINE> <INDENT> high, low = unpack('II', rand_bytes(8)) <NEW_LINE> high >>= 5 <NEW_LINE> low >>= 6 <NEW_LINE> return (high * 67108864.0 + low) * (1.0 / 9007199254740992.0)
Return a random float in the half-open interval [0, 1). Copied from Python's _randommodule.c. Returns ------- x : float
625941b632920d7e50b27ff1
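This is the standard 53-bit construction from CPython's _randommodule.c: 27 high bits and 26 low bits form a 53-bit integer, which is divided by 2**53. A standalone sketch, assuming rand_bytes wraps os.urandom (the row above does not show its definition):

import os
from struct import unpack

def rand_bytes(n):   # assumed entropy source for the method above
    return os.urandom(n)

high, low = unpack('II', rand_bytes(8))
x = ((high >> 5) * 67108864.0 + (low >> 6)) / 9007199254740992.0
assert 0.0 <= x < 1.0   # 67108864 == 2**26, 9007199254740992 == 2**53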
def boundingRect(self): <NEW_LINE> <INDENT> extra = self._halfLength / 2.0 <NEW_LINE> return QRectF(self._origin, QSizeF(self._end.x() - self._origin.x(), self._end.y() - self._origin.y())) .normalized() .adjusted(-extra, -extra, extra, extra)
Public method to return the bounding rectangle. @return bounding rectangle (QRectF)
625941b696565a6dacc8f4fa
def vector_mean(vectors): <NEW_LINE> <INDENT> n = len(vectors) <NEW_LINE> return scalar_multiply(1/n, vector_sum(vectors))
compute the vector whose ith element is the mean of the ith elements of the input vectors
625941b6187af65679ca4f42
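A worked example for the row above, with plausible definitions of the two helpers the function depends on (vector_sum and scalar_multiply are not shown in this row; these are the conventional from-scratch versions):

def vector_sum(vectors):
    # element-wise sum across vectors
    return [sum(components) for components in zip(*vectors)]

def scalar_multiply(c, v):
    return [c * v_i for v_i in v]

assert vector_mean([[1, 2], [3, 4], [5, 6]]) == [3.0, 4.0]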
def self_destruct(self): <NEW_LINE> <INDENT> for bkfobj in self.bkup_file_objs: <NEW_LINE> <INDENT> bkfobj.self_destruct()
Delete all files associated with this instance
625941b6379a373c97cfa970
def deleteExistingBadDataTrainingFolder(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> path = 'Training_Raw_files_validated/' <NEW_LINE> if os.path.isdir(path + 'Bad_Raw/'): <NEW_LINE> <INDENT> shutil.rmtree(path + 'Bad_Raw/') <NEW_LINE> file = open("Training_Logs/GeneralLog.txt", 'a+') <NEW_LINE> self.logger.log(file,"BadRaw directory deleted before starting validation!!!") <NEW_LINE> file.close() <NEW_LINE> <DEDENT> <DEDENT> except OSError as s: <NEW_LINE> <INDENT> file = open("Training_Logs/GeneralLog.txt", 'a+') <NEW_LINE> self.logger.log(file,"Error while Deleting Directory : %s" %s) <NEW_LINE> file.close() <NEW_LINE> raise OSError
Method Name: deleteExistingBadDataTrainingFolder Description: This method deletes the directory made to store the bad Data. Output: None On Failure: OSError
625941b626068e7796caeafd
def forward(self, pred, target, normalize=False): <NEW_LINE> <INDENT> if normalize: <NEW_LINE> <INDENT> target = 2 * target - 1 <NEW_LINE> pred = 2 * pred - 1 <NEW_LINE> <DEDENT> dist = self.model.forward_pair(target, pred) <NEW_LINE> return dist
Pred and target are Variables. If normalize is on, assumes the images are between [0, 1] and then scales them between [-1, 1]. If normalize is false, assumes the images are already between [-1, +1]. Inputs pred and target are Nx3xHxW. Output is a pytorch Variable of length N.
625941b60c0af96317bb800f
def __setChildDict(self, child): <NEW_LINE> <INDENT> d = self[self._name] <NEW_LINE> d[child.getName()] = child.getDict()
Private method to set the child object's dictionary inside the internal dictionary.
625941b6090684286d50eb05
def __get_special_fake_value(self, name, func_name, inner_type_index): <NEW_LINE> <INDENT> if func_name == 'pylist': <NEW_LINE> <INDENT> temp_list = list() <NEW_LINE> for i in range(self.list_value_count): <NEW_LINE> <INDENT> temp_list.append(self.get_optimum_fake_value(name, inner_type_index)) <NEW_LINE> <DEDENT> return temp_list <NEW_LINE> <DEDENT> elif func_name == 'object_id': <NEW_LINE> <INDENT> return str(ObjectId()) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None
Get a special mock value.
625941b6004d5f362079a15d
@events.route(namespace + '/venues', methods=['GET']) <NEW_LINE> def get_venues(): <NEW_LINE> <INDENT> venues = queries.random_venues(10) <NEW_LINE> venues = [venue_schema.dump(v).data for v in venues] <NEW_LINE> result = { 'success': True, 'data': { 'venues': venues } } <NEW_LINE> return jsonify(result)
Grab a sample list of venues
625941b67047854f462a1233
def calculate_variance_impurity(positives, negatives): <NEW_LINE> <INDENT> total = positives + negatives <NEW_LINE> if total == 0: <NEW_LINE> <INDENT> return 0.0 <NEW_LINE> <DEDENT> positive_ratio = positives / total <NEW_LINE> negative_ratio = negatives / total <NEW_LINE> result = round(positive_ratio * negative_ratio, 5) <NEW_LINE> return result
Calculate the variance impurity for data given positives and negatives Parameters ---------- positives: int Number of rows with positive attributes negatives Number of rows with negative attributes Returns ------- float
625941b6099cdd3c635f0a82
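A quick numeric check of the record's formula: with 3 positives and 5 negatives the ratios are 0.375 and 0.625, so the impurity is round(0.375 * 0.625, 5) = 0.23438, and the zero-total guard returns 0.0 rather than dividing by zero.

assert calculate_variance_impurity(3, 5) == 0.23438  # 0.375 * 0.625, rounded to 5 places
assert calculate_variance_impurity(0, 0) == 0.0      # empty split hits the guard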
def test_lines(self): <NEW_LINE> <INDENT> filename = join_data_path('pagelist-lines.txt') <NEW_LINE> site = self.get_site() <NEW_LINE> titles = list(pagegenerators.TextfilePageGenerator(filename, site)) <NEW_LINE> self.assertEqual(len(titles), len(self.expected_titles)) <NEW_LINE> expected_titles = [ expected_title[self.title_columns[site.namespaces[page.namespace()].case]] for expected_title, page in zip(self.expected_titles, titles)] <NEW_LINE> self.assertPageTitlesEqual(titles, expected_titles)
Test TextfilePageGenerator with newlines.
625941b6283ffb24f3c55732
def gaussian(a, b, s): <NEW_LINE> <INDENT> return a * np.exp(-b * s**2)
General Gaussian
625941b6fbf16365ca6f5fe1
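Minimal usage sketch for the record's `gaussian`: a sets the peak height and b the decay rate, so evaluating on a grid gives a bell curve centered at s = 0 (the values here are illustrative only):

import numpy as np

s = np.linspace(-3.0, 3.0, 61)
y = gaussian(2.0, 0.5, s)        # peak of 2.0 at s = 0; smaller b widens the curve
assert np.isclose(y.max(), 2.0)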
def __init__(self, name, aliases=None): <NEW_LINE> <INDENT> if not isinstance(name, str): <NEW_LINE> <INDENT> raise ValueError('Field name must be a string') <NEW_LINE> <DEDENT> if aliases is not None: <NEW_LINE> <INDENT> name = aliases.get(name, name) <NEW_LINE> <DEDENT> if self.PICK_SEP in name: <NEW_LINE> <INDENT> self._name, self._subname = name.split(self.PICK_SEP) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._name, self._subname = name, None
Create from field name. :param name: Field name. :type name: str :param aliases: Aliases for all fields :type aliases: dict :raise: ValueError for non-string name
625941b66e29344779a6243b
def forward(self, x): <NEW_LINE> <INDENT> x = self.fc(x) <NEW_LINE> x = self.dropout(x) <NEW_LINE> x = x.view(-1, self.conv_dim*4, 4, 4) <NEW_LINE> x = F.relu(self.t_conv1(x)) <NEW_LINE> x = F.relu(self.t_conv2(x)) <NEW_LINE> x = F.tanh(self.t_conv3(x)) <NEW_LINE> return x
Forward propagation of the neural network :param x: The input to the neural network :return: A 32x32x3 Tensor image as output
625941b6d6c5a10208143e6c
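The forward pass pins down the constructor shapes: the fully connected layer must emit conv_dim*4 * 4 * 4 features, and each transposed conv must double the spatial size (4 -> 8 -> 16 -> 32). A sketch of one constructor consistent with that, where the kernel size, stride, padding, and dropout rate are assumptions:

import torch.nn as nn

class Generator(nn.Module):
    def __init__(self, z_size, conv_dim=32):
        super().__init__()
        self.conv_dim = conv_dim
        self.fc = nn.Linear(z_size, conv_dim * 4 * 4 * 4)
        self.dropout = nn.Dropout(0.5)
        # kernel 4, stride 2, padding 1 doubles height and width each step
        self.t_conv1 = nn.ConvTranspose2d(conv_dim * 4, conv_dim * 2, 4, stride=2, padding=1)
        self.t_conv2 = nn.ConvTranspose2d(conv_dim * 2, conv_dim, 4, stride=2, padding=1)
        self.t_conv3 = nn.ConvTranspose2d(conv_dim, 3, 4, stride=2, padding=1)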
def classMerge(data,y): <NEW_LINE> <INDENT> crtab = pd.crosstab(data, y, margins=True).drop('All') <NEW_LINE> crtab.columns = ['good', 'bad', 'total'] <NEW_LINE> crtab['bad_ratio'] = crtab['bad']/crtab['total'] <NEW_LINE> kmeans = KMeans(n_clusters=5, random_state=0).fit(crtab[['good','bad','bad_ratio']]) <NEW_LINE> dict_class = dict(zip(crtab.index.tolist(),kmeans.labels_)) <NEW_LINE> return dict_class
kmeans merge of variable levels :param data: :param y: :return: dict_class
625941b6fff4ab517eb2f25f
def test_no_coalitions(self): <NEW_LINE> <INDENT> from nuorisovaalit.results import dhont_selection <NEW_LINE> district = self._populate(quota=3) <NEW_LINE> self._add_votes(district.candidates[0], 2) <NEW_LINE> self._add_votes(district.candidates[1], 3) <NEW_LINE> self._add_votes(district.candidates[2], 2) <NEW_LINE> self._add_votes(district.candidates[3], 4) <NEW_LINE> self._add_votes(district.candidates[4], 1) <NEW_LINE> self._add_votes(district.candidates[5], 2) <NEW_LINE> self._add_votes(district.candidates[6], 4) <NEW_LINE> self._add_votes(district.candidates[7], 3) <NEW_LINE> winners = dhont_selection(district) <NEW_LINE> self.assertEquals(8, len(winners)) <NEW_LINE> self.assertOrdering( district.candidates[2], district.candidates[6], district.candidates[5], district.candidates[7], district.candidates[0], district.candidates[4], district.candidates[3], district.candidates[1], ) <NEW_LINE> self.assertCandidate(winners[0], { 'name': u'6, Candidate', 'proportional_votes': Decimal('7'), 'absolute_votes': 4, }) <NEW_LINE> self.assertCandidate(winners[1], { 'name': u'3, Candidate', 'proportional_votes': Decimal('6'), 'absolute_votes': 4, }) <NEW_LINE> self.assertCandidate(winners[2], { 'name': u'1, Candidate', 'proportional_votes': Decimal('5'), 'absolute_votes': 3, }) <NEW_LINE> self.assertCandidate(winners[3], { 'name': u'5, Candidate', 'proportional_votes': Decimal('3.5'), 'absolute_votes': 2, }) <NEW_LINE> self.assertCandidate(winners[4], { 'name': u'2, Candidate', 'proportional_votes': Decimal('3'), 'absolute_votes': 2, }) <NEW_LINE> self.assertCandidate(winners[5], { 'name': u'7, Candidate', 'proportional_votes': Decimal('3'), 'absolute_votes': 3, }) <NEW_LINE> self.assertCandidate(winners[6], { 'name': u'0, Candidate', 'proportional_votes': Decimal('2.5'), 'absolute_votes': 2, }) <NEW_LINE> self.assertCandidate(winners[7], { 'name': u'4, Candidate', 'proportional_votes': Decimal('7') / Decimal('3'), 'absolute_votes': 1, })
Test the results when there are no coalitions.
625941b6adb09d7d5db6c5b9
def get_dataset(name: str, root: str, waveform_transform=None, utterance_transform=None, *args) -> Union[LIBRISPEECH, Numbers]: <NEW_LINE> <INDENT> if name == "librispeech": <NEW_LINE> <INDENT> return LIBRISPEECH(root=root, waveform_transform=waveform_transform, utterance_transform=utterance_transform, *args) <NEW_LINE> <DEDENT> elif name == "numbers": <NEW_LINE> <INDENT> return Numbers(root=root, waveform_transform=waveform_transform, utterance_transform=utterance_transform) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError(f"Unknown dataset name: \"{name}\"")
Get dataset by its name :param name: One of {"librispeech", "numbers"} :param root: :param waveform_transform: audiomentations transform for waveform :param utterance_transform: transform for utterance :return: either LIBRISPEECH or Numbers dataset
625941b676d4e153a657e955
def peek_active_groups(self): <NEW_LINE> <INDENT> bytes = self._peek_active_groups_func(self.alprstream_pointer) <NEW_LINE> results = _convert_bytes_to_json(bytes) <NEW_LINE> return results
Check the grouping list for active groups (but don't remove any entries from the grouping queue). :return list results: All currently active groups.
625941b6a8ecb033257d2efc
def add_attachment(self, attach_id, data): <NEW_LINE> <INDENT> if attach_id in self.zf_filelist and attach_id not in self.deleted_files_attachments: <NEW_LINE> <INDENT> raise Exception("Attachment %s already in model." % attach_id) <NEW_LINE> <DEDENT> self.attachments[attach_id] = data <NEW_LINE> if attach_id not in self.modified_attachments: <NEW_LINE> <INDENT> self.modified_attachments.append(attach_id)
Add new attachment to the attachment folder. NOTE: For now this only adds new files, not new directories. NOTE: You can only add a new attachment if you delete the old one first. TODO: Change to just overwrite. :param attach_id: :param data: :return:
625941b6925a0f43d2549c99
def set_name(self, name: str) -> None: <NEW_LINE> <INDENT> gdict = self._anydict <NEW_LINE> if len(gdict) == 1: <NEW_LINE> <INDENT> newdict = {name : [*gdict.values()][0]} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> newdict = {} <NEW_LINE> for i, (k,v) in enumerate(gdict.items()): <NEW_LINE> <INDENT> if k == GROUPBY_KEY_PREFIX+'_'+str(i): <NEW_LINE> <INDENT> k = name+'_'+str(i) <NEW_LINE> <DEDENT> newdict[k] = v <NEW_LINE> <DEDENT> <DEDENT> self._set_anydict(newdict)
If the grouping dict contains a single item, rename it. This will make categorical results consistent with groupby results if they've been constructed before being added to a dataset. Ensures that label names are consistent with categorical names. Parameters ---------- name : str The new name to use for the single column in the internal grouping dictionary. Examples -------- Single key Categorical added to a Dataset, grouping picks up name: >>> c = rt.Categorical(['a','a','b','c','a']) >>> print(c.get_name()) None >>> ds = rt.Dataset({'catcol':c}) >>> ds.catcol.sum(rt.arange(5)) *catcol col_0 ------- ----- a 5 b 2 c 3 Multikey Categorical, no names: >>> c = rt.Categorical([rt.FA(['a','a','b','c','a']), rt.FA([1,1,2,3,1])]) >>> print(c.get_name()) None >>> ds = rt.Dataset({'mkcol': c}) >>> ds.mkcol.sum(rt.arange(5)) *mkcol_0 *mkcol_1 col_0 -------- -------- ----- a 1 5 b 2 2 c 3 3 Multikey Categorical, already has names for its columns (names are preserved): >>> arr1 = rt.FA(['a','a','b','c','a']) >>> arr1.set_name('mystrings') >>> arr2 = rt.FA([1,1,2,3,1]) >>> arr2.set_name('myints') >>> c = rt.Categorical([arr1, arr2]) >>> ds = rt.Dataset({'mkcol': c}) >>> ds.mkcol.sum(rt.arange(5)) *mystrings *myints col_0 ---------- ------- ----- a 1 5 b 2 2 c 3 3
625941b623849d37ff7b2eb8
def set_envelope_level4(self, arg0, arg1): <NEW_LINE> <INDENT> return self.sampler.execute(self.set_envelope_level4_cmd, (arg0, arg1, ))
Set Envelope Level 4 (FILTER and AUX only)
625941b6eab8aa0e5d26d984
def transform(line, known_fields=ENRICHED_EVENT_FIELD_TYPES, add_geolocation_data=True): <NEW_LINE> <INDENT> return jsonify_good_event(line.split('\t'), known_fields, add_geolocation_data)
Convert a Snowplow enriched event TSV into a JSON
625941b64d74a7450ccd3fe8
def get_image(self, x, y, width, height): <NEW_LINE> <INDENT> image = pygame.Surface([width, height]).convert() <NEW_LINE> image.blit(self.sprite_sheet, (0, 0), (x, y, width, height)) <NEW_LINE> image.set_colorkey(constants.PURPLE) <NEW_LINE> return image
Grab a single image out of a larger spritesheet. Pass in the x, y location of the sprite and the width and height of the sprite.
625941b6fb3f5b602dac34b4
def findSecondMinimumValue(self, root): <NEW_LINE> <INDENT> if not root.right: return None <NEW_LINE> secMin = root.val <NEW_LINE> while root.right.right: <NEW_LINE> <INDENT> root = root.right <NEW_LINE> secMin = root.val <NEW_LINE> <DEDENT> return secMin
:type root: TreeNode :rtype: int
625941b63617ad0b5ed67d25
def err(self,stderr,append=False): <NEW_LINE> <INDENT> return self._init_runner().err(stderr,append)
!Returns a new Runner that is like self in all ways except with a different stderr. @param stderr the stderr filename @param append if True, append to the file, otherwise truncate
625941b6187af65679ca4f43
def max_value(self, brd, alpha, beta, depth): <NEW_LINE> <INDENT> successors = self.get_successors(brd) <NEW_LINE> if depth == 0 or brd.get_outcome() != 0 or len(successors) == 0: <NEW_LINE> <INDENT> return self.heuristic(brd), -1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> value = -sys.maxsize+1 <NEW_LINE> col = -1 <NEW_LINE> for b in successors: <NEW_LINE> <INDENT> bValue = self.min_value(b[0], alpha, beta, depth-1)[0] <NEW_LINE> if bValue > value: <NEW_LINE> <INDENT> value = bValue <NEW_LINE> col = b[1] <NEW_LINE> <DEDENT> if value >= beta: <NEW_LINE> <INDENT> return value, col <NEW_LINE> <DEDENT> alpha = max(alpha, value) <NEW_LINE> <DEDENT> return value, col
Alpha-beta max node: return the best heuristic value achievable for the maximizing player, along with the column that achieves it
625941b650812a4eaa59c14c
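The record calls a companion `min_value` that is not shown; a sketch of the symmetric minimizing node (an assumption, written only to mirror the record's pruning and return convention) completes the alpha-beta pair:

import sys

def min_value(self, brd, alpha, beta, depth):
    # Opponent's turn: pick the successor minimizing our heuristic.
    successors = self.get_successors(brd)
    if depth == 0 or brd.get_outcome() != 0 or len(successors) == 0:
        return self.heuristic(brd), -1
    value, col = sys.maxsize - 1, -1
    for b in successors:
        b_value = self.max_value(b[0], alpha, beta, depth - 1)[0]
        if b_value < value:
            value, col = b_value, b[1]
        if value <= alpha:           # the max node above will never allow this branch
            return value, col
        beta = min(beta, value)
    return value, col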
def draw_px_point(self, context): <NEW_LINE> <INDENT> color = (0, 0, 0, 1.0) <NEW_LINE> v1 = [self.center_area.x, self.center_area.y] <NEW_LINE> v2 = [self.mouse_pos.x, self.mouse_pos.y] <NEW_LINE> if self.mouse_pos != Vector([-1, -1]): <NEW_LINE> <INDENT> draw_stippled_line(v1, v2, 0.5, 4, color) <NEW_LINE> if hasattr(self, 'rot_prev'): <NEW_LINE> <INDENT> draw_arrows(v1, v2, 1.5, 12, color, angle_offset=90) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> draw_arrows(v1, v2, 1.5, 12, color)
Draws the handle seen when rotating or scaling
625941b626238365f5f0ec8f
def set_student_choices_list(query,student_choice): <NEW_LINE> <INDENT> index = 0 <NEW_LINE> for choice in student_choice: <NEW_LINE> <INDENT> for internship in query: <NEW_LINE> <INDENT> if internship.organization == choice.organization and internship.speciality == choice.speciality: <NEW_LINE> <INDENT> choice.maximum_enrollments = internship.maximum_enrollments <NEW_LINE> choice.selectable = internship.selectable <NEW_LINE> query[index] = 0 <NEW_LINE> <DEDENT> index += 1 <NEW_LINE> <DEDENT> query = [x for x in query if x != 0] <NEW_LINE> index = 0 <NEW_LINE> <DEDENT> query = [x for x in query if x != 0] <NEW_LINE> return query
Function to set the list of the student's choices Params : query : the list of all internships student_choice : the list of the internships chosen by the student Checks whether an internship and a choice match; if so, copies the maximum-enrollments value and the selectable flag onto the choice. Then deletes those internships from the list of all internships (because they are already in the choices list, which is displayed first)
625941b64d74a7450ccd3fe9
def validate_status_cd(df): <NEW_LINE> <INDENT> val = [x for x in df.status if x not in status_list] <NEW_LINE> if len(val) ==0 : <NEW_LINE> <INDENT> print('No Outliers found in Status Code column.') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('Outliers found in Status Code column in rows: {}'.format(val)) <NEW_LINE> <DEDENT> return
Checks the values of the status code column in the data against the known status codes and reports any outliers.
625941b65fdd1c0f98dc0058