Dataset schema (column statistics from the source dump):

- body: string, lengths 26 to 98.2k
- body_hash: int64, values -9,222,864,604,528,158,000 to 9,221,803,474B
- docstring: string, lengths 1 to 16.8k
- path: string, lengths 5 to 230
- name: string, lengths 1 to 96
- repository_name: string, lengths 7 to 89
- lang: string, 1 class
- body_without_docstring: string, lengths 20 to 98.2k
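A minimal sketch of how a dump with this schema might be loaded for inspection; the Parquet file name is hypothetical and the use of pandas is an assumption, not something stated by the dump itself:

import pandas as pd

# Hypothetical file name; the dump's actual storage location is not given here.
df = pd.read_parquet('python_functions.parquet')

# The columns listed in the schema above.
print(df[['name', 'path', 'repository_name', 'lang']].head())

# body_without_docstring is expected to be body with its docstring stripped.
row = df.iloc[0]
print(row['docstring'])
print(row['body_without_docstring'])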
def compare(self, other, name='self'):
    'Compare two objects and return list of differences'
    if type(other) != type(self):
        return ['type(' + name + ')']
    diff_list = list()
    diff_list.extend(super(LamSlotWind, self).compare(other, name=name))
    if other._Ksfill != self._Ksfill:
        diff_list.append(name + '.Ksfill')
    if (other.winding is None and self.winding is not None) or (
            other.winding is not None and self.winding is None):
        diff_list.append(name + '.winding None mismatch')
    elif self.winding is not None:
        diff_list.extend(self.winding.compare(other.winding, name=name + '.winding'))
    return diff_list
-6,851,858,460,286,233,000
Compare two objects and return list of differences
pyleecan/Classes/LamSlotWind.py
compare
IrakozeFD/pyleecan
python
def compare(self, other, name='self'):
    if type(other) != type(self):
        return ['type(' + name + ')']
    diff_list = list()
    diff_list.extend(super(LamSlotWind, self).compare(other, name=name))
    if other._Ksfill != self._Ksfill:
        diff_list.append(name + '.Ksfill')
    if (other.winding is None and self.winding is not None) or (
            other.winding is not None and self.winding is None):
        diff_list.append(name + '.winding None mismatch')
    elif self.winding is not None:
        diff_list.extend(self.winding.compare(other.winding, name=name + '.winding'))
    return diff_list
def __sizeof__(self):
    'Return the size in memory of the object (including all subobjects)'
    S = 0
    S += super(LamSlotWind, self).__sizeof__()
    S += getsizeof(self.Ksfill)
    S += getsizeof(self.winding)
    return S
6,686,156,564,589,825,000
Return the size in memory of the object (including all subobjects)
pyleecan/Classes/LamSlotWind.py
__sizeof__
IrakozeFD/pyleecan
python
def __sizeof__(self):
    S = 0
    S += super(LamSlotWind, self).__sizeof__()
    S += getsizeof(self.Ksfill)
    S += getsizeof(self.winding)
    return S
def as_dict(self):
    'Convert this object into a JSON serializable dict (can be used in __init__)'
    LamSlotWind_dict = super(LamSlotWind, self).as_dict()
    LamSlotWind_dict['Ksfill'] = self.Ksfill
    if self.winding is None:
        LamSlotWind_dict['winding'] = None
    else:
        LamSlotWind_dict['winding'] = self.winding.as_dict()
    LamSlotWind_dict['__class__'] = 'LamSlotWind'
    return LamSlotWind_dict
-2,586,795,700,191,412,000
Convert this object into a JSON serializable dict (can be used in __init__)
pyleecan/Classes/LamSlotWind.py
as_dict
IrakozeFD/pyleecan
python
def as_dict(self):
    LamSlotWind_dict = super(LamSlotWind, self).as_dict()
    LamSlotWind_dict['Ksfill'] = self.Ksfill
    if self.winding is None:
        LamSlotWind_dict['winding'] = None
    else:
        LamSlotWind_dict['winding'] = self.winding.as_dict()
    LamSlotWind_dict['__class__'] = 'LamSlotWind'
    return LamSlotWind_dict
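The '__class__' key written by as_dict is what allows the dict to be rebuilt into the right class. A minimal round-trip sketch, assuming pyleecan classes accept an init_dict keyword (as the _set_winding record below suggests with class_obj(init_dict=value)):

from pyleecan.Classes.LamSlotWind import LamSlotWind

lam = LamSlotWind()
d = lam.as_dict()                # plain, JSON-serializable dict
assert d['__class__'] == 'LamSlotWind'
lam2 = LamSlotWind(init_dict=d)  # rebuild the object from the dict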
def _set_None(self):
    'Set all the properties to None (except pyleecan object)'
    self.Ksfill = None
    if self.winding is not None:
        self.winding._set_None()
    super(LamSlotWind, self)._set_None()
-6,787,604,911,379,125,000
Set all the properties to None (except pyleecan object)
pyleecan/Classes/LamSlotWind.py
_set_None
IrakozeFD/pyleecan
python
def _set_None(self):
    self.Ksfill = None
    if self.winding is not None:
        self.winding._set_None()
    super(LamSlotWind, self)._set_None()
def _get_Ksfill(self):
    'getter of Ksfill'
    return self._Ksfill
6,475,719,411,689,306,000
getter of Ksfill
pyleecan/Classes/LamSlotWind.py
_get_Ksfill
IrakozeFD/pyleecan
python
def _get_Ksfill(self):
    return self._Ksfill
def _set_Ksfill(self, value):
    'setter of Ksfill'
    check_var('Ksfill', value, 'float', Vmin=0, Vmax=1)
    self._Ksfill = value
-7,171,397,597,108,831,000
setter of Ksfill
pyleecan/Classes/LamSlotWind.py
_set_Ksfill
IrakozeFD/pyleecan
python
def _set_Ksfill(self, value):
    check_var('Ksfill', value, 'float', Vmin=0, Vmax=1)
    self._Ksfill = value
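The Vmin=0/Vmax=1 bounds in the setter above mean Ksfill only accepts floats in [0, 1]. A small sketch of the expected behavior; the exact exception type raised by check_var is not shown in this dump, so it is caught broadly here:

from pyleecan.Classes.LamSlotWind import LamSlotWind

lam = LamSlotWind()
lam.Ksfill = 0.45        # accepted: float within [0, 1]

try:
    lam.Ksfill = 1.5     # rejected: above Vmax=1
except Exception as err:  # check_var's concrete exception class is an assumption
    print(type(err).__name__, err)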
def _get_winding(self):
    'getter of winding'
    return self._winding
2,205,193,926,475,906,000
getter of winding
pyleecan/Classes/LamSlotWind.py
_get_winding
IrakozeFD/pyleecan
python
def _get_winding(self):
    return self._winding
def _set_winding(self, value):
    'setter of winding'
    if isinstance(value, str):  # path to a file: load its init dict
        value = load_init_dict(value)[1]
    if isinstance(value, dict) and '__class__' in value:
        class_obj = import_class('pyleecan.Classes', value.get('__class__'), 'winding')
        value = class_obj(init_dict=value)
    elif type(value) is int and value == -1:  # sentinel: default Winding
        value = Winding()
    check_var('winding', value, 'Winding')
    self._winding = value
    if self._winding is not None:
        self._winding.parent = self
4,680,165,766,541,386,000
setter of winding
pyleecan/Classes/LamSlotWind.py
_set_winding
IrakozeFD/pyleecan
python
def _set_winding(self, value):
    if isinstance(value, str):
        value = load_init_dict(value)[1]
    if isinstance(value, dict) and '__class__' in value:
        class_obj = import_class('pyleecan.Classes', value.get('__class__'), 'winding')
        value = class_obj(init_dict=value)
    elif type(value) is int and value == -1:
        value = Winding()
    check_var('winding', value, 'Winding')
    self._winding = value
    if self._winding is not None:
        self._winding.parent = self
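The setter above accepts several input forms: a file path (loaded via load_init_dict), an init dict with a '__class__' key, the integer sentinel -1 for a default Winding, or a Winding instance. A minimal sketch of those paths; the file name is hypothetical and left commented out:

from pyleecan.Classes.LamSlotWind import LamSlotWind
from pyleecan.Classes.Winding import Winding

lam = LamSlotWind()
lam.winding = -1                        # sentinel: builds a default Winding()
lam.winding = {'__class__': 'Winding'}  # init dict: class looked up by name
# lam.winding = 'winding.json'          # hypothetical path, loaded via load_init_dict

assert isinstance(lam.winding, Winding)
assert lam.winding.parent is lam        # the setter re-parents the sub-object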
def get_rules(rule_list=None):
    """Get the rules governing the snapshot creation

    Args:
      rule_list: List of rules
    Returns:
      Rules object with attribute `rules`. See Rules object for detailed doc.
    """
    if not rule_list:
        rule_list = DEFAULT_RULE_LIST
    return Rules(rule_list)
2,234,582,819,427,399,000
Get the rules governing the snapshot creation

Args:
  rule_list: List of rules
Returns:
  Rules object with attribute `rules`. See Rules object for detailed doc.
src/ggrc/snapshotter/rules.py
get_rules
MikalaiMikalalai/ggrc-core
python
def get_rules(rule_list=None):
    if not rule_list:
        rule_list = DEFAULT_RULE_LIST
    return Rules(rule_list)
@classmethod
def internal_types(cls):
    'Return set of internal type names.'
    return cls.all - cls.external
7,308,573,336,677,186,000
Return set of internal type names.
src/ggrc/snapshotter/rules.py
internal_types
MikalaiMikalalai/ggrc-core
python
@classmethod
def internal_types(cls):
    return cls.all - cls.external
@classmethod
def external_types(cls):
    'Return set of external type names.'
    return cls.external
4,920,418,971,284,829,000
Return set of external type names.
src/ggrc/snapshotter/rules.py
external_types
MikalaiMikalalai/ggrc-core
python
@classmethod
def external_types(cls):
    return cls.external
def __init__(self, publish_key, subscribe_key, secret_key=False,
             ssl_on=False, origin='pubsub.pubnub.com', pres_uuid=None):
    """
    #**
    #* Pubnub
    #*
    #* Init the Pubnub Client API
    #*
    #* @param string publish_key required key to send messages.
    #* @param string subscribe_key required key to receive messages.
    #* @param string secret_key optional key to sign messages.
    #* @param boolean ssl required for 2048 bit encrypted messages.
    #* @param string origin PUBNUB Server Origin.
    #* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
    #**

    ## Init Class
    pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )

    """
    self.origin = origin
    self.limit = 1800
    self.publish_key = publish_key
    self.subscribe_key = subscribe_key
    self.secret_key = secret_key
    self.ssl = ssl_on
    if self.ssl:
        self.origin = 'https://' + self.origin
    else:
        self.origin = 'http://' + self.origin
    self.uuid = pres_uuid or str(uuid.uuid4())
    if not isinstance(self.uuid, str):  # basestring does not exist in Python 3
        raise AttributeError('pres_uuid must be a string')
-71,531,499,991,373,630
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
#* @param boolean ssl required for 2048 bit encrypted messages.
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**

## Init Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
python/3.2/Pubnub.py
__init__
goodybag/pubnub-api
python
def __init__(self, publish_key, subscribe_key, secret_key=False,
             ssl_on=False, origin='pubsub.pubnub.com', pres_uuid=None):
    self.origin = origin
    self.limit = 1800
    self.publish_key = publish_key
    self.subscribe_key = subscribe_key
    self.secret_key = secret_key
    self.ssl = ssl_on
    if self.ssl:
        self.origin = 'https://' + self.origin
    else:
        self.origin = 'http://' + self.origin
    self.uuid = pres_uuid or str(uuid.uuid4())
    if not isinstance(self.uuid, str):  # basestring does not exist in Python 3
        raise AttributeError('pres_uuid must be a string')
def publish(self, args):
    """
    #**
    #* Publish
    #*
    #* Send a message to a channel.
    #*
    #* @param array args with channel and message.
    #* @return array success information.
    #**

    ## Publish Example
    info = pubnub.publish({
        'channel' : 'hello_world',
        'message' : {
            'some_text' : 'Hello my World'
        }
    })
    print(info)

    """
    if not (args['channel'] and args['message']):
        return [0, 'Missing Channel or Message']
    channel = str(args['channel'])
    message = json.dumps(args['message'], separators=(',', ':'))
    if self.secret_key:
        # md5 requires bytes in Python 3
        signature = hashlib.md5('/'.join([
            self.publish_key, self.subscribe_key, self.secret_key,
            channel, message]).encode('utf-8')).hexdigest()
    else:
        signature = '0'
    return self._request(['publish', self.publish_key, self.subscribe_key,
                          signature, channel, '0', message])
-4,403,577,038,890,871,000
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**

## Publish Example
info = pubnub.publish({
    'channel' : 'hello_world',
    'message' : {
        'some_text' : 'Hello my World'
    }
})
print(info)
python/3.2/Pubnub.py
publish
goodybag/pubnub-api
python
def publish(self, args):
    if not (args['channel'] and args['message']):
        return [0, 'Missing Channel or Message']
    channel = str(args['channel'])
    message = json.dumps(args['message'], separators=(',', ':'))
    if self.secret_key:
        # md5 requires bytes in Python 3
        signature = hashlib.md5('/'.join([
            self.publish_key, self.subscribe_key, self.secret_key,
            channel, message]).encode('utf-8')).hexdigest()
    else:
        signature = '0'
    return self._request(['publish', self.publish_key, self.subscribe_key,
                          signature, channel, '0', message])
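The signing scheme in publish() is just an md5 hex digest over the '/'-joined keys, channel, and serialized message. A standalone sketch with hypothetical keys:

import hashlib

# Hypothetical keys and payload, mirroring the fields publish() joins.
parts = ['demo-pub-key', 'demo-sub-key', 'demo-secret', 'hello_world',
         '{"some_text":"Hello my World"}']
signature = hashlib.md5('/'.join(parts).encode('utf-8')).hexdigest()
print(signature)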
def subscribe(self, args):
    """
    #**
    #* Subscribe
    #*
    #* This is BLOCKING.
    #* Listen for a message on a channel.
    #*
    #* @param array args with channel and callback.
    #* @return false on fail, array on success.
    #**

    ## Subscribe Example
    def receive(message) :
        print(message)
        return True

    pubnub.subscribe({
        'channel' : 'hello_world',
        'callback' : receive
    })

    """
    if 'channel' not in args:
        raise Exception('Missing Channel.')
    if 'callback' not in args:
        raise Exception('Missing Callback.')
    channel = str(args['channel'])
    callback = args['callback']
    subscribe_key = args.get('subscribe_key') or self.subscribe_key
    while True:
        timetoken = ('timetoken' in args and args['timetoken']) or 0
        try:
            response = self._request(
                self._encode(['subscribe', subscribe_key, channel, '0', str(timetoken)])
                + ['?uuid=' + self.uuid],
                encode=False)
            messages = response[0]
            args['timetoken'] = response[1]
            if not len(messages):
                continue
            for message in messages:
                if not callback(message):
                    return
        except Exception:
            time.sleep(1)
-3,626,650,639,731,726,000
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**

## Subscribe Example
def receive(message) :
    print(message)
    return True

pubnub.subscribe({
    'channel' : 'hello_world',
    'callback' : receive
})
python/3.2/Pubnub.py
subscribe
goodybag/pubnub-api
python
def subscribe(self, args):
    if 'channel' not in args:
        raise Exception('Missing Channel.')
    if 'callback' not in args:
        raise Exception('Missing Callback.')
    channel = str(args['channel'])
    callback = args['callback']
    subscribe_key = args.get('subscribe_key') or self.subscribe_key
    while True:
        timetoken = ('timetoken' in args and args['timetoken']) or 0
        try:
            response = self._request(
                self._encode(['subscribe', subscribe_key, channel, '0', str(timetoken)])
                + ['?uuid=' + self.uuid],
                encode=False)
            messages = response[0]
            args['timetoken'] = response[1]
            if not len(messages):
                continue
            for message in messages:
                if not callback(message):
                    return
        except Exception:
            time.sleep(1)
def presence(self, args):
    """
    #**
    #* presence
    #*
    #* This is BLOCKING.
    #* Listen for presence events on a channel.
    #*
    #* @param array args with channel and callback.
    #* @return false on fail, array on success.
    #**

    ## Presence Example
    def pres_event(message) :
        print(message)
        return True

    pubnub.presence({
        'channel' : 'hello_world',
        'callback' : pres_event
    })
    """
    if 'channel' not in args:
        raise Exception('Missing Channel.')
    if 'callback' not in args:
        raise Exception('Missing Callback.')
    channel = str(args['channel'])
    callback = args['callback']
    subscribe_key = args.get('subscribe_key') or self.subscribe_key
    return self.subscribe({'channel': channel + '-pnpres',
                           'subscribe_key': subscribe_key,
                           'callback': callback})
3,188,163,242,192,742,400
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**

## Presence Example
def pres_event(message) :
    print(message)
    return True

pubnub.presence({
    'channel' : 'hello_world',
    'callback' : pres_event
})
python/3.2/Pubnub.py
presence
goodybag/pubnub-api
python
def presence(self, args):
    if 'channel' not in args:
        raise Exception('Missing Channel.')
    if 'callback' not in args:
        raise Exception('Missing Callback.')
    channel = str(args['channel'])
    callback = args['callback']
    subscribe_key = args.get('subscribe_key') or self.subscribe_key
    return self.subscribe({'channel': channel + '-pnpres',
                           'subscribe_key': subscribe_key,
                           'callback': callback})
def here_now(self, args):
    """
    #**
    #* Here Now
    #*
    #* Load current occupancy from a channel.
    #*
    #* @param array args with 'channel'.
    #* @return mixed false on fail, array on success.
    #*

    ## Presence Example
    here_now = pubnub.here_now({
        'channel' : 'hello_world',
    })
    print(here_now['occupancy'])
    print(here_now['uuids'])

    """
    channel = str(args['channel'])
    if not channel:
        raise Exception('Missing Channel')
    return self._request(['v2', 'presence', 'sub_key',
                          self.subscribe_key, 'channel', channel])
4,767,466,575,551,364,000
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*

## Presence Example
here_now = pubnub.here_now({
    'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
python/3.2/Pubnub.py
here_now
goodybag/pubnub-api
python
def here_now(self, args):
    channel = str(args['channel'])
    if not channel:
        raise Exception('Missing Channel')
    return self._request(['v2', 'presence', 'sub_key',
                          self.subscribe_key, 'channel', channel])
def history(self, args):
    """
    #**
    #* History
    #*
    #* Load history from a channel.
    #*
    #* @param array args with 'channel' and 'limit'.
    #* @return mixed false on fail, array on success.
    #*

    ## History Example
    history = pubnub.history({
        'channel' : 'hello_world',
        'limit' : 1
    })
    print(history)

    """
    # dict.has_key was removed in Python 3; use the in operator instead
    limit = ('limit' in args and int(args['limit'])) or 10
    channel = str(args['channel'])
    if not channel:
        raise Exception('Missing Channel')
    return self._request(['history', self.subscribe_key, channel, '0', str(limit)])
-5,782,023,336,559,540,000
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*

## History Example
history = pubnub.history({
    'channel' : 'hello_world',
    'limit' : 1
})
print(history)
python/3.2/Pubnub.py
history
goodybag/pubnub-api
python
def history(self, args):
    # dict.has_key was removed in Python 3; use the in operator instead
    limit = ('limit' in args and int(args['limit'])) or 10
    channel = str(args['channel'])
    if not channel:
        raise Exception('Missing Channel')
    return self._request(['history', self.subscribe_key, channel, '0', str(limit)])
def time(self):
    """
    #**
    #* Time
    #*
    #* Timestamp from PubNub Cloud.
    #*
    #* @return int timestamp.
    #*

    ## PubNub Server Time Example
    timestamp = pubnub.time()
    print(timestamp)

    """
    return self._request(['time', '0'])[0]
8,419,300,377,443,495,000
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*

## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
python/3.2/Pubnub.py
time
goodybag/pubnub-api
python
def time(self):
    return self._request(['time', '0'])[0]
def award_id_values(data, obj):
    ' Get values from the awardID level of the xml '
    value_map = {'modNumber': 'award_modification_amendme',
                 'transactionNumber': 'transaction_number',
                 'PIID': 'piid', 'agencyID': 'agency_id'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['awardContractID'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'agencyID': 'referenced_idv_agency_iden',
                 'modNumber': 'referenced_idv_modificatio',
                 'PIID': 'parent_award_id'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['referencedIDVID'][key])
        except (KeyError, TypeError):
            obj[value] = None
    try:
        obj['referenced_idv_agency_desc'] = extract_text(data['referencedIDVID']['agencyID']['@name'])
    except (KeyError, TypeError):
        obj['referenced_idv_agency_desc'] = None
    return obj
1,619,012,655,650,988,300
Get values from the awardID level of the xml
dataactcore/scripts/pull_fpds_data.py
award_id_values
RonSherfey/data-act-broker-backend
python
def award_id_values(data, obj):
    value_map = {'modNumber': 'award_modification_amendme',
                 'transactionNumber': 'transaction_number',
                 'PIID': 'piid', 'agencyID': 'agency_id'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['awardContractID'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'agencyID': 'referenced_idv_agency_iden',
                 'modNumber': 'referenced_idv_modificatio',
                 'PIID': 'parent_award_id'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['referencedIDVID'][key])
        except (KeyError, TypeError):
            obj[value] = None
    try:
        obj['referenced_idv_agency_desc'] = extract_text(data['referencedIDVID']['agencyID']['@name'])
    except (KeyError, TypeError):
        obj['referenced_idv_agency_desc'] = None
    return obj
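Every helper in this pull_fpds_data module leans on an extract_text function that is referenced but never defined in this dump. A plausible minimal sketch, assuming xmltodict-style parsing where an element is either a plain string or a dict carrying its text under '#text'; the real helper may differ:

def extract_text(data):
    # xmltodict renders <tag attr="...">text</tag> as {'@attr': ..., '#text': 'text'}
    # and a bare <tag>text</tag> as the string itself.
    if isinstance(data, dict):
        data = data.get('#text')
    return str(data) if data is not None else None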
def contract_id_values(data, obj):
    ' Get values from the contractID level of the xml '
    value_map = {'modNumber': 'award_modification_amendme',
                 'PIID': 'piid', 'agencyID': 'agency_id'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['IDVID'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'agencyID': 'referenced_idv_agency_iden',
                 'modNumber': 'referenced_idv_modificatio',
                 'PIID': 'parent_award_id'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['referencedIDVID'][key])
        except (KeyError, TypeError):
            obj[value] = None
    try:
        obj['referenced_idv_agency_desc'] = extract_text(data['referencedIDVID']['agencyID']['@name'])
    except (KeyError, TypeError):
        obj['referenced_idv_agency_desc'] = None
    return obj
-7,614,765,690,476,562,000
Get values from the contractID level of the xml
dataactcore/scripts/pull_fpds_data.py
contract_id_values
RonSherfey/data-act-broker-backend
python
def contract_id_values(data, obj):
    value_map = {'modNumber': 'award_modification_amendme',
                 'PIID': 'piid', 'agencyID': 'agency_id'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['IDVID'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'agencyID': 'referenced_idv_agency_iden',
                 'modNumber': 'referenced_idv_modificatio',
                 'PIID': 'parent_award_id'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['referencedIDVID'][key])
        except (KeyError, TypeError):
            obj[value] = None
    try:
        obj['referenced_idv_agency_desc'] = extract_text(data['referencedIDVID']['agencyID']['@name'])
    except (KeyError, TypeError):
        obj['referenced_idv_agency_desc'] = None
    return obj
def competition_values(data, obj):
    ' Get values from the competition level of the xml '
    value_map = {'A76Action': 'a_76_fair_act_action',
                 'commercialItemAcquisitionProcedures': 'commercial_item_acquisitio',
                 'commercialItemTestProgram': 'commercial_item_test_progr',
                 'evaluatedPreference': 'evaluated_preference',
                 'extentCompeted': 'extent_competed',
                 'fedBizOpps': 'fed_biz_opps',
                 'localAreaSetAside': 'local_area_set_aside',
                 'numberOfOffersReceived': 'number_of_offers_received',
                 'priceEvaluationPercentDifference': 'price_evaluation_adjustmen',
                 'reasonNotCompeted': 'other_than_full_and_open_c',
                 'research': 'research',
                 'smallBusinessCompetitivenessDemonstrationProgram': 'small_business_competitive',
                 'solicitationProcedures': 'solicitation_procedures',
                 'statutoryExceptionToFairOpportunity': 'fair_opportunity_limited_s',
                 'typeOfSetAside': 'type_set_aside'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'A76Action': 'a_76_fair_act_action_desc',
                 'commercialItemAcquisitionProcedures': 'commercial_item_acqui_desc',
                 'commercialItemTestProgram': 'commercial_item_test_desc',
                 'evaluatedPreference': 'evaluated_preference_desc',
                 'extentCompeted': 'extent_compete_description',
                 'fedBizOpps': 'fed_biz_opps_description',
                 'localAreaSetAside': 'local_area_set_aside_desc',
                 'reasonNotCompeted': 'other_than_full_and_o_desc',
                 'research': 'research_description',
                 'solicitationProcedures': 'solicitation_procedur_desc',
                 'statutoryExceptionToFairOpportunity': 'fair_opportunity_limi_desc',
                 'typeOfSetAside': 'type_set_aside_description'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
-3,159,530,546,042,139,600
Get values from the competition level of the xml
dataactcore/scripts/pull_fpds_data.py
competition_values
RonSherfey/data-act-broker-backend
python
def competition_values(data, obj):
    value_map = {'A76Action': 'a_76_fair_act_action',
                 'commercialItemAcquisitionProcedures': 'commercial_item_acquisitio',
                 'commercialItemTestProgram': 'commercial_item_test_progr',
                 'evaluatedPreference': 'evaluated_preference',
                 'extentCompeted': 'extent_competed',
                 'fedBizOpps': 'fed_biz_opps',
                 'localAreaSetAside': 'local_area_set_aside',
                 'numberOfOffersReceived': 'number_of_offers_received',
                 'priceEvaluationPercentDifference': 'price_evaluation_adjustmen',
                 'reasonNotCompeted': 'other_than_full_and_open_c',
                 'research': 'research',
                 'smallBusinessCompetitivenessDemonstrationProgram': 'small_business_competitive',
                 'solicitationProcedures': 'solicitation_procedures',
                 'statutoryExceptionToFairOpportunity': 'fair_opportunity_limited_s',
                 'typeOfSetAside': 'type_set_aside'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'A76Action': 'a_76_fair_act_action_desc',
                 'commercialItemAcquisitionProcedures': 'commercial_item_acqui_desc',
                 'commercialItemTestProgram': 'commercial_item_test_desc',
                 'evaluatedPreference': 'evaluated_preference_desc',
                 'extentCompeted': 'extent_compete_description',
                 'fedBizOpps': 'fed_biz_opps_description',
                 'localAreaSetAside': 'local_area_set_aside_desc',
                 'reasonNotCompeted': 'other_than_full_and_o_desc',
                 'research': 'research_description',
                 'solicitationProcedures': 'solicitation_procedur_desc',
                 'statutoryExceptionToFairOpportunity': 'fair_opportunity_limi_desc',
                 'typeOfSetAside': 'type_set_aside_description'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
def contract_data_values(data, obj, atom_type):
    ' Get values from the contractData level of the xml '
    value_map = {'consolidatedContract': 'consolidated_contract',
                 'contingencyHumanitarianPeacekeepingOperation': 'contingency_humanitarian_o',
                 'contractFinancing': 'contract_financing',
                 'costAccountingStandardsClause': 'cost_accounting_standards',
                 'costOrPricingData': 'cost_or_pricing_data',
                 'descriptionOfContractRequirement': 'award_description',
                 'GFE-GFP': 'government_furnished_prope',
                 'inherentlyGovernmentalFunction': 'inherently_government_func',
                 'majorProgramCode': 'major_program',
                 'multiYearContract': 'multi_year_contract',
                 'nationalInterestActionCode': 'national_interest_action',
                 'numberOfActions': 'number_of_actions',
                 'performanceBasedServiceContract': 'performance_based_service',
                 'programAcronym': 'program_acronym',
                 'purchaseCardAsPaymentMethod': 'purchase_card_as_payment_m',
                 'reasonForModification': 'action_type',
                 'referencedIDVMultipleOrSingle': 'referenced_mult_or_single',
                 'referencedIDVType': 'referenced_idv_type',
                 'seaTransportation': 'sea_transportation',
                 'solicitationID': 'solicitation_identifier',
                 'typeOfContractPricing': 'type_of_contract_pricing',
                 'typeOfIDC': 'type_of_idc',
                 'undefinitizedAction': 'undefinitized_action'}
    if atom_type == 'award':
        value_map['contractActionType'] = 'contract_award_type'
    else:
        value_map['contractActionType'] = 'idv_type'
        value_map['multipleOrSingleAwardIDC'] = 'multiple_or_single_award_i'
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'consolidatedContract': 'consolidated_contract_desc',
                 'contingencyHumanitarianPeacekeepingOperation': 'contingency_humanitar_desc',
                 'contractFinancing': 'contract_financing_descrip',
                 'costAccountingStandardsClause': 'cost_accounting_stand_desc',
                 'costOrPricingData': 'cost_or_pricing_data_desc',
                 'GFE-GFP': 'government_furnished_desc',
                 'inherentlyGovernmentalFunction': 'inherently_government_desc',
                 'multiYearContract': 'multi_year_contract_desc',
                 'nationalInterestActionCode': 'national_interest_desc',
                 'performanceBasedServiceContract': 'performance_based_se_desc',
                 'purchaseCardAsPaymentMethod': 'purchase_card_as_paym_desc',
                 'reasonForModification': 'action_type_description',
                 'referencedIDVMultipleOrSingle': 'referenced_mult_or_si_desc',
                 'referencedIDVType': 'referenced_idv_type_desc',
                 'seaTransportation': 'sea_transportation_desc',
                 'typeOfContractPricing': 'type_of_contract_pric_desc',
                 'typeOfIDC': 'type_of_idc_description',
                 'undefinitizedAction': 'undefinitized_action_desc'}
    if atom_type == 'award':
        value_map['contractActionType'] = 'contract_award_type_desc'
    else:
        value_map['contractActionType'] = 'idv_type_description'
        value_map['multipleOrSingleAwardIDC'] = 'multiple_or_single_aw_desc'
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
-520,491,788,961,469,760
Get values from the contractData level of the xml
dataactcore/scripts/pull_fpds_data.py
contract_data_values
RonSherfey/data-act-broker-backend
python
def contract_data_values(data, obj, atom_type):
    value_map = {'consolidatedContract': 'consolidated_contract',
                 'contingencyHumanitarianPeacekeepingOperation': 'contingency_humanitarian_o',
                 'contractFinancing': 'contract_financing',
                 'costAccountingStandardsClause': 'cost_accounting_standards',
                 'costOrPricingData': 'cost_or_pricing_data',
                 'descriptionOfContractRequirement': 'award_description',
                 'GFE-GFP': 'government_furnished_prope',
                 'inherentlyGovernmentalFunction': 'inherently_government_func',
                 'majorProgramCode': 'major_program',
                 'multiYearContract': 'multi_year_contract',
                 'nationalInterestActionCode': 'national_interest_action',
                 'numberOfActions': 'number_of_actions',
                 'performanceBasedServiceContract': 'performance_based_service',
                 'programAcronym': 'program_acronym',
                 'purchaseCardAsPaymentMethod': 'purchase_card_as_payment_m',
                 'reasonForModification': 'action_type',
                 'referencedIDVMultipleOrSingle': 'referenced_mult_or_single',
                 'referencedIDVType': 'referenced_idv_type',
                 'seaTransportation': 'sea_transportation',
                 'solicitationID': 'solicitation_identifier',
                 'typeOfContractPricing': 'type_of_contract_pricing',
                 'typeOfIDC': 'type_of_idc',
                 'undefinitizedAction': 'undefinitized_action'}
    if atom_type == 'award':
        value_map['contractActionType'] = 'contract_award_type'
    else:
        value_map['contractActionType'] = 'idv_type'
        value_map['multipleOrSingleAwardIDC'] = 'multiple_or_single_award_i'
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'consolidatedContract': 'consolidated_contract_desc',
                 'contingencyHumanitarianPeacekeepingOperation': 'contingency_humanitar_desc',
                 'contractFinancing': 'contract_financing_descrip',
                 'costAccountingStandardsClause': 'cost_accounting_stand_desc',
                 'costOrPricingData': 'cost_or_pricing_data_desc',
                 'GFE-GFP': 'government_furnished_desc',
                 'inherentlyGovernmentalFunction': 'inherently_government_desc',
                 'multiYearContract': 'multi_year_contract_desc',
                 'nationalInterestActionCode': 'national_interest_desc',
                 'performanceBasedServiceContract': 'performance_based_se_desc',
                 'purchaseCardAsPaymentMethod': 'purchase_card_as_paym_desc',
                 'reasonForModification': 'action_type_description',
                 'referencedIDVMultipleOrSingle': 'referenced_mult_or_si_desc',
                 'referencedIDVType': 'referenced_idv_type_desc',
                 'seaTransportation': 'sea_transportation_desc',
                 'typeOfContractPricing': 'type_of_contract_pric_desc',
                 'typeOfIDC': 'type_of_idc_description',
                 'undefinitizedAction': 'undefinitized_action_desc'}
    if atom_type == 'award':
        value_map['contractActionType'] = 'contract_award_type_desc'
    else:
        value_map['contractActionType'] = 'idv_type_description'
        value_map['multipleOrSingleAwardIDC'] = 'multiple_or_single_aw_desc'
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
def dollar_values_values(data, obj):
    ' Get values from the dollarValues level of the xml '
    value_map = {'baseAndAllOptionsValue': 'base_and_all_options_value',
                 'baseAndExercisedOptionsValue': 'base_exercised_options_val',
                 'obligatedAmount': 'federal_action_obligation'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
-25,077,798,399,392,490
Get values from the dollarValues level of the xml
dataactcore/scripts/pull_fpds_data.py
dollar_values_values
RonSherfey/data-act-broker-backend
python
def dollar_values_values(data, obj):
    value_map = {'baseAndAllOptionsValue': 'base_and_all_options_value',
                 'baseAndExercisedOptionsValue': 'base_exercised_options_val',
                 'obligatedAmount': 'federal_action_obligation'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
def total_dollar_values_values(data, obj):
    ' Get values from the totalDollarValues level of the xml '
    value_map = {'totalBaseAndAllOptionsValue': 'potential_total_value_awar',
                 'totalBaseAndExercisedOptionsValue': 'current_total_value_award',
                 'totalObligatedAmount': 'total_obligated_amount'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
-6,938,656,846,285,551,000
Get values from the totalDollarValues level of the xml
dataactcore/scripts/pull_fpds_data.py
total_dollar_values_values
RonSherfey/data-act-broker-backend
python
def total_dollar_values_values(data, obj):
    value_map = {'totalBaseAndAllOptionsValue': 'potential_total_value_awar',
                 'totalBaseAndExercisedOptionsValue': 'current_total_value_award',
                 'totalObligatedAmount': 'total_obligated_amount'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
def legislative_mandates_values(data, obj):
    ' Get values from the legislativeMandates level of the xml '
    value_map = {'ClingerCohenAct': 'clinger_cohen_act_planning',
                 'constructionWageRateRequirements': 'construction_wage_rate_req',
                 'interagencyContractingAuthority': 'interagency_contracting_au',
                 'otherStatutoryAuthority': 'other_statutory_authority',
                 'laborStandards': 'labor_standards',
                 'materialsSuppliesArticlesEquipment': 'materials_supplies_article'}
    additional_reporting = None
    try:
        ar_dicts = data['listOfAdditionalReportingValues']['additionalReportingValue']
    except (KeyError, TypeError):
        ar_dicts = None
    if ar_dicts:
        if isinstance(ar_dicts, dict):
            ar_dicts = [ar_dicts]
        ars = []
        for ar_dict in ar_dicts:
            ar_value = extract_text(ar_dict)
            try:
                ar_desc = extract_text(ar_dict['@description'])
            except (KeyError, TypeError):
                ar_desc = None
            ar_str = ar_value if ar_desc is None else '{}: {}'.format(ar_value, ar_desc)
            ars.append(ar_str)
        additional_reporting = '; '.join(ars)
    obj['additional_reporting'] = additional_reporting
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'ClingerCohenAct': 'clinger_cohen_act_pla_desc',
                 'constructionWageRateRequirements': 'construction_wage_rat_desc',
                 'interagencyContractingAuthority': 'interagency_contract_desc',
                 'laborStandards': 'labor_standards_descrip',
                 'materialsSuppliesArticlesEquipment': 'materials_supplies_descrip'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
5,262,888,862,239,357,000
Get values from the legislativeMandates level of the xml
dataactcore/scripts/pull_fpds_data.py
legislative_mandates_values
RonSherfey/data-act-broker-backend
python
def legislative_mandates_values(data, obj):
    value_map = {'ClingerCohenAct': 'clinger_cohen_act_planning',
                 'constructionWageRateRequirements': 'construction_wage_rate_req',
                 'interagencyContractingAuthority': 'interagency_contracting_au',
                 'otherStatutoryAuthority': 'other_statutory_authority',
                 'laborStandards': 'labor_standards',
                 'materialsSuppliesArticlesEquipment': 'materials_supplies_article'}
    additional_reporting = None
    try:
        ar_dicts = data['listOfAdditionalReportingValues']['additionalReportingValue']
    except (KeyError, TypeError):
        ar_dicts = None
    if ar_dicts:
        if isinstance(ar_dicts, dict):
            ar_dicts = [ar_dicts]
        ars = []
        for ar_dict in ar_dicts:
            ar_value = extract_text(ar_dict)
            try:
                ar_desc = extract_text(ar_dict['@description'])
            except (KeyError, TypeError):
                ar_desc = None
            ar_str = ar_value if ar_desc is None else '{}: {}'.format(ar_value, ar_desc)
            ars.append(ar_str)
        additional_reporting = '; '.join(ars)
    obj['additional_reporting'] = additional_reporting
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'ClingerCohenAct': 'clinger_cohen_act_pla_desc',
                 'constructionWageRateRequirements': 'construction_wage_rat_desc',
                 'interagencyContractingAuthority': 'interagency_contract_desc',
                 'laborStandards': 'labor_standards_descrip',
                 'materialsSuppliesArticlesEquipment': 'materials_supplies_descrip'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
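The additional_reporting block above normalizes a lone dict into a one-element list, then joins 'value: description' strings with '; '. A tiny worked example with hypothetical values, using plain dict lookups as a stand-in for extract_text:

ar_dicts = [{'#text': 'A', '@description': 'Agency use'}, {'#text': 'B'}]

ars = []
for ar_dict in ar_dicts:
    ar_value = ar_dict.get('#text')
    ar_desc = ar_dict.get('@description')
    ars.append(ar_value if ar_desc is None else '{}: {}'.format(ar_value, ar_desc))

print('; '.join(ars))  # -> A: Agency use; B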
def place_of_performance_values(data, obj):
    ' Get values from the placeOfPerformance level of the xml '
    value_map = {'placeOfPerformanceCongressionalDistrict': 'place_of_performance_congr',
                 'placeOfPerformanceZIPCode': 'place_of_performance_zip4a'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    try:
        obj['place_of_perform_city_name'] = extract_text(data['placeOfPerformanceZIPCode']['@city'])
    except (KeyError, TypeError):
        obj['place_of_perform_city_name'] = None
    try:
        obj['place_of_perform_county_na'] = extract_text(data['placeOfPerformanceZIPCode']['@county'])
    except (KeyError, TypeError):
        obj['place_of_perform_county_na'] = None
    value_map = {'stateCode': 'place_of_performance_state',
                 'countryCode': 'place_of_perform_country_c'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['principalPlaceOfPerformance'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'countryCode': 'place_of_perf_country_desc',
                 'stateCode': 'place_of_perfor_state_desc'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['principalPlaceOfPerformance'][key]['@name'])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
-7,840,436,803,331,249,000
Get values from the placeOfPerformance level of the xml
dataactcore/scripts/pull_fpds_data.py
place_of_performance_values
RonSherfey/data-act-broker-backend
python
def place_of_performance_values(data, obj):
    value_map = {'placeOfPerformanceCongressionalDistrict': 'place_of_performance_congr',
                 'placeOfPerformanceZIPCode': 'place_of_performance_zip4a'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    try:
        obj['place_of_perform_city_name'] = extract_text(data['placeOfPerformanceZIPCode']['@city'])
    except (KeyError, TypeError):
        obj['place_of_perform_city_name'] = None
    try:
        obj['place_of_perform_county_na'] = extract_text(data['placeOfPerformanceZIPCode']['@county'])
    except (KeyError, TypeError):
        obj['place_of_perform_county_na'] = None
    value_map = {'stateCode': 'place_of_performance_state',
                 'countryCode': 'place_of_perform_country_c'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['principalPlaceOfPerformance'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'countryCode': 'place_of_perf_country_desc',
                 'stateCode': 'place_of_perfor_state_desc'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['principalPlaceOfPerformance'][key]['@name'])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
def product_or_service_information_values(data, obj):
    ' Get values from the productOrServiceInformation level of the xml '
    value_map = {'claimantProgramCode': 'dod_claimant_program_code',
                 'contractBundling': 'contract_bundling',
                 'countryOfOrigin': 'country_of_product_or_serv',
                 'informationTechnologyCommercialItemCategory': 'information_technology_com',
                 'manufacturingOrganizationType': 'domestic_or_foreign_entity',
                 'placeOfManufacture': 'place_of_manufacture',
                 'principalNAICSCode': 'naics',
                 'productOrServiceCode': 'product_or_service_code',
                 'recoveredMaterialClauses': 'recovered_materials_sustai',
                 'systemEquipmentCode': 'program_system_or_equipmen',
                 'useOfEPADesignatedProducts': 'epa_designated_product'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'claimantProgramCode': 'dod_claimant_prog_cod_desc',
                 'contractBundling': 'contract_bundling_descrip',
                 'informationTechnologyCommercialItemCategory': 'information_technolog_desc',
                 'manufacturingOrganizationType': 'domestic_or_foreign_e_desc',
                 'placeOfManufacture': 'place_of_manufacture_desc',
                 'principalNAICSCode': 'naics_description',
                 'productOrServiceCode': 'product_or_service_co_desc',
                 'recoveredMaterialClauses': 'recovered_materials_s_desc',
                 'systemEquipmentCode': 'program_system_or_equ_desc',
                 'useOfEPADesignatedProducts': 'epa_designated_produc_desc'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    try:
        obj['country_of_product_or_desc'] = extract_text(data['countryOfOrigin']['@name'])
    except (KeyError, TypeError):
        obj['country_of_product_or_desc'] = None
    return obj
2,910,396,238,477,755,000
Get values from the productOrServiceInformation level of the xml
dataactcore/scripts/pull_fpds_data.py
product_or_service_information_values
RonSherfey/data-act-broker-backend
python
def product_or_service_information_values(data, obj):
    value_map = {'claimantProgramCode': 'dod_claimant_program_code',
                 'contractBundling': 'contract_bundling',
                 'countryOfOrigin': 'country_of_product_or_serv',
                 'informationTechnologyCommercialItemCategory': 'information_technology_com',
                 'manufacturingOrganizationType': 'domestic_or_foreign_entity',
                 'placeOfManufacture': 'place_of_manufacture',
                 'principalNAICSCode': 'naics',
                 'productOrServiceCode': 'product_or_service_code',
                 'recoveredMaterialClauses': 'recovered_materials_sustai',
                 'systemEquipmentCode': 'program_system_or_equipmen',
                 'useOfEPADesignatedProducts': 'epa_designated_product'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'claimantProgramCode': 'dod_claimant_prog_cod_desc',
                 'contractBundling': 'contract_bundling_descrip',
                 'informationTechnologyCommercialItemCategory': 'information_technolog_desc',
                 'manufacturingOrganizationType': 'domestic_or_foreign_e_desc',
                 'placeOfManufacture': 'place_of_manufacture_desc',
                 'principalNAICSCode': 'naics_description',
                 'productOrServiceCode': 'product_or_service_co_desc',
                 'recoveredMaterialClauses': 'recovered_materials_s_desc',
                 'systemEquipmentCode': 'program_system_or_equ_desc',
                 'useOfEPADesignatedProducts': 'epa_designated_produc_desc'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    try:
        obj['country_of_product_or_desc'] = extract_text(data['countryOfOrigin']['@name'])
    except (KeyError, TypeError):
        obj['country_of_product_or_desc'] = None
    return obj
def purchaser_information_values(data, obj):
    ' Get values from the purchaserInformation level of the xml '
    value_map = {'contractingOfficeAgencyID': 'awarding_sub_tier_agency_c',
                 'contractingOfficeID': 'awarding_office_code',
                 'foreignFunding': 'foreign_funding',
                 'fundingRequestingAgencyID': 'funding_sub_tier_agency_co',
                 'fundingRequestingOfficeID': 'funding_office_code'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'foreignFunding': 'foreign_funding_desc'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'contractingOfficeAgencyID': 'awarding_sub_tier_agency_n',
                 'contractingOfficeID': 'awarding_office_name',
                 'fundingRequestingAgencyID': 'funding_sub_tier_agency_na',
                 'fundingRequestingOfficeID': 'funding_office_name'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@name'])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
-4,637,374,741,167,060,000
Get values from the purchaserInformation level of the xml
dataactcore/scripts/pull_fpds_data.py
purchaser_information_values
RonSherfey/data-act-broker-backend
python
def purchaser_information_values(data, obj):
    value_map = {'contractingOfficeAgencyID': 'awarding_sub_tier_agency_c',
                 'contractingOfficeID': 'awarding_office_code',
                 'foreignFunding': 'foreign_funding',
                 'fundingRequestingAgencyID': 'funding_sub_tier_agency_co',
                 'fundingRequestingOfficeID': 'funding_office_code'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'foreignFunding': 'foreign_funding_desc'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'contractingOfficeAgencyID': 'awarding_sub_tier_agency_n',
                 'contractingOfficeID': 'awarding_office_name',
                 'fundingRequestingAgencyID': 'funding_sub_tier_agency_na',
                 'fundingRequestingOfficeID': 'funding_office_name'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@name'])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
def relevant_contract_dates_values(data, obj):
    ' Get values from the relevantContractDates level of the xml '
    value_map = {'currentCompletionDate': 'period_of_performance_curr',
                 'effectiveDate': 'period_of_performance_star',
                 'lastDateToOrder': 'ordering_period_end_date',
                 'signedDate': 'action_date',
                 'ultimateCompletionDate': 'period_of_perf_potential_e'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
-1,361,555,611,856,029,400
Get values from the relevantContractDates level of the xml
dataactcore/scripts/pull_fpds_data.py
relevant_contract_dates_values
RonSherfey/data-act-broker-backend
python
def relevant_contract_dates_values(data, obj):
    value_map = {'currentCompletionDate': 'period_of_performance_curr',
                 'effectiveDate': 'period_of_performance_star',
                 'lastDateToOrder': 'ordering_period_end_date',
                 'signedDate': 'action_date',
                 'ultimateCompletionDate': 'period_of_perf_potential_e'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
def vendor_values(data, obj):
    ' Get values from the vendor level of the xml '
    value_map = {'CCRException': 'sam_exception',
                 'contractingOfficerBusinessSizeDetermination': 'contracting_officers_deter'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'CCRException': 'sam_exception_description',
                 'contractingOfficerBusinessSizeDetermination': 'contracting_officers_desc'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'vendorAlternateName': 'vendor_alternate_name',
                 'vendorDoingAsBusinessName': 'vendor_doing_as_business_n',
                 'vendorEnabled': 'vendor_enabled',
                 'vendorLegalOrganizationName': 'vendor_legal_org_name',
                 'vendorName': 'awardee_or_recipient_legal'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorHeader'][key])
        except (KeyError, TypeError):
            obj[value] = None
    try:
        data['vendorSiteDetails']
    except KeyError:
        data['vendorSiteDetails'] = {}
    obj = vendor_site_details_values(data['vendorSiteDetails'], obj)
    return obj
3,875,442,262,193,164,000
Get values from the vendor level of the xml
dataactcore/scripts/pull_fpds_data.py
vendor_values
RonSherfey/data-act-broker-backend
python
def vendor_values(data, obj):
    value_map = {'CCRException': 'sam_exception',
                 'contractingOfficerBusinessSizeDetermination': 'contracting_officers_deter'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'CCRException': 'sam_exception_description',
                 'contractingOfficerBusinessSizeDetermination': 'contracting_officers_desc'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'vendorAlternateName': 'vendor_alternate_name',
                 'vendorDoingAsBusinessName': 'vendor_doing_as_business_n',
                 'vendorEnabled': 'vendor_enabled',
                 'vendorLegalOrganizationName': 'vendor_legal_org_name',
                 'vendorName': 'awardee_or_recipient_legal'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorHeader'][key])
        except (KeyError, TypeError):
            obj[value] = None
    try:
        data['vendorSiteDetails']
    except KeyError:
        data['vendorSiteDetails'] = {}
    obj = vendor_site_details_values(data['vendorSiteDetails'], obj)
    return obj
def vendor_site_details_values(data, obj):
    ' Get values from the vendorSiteDetails level of the xml (sub-level of vendor) '
    value_map = {'divisionName': 'division_name',
                 'divisionNumberOrOfficeCode': 'division_number_or_office',
                 'vendorAlternateSiteCode': 'vendor_alternate_site_code',
                 'vendorSiteCode': 'vendor_site_code'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'is1862LandGrantCollege': 'c1862_land_grant_college',
                 'is1890LandGrantCollege': 'c1890_land_grant_college',
                 'is1994LandGrantCollege': 'c1994_land_grant_college',
                 'isAlaskanNativeServicingInstitution': 'alaskan_native_servicing_i',
                 'isHistoricallyBlackCollegeOrUniversity': 'historically_black_college',
                 'isMinorityInstitution': 'minority_institution',
                 'isNativeHawaiianServicingInstitution': 'native_hawaiian_servicing',
                 'isPrivateUniversityOrCollege': 'private_university_or_coll',
                 'isSchoolOfForestry': 'school_of_forestry',
                 'isStateControlledInstitutionofHigherLearning': 'state_controlled_instituti',
                 'isTribalCollege': 'tribal_college',
                 'isVeterinaryCollege': 'veterinary_college'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['typeOfEducationalEntity'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'isAirportAuthority': 'airport_authority',
                 'isCouncilOfGovernments': 'council_of_governments',
                 'isHousingAuthoritiesPublicOrTribal': 'housing_authorities_public',
                 'isInterstateEntity': 'interstate_entity',
                 'isPlanningCommission': 'planning_commission',
                 'isPortAuthority': 'port_authority',
                 'isTransitAuthority': 'transit_authority'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['typeOfGovernmentEntity'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'isCommunityDevelopedCorporationOwnedFirm': 'community_developed_corpor',
                 'isForeignGovernment': 'foreign_government',
                 'isLaborSurplusAreaFirm': 'labor_surplus_area_firm',
                 'isStateGovernment': 'us_state_government',
                 'isTribalGovernment': 'us_tribal_government'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorBusinessTypes'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'isCorporateEntityNotTaxExempt': 'corporate_entity_not_tax_e',
                 'isCorporateEntityTaxExempt': 'corporate_entity_tax_exemp',
                 'isInternationalOrganization': 'international_organization',
                 'isPartnershipOrLimitedLiabilityPartnership': 'partnership_or_limited_lia',
                 'isSmallAgriculturalCooperative': 'small_agricultural_coopera',
                 'isSolePropreitorship': 'sole_proprietorship',
                 'isUSGovernmentEntity': 'us_government_entity'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorBusinessTypes']['businessOrOrganizationType'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'isFederalGovernment': 'us_federal_government',
                 'isFederalGovernmentAgency': 'federal_agency',
                 'isFederallyFundedResearchAndDevelopmentCorp': 'federally_funded_research'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorBusinessTypes']['federalGovernment'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'isCityLocalGovernment': 'city_local_government',
                 'isCountyLocalGovernment': 'county_local_government',
                 'isInterMunicipalLocalGovernment': 'inter_municipal_local_gove',
                 'isLocalGovernment': 'us_local_government',
                 'isLocalGovernmentOwned': 'local_government_owned',
                 'isMunicipalityLocalGovernment': 'municipality_local_governm',
                 'isSchoolDistrictLocalGovernment': 'school_district_local_gove',
                 'isTownshipLocalGovernment': 'township_local_government'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorBusinessTypes']['localGovernment'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'isDOTCertifiedDisadvantagedBusinessEnterprise': 'dot_certified_disadvantage',
                 'isSBACertified8AJointVenture': 'sba_certified_8_a_joint_ve',
                 'isSBACertified8AProgramParticipant': 'c8a_program_participant',
                 'isSBACertifiedHUBZone': 'historically_underutilized',
                 'isSBACertifiedSmallDisadvantagedBusiness': 'small_disadvantaged_busine',
                 'isSelfCertifiedSmallDisadvantagedBusiness': 'self_certified_small_disad'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorCertifications'][key])
        except (KeyError, TypeError):
            obj[value] = None
    try:
        obj['cage_code'] = extract_text(data['entityIdentifiers']['cageCode'])
    except (KeyError, TypeError):
        obj['cage_code'] = None
    value_map = {'DUNSNumber': 'awardee_or_recipient_uniqu',
                 'globalParentDUNSName': 'ultimate_parent_legal_enti',
                 'globalParentDUNSNumber': 'ultimate_parent_unique_ide'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['entityIdentifiers']['vendorDUNSInformation'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'UEI': 'awardee_or_recipient_uei',
                 'ultimateParentUEI': 'ultimate_parent_uei'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['entityIdentifiers']['vendorUEIInformation'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'isCommunityDevelopmentCorporation': 'community_development_corp',
                 'isDomesticShelter': 'domestic_shelter',
                 'isEducationalInstitution': 'educational_institution',
                 'isFoundation': 'foundation',
                 'isHispanicServicingInstitution': 'hispanic_servicing_institu',
                 'isHospital': 'hospital_flag',
                 'isManufacturerOfGoods': 'manufacturer_of_goods',
                 'isVeterinaryHospital': 'veterinary_hospital'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorLineOfBusiness'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'city': 'legal_entity_city_name',
                 'congressionalDistrictCode': 'legal_entity_congressional',
                 'countryCode': 'legal_entity_country_code',
                 'faxNo': 'vendor_fax_number',
                 'phoneNo': 'vendor_phone_number',
                 'streetAddress': 'legal_entity_address_line1',
                 'streetAddress2': 'legal_entity_address_line2',
                 'streetAddress3': 'legal_entity_address_line3',
                 'vendorLocationDisabledFlag': 'vendor_location_disabled_f',
                 'ZIPCode': 'legal_entity_zip4'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorLocation'][key])
        except (KeyError, TypeError):
            obj[value] = None
    key = 'legal_entity_state_code'
    if obj['legal_entity_country_code'] not in country_code_map:
        key = 'legal_entity_state_descrip'
        obj['legal_entity_state_code'] = None
    else:
        try:
            obj['legal_entity_state_descrip'] = extract_text(data['vendorLocation']['state']['@name'])
        except (KeyError, TypeError):
            obj['legal_entity_state_descrip'] = None
    try:
        obj[key] = extract_text(data['vendorLocation']['state'])
    except (KeyError, TypeError):
        obj[key] = None
    try:
        obj['legal_entity_country_name'] = extract_text(data['vendorLocation']['countryCode']['@name'])
    except (KeyError, TypeError):
        obj['legal_entity_country_name'] = None
    value_map = {'isForeignOwnedAndLocated': 'foreign_owned_and_located',
                 'isLimitedLiabilityCorporation': 'limited_liability_corporat',
                 'isShelteredWorkshop': 'the_ability_one_program',
                 'isSubchapterSCorporation': 'subchapter_s_corporation',
                 'organizationalType': 'organizational_type'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorOrganizationFactors'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'isForProfitOrganization': 'for_profit_organization',
                 'isNonprofitOrganization': 'nonprofit_organization',
                 'isOtherNotForProfitOrganization': 'other_not_for_profit_organ'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorOrganizationFactors']['profitStructure'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'receivesContracts': 'contracts',
                 'receivesContractsAndGrants': 'receives_contracts_and_gra',
                 'receivesGrants': 'grants'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorRelationshipWithFederalGovernment'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'isAlaskanNativeOwnedCorporationOrFirm': 'alaskan_native_owned_corpo',
                 'isAmericanIndianOwned': 'american_indian_owned_busi',
                 'isEconomicallyDisadvantagedWomenOwnedSmallBusiness': 'economically_disadvantaged',
                 'isIndianTribe': 'indian_tribe_federally_rec',
                 'isJointVentureEconomicallyDisadvantagedWomenOwnedSmallBusiness': 'joint_venture_economically',
                 'isJointVentureWomenOwnedSmallBusiness': 'joint_venture_women_owned',
                 'isNativeHawaiianOwnedOrganizationOrFirm': 'native_hawaiian_owned_busi',
                 'isServiceRelatedDisabledVeteranOwnedBusiness': 'service_disabled_veteran_o',
                 'isTriballyOwnedFirm': 'tribally_owned_business',
                 'isVerySmallBusiness': 'emerging_small_business',
                 'isVeteranOwned': 'veteran_owned_business',
                 'isWomenOwned': 'woman_owned_business',
                 'isWomenOwnedSmallBusiness': 'women_owned_small_business'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorSocioEconomicIndicators'][key])
        except (KeyError, TypeError):
            obj[value] = None
    value_map = {'isAsianPacificAmericanOwnedBusiness': 'asian_pacific_american_own',
                 'isBlackAmericanOwnedBusiness': 'black_american_owned_busin',
                 'isHispanicAmericanOwnedBusiness': 'hispanic_american_owned_bu',
                 'isMinorityOwned': 'minority_owned_business',
                 'isNativeAmericanOwnedBusiness': 'native_american_owned_busi',
                 'isOtherMinorityOwned': 'other_minority_owned_busin',
                 'isSubContinentAsianAmericanOwnedBusiness': 'subcontinent_asian_asian_i'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorSocioEconomicIndicators']['minorityOwned'][key])
        except (KeyError, TypeError):
            obj[value] = None
    return obj
-6,068,436,029,579,838,000
Get values from the vendorSiteDetails level of the xml (sub-level of vendor)
dataactcore/scripts/pull_fpds_data.py
vendor_site_details_values
RonSherfey/data-act-broker-backend
python
def vendor_site_details_values(data, obj): ' ' value_map = {'divisionName': 'division_name', 'divisionNumberOrOfficeCode': 'division_number_or_office', 'vendorAlternateSiteCode': 'vendor_alternate_site_code', 'vendorSiteCode': 'vendor_site_code'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data[key]) except (KeyError, TypeError): obj[value] = None value_map = {'is1862LandGrantCollege': 'c1862_land_grant_college', 'is1890LandGrantCollege': 'c1890_land_grant_college', 'is1994LandGrantCollege': 'c1994_land_grant_college', 'isAlaskanNativeServicingInstitution': 'alaskan_native_servicing_i', 'isHistoricallyBlackCollegeOrUniversity': 'historically_black_college', 'isMinorityInstitution': 'minority_institution', 'isNativeHawaiianServicingInstitution': 'native_hawaiian_servicing', 'isPrivateUniversityOrCollege': 'private_university_or_coll', 'isSchoolOfForestry': 'school_of_forestry', 'isStateControlledInstitutionofHigherLearning': 'state_controlled_instituti', 'isTribalCollege': 'tribal_college', 'isVeterinaryCollege': 'veterinary_college'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['typeOfEducationalEntity'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'isAirportAuthority': 'airport_authority', 'isCouncilOfGovernments': 'council_of_governments', 'isHousingAuthoritiesPublicOrTribal': 'housing_authorities_public', 'isInterstateEntity': 'interstate_entity', 'isPlanningCommission': 'planning_commission', 'isPortAuthority': 'port_authority', 'isTransitAuthority': 'transit_authority'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['typeOfGovernmentEntity'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'isCommunityDevelopedCorporationOwnedFirm': 'community_developed_corpor', 'isForeignGovernment': 'foreign_government', 'isLaborSurplusAreaFirm': 'labor_surplus_area_firm', 'isStateGovernment': 'us_state_government', 'isTribalGovernment': 'us_tribal_government'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorBusinessTypes'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'isCorporateEntityNotTaxExempt': 'corporate_entity_not_tax_e', 'isCorporateEntityTaxExempt': 'corporate_entity_tax_exemp', 'isInternationalOrganization': 'international_organization', 'isPartnershipOrLimitedLiabilityPartnership': 'partnership_or_limited_lia', 'isSmallAgriculturalCooperative': 'small_agricultural_coopera', 'isSolePropreitorship': 'sole_proprietorship', 'isUSGovernmentEntity': 'us_government_entity'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorBusinessTypes']['businessOrOrganizationType'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'isFederalGovernment': 'us_federal_government', 'isFederalGovernmentAgency': 'federal_agency', 'isFederallyFundedResearchAndDevelopmentCorp': 'federally_funded_research'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorBusinessTypes']['federalGovernment'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'isCityLocalGovernment': 'city_local_government', 'isCountyLocalGovernment': 'county_local_government', 'isInterMunicipalLocalGovernment': 'inter_municipal_local_gove', 'isLocalGovernment': 'us_local_government', 'isLocalGovernmentOwned': 'local_government_owned', 'isMunicipalityLocalGovernment': 'municipality_local_governm', 'isSchoolDistrictLocalGovernment': 'school_district_local_gove', 
'isTownshipLocalGovernment': 'township_local_government'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorBusinessTypes']['localGovernment'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'isDOTCertifiedDisadvantagedBusinessEnterprise': 'dot_certified_disadvantage', 'isSBACertified8AJointVenture': 'sba_certified_8_a_joint_ve', 'isSBACertified8AProgramParticipant': 'c8a_program_participant', 'isSBACertifiedHUBZone': 'historically_underutilized', 'isSBACertifiedSmallDisadvantagedBusiness': 'small_disadvantaged_busine', 'isSelfCertifiedSmallDisadvantagedBusiness': 'self_certified_small_disad'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorCertifications'][key]) except (KeyError, TypeError): obj[value] = None try: obj['cage_code'] = extract_text(data['entityIdentifiers']['cageCode']) except (KeyError, TypeError): obj['cage_code'] = None value_map = {'DUNSNumber': 'awardee_or_recipient_uniqu', 'globalParentDUNSName': 'ultimate_parent_legal_enti', 'globalParentDUNSNumber': 'ultimate_parent_unique_ide'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['entityIdentifiers']['vendorDUNSInformation'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'UEI': 'awardee_or_recipient_uei', 'ultimateParentUEI': 'ultimate_parent_uei'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['entityIdentifiers']['vendorUEIInformation'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'isCommunityDevelopmentCorporation': 'community_development_corp', 'isDomesticShelter': 'domestic_shelter', 'isEducationalInstitution': 'educational_institution', 'isFoundation': 'foundation', 'isHispanicServicingInstitution': 'hispanic_servicing_institu', 'isHospital': 'hospital_flag', 'isManufacturerOfGoods': 'manufacturer_of_goods', 'isVeterinaryHospital': 'veterinary_hospital'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorLineOfBusiness'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'city': 'legal_entity_city_name', 'congressionalDistrictCode': 'legal_entity_congressional', 'countryCode': 'legal_entity_country_code', 'faxNo': 'vendor_fax_number', 'phoneNo': 'vendor_phone_number', 'streetAddress': 'legal_entity_address_line1', 'streetAddress2': 'legal_entity_address_line2', 'streetAddress3': 'legal_entity_address_line3', 'vendorLocationDisabledFlag': 'vendor_location_disabled_f', 'ZIPCode': 'legal_entity_zip4'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorLocation'][key]) except (KeyError, TypeError): obj[value] = None key = 'legal_entity_state_code' if (obj['legal_entity_country_code'] not in country_code_map): key = 'legal_entity_state_descrip' obj['legal_entity_state_code'] = None else: try: obj['legal_entity_state_descrip'] = extract_text(data['vendorLocation']['state']['@name']) except (KeyError, TypeError): obj['legal_entity_state_descrip'] = None try: obj[key] = extract_text(data['vendorLocation']['state']) except (KeyError, TypeError): obj[key] = None try: obj['legal_entity_country_name'] = extract_text(data['vendorLocation']['countryCode']['@name']) except (KeyError, TypeError): obj['legal_entity_country_name'] = None value_map = {'isForeignOwnedAndLocated': 'foreign_owned_and_located', 'isLimitedLiabilityCorporation': 'limited_liability_corporat', 'isShelteredWorkshop': 'the_ability_one_program', 'isSubchapterSCorporation': 'subchapter_s_corporation', 
'organizationalType': 'organizational_type'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorOrganizationFactors'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'isForProfitOrganization': 'for_profit_organization', 'isNonprofitOrganization': 'nonprofit_organization', 'isOtherNotForProfitOrganization': 'other_not_for_profit_organ'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorOrganizationFactors']['profitStructure'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'receivesContracts': 'contracts', 'receivesContractsAndGrants': 'receives_contracts_and_gra', 'receivesGrants': 'grants'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorRelationshipWithFederalGovernment'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'isAlaskanNativeOwnedCorporationOrFirm': 'alaskan_native_owned_corpo', 'isAmericanIndianOwned': 'american_indian_owned_busi', 'isEconomicallyDisadvantagedWomenOwnedSmallBusiness': 'economically_disadvantaged', 'isIndianTribe': 'indian_tribe_federally_rec', 'isJointVentureEconomicallyDisadvantagedWomenOwnedSmallBusiness': 'joint_venture_economically', 'isJointVentureWomenOwnedSmallBusiness': 'joint_venture_women_owned', 'isNativeHawaiianOwnedOrganizationOrFirm': 'native_hawaiian_owned_busi', 'isServiceRelatedDisabledVeteranOwnedBusiness': 'service_disabled_veteran_o', 'isTriballyOwnedFirm': 'tribally_owned_business', 'isVerySmallBusiness': 'emerging_small_business', 'isVeteranOwned': 'veteran_owned_business', 'isWomenOwned': 'woman_owned_business', 'isWomenOwnedSmallBusiness': 'women_owned_small_business'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorSocioEconomicIndicators'][key]) except (KeyError, TypeError): obj[value] = None value_map = {'isAsianPacificAmericanOwnedBusiness': 'asian_pacific_american_own', 'isBlackAmericanOwnedBusiness': 'black_american_owned_busin', 'isHispanicAmericanOwnedBusiness': 'hispanic_american_owned_bu', 'isMinorityOwned': 'minority_owned_business', 'isNativeAmericanOwnedBusiness': 'native_american_owned_busi', 'isOtherMinorityOwned': 'other_minority_owned_busin', 'isSubContinentAsianAmericanOwnedBusiness': 'subcontinent_asian_asian_i'} for (key, value) in value_map.items(): try: obj[value] = extract_text(data['vendorSocioEconomicIndicators']['minorityOwned'][key]) except (KeyError, TypeError): obj[value] = None return obj
def generic_values(data, obj): ' Get values from the genericTags level of the xml ' generic_strings_value_map = {'genericString01': 'solicitation_date'} for (key, value) in generic_strings_value_map.items(): try: obj[value] = extract_text(data['genericStrings'][key]) except (KeyError, TypeError): obj[value] = None return obj
-1,668,385,486,940,818,700
Get values from the genericTags level of the xml
dataactcore/scripts/pull_fpds_data.py
generic_values
RonSherfey/data-act-broker-backend
python
def generic_values(data, obj): ' ' generic_strings_value_map = {'genericString01': 'solicitation_date'} for (key, value) in generic_strings_value_map.items(): try: obj[value] = extract_text(data['genericStrings'][key]) except (KeyError, TypeError): obj[value] = None return obj
def calculate_ppop_fields(obj, sess, county_by_name, county_by_code, state_code_list, country_list): " calculate values that aren't in any feed (or haven't been provided properly) for place of performance " if (obj['place_of_perform_country_c'] in country_code_map): if (obj['place_of_perform_country_c'] != 'USA'): obj['place_of_performance_state'] = country_code_map[obj['place_of_perform_country_c']] if (obj['place_of_performance_state'] in state_code_list): obj['place_of_perfor_state_desc'] = state_code_list[obj['place_of_performance_state']] obj['place_of_perform_country_c'] = 'USA' obj['place_of_perf_country_desc'] = 'UNITED STATES' if (obj['place_of_performance_state'] and (not obj['place_of_perfor_state_desc']) and (obj['place_of_performance_state'] in state_code_list)): obj['place_of_perfor_state_desc'] = state_code_list[obj['place_of_performance_state']] if (obj['place_of_perform_county_na'] and obj['place_of_performance_state']): state = obj['place_of_performance_state'] county_name = obj['place_of_perform_county_na'] if ((state in county_by_name) and (county_name in county_by_name[state])): obj['place_of_perform_county_co'] = county_by_name[state][county_name] if ((not obj['place_of_perform_county_co']) and obj['place_of_performance_zip4a']): obj['place_of_perform_county_co'] = get_county_by_zip(sess, obj['place_of_performance_zip4a']) if ((not obj['place_of_perform_county_na']) and (obj['place_of_performance_state'] in county_by_code) and (obj['place_of_perform_county_co'] in county_by_code[obj['place_of_performance_state']])): obj['place_of_perform_county_na'] = county_by_code[obj['place_of_performance_state']][obj['place_of_perform_county_co']] if (obj['place_of_performance_zip4a'] and is_valid_zip(obj['place_of_performance_zip4a'])): obj['place_of_performance_zip5'] = obj['place_of_performance_zip4a'][:5] if (len(obj['place_of_performance_zip4a']) > 5): obj['place_of_perform_zip_last4'] = obj['place_of_performance_zip4a'][(- 4):] if ((not obj['place_of_perf_country_desc']) and (obj['place_of_perform_country_c'] in country_list)): obj['place_of_perf_country_desc'] = country_list[obj['place_of_perform_country_c']]
-2,007,915,120,336,223,500
calculate values that aren't in any feed (or haven't been provided properly) for place of performance
dataactcore/scripts/pull_fpds_data.py
calculate_ppop_fields
RonSherfey/data-act-broker-backend
python
def calculate_ppop_fields(obj, sess, county_by_name, county_by_code, state_code_list, country_list): " " if (obj['place_of_perform_country_c'] in country_code_map): if (obj['place_of_perform_country_c'] != 'USA'): obj['place_of_performance_state'] = country_code_map[obj['place_of_perform_country_c']] if (obj['place_of_performance_state'] in state_code_list): obj['place_of_perfor_state_desc'] = state_code_list[obj['place_of_performance_state']] obj['place_of_perform_country_c'] = 'USA' obj['place_of_perf_country_desc'] = 'UNITED STATES' if (obj['place_of_performance_state'] and (not obj['place_of_perfor_state_desc']) and (obj['place_of_performance_state'] in state_code_list)): obj['place_of_perfor_state_desc'] = state_code_list[obj['place_of_performance_state']] if (obj['place_of_perform_county_na'] and obj['place_of_performance_state']): state = obj['place_of_performance_state'] county_name = obj['place_of_perform_county_na'] if ((state in county_by_name) and (county_name in county_by_name[state])): obj['place_of_perform_county_co'] = county_by_name[state][county_name] if ((not obj['place_of_perform_county_co']) and obj['place_of_performance_zip4a']): obj['place_of_perform_county_co'] = get_county_by_zip(sess, obj['place_of_performance_zip4a']) if ((not obj['place_of_perform_county_na']) and (obj['place_of_performance_state'] in county_by_code) and (obj['place_of_perform_county_co'] in county_by_code[obj['place_of_performance_state']])): obj['place_of_perform_county_na'] = county_by_code[obj['place_of_performance_state']][obj['place_of_perform_county_co']] if (obj['place_of_performance_zip4a'] and is_valid_zip(obj['place_of_performance_zip4a'])): obj['place_of_performance_zip5'] = obj['place_of_performance_zip4a'][:5] if (len(obj['place_of_performance_zip4a']) > 5): obj['place_of_perform_zip_last4'] = obj['place_of_performance_zip4a'][(- 4):] if ((not obj['place_of_perf_country_desc']) and (obj['place_of_perform_country_c'] in country_list)): obj['place_of_perf_country_desc'] = country_list[obj['place_of_perform_country_c']]
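The ZIP handling in calculate_ppop_fields splits a validated ZIP+4 into a five-digit code and an optional last four. A small self-contained sketch; is_valid_zip is an assumption here (the real validator is imported from the broker's helpers):

import re

def is_valid_zip(zip_code):
    # Assumed check for illustration: exactly 5 or 9 digits.
    return bool(re.match(r'^\d{5}(\d{4})?$', zip_code))

def split_zip(zip4a):
    zip5, last4 = None, None
    if zip4a and is_valid_zip(zip4a):
        zip5 = zip4a[:5]
        if len(zip4a) > 5:
            last4 = zip4a[-4:]
    return zip5, last4

print(split_zip('787011234'))  # ('78701', '1234')
print(split_zip('78701'))      # ('78701', None)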
def calculate_legal_entity_fields(obj, sess, county_by_code, state_code_list, country_list): " calculate values that aren't in any feed (or haven't been provided properly) for legal entity " if (obj['legal_entity_country_code'] in country_code_map): if (obj['legal_entity_country_code'] != 'USA'): obj['legal_entity_state_code'] = country_code_map[obj['legal_entity_country_code']] if (obj['legal_entity_state_code'] in state_code_list): obj['legal_entity_state_descrip'] = state_code_list[obj['legal_entity_state_code']] obj['legal_entity_country_code'] = 'USA' obj['legal_entity_country_name'] = 'UNITED STATES' if (obj['legal_entity_state_code'] and (not obj['legal_entity_state_descrip']) and (obj['legal_entity_state_code'] in state_code_list)): obj['legal_entity_state_descrip'] = state_code_list[obj['legal_entity_state_code']] if (obj['legal_entity_zip4'] and is_valid_zip(obj['legal_entity_zip4'])): obj['legal_entity_county_code'] = get_county_by_zip(sess, obj['legal_entity_zip4']) if (obj['legal_entity_county_code'] and obj['legal_entity_state_code']): county_code = obj['legal_entity_county_code'] state = obj['legal_entity_state_code'] if ((state in county_by_code) and (county_code in county_by_code[state])): obj['legal_entity_county_name'] = county_by_code[state][county_code] obj['legal_entity_zip5'] = obj['legal_entity_zip4'][:5] if (len(obj['legal_entity_zip4']) > 5): obj['legal_entity_zip_last4'] = obj['legal_entity_zip4'][(- 4):] if ((not obj['legal_entity_country_name']) and (obj['legal_entity_country_code'] in country_list)): obj['legal_entity_country_name'] = country_list[obj['legal_entity_country_code']]
7,126,306,193,214,378,000
calculate values that aren't in any feed (or haven't been provided properly) for legal entity
dataactcore/scripts/pull_fpds_data.py
calculate_legal_entity_fields
RonSherfey/data-act-broker-backend
python
def calculate_legal_entity_fields(obj, sess, county_by_code, state_code_list, country_list): " " if (obj['legal_entity_country_code'] in country_code_map): if (obj['legal_entity_country_code'] != 'USA'): obj['legal_entity_state_code'] = country_code_map[obj['legal_entity_country_code']] if (obj['legal_entity_state_code'] in state_code_list): obj['legal_entity_state_descrip'] = state_code_list[obj['legal_entity_state_code']] obj['legal_entity_country_code'] = 'USA' obj['legal_entity_country_name'] = 'UNITED STATES' if (obj['legal_entity_state_code'] and (not obj['legal_entity_state_descrip']) and (obj['legal_entity_state_code'] in state_code_list)): obj['legal_entity_state_descrip'] = state_code_list[obj['legal_entity_state_code']] if (obj['legal_entity_zip4'] and is_valid_zip(obj['legal_entity_zip4'])): obj['legal_entity_county_code'] = get_county_by_zip(sess, obj['legal_entity_zip4']) if (obj['legal_entity_county_code'] and obj['legal_entity_state_code']): county_code = obj['legal_entity_county_code'] state = obj['legal_entity_state_code'] if ((state in county_by_code) and (county_code in county_by_code[state])): obj['legal_entity_county_name'] = county_by_code[state][county_code] obj['legal_entity_zip5'] = obj['legal_entity_zip4'][:5] if (len(obj['legal_entity_zip4']) > 5): obj['legal_entity_zip_last4'] = obj['legal_entity_zip4'][(- 4):] if ((not obj['legal_entity_country_name']) and (obj['legal_entity_country_code'] in country_list)): obj['legal_entity_country_name'] = country_list[obj['legal_entity_country_code']]
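Both the place-of-performance and legal-entity derivations consult two-level lookup tables shaped {state: {key: value}}. A sketch of that lookup with hypothetical table contents:

county_by_code = {'TX': {'453': 'TRAVIS'}}   # state -> county code -> name
county_by_name = {'TX': {'TRAVIS': '453'}}   # state -> county name -> code

def lookup(table, state, key):
    # Mirrors the double membership check used in the two functions above.
    if state in table and key in table[state]:
        return table[state][key]
    return None

print(lookup(county_by_code, 'TX', '453'))     # TRAVIS
print(lookup(county_by_name, 'TX', 'TRAVIS'))  # 453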
def calculate_remaining_fields(obj, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict, atom_type): " Calculate values that aren't in any feed but can be calculated.\n\n Args:\n obj: a dictionary containing the details we need to derive from and to\n sess: the database connection\n sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code\n county_by_name: a dictionary containing all county codes, keyed by state and county name\n county_by_code: a dictionary containing all county names, keyed by state and county code\n state_code_list: a dictionary containing all state names, keyed by state code\n country_list: a dictionary containing all country names, keyed by country code\n exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number\n atom_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'\n\n Returns:\n the object originally passed in with newly-calculated values added\n " obj['awarding_agency_code'] = None obj['awarding_agency_name'] = None obj['funding_agency_code'] = None obj['funding_agency_name'] = None obj['place_of_perform_county_co'] = None obj['legal_entity_county_code'] = None obj['legal_entity_county_name'] = None obj['detached_award_proc_unique'] = None if obj['awarding_sub_tier_agency_c']: try: sub_tier_agency = sub_tier_list[obj['awarding_sub_tier_agency_c']] use_frec = sub_tier_agency.is_frec agency_data = (sub_tier_agency.frec if use_frec else sub_tier_agency.cgac) obj['awarding_agency_code'] = (agency_data.frec_code if use_frec else agency_data.cgac_code) obj['awarding_agency_name'] = agency_data.agency_name except KeyError: logger.info('WARNING: MissingSubtierCGAC: The awarding sub-tier cgac_code: %s does not exist in cgac table. The FPDS-provided awarding sub-tier agency name (if given) for this cgac_code is %s. The award has been loaded with awarding_agency_code 999.', obj['awarding_sub_tier_agency_c'], obj['awarding_sub_tier_agency_n']) obj['awarding_agency_code'] = '999' obj['awarding_agency_name'] = None if obj['funding_sub_tier_agency_co']: try: sub_tier_agency = sub_tier_list[obj['funding_sub_tier_agency_co']] use_frec = sub_tier_agency.is_frec agency_data = (sub_tier_agency.frec if use_frec else sub_tier_agency.cgac) obj['funding_agency_code'] = (agency_data.frec_code if use_frec else agency_data.cgac_code) obj['funding_agency_name'] = agency_data.agency_name except KeyError: logger.info('WARNING: MissingSubtierCGAC: The funding sub-tier cgac_code: %s does not exist in cgac table. The FPDS-provided funding sub-tier agency name (if given) for this cgac_code is %s. 
The award has been loaded with funding_agency_code 999.', obj['funding_sub_tier_agency_co'], obj['funding_sub_tier_agency_na']) obj['funding_agency_code'] = '999' obj['funding_agency_name'] = None if obj['place_of_perform_country_c']: calculate_ppop_fields(obj, sess, county_by_name, county_by_code, state_code_list, country_list) if obj['legal_entity_country_code']: calculate_legal_entity_fields(obj, sess, county_by_code, state_code_list, country_list) obj['business_categories'] = get_business_categories(row=obj, data_type='fpds') if (obj['awardee_or_recipient_uniqu'] and (obj['awardee_or_recipient_uniqu'] in exec_comp_dict.keys())): exec_comp = exec_comp_dict[obj['awardee_or_recipient_uniqu']] for i in range(1, 6): obj['high_comp_officer{}_full_na'.format(i)] = exec_comp['officer{}_name'.format(i)] obj['high_comp_officer{}_amount'.format(i)] = exec_comp['officer{}_amt'.format(i)] else: for i in range(1, 6): obj['high_comp_officer{}_full_na'.format(i)] = None obj['high_comp_officer{}_amount'.format(i)] = None if (atom_type == 'award'): unique_award_string_list = ['CONT_AWD'] key_list = ['piid', 'agency_id', 'parent_award_id', 'referenced_idv_agency_iden'] else: unique_award_string_list = ['CONT_IDV'] key_list = ['piid', 'agency_id'] for item in key_list: unique_award_string_list.append((obj.get(item) or '-none-')) obj['unique_award_key'] = '_'.join(unique_award_string_list).upper() key_list = ['agency_id', 'referenced_idv_agency_iden', 'piid', 'award_modification_amendme', 'parent_award_id', 'transaction_number'] idv_list = ['agency_id', 'piid', 'award_modification_amendme'] unique_string = '' for item in key_list: if (len(unique_string) > 0): unique_string += '_' if ((atom_type == 'award') or (item in idv_list)): unique_string += (obj.get(item) or '-none-') else: unique_string += '-none-' obj['detached_award_proc_unique'] = unique_string return obj
1,665,732,658,534,018,300
Calculate values that aren't in any feed but can be calculated. Args: obj: a dictionary containing the details we need to derive from and to sess: the database connection sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code county_by_name: a dictionary containing all county codes, keyed by state and county name county_by_code: a dictionary containing all county names, keyed by state and county code state_code_list: a dictionary containing all state names, keyed by state code country_list: a dictionary containing all country names, keyed by country code exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number atom_type: a string indicating whether the atom feed being checked is 'award' or 'IDV' Returns: the object originally passed in with newly-calculated values added
dataactcore/scripts/pull_fpds_data.py
calculate_remaining_fields
RonSherfey/data-act-broker-backend
python
def calculate_remaining_fields(obj, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict, atom_type): " Calculate values that aren't in any feed but can be calculated.\n\n Args:\n obj: a dictionary containing the details we need to derive from and to\n sess: the database connection\n sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code\n county_by_name: a dictionary containing all county codes, keyed by state and county name\n county_by_code: a dictionary containing all county names, keyed by state and county code\n state_code_list: a dictionary containing all state names, keyed by state code\n country_list: a dictionary containing all country names, keyed by country code\n exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number\n atom_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'\n\n Returns:\n the object originally passed in with newly-calculated values added\n " obj['awarding_agency_code'] = None obj['awarding_agency_name'] = None obj['funding_agency_code'] = None obj['funding_agency_name'] = None obj['place_of_perform_county_co'] = None obj['legal_entity_county_code'] = None obj['legal_entity_county_name'] = None obj['detached_award_proc_unique'] = None if obj['awarding_sub_tier_agency_c']: try: sub_tier_agency = sub_tier_list[obj['awarding_sub_tier_agency_c']] use_frec = sub_tier_agency.is_frec agency_data = (sub_tier_agency.frec if use_frec else sub_tier_agency.cgac) obj['awarding_agency_code'] = (agency_data.frec_code if use_frec else agency_data.cgac_code) obj['awarding_agency_name'] = agency_data.agency_name except KeyError: logger.info('WARNING: MissingSubtierCGAC: The awarding sub-tier cgac_code: %s does not exist in cgac table. The FPDS-provided awarding sub-tier agency name (if given) for this cgac_code is %s. The award has been loaded with awarding_agency_code 999.', obj['awarding_sub_tier_agency_c'], obj['awarding_sub_tier_agency_n']) obj['awarding_agency_code'] = '999' obj['awarding_agency_name'] = None if obj['funding_sub_tier_agency_co']: try: sub_tier_agency = sub_tier_list[obj['funding_sub_tier_agency_co']] use_frec = sub_tier_agency.is_frec agency_data = (sub_tier_agency.frec if use_frec else sub_tier_agency.cgac) obj['funding_agency_code'] = (agency_data.frec_code if use_frec else agency_data.cgac_code) obj['funding_agency_name'] = agency_data.agency_name except KeyError: logger.info('WARNING: MissingSubtierCGAC: The funding sub-tier cgac_code: %s does not exist in cgac table. The FPDS-provided funding sub-tier agency name (if given) for this cgac_code is %s. 
The award has been loaded with funding_agency_code 999.', obj['funding_sub_tier_agency_co'], obj['funding_sub_tier_agency_na']) obj['funding_agency_code'] = '999' obj['funding_agency_name'] = None if obj['place_of_perform_country_c']: calculate_ppop_fields(obj, sess, county_by_name, county_by_code, state_code_list, country_list) if obj['legal_entity_country_code']: calculate_legal_entity_fields(obj, sess, county_by_code, state_code_list, country_list) obj['business_categories'] = get_business_categories(row=obj, data_type='fpds') if (obj['awardee_or_recipient_uniqu'] and (obj['awardee_or_recipient_uniqu'] in exec_comp_dict.keys())): exec_comp = exec_comp_dict[obj['awardee_or_recipient_uniqu']] for i in range(1, 6): obj['high_comp_officer{}_full_na'.format(i)] = exec_comp['officer{}_name'.format(i)] obj['high_comp_officer{}_amount'.format(i)] = exec_comp['officer{}_amt'.format(i)] else: for i in range(1, 6): obj['high_comp_officer{}_full_na'.format(i)] = None obj['high_comp_officer{}_amount'.format(i)] = None if (atom_type == 'award'): unique_award_string_list = ['CONT_AWD'] key_list = ['piid', 'agency_id', 'parent_award_id', 'referenced_idv_agency_iden'] else: unique_award_string_list = ['CONT_IDV'] key_list = ['piid', 'agency_id'] for item in key_list: unique_award_string_list.append((obj.get(item) or '-none-')) obj['unique_award_key'] = '_'.join(unique_award_string_list).upper() key_list = ['agency_id', 'referenced_idv_agency_iden', 'piid', 'award_modification_amendme', 'parent_award_id', 'transaction_number'] idv_list = ['agency_id', 'piid', 'award_modification_amendme'] unique_string = '' for item in key_list: if (len(unique_string) > 0): unique_string += '_' if ((atom_type == 'award') or (item in idv_list)): unique_string += (obj.get(item) or '-none-') else: unique_string += '-none-' obj['detached_award_proc_unique'] = unique_string return obj
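calculate_remaining_fields derives its two keys by joining identifier fields with '_' and substituting '-none-' for blanks; IDV records blank out the award-only slots. A standalone sketch of that construction with hypothetical identifier values:

def build_unique_keys(obj, atom_type):
    # Award keys carry parent-IDV identifiers; IDV keys do not.
    if atom_type == 'award':
        prefix = 'CONT_AWD'
        award_keys = ['piid', 'agency_id', 'parent_award_id',
                      'referenced_idv_agency_iden']
    else:
        prefix = 'CONT_IDV'
        award_keys = ['piid', 'agency_id']
    parts = [prefix] + [(obj.get(k) or '-none-') for k in award_keys]
    unique_award_key = '_'.join(parts).upper()

    key_list = ['agency_id', 'referenced_idv_agency_iden', 'piid',
                'award_modification_amendme', 'parent_award_id',
                'transaction_number']
    idv_list = ['agency_id', 'piid', 'award_modification_amendme']
    pieces = []
    for item in key_list:
        if atom_type == 'award' or item in idv_list:
            pieces.append(obj.get(item) or '-none-')
        else:
            pieces.append('-none-')  # slots IDV records never fill
    return unique_award_key, '_'.join(pieces)

print(build_unique_keys({'piid': 'ABC123', 'agency_id': '9700'}, 'IDV'))
# ('CONT_IDV_ABC123_9700', '9700_-none-_ABC123_-none-_-none-_-none-')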
def process_data(data, sess, atom_type, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict): " Process the data coming in.\n\n Args:\n data: an object containing the data gathered from the feed\n sess: the database connection\n atom_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'\n sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code\n county_by_name: a dictionary containing all county codes, keyed by state and county name\n county_by_code: a dictionary containing all county names, keyed by state and county code\n state_code_list: a dictionary containing all state names, keyed by state code\n country_list: a dictionary containing all country names, keyed by country code\n exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number\n\n Returns:\n An object containing the processed and calculated data.\n " obj = {} if (atom_type == 'award'): try: data['awardID'] except KeyError: data['awardID'] = {} obj = award_id_values(data['awardID'], obj) else: obj['transaction_number'] = None try: data['contractID'] except KeyError: data['contractID'] = {} obj = contract_id_values(data['contractID'], obj) try: data['competition'] except KeyError: data['competition'] = {} obj = competition_values(data['competition'], obj) try: data['contractData'] except KeyError: data['contractData'] = {} obj = contract_data_values(data['contractData'], obj, atom_type) try: data['dollarValues'] except KeyError: data['dollarValues'] = {} obj = dollar_values_values(data['dollarValues'], obj) try: data['totalDollarValues'] except KeyError: data['totalDollarValues'] = {} obj = total_dollar_values_values(data['totalDollarValues'], obj) if (atom_type == 'award'): try: data['placeOfPerformance'] except KeyError: data['placeOfPerformance'] = {} obj = place_of_performance_values(data['placeOfPerformance'], obj) else: obj['place_of_perform_county_na'] = None obj['place_of_performance_state'] = None obj['place_of_perfor_state_desc'] = None obj['place_of_performance_zip4a'] = None obj['place_of_perform_country_c'] = None obj['place_of_perf_country_desc'] = None try: data['legislativeMandates'] except KeyError: data['legislativeMandates'] = {} obj = legislative_mandates_values(data['legislativeMandates'], obj) try: obj['subcontracting_plan'] = extract_text(data['preferencePrograms']['subcontractPlan']) except (KeyError, TypeError): obj['subcontracting_plan'] = None try: obj['subcontracting_plan_desc'] = extract_text(data['preferencePrograms']['subcontractPlan']['@description']) except (KeyError, TypeError): obj['subcontracting_plan_desc'] = None try: data['productOrServiceInformation'] except KeyError: data['productOrServiceInformation'] = {} obj = product_or_service_information_values(data['productOrServiceInformation'], obj) try: data['purchaserInformation'] except KeyError: data['purchaserInformation'] = {} obj = purchaser_information_values(data['purchaserInformation'], obj) try: data['relevantContractDates'] except KeyError: data['relevantContractDates'] = {} obj = relevant_contract_dates_values(data['relevantContractDates'], obj) try: data['vendor'] except KeyError: data['vendor'] = {} obj = vendor_values(data['vendor'], obj) try: data['genericTags'] except KeyError: data['genericTags'] = {} obj = generic_values(data['genericTags'], obj) obj = calculate_remaining_fields(obj, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, 
country_list, exec_comp_dict, atom_type) try: obj['last_modified'] = extract_text(data['transactionInformation']['lastModifiedDate']) except (KeyError, TypeError): obj['last_modified'] = None try: obj['initial_report_date'] = extract_text(data['transactionInformation']['createdDate']) except (KeyError, TypeError): obj['initial_report_date'] = None obj['pulled_from'] = atom_type free_fields = ['award_description', 'vendor_doing_as_business_n', 'legal_entity_address_line1', 'legal_entity_address_line2', 'legal_entity_address_line3', 'ultimate_parent_legal_enti', 'awardee_or_recipient_legal', 'other_statutory_authority'] for field in free_fields: if obj[field]: obj[field] = re.sub('\\s', ' ', obj[field]) return obj
4,557,314,005,484,484,000
Process the data coming in. Args: data: an object containing the data gathered from the feed sess: the database connection atom_type: a string indicating whether the atom feed being checked is 'award' or 'IDV' sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code county_by_name: a dictionary containing all county codes, keyed by state and county name county_by_code: a dictionary containing all county names, keyed by state and county code state_code_list: a dictionary containing all state names, keyed by state code country_list: a dictionary containing all country names, keyed by country code exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number Returns: An object containing the processed and calculated data.
dataactcore/scripts/pull_fpds_data.py
process_data
RonSherfey/data-act-broker-backend
python
def process_data(data, sess, atom_type, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict): " Process the data coming in.\n\n Args:\n data: an object containing the data gathered from the feed\n sess: the database connection\n atom_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'\n sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code\n county_by_name: a dictionary containing all county codes, keyed by state and county name\n county_by_code: a dictionary containing all county names, keyed by state and county code\n state_code_list: a dictionary containing all state names, keyed by state code\n country_list: a dictionary containing all country names, keyed by country code\n exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number\n\n Returns:\n An object containing the processed and calculated data.\n " obj = {} if (atom_type == 'award'): try: data['awardID'] except KeyError: data['awardID'] = {} obj = award_id_values(data['awardID'], obj) else: obj['transaction_number'] = None try: data['contractID'] except KeyError: data['contractID'] = {} obj = contract_id_values(data['contractID'], obj) try: data['competition'] except KeyError: data['competition'] = {} obj = competition_values(data['competition'], obj) try: data['contractData'] except KeyError: data['contractData'] = {} obj = contract_data_values(data['contractData'], obj, atom_type) try: data['dollarValues'] except KeyError: data['dollarValues'] = {} obj = dollar_values_values(data['dollarValues'], obj) try: data['totalDollarValues'] except KeyError: data['totalDollarValues'] = {} obj = total_dollar_values_values(data['totalDollarValues'], obj) if (atom_type == 'award'): try: data['placeOfPerformance'] except KeyError: data['placeOfPerformance'] = {} obj = place_of_performance_values(data['placeOfPerformance'], obj) else: obj['place_of_perform_county_na'] = None obj['place_of_performance_state'] = None obj['place_of_perfor_state_desc'] = None obj['place_of_performance_zip4a'] = None obj['place_of_perform_country_c'] = None obj['place_of_perf_country_desc'] = None try: data['legislativeMandates'] except KeyError: data['legislativeMandates'] = {} obj = legislative_mandates_values(data['legislativeMandates'], obj) try: obj['subcontracting_plan'] = extract_text(data['preferencePrograms']['subcontractPlan']) except (KeyError, TypeError): obj['subcontracting_plan'] = None try: obj['subcontracting_plan_desc'] = extract_text(data['preferencePrograms']['subcontractPlan']['@description']) except (KeyError, TypeError): obj['subcontracting_plan_desc'] = None try: data['productOrServiceInformation'] except KeyError: data['productOrServiceInformation'] = {} obj = product_or_service_information_values(data['productOrServiceInformation'], obj) try: data['purchaserInformation'] except KeyError: data['purchaserInformation'] = {} obj = purchaser_information_values(data['purchaserInformation'], obj) try: data['relevantContractDates'] except KeyError: data['relevantContractDates'] = {} obj = relevant_contract_dates_values(data['relevantContractDates'], obj) try: data['vendor'] except KeyError: data['vendor'] = {} obj = vendor_values(data['vendor'], obj) try: data['genericTags'] except KeyError: data['genericTags'] = {} obj = generic_values(data['genericTags'], obj) obj = calculate_remaining_fields(obj, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, 
country_list, exec_comp_dict, atom_type) try: obj['last_modified'] = extract_text(data['transactionInformation']['lastModifiedDate']) except (KeyError, TypeError): obj['last_modified'] = None try: obj['initial_report_date'] = extract_text(data['transactionInformation']['createdDate']) except (KeyError, TypeError): obj['initial_report_date'] = None obj['pulled_from'] = atom_type free_fields = ['award_description', 'vendor_doing_as_business_n', 'legal_entity_address_line1', 'legal_entity_address_line2', 'legal_entity_address_line3', 'ultimate_parent_legal_enti', 'awardee_or_recipient_legal', 'other_statutory_authority'] for field in free_fields: if obj[field]: obj[field] = re.sub('\\s', ' ', obj[field]) return obj
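process_data guards every feed sub-section with a try/except KeyError so a missing section becomes an empty dict, and finishes by replacing each whitespace character in free-text fields with a plain space. Both steps in isolation (setdefault stands in for the try/except as a simplification):

import re

def ensure_section(data, key):
    # Missing feed sections become empty dicts so the downstream value maps
    # fall through to None instead of raising.
    data.setdefault(key, {})
    return data[key]

def normalize_free_text(obj, fields):
    # re.sub(r'\s', ' ', ...) swaps every tab/newline for a single space;
    # note it does not collapse runs of whitespace.
    for field in fields:
        if obj.get(field):
            obj[field] = re.sub(r'\s', ' ', obj[field])
    return obj

print(normalize_free_text({'award_description': 'LINE1\nLINE2\tEND'},
                          ['award_description']))
# {'award_description': 'LINE1 LINE2 END'}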
def process_delete_data(data, atom_type): ' process the delete feed data coming in ' unique_string = '' if (atom_type == 'award'): try: unique_string += extract_text(data['awardID']['awardContractID']['agencyID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['awardID']['referencedIDVID']['agencyID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['awardID']['awardContractID']['PIID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['awardID']['awardContractID']['modNumber']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['awardID']['referencedIDVID']['PIID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['awardID']['awardContractID']['transactionNumber']) except (KeyError, TypeError): unique_string += '-none-' else: try: unique_string += extract_text(data['contractID']['IDVID']['agencyID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_-none-_' try: unique_string += extract_text(data['contractID']['IDVID']['PIID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['contractID']['IDVID']['modNumber']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_-none-_-none-' return unique_string
3,757,567,124,557,474,000
process the delete feed data coming in
dataactcore/scripts/pull_fpds_data.py
process_delete_data
RonSherfey/data-act-broker-backend
python
def process_delete_data(data, atom_type): ' ' unique_string = '' if (atom_type == 'award'): try: unique_string += extract_text(data['awardID']['awardContractID']['agencyID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['awardID']['referencedIDVID']['agencyID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['awardID']['awardContractID']['PIID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['awardID']['awardContractID']['modNumber']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['awardID']['referencedIDVID']['PIID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['awardID']['awardContractID']['transactionNumber']) except (KeyError, TypeError): unique_string += '-none-' else: try: unique_string += extract_text(data['contractID']['IDVID']['agencyID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_-none-_' try: unique_string += extract_text(data['contractID']['IDVID']['PIID']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_' try: unique_string += extract_text(data['contractID']['IDVID']['modNumber']) except (KeyError, TypeError): unique_string += '-none-' unique_string += '_-none-_-none-' return unique_string
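process_delete_data rebuilds the detached_award_proc_unique layout straight from the delete feed so deletions can be matched by key; IDV entries pin the referenced-IDV and transaction slots to '-none-'. A worked example with a hypothetical delete entry, skipping the extract_text indirection:

# Hypothetical IDV delete entry as xmltodict would parse it.
data = {'contractID': {'IDVID': {'agencyID': '9700',
                                 'PIID': 'ABC123',
                                 'modNumber': '0'}}}
ids = data['contractID']['IDVID']
key = '_'.join([ids['agencyID'], '-none-', ids['PIID'], ids['modNumber'],
                '-none-', '-none-'])
print(key)  # 9700_-none-_ABC123_0_-none-_-none-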
def create_processed_data_list(data, contract_type, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict): " Create a list of processed data\n\n Args:\n data: an object containing the data gathered from the feed\n sess: the database connection\n contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'\n sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code\n county_by_name: a dictionary containing all county codes, keyed by state and county name\n county_by_code: a dictionary containing all county names, keyed by state and county code\n state_code_list: a dictionary containing all state names, keyed by state code\n country_list: a dictionary containing all country names, keyed by country code\n exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number\n\n Returns:\n A list containing the processed and calculated data.\n " data_list = [] for value in data: tmp_obj = process_data(value['content'][contract_type], sess, atom_type=contract_type, sub_tier_list=sub_tier_list, county_by_name=county_by_name, county_by_code=county_by_code, state_code_list=state_code_list, country_list=country_list, exec_comp_dict=exec_comp_dict) data_list.append(tmp_obj) return data_list
-8,462,654,797,997,696,000
Create a list of processed data Args: data: an object containing the data gathered from the feed sess: the database connection contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV' sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code county_by_name: a dictionary containing all county codes, keyed by state and county name county_by_code: a dictionary containing all county names, keyed by state and county code state_code_list: a dictionary containing all state names, keyed by state code country_list: a dictionary containing all country names, keyed by country code exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number Returns: A list containing the processed and calculated data.
dataactcore/scripts/pull_fpds_data.py
create_processed_data_list
RonSherfey/data-act-broker-backend
python
def create_processed_data_list(data, contract_type, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict): " Create a list of processed data\n\n Args:\n data: an object containing the data gathered from the feed\n sess: the database connection\n contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'\n sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code\n county_by_name: a dictionary containing all county codes, keyed by state and county name\n county_by_code: a dictionary containing all county names, keyed by state and county code\n state_code_list: a dictionary containing all state names, keyed by state code\n country_list: a dictionary containing all country names, keyed by country code\n exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number\n\n Returns:\n A list containing the processed and calculated data.\n " data_list = [] for value in data: tmp_obj = process_data(value['content'][contract_type], sess, atom_type=contract_type, sub_tier_list=sub_tier_list, county_by_name=county_by_name, county_by_code=county_by_code, state_code_list=state_code_list, country_list=country_list, exec_comp_dict=exec_comp_dict) data_list.append(tmp_obj) return data_list
def process_and_add(data, contract_type, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict, now, threaded=False): " Start the processing for data and add it to the DB.\n\n Args:\n data: an object containing the data gathered from the feed\n contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'\n sess: the database connection\n sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code\n county_by_name: a dictionary containing all county codes, keyed by state and county name\n county_by_code: a dictionary containing all county names, keyed by state and county code\n state_code_list: a dictionary containing all state names, keyed by state code\n country_list: a dictionary containing all country names, keyed by country code\n exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number\n now: a timestamp indicating the time to set the updated_at to\n threaded: a boolean indicating whether the process is running as a thread or not\n " if threaded: for value in data: tmp_obj = process_data(value['content'][contract_type], sess, atom_type=contract_type, sub_tier_list=sub_tier_list, county_by_name=county_by_name, county_by_code=county_by_code, state_code_list=state_code_list, country_list=country_list, exec_comp_dict=exec_comp_dict) tmp_obj['updated_at'] = now insert_statement = insert(DetachedAwardProcurement).values(**tmp_obj).on_conflict_do_update(index_elements=['detached_award_proc_unique'], set_=tmp_obj) sess.execute(insert_statement) else: for value in data: tmp_obj = process_data(value['content'][contract_type], sess, atom_type=contract_type, sub_tier_list=sub_tier_list, county_by_name=county_by_name, county_by_code=county_by_code, state_code_list=state_code_list, country_list=country_list, exec_comp_dict=exec_comp_dict) try: statement = insert(DetachedAwardProcurement).values(**tmp_obj) sess.execute(statement) sess.commit() except IntegrityError: sess.rollback() tmp_obj['updated_at'] = now sess.query(DetachedAwardProcurement).filter_by(detached_award_proc_unique=tmp_obj['detached_award_proc_unique']).update(tmp_obj, synchronize_session=False) sess.commit()
-456,377,001,736,376,100
Start the processing for data and add it to the DB. Args: data: an object containing the data gathered from the feed contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV' sess: the database connection sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code county_by_name: a dictionary containing all county codes, keyed by state and county name county_by_code: a dictionary containing all county names, keyed by state and county code state_code_list: a dictionary containing all state names, keyed by state code country_list: a dictionary containing all country names, keyed by country code exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number now: a timestamp indicating the time to set the updated_at to threaded: a boolean indicating whether the process is running as a thread or not
dataactcore/scripts/pull_fpds_data.py
process_and_add
RonSherfey/data-act-broker-backend
python
def process_and_add(data, contract_type, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict, now, threaded=False): " Start the processing for data and add it to the DB.\n\n Args:\n data: an object containing the data gathered from the feed\n contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'\n sess: the database connection\n sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code\n county_by_name: a dictionary containing all county codes, keyed by state and county name\n county_by_code: a dictionary containing all county names, keyed by state and county code\n state_code_list: a dictionary containing all state names, keyed by state code\n country_list: a dictionary containing all country names, keyed by country code\n exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number\n now: a timestamp indicating the time to set the updated_at to\n threaded: a boolean indicating whether the process is running as a thread or not\n " if threaded: for value in data: tmp_obj = process_data(value['content'][contract_type], sess, atom_type=contract_type, sub_tier_list=sub_tier_list, county_by_name=county_by_name, county_by_code=county_by_code, state_code_list=state_code_list, country_list=country_list, exec_comp_dict=exec_comp_dict) tmp_obj['updated_at'] = now insert_statement = insert(DetachedAwardProcurement).values(**tmp_obj).on_conflict_do_update(index_elements=['detached_award_proc_unique'], set_=tmp_obj) sess.execute(insert_statement) else: for value in data: tmp_obj = process_data(value['content'][contract_type], sess, atom_type=contract_type, sub_tier_list=sub_tier_list, county_by_name=county_by_name, county_by_code=county_by_code, state_code_list=state_code_list, country_list=country_list, exec_comp_dict=exec_comp_dict) try: statement = insert(DetachedAwardProcurement).values(**tmp_obj) sess.execute(statement) sess.commit() except IntegrityError: sess.rollback() tmp_obj['updated_at'] = now sess.query(DetachedAwardProcurement).filter_by(detached_award_proc_unique=tmp_obj['detached_award_proc_unique']).update(tmp_obj, synchronize_session=False) sess.commit()
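The threaded branch of process_and_add relies on a PostgreSQL upsert via SQLAlchemy's dialect-specific insert. A self-contained sketch against a hypothetical stand-in model (the real one is DetachedAwardProcurement):

from sqlalchemy import Column, Integer, Text
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Procurement(Base):
    # Hypothetical stand-in for DetachedAwardProcurement.
    __tablename__ = 'procurement'
    id = Column(Integer, primary_key=True)
    detached_award_proc_unique = Column(Text, unique=True)
    piid = Column(Text)

row = {'detached_award_proc_unique': '9700_-none-_ABC123_-none-_-none-_-none-',
       'piid': 'ABC123'}
stmt = (insert(Procurement)
        .values(**row)
        .on_conflict_do_update(index_elements=['detached_award_proc_unique'],
                               set_=row))
# sess.execute(stmt); sess.commit()  # against a live PostgreSQL session

The non-threaded branch gets the same effect with insert-then-catch-IntegrityError, which avoids the PostgreSQL-only ON CONFLICT clause at the cost of a rollback per duplicate.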
def get_with_exception_hand(url_string, expect_entries=True): ' Retrieve data from FPDS, allow for multiple retries and timeouts ' exception_retries = (- 1) retry_sleep_times = [5, 30, 60, 180, 300, 360, 420, 480, 540, 600] request_timeout = 60 while (exception_retries < len(retry_sleep_times)): try: resp = requests.get(url_string, timeout=request_timeout) if expect_entries: resp_dict = xmltodict.parse(resp.text, process_namespaces=True, namespaces=FPDS_NAMESPACES) len(list_data(resp_dict['feed']['entry'])) break except (ConnectionResetError, ReadTimeoutError, ConnectionError, ReadTimeout, KeyError) as e: exception_retries += 1 request_timeout += 60 if (exception_retries < len(retry_sleep_times)): logger.info('Connection exception. Sleeping {}s and then retrying with a max wait of {}s...'.format(retry_sleep_times[exception_retries], request_timeout)) time.sleep(retry_sleep_times[exception_retries]) else: logger.info('Connection to FPDS feed lost, maximum retry attempts exceeded.') raise e return resp
-7,146,710,792,793,508,000
Retrieve data from FPDS, allow for multiple retries and timeouts
dataactcore/scripts/pull_fpds_data.py
get_with_exception_hand
RonSherfey/data-act-broker-backend
python
def get_with_exception_hand(url_string, expect_entries=True): ' ' exception_retries = (- 1) retry_sleep_times = [5, 30, 60, 180, 300, 360, 420, 480, 540, 600] request_timeout = 60 while (exception_retries < len(retry_sleep_times)): try: resp = requests.get(url_string, timeout=request_timeout) if expect_entries: resp_dict = xmltodict.parse(resp.text, process_namespaces=True, namespaces=FPDS_NAMESPACES) len(list_data(resp_dict['feed']['entry'])) break except (ConnectionResetError, ReadTimeoutError, ConnectionError, ReadTimeout, KeyError) as e: exception_retries += 1 request_timeout += 60 if (exception_retries < len(retry_sleep_times)): logger.info('Connection exception. Sleeping {}s and then retrying with a max wait of {}s...'.format(retry_sleep_times[exception_retries], request_timeout)) time.sleep(retry_sleep_times[exception_retries]) else: logger.info('Connection to FPDS feed lost, maximum retry attempts exceeded.') raise e return resp
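get_with_exception_hand retries on a fixed ladder of sleep times while growing the request timeout each round, rather than using exponential backoff. The skeleton of that loop, independent of FPDS:

import time
import requests

def get_with_retries(url, sleeps=(5, 30, 60), timeout=60):
    # One initial attempt plus one retry per entry in sleeps; the timeout
    # grows by a minute each round, mirroring the loader above.
    for attempt in range(len(sleeps) + 1):
        try:
            return requests.get(url, timeout=timeout + 60 * attempt)
        except requests.RequestException:
            if attempt == len(sleeps):
                raise  # retries exhausted; surface the last error
            time.sleep(sleeps[attempt])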
def get_total_expected_records(base_url): ' Retrieve the total number of expected records based on the last paginated URL ' initial_request = get_with_exception_hand(base_url, expect_entries=False) initial_request_xml = xmltodict.parse(initial_request.text, process_namespaces=True, namespaces=FPDS_NAMESPACES) try: urls_list = list_data(initial_request_xml['feed']['link']) except KeyError: urls_list = [] final_request_url = None for url in urls_list: if (url['@rel'] == 'last'): final_request_url = url['@href'] continue if (not final_request_url): try: return len(list_data(initial_request_xml['feed']['entry'])) except KeyError: return 0 final_request_count = int(final_request_url.split('&start=')[(- 1)]) final_request = get_with_exception_hand(final_request_url) final_request_xml = xmltodict.parse(final_request.text, process_namespaces=True, namespaces=FPDS_NAMESPACES) try: entries_list = list_data(final_request_xml['feed']['entry']) except KeyError: raise Exception('Initial count failed, no entries in last page of request.') return (final_request_count + len(entries_list))
-6,784,017,721,531,892,000
Retrieve the total number of expected records based on the last paginated URL
dataactcore/scripts/pull_fpds_data.py
get_total_expected_records
RonSherfey/data-act-broker-backend
python
def get_total_expected_records(base_url): ' ' initial_request = get_with_exception_hand(base_url, expect_entries=False) initial_request_xml = xmltodict.parse(initial_request.text, process_namespaces=True, namespaces=FPDS_NAMESPACES) try: urls_list = list_data(initial_request_xml['feed']['link']) except KeyError: urls_list = [] final_request_url = None for url in urls_list: if (url['@rel'] == 'last'): final_request_url = url['@href'] continue if (not final_request_url): try: return len(list_data(initial_request_xml['feed']['entry'])) except KeyError: return 0 final_request_count = int(final_request_url.split('&start=')[(- 1)]) final_request = get_with_exception_hand(final_request_url) final_request_xml = xmltodict.parse(final_request.text, process_namespaces=True, namespaces=FPDS_NAMESPACES) try: entries_list = list_data(final_request_xml['feed']['entry']) except KeyError: raise Exception('Initial count failed, no entries in last page of request.') return (final_request_count + len(entries_list))
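get_total_expected_records finds the rel='last' link on the first page, reads its &start= offset, and adds the entry count of that final page. The offset arithmetic in isolation, with a hypothetical feed URL:

def expected_total(links, last_page_entry_count):
    # links: the 'link' elements of the first page as parsed by xmltodict.
    last_href = next((l['@href'] for l in links if l['@rel'] == 'last'), None)
    if last_href is None:
        return last_page_entry_count  # feed fits on a single page
    start_offset = int(last_href.split('&start=')[-1])
    return start_offset + last_page_entry_count

links = [{'@rel': 'last', '@href': 'https://example.test/FPDS?q=x&start=990'}]
print(expected_total(links, 7))  # 997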
def get_data(contract_type, award_type, now, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict, last_run=None, threaded=False, start_date=None, end_date=None, metrics=None, specific_params=None): " Get the data from the atom feed based on contract/award type and the last time the script was run.\n\n Args:\n contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'\n award_type: a string indicating what the award type of the feed being checked is\n now: a timestamp indicating the time to set the updated_at to\n sess: the database connection\n sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code\n county_by_name: a dictionary containing all county codes, keyed by state and county name\n county_by_code: a dictionary containing all county names, keyed by state and county code\n state_code_list: a dictionary containing all state names, keyed by state code\n country_list: a dictionary containing all country names, keyed by country code\n exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number\n last_run: a date indicating the last time the pull was run\n threaded: a boolean indicating whether the process is running as a thread or not\n start_date: a date indicating the first date to pull from (must be provided with end_date)\n end_date: a date indicating the last date to pull from (must be provided with start_date)\n metrics: a dictionary to gather metrics for the script in\n specific_params: a string containing a specific set of params to run the query with (used for outside\n scripts that need to run a data load)\n " if (not metrics): metrics = {} data = [] yesterday = (now - datetime.timedelta(days=1)) utcnow = datetime.datetime.utcnow() if specific_params: params = specific_params elif (not last_run): params = (('SIGNED_DATE:[2016/10/01,' + yesterday.strftime('%Y/%m/%d')) + '] ') metrics['start_date'] = '2016/10/01' metrics['end_date'] = yesterday.strftime('%Y/%m/%d') else: last_run_date = (last_run - relativedelta(days=1)) params = (((('LAST_MOD_DATE:[' + last_run_date.strftime('%Y/%m/%d')) + ',') + yesterday.strftime('%Y/%m/%d')) + '] ') metrics['start_date'] = last_run_date.strftime('%Y/%m/%d') metrics['end_date'] = yesterday.strftime('%Y/%m/%d') if (start_date and end_date): params = (((('LAST_MOD_DATE:[' + start_date) + ',') + end_date) + '] ') metrics['start_date'] = start_date metrics['end_date'] = end_date base_url = ((((((feed_url + params) + 'CONTRACT_TYPE:"') + contract_type.upper()) + '" AWARD_TYPE:"') + award_type) + '"') logger.info('Starting get feed: %s', base_url) total_expected_records = get_total_expected_records(base_url) logger.info('{} record(s) expected from this feed'.format(total_expected_records)) entries_processed = 0 while True: async def atom_async_get(entries_already_processed, total_expected_records): response_list = [] loop = asyncio.get_event_loop() requests_at_once = MAX_REQUESTS_AT_ONCE if ((total_expected_records - entries_already_processed) < (MAX_REQUESTS_AT_ONCE * MAX_ENTRIES)): requests_at_once = (math.ceil(((total_expected_records - entries_already_processed) / MAX_ENTRIES)) + 1) futures = [loop.run_in_executor(None, get_with_exception_hand, ((base_url + '&start=') + str((entries_already_processed + (start_offset * MAX_ENTRIES)))), (total_expected_records > (entries_already_processed + (start_offset * MAX_ENTRIES)))) for start_offset in range(requests_at_once)] for response in (await asyncio.gather(*futures)): response_list.append(response.text) pass return response_list loop = asyncio.get_event_loop() full_response = loop.run_until_complete(atom_async_get(entries_processed, total_expected_records)) for next_resp in full_response: response_dict = xmltodict.parse(next_resp, process_namespaces=True, namespaces=FPDS_NAMESPACES) try: entries_per_response = list_data(response_dict['feed']['entry']) except KeyError: continue if (last_run or specific_params): for entry in entries_per_response: data.append(entry) entries_processed += 1 else: data.extend(create_processed_data_list(entries_per_response, contract_type, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict)) entries_processed += len(entries_per_response) if (entries_processed > total_expected_records): for next_resp in full_response: response_dict = xmltodict.parse(next_resp, process_namespaces=True, namespaces=FPDS_NAMESPACES) try: list_data(response_dict['feed']['entry']) except KeyError: logger.info(response_dict) continue raise Exception('Total number of expected records has changed\nExpected: {}\nRetrieved so far: {}'.format(total_expected_records, len(data))) if data: logger.info('Retrieved %s lines of get %s: %s feed, writing next %s to DB', entries_processed, contract_type, award_type, len(data)) if (last_run or specific_params): process_and_add(data, contract_type, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict, utcnow, threaded) else: add_processed_data_list(data, sess) logger.info('Successfully inserted %s lines of get %s: %s feed, continuing feed retrieval', len(data), contract_type, award_type) if (len(data) < (MAX_ENTRIES * MAX_REQUESTS_AT_ONCE)): if (entries_processed != total_expected_records): raise Exception('Records retrieved != Total expected records\nExpected: {}\nRetrieved: {}'.format(total_expected_records, entries_processed)) else: if ('records_received' not in metrics): metrics['records_received'] = total_expected_records else: metrics['records_received'] += total_expected_records break else: data = [] logger.info('Total entries in %s: %s feed: %s', contract_type, award_type, entries_processed) logger.info('Processed %s: %s data', contract_type, award_type)
-8,891,078,290,683,547,000
Get the data from the atom feed based on contract/award type and the last time the script was run. Args: contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV' award_type: a string indicating what the award type of the feed being checked is now: a timestamp indicating the time to set the updated_at to sess: the database connection sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code county_by_name: a dictionary containing all county codes, keyed by state and county name county_by_code: a dictionary containing all county names, keyed by state and county code state_code_list: a dictionary containing all state names, keyed by state code country_list: a dictionary containing all country names, keyed by country code exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number last_run: a date indicating the last time the pull was run threaded: a boolean indicating whether the process is running as a thread or not start_date: a date indicating the first date to pull from (must be provided with end_date) end_date: a date indicating the last date to pull from (must be provided with start_date) metrics: a dictionary to gather metrics for the script in specific_params: a string containing a specific set of params to run the query with (used for outside scripts that need to run a data load)
dataactcore/scripts/pull_fpds_data.py
get_data
RonSherfey/data-act-broker-backend
python
def get_data(contract_type, award_type, now, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict, last_run=None, threaded=False, start_date=None, end_date=None, metrics=None, specific_params=None): " Get the data from the atom feed based on contract/award type and the last time the script was run.\n\n Args:\n contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'\n award_type: a string indicating what the award type of the feed being checked is\n now: a timestamp indicating the time to set the updated_at to\n sess: the database connection\n sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code\n county_by_name: a dictionary containing all county codes, keyed by state and county name\n county_by_code: a dictionary containing all county names, keyed by state and county code\n state_code_list: a dictionary containing all state names, keyed by state code\n country_list: a dictionary containing all country names, keyed by country code\n exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number\n last_run: a date indicating the last time the pull was run\n threaded: a boolean indicating whether the process is running as a thread or not\n start_date: a date indicating the first date to pull from (must be provided with end_date)\n end_date: a date indicating the last date to pull from (must be provided with start_date)\n metrics: a dictionary to gather metrics for the script in\n specific_params: a string containing a specific set of params to run the query with (used for outside\n scripts that need to run a data load)\n " if (not metrics): metrics = {} data = [] yesterday = (now - datetime.timedelta(days=1)) utcnow = datetime.datetime.utcnow() if specific_params: params = specific_params elif (not last_run): params = (('SIGNED_DATE:[2016/10/01,' + yesterday.strftime('%Y/%m/%d')) + '] ') metrics['start_date'] = '2016/10/01' metrics['end_date'] = yesterday.strftime('%Y/%m/%d') else: last_run_date = (last_run - relativedelta(days=1)) params = (((('LAST_MOD_DATE:[' + last_run_date.strftime('%Y/%m/%d')) + ',') + yesterday.strftime('%Y/%m/%d')) + '] ') metrics['start_date'] = last_run_date.strftime('%Y/%m/%d') metrics['end_date'] = yesterday.strftime('%Y/%m/%d') if (start_date and end_date): params = (((('LAST_MOD_DATE:[' + start_date) + ',') + end_date) + '] ') metrics['start_date'] = start_date metrics['end_date'] = end_date base_url = ((((((feed_url + params) + 'CONTRACT_TYPE:"') + contract_type.upper()) + '" AWARD_TYPE:"') + award_type) + '"') logger.info('Starting get feed: %s', base_url) total_expected_records = get_total_expected_records(base_url) logger.info('{} record(s) expected from this feed'.format(total_expected_records)) entries_processed = 0 while True: async def atom_async_get(entries_already_processed, total_expected_records): response_list = [] loop = asyncio.get_event_loop() requests_at_once = MAX_REQUESTS_AT_ONCE if ((total_expected_records - entries_already_processed) < (MAX_REQUESTS_AT_ONCE * MAX_ENTRIES)): requests_at_once = (math.ceil(((total_expected_records - entries_already_processed) / MAX_ENTRIES)) + 1) futures = [loop.run_in_executor(None, get_with_exception_hand, ((base_url + '&start=') + str((entries_already_processed + (start_offset * MAX_ENTRIES)))), (total_expected_records > (entries_already_processed + (start_offset * MAX_ENTRIES)))) for start_offset in range(requests_at_once)] for response in (await asyncio.gather(*futures)): response_list.append(response.text) pass return response_list loop = asyncio.get_event_loop() full_response = loop.run_until_complete(atom_async_get(entries_processed, total_expected_records)) for next_resp in full_response: response_dict = xmltodict.parse(next_resp, process_namespaces=True, namespaces=FPDS_NAMESPACES) try: entries_per_response = list_data(response_dict['feed']['entry']) except KeyError: continue if (last_run or specific_params): for entry in entries_per_response: data.append(entry) entries_processed += 1 else: data.extend(create_processed_data_list(entries_per_response, contract_type, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict)) entries_processed += len(entries_per_response) if (entries_processed > total_expected_records): for next_resp in full_response: response_dict = xmltodict.parse(next_resp, process_namespaces=True, namespaces=FPDS_NAMESPACES) try: list_data(response_dict['feed']['entry']) except KeyError: logger.info(response_dict) continue raise Exception('Total number of expected records has changed\nExpected: {}\nRetrieved so far: {}'.format(total_expected_records, len(data))) if data: logger.info('Retrieved %s lines of get %s: %s feed, writing next %s to DB', entries_processed, contract_type, award_type, len(data)) if (last_run or specific_params): process_and_add(data, contract_type, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list, exec_comp_dict, utcnow, threaded) else: add_processed_data_list(data, sess) logger.info('Successfully inserted %s lines of get %s: %s feed, continuing feed retrieval', len(data), contract_type, award_type) if (len(data) < (MAX_ENTRIES * MAX_REQUESTS_AT_ONCE)): if (entries_processed != total_expected_records): raise Exception('Records retrieved != Total expected records\nExpected: {}\nRetrieved: {}'.format(total_expected_records, entries_processed)) else: if ('records_received' not in metrics): metrics['records_received'] = total_expected_records else: metrics['records_received'] += total_expected_records break else: data = [] logger.info('Total entries in %s: %s feed: %s', contract_type, award_type, entries_processed) logger.info('Processed %s: %s data', contract_type, award_type)
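The fan-out inside get_data schedules several blocking page fetches on the default executor and gathers them in one event-loop pass. A reduced sketch of that technique; the URL, page size constant, and fetch helper below are invented stand-ins for the feed URL, MAX_ENTRIES, and get_with_exception_hand:

import asyncio
import math
import requests

PAGE_SIZE = 10  # stands in for MAX_ENTRIES

def fetch_page(offset):
    return requests.get('https://example.test/feed?start=%d' % offset, timeout=60)

async def fetch_pages(first_offset, remaining, max_requests=5):
    loop = asyncio.get_event_loop()
    # Never launch more requests than there are pages left to read.
    n = min(max_requests, max(1, math.ceil(remaining / PAGE_SIZE)))
    futures = [loop.run_in_executor(None, fetch_page, first_offset + i * PAGE_SIZE)
               for i in range(n)]
    return await asyncio.gather(*futures)

# responses = asyncio.get_event_loop().run_until_complete(fetch_pages(0, 42))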
def get_delete_data(contract_type, now, sess, last_run, start_date=None, end_date=None, metrics=None): ' Get data from the delete feed ' if (not metrics): metrics = {} data = [] yesterday = (now - datetime.timedelta(days=1)) last_run_date = (last_run - relativedelta(days=1)) params = (((('LAST_MOD_DATE:[' + last_run_date.strftime('%Y/%m/%d')) + ',') + yesterday.strftime('%Y/%m/%d')) + '] ') if (start_date and end_date): params = (((('LAST_MOD_DATE:[' + start_date) + ',') + end_date) + '] ') if (not metrics['start_date']): metrics['start_date'] = start_date if (not metrics['end_date']): metrics['end_date'] = end_date base_url = ((((delete_url + params) + 'CONTRACT_TYPE:"') + contract_type.upper()) + '"') logger.info('Starting delete feed: %s', base_url) total_expected_records = get_total_expected_records(base_url) logger.info('{} record(s) expected from this feed'.format(total_expected_records)) processed_deletions = 0 exception_retries = (- 1) retry_sleep_times = [5, 30, 60, 180, 300, 360, 420, 480, 540, 600] request_timeout = 60 while True: try: resp = requests.get(((base_url + '&start=') + str(processed_deletions)), timeout=request_timeout) resp_data = xmltodict.parse(resp.text, process_namespaces=True, namespaces=FPDS_NAMESPACES) except (ConnectionResetError, ReadTimeoutError, ConnectionError, ReadTimeout) as e: exception_retries += 1 request_timeout += 60 if (exception_retries < len(retry_sleep_times)): logger.info('Connection exception caught. Sleeping {}s and then retrying with a max wait of {}s...'.format(retry_sleep_times[exception_retries], request_timeout)) time.sleep(retry_sleep_times[exception_retries]) continue else: logger.info('Connection to FPDS feed lost, maximum retry attempts exceeded.') raise e try: listed_data = list_data(resp_data['feed']['entry']) except KeyError: listed_data = [] if (processed_deletions > total_expected_records): raise Exception('Total number of expected records has changed\nExpected: {}\nRetrieved so far: {}'.format(total_expected_records, processed_deletions)) for ld in listed_data: data.append(ld) processed_deletions += 1 if ((processed_deletions % 100) == 0): logger.info('On line %s of %s delete feed', str(processed_deletions), contract_type) if (len(listed_data) < 10): if (processed_deletions != total_expected_records): raise Exception('Records retrieved != Total expected records\nExpected: {}\nRetrieved: {}'.format(total_expected_records, processed_deletions)) else: if ('deletes_received' not in metrics): metrics['deletes_received'] = total_expected_records else: metrics['deletes_received'] += total_expected_records break else: listed_data = [] logger.info('Total entries in %s delete feed: %s', contract_type, str(processed_deletions)) delete_list = [] delete_dict = {} for value in data: last_modified = value['content'][contract_type]['transactionInformation']['lastModifiedDate'] unique_string = process_delete_data(value['content'][contract_type], atom_type=contract_type) existing_item = sess.query(DetachedAwardProcurement).filter_by(detached_award_proc_unique=unique_string).one_or_none() if existing_item: if (last_modified > existing_item.last_modified): delete_list.append(existing_item.detached_award_procurement_id) delete_dict[existing_item.detached_award_procurement_id] = existing_item.detached_award_proc_unique if delete_list: if ('records_deleted' not in metrics): metrics['records_deleted'] = len(delete_list) else: metrics['records_deleted'] += len(delete_list) sess.query(DetachedAwardProcurement).filter(DetachedAwardProcurement.detached_award_procurement_id.in_(delete_list)).delete(synchronize_session=False) seconds = int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()) file_name = (((((now.strftime('%m-%d-%Y') + '_delete_records_') + contract_type) + '_') + str(seconds)) + '.csv') metrics['deleted_{}_records_file'.format(contract_type).lower()] = file_name headers = ['detached_award_procurement_id', 'detached_award_proc_unique'] if CONFIG_BROKER['use_aws']: s3client = boto3.client('s3', region_name=CONFIG_BROKER['aws_region']) contents = bytes((','.join(headers) + '\n').encode()) for (key, value) in delete_dict.items(): contents += bytes('{},{}\n'.format(key, value).encode()) s3client.put_object(Bucket=CONFIG_BROKER['fpds_delete_bucket'], Key=file_name, Body=contents) else: with CsvLocalWriter(file_name, headers) as writer: for (key, value) in delete_dict.items(): writer.write([key, value]) writer.finish_batch()
6,783,106,870,836,893,000
Get data from the delete feed
dataactcore/scripts/pull_fpds_data.py
get_delete_data
RonSherfey/data-act-broker-backend
python
def get_delete_data(contract_type, now, sess, last_run, start_date=None, end_date=None, metrics=None): ' ' if (not metrics): metrics = {} data = [] yesterday = (now - datetime.timedelta(days=1)) last_run_date = (last_run - relativedelta(days=1)) params = (((('LAST_MOD_DATE:[' + last_run_date.strftime('%Y/%m/%d')) + ',') + yesterday.strftime('%Y/%m/%d')) + '] ') if (start_date and end_date): params = (((('LAST_MOD_DATE:[' + start_date) + ',') + end_date) + '] ') if (not metrics['start_date']): metrics['start_date'] = start_date if (not metrics['end_date']): metrics['end_date'] = end_date base_url = ((((delete_url + params) + 'CONTRACT_TYPE:"') + contract_type.upper()) + '"') logger.info('Starting delete feed: %s', base_url) total_expected_records = get_total_expected_records(base_url) logger.info('{} record(s) expected from this feed'.format(total_expected_records)) processed_deletions = 0 exception_retries = (- 1) retry_sleep_times = [5, 30, 60, 180, 300, 360, 420, 480, 540, 600] request_timeout = 60 while True: try: resp = requests.get(((base_url + '&start=') + str(processed_deletions)), timeout=request_timeout) resp_data = xmltodict.parse(resp.text, process_namespaces=True, namespaces=FPDS_NAMESPACES) except (ConnectionResetError, ReadTimeoutError, ConnectionError, ReadTimeout) as e: exception_retries += 1 request_timeout += 60 if (exception_retries < len(retry_sleep_times)): logger.info('Connection exception caught. Sleeping {}s and then retrying with a max wait of {}s...'.format(retry_sleep_times[exception_retries], request_timeout)) time.sleep(retry_sleep_times[exception_retries]) continue else: logger.info('Connection to FPDS feed lost, maximum retry attempts exceeded.') raise e try: listed_data = list_data(resp_data['feed']['entry']) except KeyError: listed_data = [] if (processed_deletions > total_expected_records): raise Exception('Total number of expected records has changed\nExpected: {}\nRetrieved so far: {}'.format(total_expected_records, processed_deletions)) for ld in listed_data: data.append(ld) processed_deletions += 1 if ((processed_deletions % 100) == 0): logger.info('On line %s of %s delete feed', str(processed_deletions), contract_type) if (len(listed_data) < 10): if (processed_deletions != total_expected_records): raise Exception('Records retrieved != Total expected records\nExpected: {}\nRetrieved: {}'.format(total_expected_records, processed_deletions)) else: if ('deletes_received' not in metrics): metrics['deletes_received'] = total_expected_records else: metrics['deletes_received'] += total_expected_records break else: listed_data = [] logger.info('Total entries in %s delete feed: %s', contract_type, str(processed_deletions)) delete_list = [] delete_dict = {} for value in data: last_modified = value['content'][contract_type]['transactionInformation']['lastModifiedDate'] unique_string = process_delete_data(value['content'][contract_type], atom_type=contract_type) existing_item = sess.query(DetachedAwardProcurement).filter_by(detached_award_proc_unique=unique_string).one_or_none() if existing_item: if (last_modified > existing_item.last_modified): delete_list.append(existing_item.detached_award_procurement_id) delete_dict[existing_item.detached_award_procurement_id] = existing_item.detached_award_proc_unique if delete_list: if ('records_deleted' not in metrics): metrics['records_deleted'] = len(delete_list) else: metrics['records_deleted'] += len(delete_list) sess.query(DetachedAwardProcurement).filter(DetachedAwardProcurement.detached_award_procurement_id.in_(delete_list)).delete(synchronize_session=False) seconds = int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()) file_name = (((((now.strftime('%m-%d-%Y') + '_delete_records_') + contract_type) + '_') + str(seconds)) + '.csv') metrics['deleted_{}_records_file'.format(contract_type).lower()] = file_name headers = ['detached_award_procurement_id', 'detached_award_proc_unique'] if CONFIG_BROKER['use_aws']: s3client = boto3.client('s3', region_name=CONFIG_BROKER['aws_region']) contents = bytes((','.join(headers) + '\n').encode()) for (key, value) in delete_dict.items(): contents += bytes('{},{}\n'.format(key, value).encode()) s3client.put_object(Bucket=CONFIG_BROKER['fpds_delete_bucket'], Key=file_name, Body=contents) else: with CsvLocalWriter(file_name, headers) as writer: for (key, value) in delete_dict.items(): writer.write([key, value]) writer.finish_batch()
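At the tail of get_delete_data the deleted IDs are written as a two-column CSV either to S3 or to a local file. A sketch of that branch, assuming boto3; the bucket name is a placeholder, and the standard csv module stands in for the broker's CsvLocalWriter:

import csv
import boto3

def write_delete_csv(delete_dict, file_name, bucket=None):
    headers = ['detached_award_procurement_id', 'detached_award_proc_unique']
    if bucket:  # mirrors the CONFIG_BROKER['use_aws'] branch
        body = ','.join(headers) + '\n'
        body += ''.join('{},{}\n'.format(k, v) for k, v in delete_dict.items())
        boto3.client('s3').put_object(Bucket=bucket, Key=file_name, Body=body.encode())
    else:
        with open(file_name, 'w', newline='') as out:
            writer = csv.writer(out)
            writer.writerow(headers)
            writer.writerows(delete_dict.items())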
def create_lookups(sess): ' Create the lookups used for FPDS derivations.\n\n Args:\n sess: connection to database\n\n Returns:\n Dictionaries of sub tier agencies by code, country names by code, county names by state code + county\n code, county codes by state code + county name, state name by code, and executive compensation data by\n DUNS number\n ' sub_tiers = sess.query(SubTierAgency).all() sub_tier_list = {} for sub_tier in sub_tiers: sub_tier_list[sub_tier.sub_tier_agency_code] = sub_tier countries = sess.query(CountryCode).all() country_list = {} for country in countries: country_list[country.country_code] = country.country_name county_by_name = {} county_by_code = {} state_code_list = {} state_codes = sess.query(States.state_code, func.upper(States.state_name).label('state_name')).all() for state_code in state_codes: county_by_name[state_code.state_code] = {} county_by_code[state_code.state_code] = {} state_code_list[state_code.state_code] = state_code.state_name county_codes = sess.query(CountyCode.county_number, CountyCode.state_code, func.upper(CountyCode.county_name).label('county_name')).all() for county_code in county_codes: county_name = county_code.county_name.replace(' (CA)', '').strip() county_by_code[county_code.state_code][county_code.county_number] = county_name if re.match('^[A-Z\\s]+$', county_code.county_name): county_by_name[county_code.state_code][county_name] = county_code.county_number exec_comp_dict = {} duns_list = sess.query(DUNS).filter(DUNS.high_comp_officer1_full_na.isnot(None)).all() for duns in duns_list: exec_comp_dict[duns.awardee_or_recipient_uniqu] = {'officer1_name': duns.high_comp_officer1_full_na, 'officer1_amt': duns.high_comp_officer1_amount, 'officer2_name': duns.high_comp_officer2_full_na, 'officer2_amt': duns.high_comp_officer2_amount, 'officer3_name': duns.high_comp_officer3_full_na, 'officer3_amt': duns.high_comp_officer3_amount, 'officer4_name': duns.high_comp_officer4_full_na, 'officer4_amt': duns.high_comp_officer4_amount, 'officer5_name': duns.high_comp_officer5_full_na, 'officer5_amt': duns.high_comp_officer5_amount} del duns_list return (sub_tier_list, country_list, state_code_list, county_by_name, county_by_code, exec_comp_dict)
623,045,959,743,068,400
Create the lookups used for FPDS derivations. Args: sess: connection to database Returns: Dictionaries of sub tier agencies by code, country names by code, county names by state code + county code, county codes by state code + county name, state name by code, and executive compensation data by DUNS number
dataactcore/scripts/pull_fpds_data.py
create_lookups
RonSherfey/data-act-broker-backend
python
def create_lookups(sess): ' Create the lookups used for FPDS derivations.\n\n Args:\n sess: connection to database\n\n Returns:\n Dictionaries of sub tier agencies by code, country names by code, county names by state code + county\n code, county codes by state code + county name, state name by code, and executive compensation data by\n DUNS number\n ' sub_tiers = sess.query(SubTierAgency).all() sub_tier_list = {} for sub_tier in sub_tiers: sub_tier_list[sub_tier.sub_tier_agency_code] = sub_tier countries = sess.query(CountryCode).all() country_list = {} for country in countries: country_list[country.country_code] = country.country_name county_by_name = {} county_by_code = {} state_code_list = {} state_codes = sess.query(States.state_code, func.upper(States.state_name).label('state_name')).all() for state_code in state_codes: county_by_name[state_code.state_code] = {} county_by_code[state_code.state_code] = {} state_code_list[state_code.state_code] = state_code.state_name county_codes = sess.query(CountyCode.county_number, CountyCode.state_code, func.upper(CountyCode.county_name).label('county_name')).all() for county_code in county_codes: county_name = county_code.county_name.replace(' (CA)', '').strip() county_by_code[county_code.state_code][county_code.county_number] = county_name if re.match('^[A-Z\\s]+$', county_code.county_name): county_by_name[county_code.state_code][county_name] = county_code.county_number exec_comp_dict = {} duns_list = sess.query(DUNS).filter(DUNS.high_comp_officer1_full_na.isnot(None)).all() for duns in duns_list: exec_comp_dict[duns.awardee_or_recipient_uniqu] = {'officer1_name': duns.high_comp_officer1_full_na, 'officer1_amt': duns.high_comp_officer1_amount, 'officer2_name': duns.high_comp_officer2_full_na, 'officer2_amt': duns.high_comp_officer2_amount, 'officer3_name': duns.high_comp_officer3_full_na, 'officer3_amt': duns.high_comp_officer3_amount, 'officer4_name': duns.high_comp_officer4_full_na, 'officer4_amt': duns.high_comp_officer4_amount, 'officer5_name': duns.high_comp_officer5_full_na, 'officer5_amt': duns.high_comp_officer5_amount} del duns_list return (sub_tier_list, country_list, state_code_list, county_by_name, county_by_code, exec_comp_dict)
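The county lookup construction above normalizes names and only indexes by name when the raw name is purely uppercase letters and spaces. The same logic in isolation, with invented sample rows:

import re

rows = [('06', '075', 'SAN FRANCISCO'), ('02', '016', 'ALEUTIANS WEST (CA)')]
by_code, by_name = {}, {}
for state, number, raw_name in rows:
    name = raw_name.replace(' (CA)', '').strip()
    by_code.setdefault(state, {})[number] = name
    if re.match(r'^[A-Z\s]+$', raw_name):  # the raw name is tested, so '(CA)' rows are skipped
        by_name.setdefault(state, {})[name] = number
# by_code['02']['016'] == 'ALEUTIANS WEST', but that county gets no by_name entry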
def parse_error(self, response): 'Parse an error response' error_code = response.split(' ')[0] if (error_code in self.EXCEPTION_CLASSES): response = response[(len(error_code) + 1):] return self.EXCEPTION_CLASSES[error_code](response) return ResponseError(response)
-334,308,344,740,590,700
Parse an error response
redis/connection.py
parse_error
theatlantic/redis-py
python
def parse_error(self, response): error_code = response.split(' ')[0] if (error_code in self.EXCEPTION_CLASSES): response = response[(len(error_code) + 1):] return self.EXCEPTION_CLASSES[error_code](response) return ResponseError(response)
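parse_error's dispatch is a small, reusable pattern: the first token of the error line selects an exception class, with a generic fallback. A self-contained version with invented classes:

class ResponseError(Exception):
    pass

class NoScriptError(ResponseError):
    pass

EXCEPTION_CLASSES = {'NOSCRIPT': NoScriptError}

def parse_error(response):
    code = response.split(' ')[0]
    if code in EXCEPTION_CLASSES:
        return EXCEPTION_CLASSES[code](response[len(code) + 1:])
    return ResponseError(response)

# parse_error('NOSCRIPT No matching script.') -> NoScriptError('No matching script.')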
def on_connect(self, connection): 'Called when the socket connects' self._sock = connection._sock self._buffer = SocketBuffer(self._sock, self.socket_read_size) if connection.decode_responses: self.encoding = connection.encoding
-1,395,762,390,120,864,500
Called when the socket connects
redis/connection.py
on_connect
theatlantic/redis-py
python
def on_connect(self, connection): self._sock = connection._sock self._buffer = SocketBuffer(self._sock, self.socket_read_size) if connection.decode_responses: self.encoding = connection.encoding
def on_disconnect(self): 'Called when the socket disconnects' if (self._sock is not None): self._sock.close() self._sock = None if (self._buffer is not None): self._buffer.close() self._buffer = None self.encoding = None
-1,984,250,152,985,980,400
Called when the socket disconnects
redis/connection.py
on_disconnect
theatlantic/redis-py
python
def on_disconnect(self): if (self._sock is not None): self._sock.close() self._sock = None if (self._buffer is not None): self._buffer.close() self._buffer = None self.encoding = None
def connect(self): 'Connects to the Redis server if not already connected' if self._sock: return try: sock = self._connect() except socket.error: e = sys.exc_info()[1] raise ConnectionError(self._error_message(e)) self._sock = sock try: self.on_connect() except RedisError: self.disconnect() raise for callback in self._connect_callbacks: callback(self)
-1,121,706,814,142,285,700
Connects to the Redis server if not already connected
redis/connection.py
connect
theatlantic/redis-py
python
def connect(self): if self._sock: return try: sock = self._connect() except socket.error: e = sys.exc_info()[1] raise ConnectionError(self._error_message(e)) self._sock = sock try: self.on_connect() except RedisError: self.disconnect() raise for callback in self._connect_callbacks: callback(self)
def _connect(self): 'Create a TCP socket connection' err = None for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): (family, socktype, proto, canonname, socket_address) = res sock = None try: sock = socket.socket(family, socktype, proto) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if self.socket_keepalive: sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) for (k, v) in iteritems(self.socket_keepalive_options): sock.setsockopt(socket.SOL_TCP, k, v) sock.settimeout(self.socket_connect_timeout) sock.connect(socket_address) sock.settimeout(self.socket_timeout) return sock except socket.error as _: err = _ if (sock is not None): sock.close() if (err is not None): raise err raise socket.error('socket.getaddrinfo returned an empty list')
-3,561,479,361,814,386,000
Create a TCP socket connection
redis/connection.py
_connect
theatlantic/redis-py
python
def _connect(self): err = None for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): (family, socktype, proto, canonname, socket_address) = res sock = None try: sock = socket.socket(family, socktype, proto) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if self.socket_keepalive: sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) for (k, v) in iteritems(self.socket_keepalive_options): sock.setsockopt(socket.SOL_TCP, k, v) sock.settimeout(self.socket_connect_timeout) sock.connect(socket_address) sock.settimeout(self.socket_timeout) return sock except socket.error as _: err = _ if (sock is not None): sock.close() if (err is not None): raise err raise socket.error('socket.getaddrinfo returned an empty list')
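_connect is essentially the classic getaddrinfo loop (roughly what socket.create_connection implements): try each resolved address in turn, across IPv4 and IPv6, and keep the first socket that connects. Reduced to its core:

import socket

def open_tcp(host, port, timeout=5):
    err = None
    for family, socktype, proto, _, addr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        sock = socket.socket(family, socktype, proto)
        sock.settimeout(timeout)
        try:
            sock.connect(addr)
            return sock
        except OSError as exc:
            err = exc
            sock.close()
    raise err or OSError('getaddrinfo returned an empty list')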
def on_connect(self): 'Initialize the connection, authenticate and select a database' self._parser.on_connect(self) if self.password: self.send_command('AUTH', self.password) if (nativestr(self.read_response()) != 'OK'): raise AuthenticationError('Invalid Password') if self.db: self.send_command('SELECT', self.db) if (nativestr(self.read_response()) != 'OK'): raise ConnectionError('Invalid Database')
6,806,879,356,599,901,000
Initialize the connection, authenticate and select a database
redis/connection.py
on_connect
theatlantic/redis-py
python
def on_connect(self): self._parser.on_connect(self) if self.password: self.send_command('AUTH', self.password) if (nativestr(self.read_response()) != 'OK'): raise AuthenticationError('Invalid Password') if self.db: self.send_command('SELECT', self.db) if (nativestr(self.read_response()) != 'OK'): raise ConnectionError('Invalid Database')
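For reference, the AUTH/SELECT handshake that on_connect performs looks like this on the wire. A raw-socket sketch outside the Connection machinery; host, port, and password are placeholders, and bytes %-formatting requires Python 3.5+:

import socket

def handshake(host='localhost', port=6379, password=None, db=0):
    sock = socket.create_connection((host, port))
    if password:
        pw = password.encode()
        sock.sendall(b'*2\r\n$4\r\nAUTH\r\n$%d\r\n%s\r\n' % (len(pw), pw))
        assert sock.recv(64).startswith(b'+OK')
    if db:
        sel = str(db).encode()
        sock.sendall(b'*2\r\n$6\r\nSELECT\r\n$%d\r\n%s\r\n' % (len(sel), sel))
        assert sock.recv(64).startswith(b'+OK')
    return sock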
def disconnect(self): 'Disconnects from the Redis server' self._parser.on_disconnect() if (self._sock is None): return try: self._sock.shutdown(socket.SHUT_RDWR) self._sock.close() except socket.error: pass self._sock = None
-2,481,068,027,114,127,400
Disconnects from the Redis server
redis/connection.py
disconnect
theatlantic/redis-py
python
def disconnect(self): self._parser.on_disconnect() if (self._sock is None): return try: self._sock.shutdown(socket.SHUT_RDWR) self._sock.close() except socket.error: pass self._sock = None
def send_packed_command(self, command): 'Send an already packed command to the Redis server' if (not self._sock): self.connect() try: if isinstance(command, str): command = [command] for item in command: self._sock.sendall(item) except socket.timeout: self.disconnect() raise TimeoutError('Timeout writing to socket') except socket.error: e = sys.exc_info()[1] self.disconnect() if (len(e.args) == 1): (_errno, errmsg) = ('UNKNOWN', e.args[0]) else: (_errno, errmsg) = e.args raise ConnectionError(('Error %s while writing to socket. %s.' % (_errno, errmsg))) except: self.disconnect() raise
5,029,571,864,003,598,000
Send an already packed command to the Redis server
redis/connection.py
send_packed_command
theatlantic/redis-py
python
def send_packed_command(self, command): if (not self._sock): self.connect() try: if isinstance(command, str): command = [command] for item in command: self._sock.sendall(item) except socket.timeout: self.disconnect() raise TimeoutError('Timeout writing to socket') except socket.error: e = sys.exc_info()[1] self.disconnect() if (len(e.args) == 1): (_errno, errmsg) = ('UNKNOWN', e.args[0]) else: (_errno, errmsg) = e.args raise ConnectionError(('Error %s while writing to socket. %s.' % (_errno, errmsg))) except: self.disconnect() raise
def send_command(self, *args): 'Pack and send a command to the Redis server' self.send_packed_command(self.pack_command(*args))
-5,852,361,404,330,154,000
Pack and send a command to the Redis server
redis/connection.py
send_command
theatlantic/redis-py
python
def send_command(self, *args): self.send_packed_command(self.pack_command(*args))
def can_read(self, timeout=0): "Poll the socket to see if there's data that can be read." sock = self._sock if (not sock): self.connect() sock = self._sock return (self._parser.can_read() or bool(select([sock], [], [], timeout)[0]))
3,041,537,158,249,160,700
Poll the socket to see if there's data that can be read.
redis/connection.py
can_read
theatlantic/redis-py
python
def can_read(self, timeout=0): sock = self._sock if (not sock): self.connect() sock = self._sock return (self._parser.can_read() or bool(select([sock], [], [], timeout)[0]))
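can_read's probe is just select() with a zero (or small) timeout: it reports whether bytes are waiting without consuming them. Standalone:

from select import select

def socket_can_read(sock, timeout=0):
    # A non-empty readable list means recv() would not block right now.
    return bool(select([sock], [], [], timeout)[0])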
def read_response(self): 'Read the response from a previously sent command' try: response = self._parser.read_response() except: self.disconnect() raise if isinstance(response, ResponseError): raise response return response
-6,449,246,373,174,010,000
Read the response from a previously sent command
redis/connection.py
read_response
theatlantic/redis-py
python
def read_response(self): try: response = self._parser.read_response() except: self.disconnect() raise if isinstance(response, ResponseError): raise response return response
def encode(self, value): 'Return a bytestring representation of the value' if isinstance(value, Token): return b(value.value) elif isinstance(value, bytes): return value elif isinstance(value, (int, long)): value = b(str(value)) elif isinstance(value, float): value = b(repr(value)) elif (not isinstance(value, basestring)): value = str(value) if isinstance(value, unicode): value = value.encode(self.encoding, self.encoding_errors) return value
5,629,754,806,404,947,000
Return a bytestring representation of the value
redis/connection.py
encode
theatlantic/redis-py
python
def encode(self, value): if isinstance(value, Token): return b(value.value) elif isinstance(value, bytes): return value elif isinstance(value, (int, long)): value = b(str(value)) elif isinstance(value, float): value = b(repr(value)) elif (not isinstance(value, basestring)): value = str(value) if isinstance(value, unicode): value = value.encode(self.encoding, self.encoding_errors) return value
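A Python 3 reading of the conversions encode() performs (the original also routes Python 2 long/unicode through compat shims such as b() and basestring, omitted here):

def encode_value(value, encoding='utf-8'):
    if isinstance(value, bytes):
        return value
    if isinstance(value, int):
        return str(value).encode()
    if isinstance(value, float):
        return repr(value).encode()  # repr keeps full float precision
    return str(value).encode(encoding)

# encode_value(1.5) -> b'1.5'; encode_value('key') -> b'key'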
def pack_command(self, *args): 'Pack a series of arguments into the Redis protocol' output = [] command = args[0] if (' ' in command): args = (tuple([Token(s) for s in command.split(' ')]) + args[1:]) else: args = ((Token(command),) + args[1:]) buff = SYM_EMPTY.join((SYM_STAR, b(str(len(args))), SYM_CRLF)) for arg in imap(self.encode, args): if ((len(buff) > 6000) or (len(arg) > 6000)): buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF)) output.append(buff) output.append(arg) buff = SYM_CRLF else: buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF, arg, SYM_CRLF)) output.append(buff) return output
4,355,786,805,303,592,400
Pack a series of arguments into the Redis protocol
redis/connection.py
pack_command
theatlantic/redis-py
python
def pack_command(self, *args): output = [] command = args[0] if (' ' in command): args = (tuple([Token(s) for s in command.split(' ')]) + args[1:]) else: args = ((Token(command),) + args[1:]) buff = SYM_EMPTY.join((SYM_STAR, b(str(len(args))), SYM_CRLF)) for arg in imap(self.encode, args): if ((len(buff) > 6000) or (len(arg) > 6000)): buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF)) output.append(buff) output.append(arg) buff = SYM_CRLF else: buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF, arg, SYM_CRLF)) output.append(buff) return output
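What pack_command emits is RESP framing: every command is an array header (*N) followed by length-prefixed bulk strings ($len). A compact sketch that skips the chunking of oversized buffers (bytes %-formatting requires Python 3.5+):

def pack(*args):
    out = b'*%d\r\n' % len(args)
    for arg in (a if isinstance(a, bytes) else str(a).encode() for a in args):
        out += b'$%d\r\n%s\r\n' % (len(arg), arg)
    return out

# pack('SET', 'key', 'value')
# -> b'*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n'

The 6000-byte threshold in the real method exists so that very large values are yielded as separate chunks instead of being copied into one ever-growing buffer.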
def pack_commands(self, commands): 'Pack multiple commands into the Redis protocol' output = [] pieces = [] buffer_length = 0 for cmd in commands: for chunk in self.pack_command(*cmd): pieces.append(chunk) buffer_length += len(chunk) if (buffer_length > 6000): output.append(SYM_EMPTY.join(pieces)) buffer_length = 0 pieces = [] if pieces: output.append(SYM_EMPTY.join(pieces)) return output
-8,819,228,036,080,144,000
Pack multiple commands into the Redis protocol
redis/connection.py
pack_commands
theatlantic/redis-py
python
def pack_commands(self, commands): output = [] pieces = [] buffer_length = 0 for cmd in commands: for chunk in self.pack_command(*cmd): pieces.append(chunk) buffer_length += len(chunk) if (buffer_length > 6000): output.append(SYM_EMPTY.join(pieces)) buffer_length = 0 pieces = [] if pieces: output.append(SYM_EMPTY.join(pieces)) return output
def _connect(self): 'Wrap the socket with SSL support' sock = super(SSLConnection, self)._connect() sock = ssl.wrap_socket(sock, cert_reqs=self.cert_reqs, keyfile=self.keyfile, certfile=self.certfile, ca_certs=self.ca_certs) return sock
-2,325,370,612,891,198,500
Wrap the socket with SSL support
redis/connection.py
_connect
theatlantic/redis-py
python
def _connect(self): sock = super(SSLConnection, self)._connect() sock = ssl.wrap_socket(sock, cert_reqs=self.cert_reqs, keyfile=self.keyfile, certfile=self.certfile, ca_certs=self.ca_certs) return sock
def _connect(self): 'Create a Unix domain socket connection' sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.settimeout(self.socket_timeout) sock.connect(self.path) return sock
2,119,124,442,193,394,700
Create a Unix domain socket connection
redis/connection.py
_connect
theatlantic/redis-py
python
def _connect(self): sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.settimeout(self.socket_timeout) sock.connect(self.path) return sock
@classmethod def from_url(cls, url, db=None, decode_components=False, **kwargs): "\n Return a connection pool configured from the given URL.\n\n For example::\n\n redis://[:password]@localhost:6379/0\n rediss://[:password]@localhost:6379/0\n unix://[:password]@/path/to/socket.sock?db=0\n\n Three URL schemes are supported:\n redis:// creates a normal TCP socket connection\n rediss:// creates a SSL wrapped TCP socket connection\n unix:// creates a Unix Domain Socket connection\n\n There are several ways to specify a database number. The parse function\n will return the first specified option:\n 1. A ``db`` querystring option, e.g. redis://localhost?db=0\n 2. If using the redis:// scheme, the path argument of the url, e.g.\n redis://localhost/0\n 3. The ``db`` argument to this function.\n\n If none of these options are specified, db=0 is used.\n\n The ``decode_components`` argument allows this function to work with\n percent-encoded URLs. If this argument is set to ``True`` all ``%xx``\n escapes will be replaced by their single-character equivalents after\n the URL has been parsed. This only applies to the ``hostname``,\n ``path``, and ``password`` components.\n\n Any additional querystring arguments and keyword arguments will be\n passed along to the ConnectionPool class's initializer. In the case\n of conflicting arguments, querystring arguments always win.\n " url_string = url url = urlparse(url) qs = '' if (('?' in url.path) and (not url.query)): qs = url.path.split('?', 1)[1] url = urlparse(url_string[:(- (len(qs) + 1))]) else: qs = url.query url_options = {} for (name, value) in iteritems(parse_qs(qs)): if (value and (len(value) > 0)): url_options[name] = value[0] if decode_components: password = (unquote(url.password) if url.password else None) path = (unquote(url.path) if url.path else None) hostname = (unquote(url.hostname) if url.hostname else None) else: password = url.password path = url.path hostname = url.hostname if (url.scheme == 'unix'): url_options.update({'password': password, 'path': path, 'connection_class': UnixDomainSocketConnection}) else: url_options.update({'host': hostname, 'port': int((url.port or 6379)), 'password': password}) if (('db' not in url_options) and path): try: url_options['db'] = int(path.replace('/', '')) except (AttributeError, ValueError): pass if (url.scheme == 'rediss'): url_options['connection_class'] = SSLConnection url_options['db'] = int(url_options.get('db', (db or 0))) kwargs.update(url_options) if ('charset' in kwargs): warnings.warn(DeprecationWarning('"charset" is deprecated. Use "encoding" instead')) kwargs['encoding'] = kwargs.pop('charset') if ('errors' in kwargs): warnings.warn(DeprecationWarning('"errors" is deprecated. Use "encoding_errors" instead')) kwargs['encoding_errors'] = kwargs.pop('errors') return cls(**kwargs)
8,498,954,111,016,500,000
Return a connection pool configured from the given URL. For example:: redis://[:password]@localhost:6379/0 rediss://[:password]@localhost:6379/0 unix://[:password]@/path/to/socket.sock?db=0 Three URL schemes are supported: redis:// creates a normal TCP socket connection rediss:// creates a SSL wrapped TCP socket connection unix:// creates a Unix Domain Socket connection There are several ways to specify a database number. The parse function will return the first specified option: 1. A ``db`` querystring option, e.g. redis://localhost?db=0 2. If using the redis:// scheme, the path argument of the url, e.g. redis://localhost/0 3. The ``db`` argument to this function. If none of these options are specified, db=0 is used. The ``decode_components`` argument allows this function to work with percent-encoded URLs. If this argument is set to ``True`` all ``%xx`` escapes will be replaced by their single-character equivalents after the URL has been parsed. This only applies to the ``hostname``, ``path``, and ``password`` components. Any additional querystring arguments and keyword arguments will be passed along to the ConnectionPool class's initializer. In the case of conflicting arguments, querystring arguments always win.
redis/connection.py
from_url
theatlantic/redis-py
python
@classmethod def from_url(cls, url, db=None, decode_components=False, **kwargs): "\n Return a connection pool configured from the given URL.\n\n For example::\n\n redis://[:password]@localhost:6379/0\n rediss://[:password]@localhost:6379/0\n unix://[:password]@/path/to/socket.sock?db=0\n\n Three URL schemes are supported:\n redis:// creates a normal TCP socket connection\n rediss:// creates a SSL wrapped TCP socket connection\n unix:// creates a Unix Domain Socket connection\n\n There are several ways to specify a database number. The parse function\n will return the first specified option:\n 1. A ``db`` querystring option, e.g. redis://localhost?db=0\n 2. If using the redis:// scheme, the path argument of the url, e.g.\n redis://localhost/0\n 3. The ``db`` argument to this function.\n\n If none of these options are specified, db=0 is used.\n\n The ``decode_components`` argument allows this function to work with\n percent-encoded URLs. If this argument is set to ``True`` all ``%xx``\n escapes will be replaced by their single-character equivalents after\n the URL has been parsed. This only applies to the ``hostname``,\n ``path``, and ``password`` components.\n\n Any additional querystring arguments and keyword arguments will be\n passed along to the ConnectionPool class's initializer. In the case\n of conflicting arguments, querystring arguments always win.\n " url_string = url url = urlparse(url) qs = '' if (('?' in url.path) and (not url.query)): qs = url.path.split('?', 1)[1] url = urlparse(url_string[:(- (len(qs) + 1))]) else: qs = url.query url_options = {} for (name, value) in iteritems(parse_qs(qs)): if (value and (len(value) > 0)): url_options[name] = value[0] if decode_components: password = (unquote(url.password) if url.password else None) path = (unquote(url.path) if url.path else None) hostname = (unquote(url.hostname) if url.hostname else None) else: password = url.password path = url.path hostname = url.hostname if (url.scheme == 'unix'): url_options.update({'password': password, 'path': path, 'connection_class': UnixDomainSocketConnection}) else: url_options.update({'host': hostname, 'port': int((url.port or 6379)), 'password': password}) if (('db' not in url_options) and path): try: url_options['db'] = int(path.replace('/', '')) except (AttributeError, ValueError): pass if (url.scheme == 'rediss'): url_options['connection_class'] = SSLConnection url_options['db'] = int(url_options.get('db', (db or 0))) kwargs.update(url_options) if ('charset' in kwargs): warnings.warn(DeprecationWarning('"charset" is deprecated. Use "encoding" instead')) kwargs['encoding'] = kwargs.pop('charset') if ('errors' in kwargs): warnings.warn(DeprecationWarning('"errors" is deprecated. Use "encoding_errors" instead')) kwargs['encoding_errors'] = kwargs.pop('errors') return cls(**kwargs)
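The db-resolution precedence spelled out in the docstring (querystring, then URL path, then the function argument) can be shown in isolation with the standard library:

from urllib.parse import urlparse, parse_qs

def resolve_db(url, db=None):
    parts = urlparse(url)
    qs = parse_qs(parts.query)
    if 'db' in qs:
        return int(qs['db'][0])                   # 1. querystring wins
    if parts.path:
        try:
            return int(parts.path.replace('/', ''))  # 2. then the path
        except ValueError:
            pass
    return db or 0                                # 3. then the argument, else 0

# resolve_db('redis://localhost/2') -> 2; resolve_db('redis://localhost/2?db=5') -> 5

Note that current Python 3 splits the querystring for any scheme; the "'?' in url.path" fallback in from_url exists for older interpreters that did not do this for custom schemes like redis://.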
def __init__(self, connection_class=Connection, max_connections=None, **connection_kwargs): "\n Create a connection pool. If max_connections is set, then this\n object raises redis.ConnectionError when the pool's limit is reached.\n\n By default, TCP connections are created connection_class is specified.\n Use redis.UnixDomainSocketConnection for unix sockets.\n\n Any additional keyword arguments are passed to the constructor of\n connection_class.\n " max_connections = (max_connections or (2 ** 31)) if ((not isinstance(max_connections, (int, long))) or (max_connections < 0)): raise ValueError('"max_connections" must be a positive integer') self.connection_class = connection_class self.connection_kwargs = connection_kwargs self.max_connections = max_connections self.reset()
-376,968,440,516,761,660
Create a connection pool. If max_connections is set, then this object raises redis.ConnectionError when the pool's limit is reached. By default, TCP connections are created unless connection_class is specified. Use redis.UnixDomainSocketConnection for unix sockets. Any additional keyword arguments are passed to the constructor of connection_class.
redis/connection.py
__init__
theatlantic/redis-py
python
def __init__(self, connection_class=Connection, max_connections=None, **connection_kwargs): "\n Create a connection pool. If max_connections is set, then this\n object raises redis.ConnectionError when the pool's limit is reached.\n\n By default, TCP connections are created connection_class is specified.\n Use redis.UnixDomainSocketConnection for unix sockets.\n\n Any additional keyword arguments are passed to the constructor of\n connection_class.\n " max_connections = (max_connections or (2 ** 31)) if ((not isinstance(max_connections, (int, long))) or (max_connections < 0)): raise ValueError('"max_connections" must be a positive integer') self.connection_class = connection_class self.connection_kwargs = connection_kwargs self.max_connections = max_connections self.reset()
def get_connection(self, command_name, *keys, **options): 'Get a connection from the pool' self._checkpid() try: connection = self._available_connections.pop() except IndexError: connection = self.make_connection() self._in_use_connections.add(connection) return connection
3,067,355,640,920,227,300
Get a connection from the pool
redis/connection.py
get_connection
theatlantic/redis-py
python
def get_connection(self, command_name, *keys, **options): self._checkpid() try: connection = self._available_connections.pop() except IndexError: connection = self.make_connection() self._in_use_connections.add(connection) return connection
def make_connection(self): 'Create a new connection' if (self._created_connections >= self.max_connections): raise ConnectionError('Too many connections') self._created_connections += 1 return self.connection_class(**self.connection_kwargs)
6,708,195,135,593,214,000
Create a new connection
redis/connection.py
make_connection
theatlantic/redis-py
python
def make_connection(self): if (self._created_connections >= self.max_connections): raise ConnectionError('Too many connections') self._created_connections += 1 return self.connection_class(**self.connection_kwargs)
def release(self, connection): 'Releases the connection back to the pool' self._checkpid() if (connection.pid != self.pid): return self._in_use_connections.remove(connection) self._available_connections.append(connection)
8,546,149,006,600,573,000
Releases the connection back to the pool
redis/connection.py
release
theatlantic/redis-py
python
def release(self, connection): self._checkpid() if (connection.pid != self.pid): return self._in_use_connections.remove(connection) self._available_connections.append(connection)
def disconnect(self): 'Disconnects all connections in the pool' all_conns = chain(self._available_connections, self._in_use_connections) for connection in all_conns: connection.disconnect()
4,989,466,151,080,915,000
Disconnects all connections in the pool
redis/connection.py
disconnect
theatlantic/redis-py
python
def disconnect(self): all_conns = chain(self._available_connections, self._in_use_connections) for connection in all_conns: connection.disconnect()
def make_connection(self): 'Make a fresh connection.' connection = self.connection_class(**self.connection_kwargs) self._connections.append(connection) return connection
-3,766,099,365,760,060,000
Make a fresh connection.
redis/connection.py
make_connection
theatlantic/redis-py
python
def make_connection(self): connection = self.connection_class(**self.connection_kwargs) self._connections.append(connection) return connection
def get_connection(self, command_name, *keys, **options): '\n Get a connection, blocking for ``self.timeout`` until a connection\n is available from the pool.\n\n If the connection returned is ``None`` then creates a new connection.\n Because we use a last-in first-out queue, the existing connections\n (having been returned to the pool after the initial ``None`` values\n were added) will be returned before ``None`` values. This means we only\n create new connections when we need to, i.e.: the actual number of\n connections will only increase in response to demand.\n ' self._checkpid() connection = None try: connection = self.pool.get(block=True, timeout=self.timeout) except Empty: raise ConnectionError('No connection available.') if (connection is None): connection = self.make_connection() return connection
1,012,438,289,656,912,300
Get a connection, blocking for ``self.timeout`` until a connection is available from the pool. If the connection returned is ``None``, a new connection is created. Because we use a last-in first-out queue, the existing connections (having been returned to the pool after the initial ``None`` values were added) will be returned before ``None`` values. This means we only create new connections when we need to, i.e. the actual number of connections will only increase in response to demand.
redis/connection.py
get_connection
theatlantic/redis-py
python
def get_connection(self, command_name, *keys, **options): '\n Get a connection, blocking for ``self.timeout`` until a connection\n is available from the pool.\n\n If the connection returned is ``None`` then creates a new connection.\n Because we use a last-in first-out queue, the existing connections\n (having been returned to the pool after the initial ``None`` values\n were added) will be returned before ``None`` values. This means we only\n create new connections when we need to, i.e.: the actual number of\n connections will only increase in response to demand.\n ' self._checkpid() connection = None try: connection = self.pool.get(block=True, timeout=self.timeout) except Empty: raise ConnectionError('No connection available.') if (connection is None): connection = self.make_connection() return connection
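The queue-of-None trick described in the docstring, demonstrated directly with LifoQueue: placeholders surface only after all real connections are checked out, so a released connection is always reused before a new one is created.

from queue import LifoQueue

pool = LifoQueue(maxsize=4)
for _ in range(4):
    pool.put_nowait(None)        # placeholders: permission to create a connection

slot = pool.get(timeout=1)       # fresh pool -> None
conn = object() if slot is None else slot   # lazily "make" a connection
pool.put_nowait(conn)            # release: the real object now sits on top
assert pool.get(timeout=1) is conn          # LIFO hands it back before any None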
def release(self, connection): 'Releases the connection back to the pool.' self._checkpid() if (connection.pid != self.pid): return try: self.pool.put_nowait(connection) except Full: pass
4,498,462,219,287,807,000
Releases the connection back to the pool.
redis/connection.py
release
theatlantic/redis-py
python
def release(self, connection): self._checkpid() if (connection.pid != self.pid): return try: self.pool.put_nowait(connection) except Full: pass
def disconnect(self): 'Disconnects all connections in the pool.' for connection in self._connections: connection.disconnect()
8,128,324,116,068,593,000
Disconnects all connections in the pool.
redis/connection.py
disconnect
theatlantic/redis-py
python
def disconnect(self): for connection in self._connections: connection.disconnect()
def input_dictionary(run_str): ' Parses the `input` block and builds a\n dictionary of keywords and their corresponding values.\n\n :param run_str: input string of the run.dat block\n :type run_str: str\n :rtype: dict[str: obj]\n ' inp_block = ioformat.ptt.end_block(run_str, 'input', footer='input') inp_dct = ioformat.ptt.keyword_dct_from_block(inp_block) inp_dct = automol.util.dict_.right_update(defaults_from_val_dct(RUN_INP_VAL_DCT), inp_dct) check_dct1(inp_dct, RUN_INP_VAL_DCT, RUN_INP_REQ, 'Run-Input') return inp_dct
655,159,734,187,371,000
Parses the `input` block and builds a dictionary of keywords and their corresponding values. :param run_str: input string of the run.dat block :type run_str: str :rtype: dict[str: obj]
mechlib/amech_io/parser/run.py
input_dictionary
Auto-Mech/moldriver
python
def input_dictionary(run_str): ' Parses the `input` block and builds a\n dictionary of keywords and their corresponding values.\n\n :param run_str: input string of the run.dat block\n :type run_str: str\n :rtype: dict[str: obj]\n ' inp_block = ioformat.ptt.end_block(run_str, 'input', footer='input') inp_dct = ioformat.ptt.keyword_dct_from_block(inp_block) inp_dct = automol.util.dict_.right_update(defaults_from_val_dct(RUN_INP_VAL_DCT), inp_dct) check_dct1(inp_dct, RUN_INP_VAL_DCT, RUN_INP_REQ, 'Run-Input') return inp_dct
def chem_idxs(run_str): ' Parses the `pes` block of the run.dat file and\n builds a dictionary of the PESs and corresponding channels the\n user wishes to run.\n\n Parses the `spc` block of the run.dat file and\n builds a dictionary of the species the\n user wishes to run.\n\n May break if idx is given on two lines of string.\n\n :param run_str: string of the run.dat input file\n :type run_str: str\n :returns: ({pes_idx: list of channel_idxs}, {1: list of species idxs})\n :rtype: dict[str: tuple]\n ' pes_block = ioformat.ptt.end_block(run_str, 'pes', footer='pes') if (pes_block is not None): _pes_idxs = {} for line in pes_block.strip().splitlines(): [pes_nums, chn_nums] = line.split(':') _pes_nums = ioformat.ptt.idx_lst_from_line(pes_nums) _chn_nums = ioformat.ptt.idx_lst_from_line(chn_nums) for idx in _pes_nums: _pes_idxs.update({(idx - 1): tuple(((val - 1) for val in _chn_nums))}) else: _pes_idxs = None spc_block = ioformat.ptt.end_block(run_str, 'spc', footer='spc') if (spc_block is not None): _idxs = () for line in spc_block.splitlines(): _idxs += ioformat.ptt.idx_lst_from_line(line) _spc_idxs = {1: tuple(((val - 1) for val in _idxs))} else: _spc_idxs = None if ((_pes_idxs is None) and (_spc_idxs is None)): error_message('No pes or spc section given in run.dat file. Quitting') sys.exit() return (_pes_idxs, _spc_idxs)
487,741,308,159,634,100
Parses the `pes` block of the run.dat file and builds a dictionary of the PESs and corresponding channels the user wishes to run. Parses the `spc` block of the run.dat file and builds a dictionary of the species the user wishes to run. May break if an idx specification is split across two lines of the string. :param run_str: string of the run.dat input file :type run_str: str :returns: ({pes_idx: list of channel_idxs}, {1: list of species idxs}) :rtype: dict[str: tuple]
mechlib/amech_io/parser/run.py
chem_idxs
Auto-Mech/moldriver
python
def chem_idxs(run_str): ' Parses the `pes` block of the run.dat file and\n builds a dictionary of the PESs and corresponding channels the\n user wishes to run.\n\n Parses the `spc` block of the run.dat file and\n builds a dictionary of the species the\n user wishes to run.\n\n May break if idx is given on two lines of string.\n\n :param run_str: string of the run.dat input file\n :type run_str: str\n :returns: ({pes_idx: list of channel_idxs}, {1: list of species idxs})\n :rtype: dict[str: tuple]\n ' pes_block = ioformat.ptt.end_block(run_str, 'pes', footer='pes') if (pes_block is not None): _pes_idxs = {} for line in pes_block.strip().splitlines(): [pes_nums, chn_nums] = line.split(':') _pes_nums = ioformat.ptt.idx_lst_from_line(pes_nums) _chn_nums = ioformat.ptt.idx_lst_from_line(chn_nums) for idx in _pes_nums: _pes_idxs.update({(idx - 1): tuple(((val - 1) for val in _chn_nums))}) else: _pes_idxs = None spc_block = ioformat.ptt.end_block(run_str, 'spc', footer='spc') if (spc_block is not None): _idxs = () for line in spc_block.splitlines(): _idxs += ioformat.ptt.idx_lst_from_line(line) _spc_idxs = {1: tuple(((val - 1) for val in _idxs))} else: _spc_idxs = None if ((_pes_idxs is None) and (_spc_idxs is None)): error_message('No pes or spc section given in run.dat file. Quitting') sys.exit() return (_pes_idxs, _spc_idxs)
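chem_idxs turns 1-indexed "pes: channels" lines into a 0-indexed dictionary. A worked example of the core transformation (the input lines are hypothetical, and plain comma splitting stands in for idx_lst_from_line, which in the real code also handles index ranges):

pes_block = """1: 1,2
3: 2"""

pes_idxs = {}
for line in pes_block.strip().splitlines():
    pes_nums, chn_nums = line.split(':')
    for idx in (int(n) for n in pes_nums.split(',')):
        # shift both PES and channel indices from 1-based input to 0-based
        pes_idxs[idx - 1] = tuple(int(v) - 1 for v in chn_nums.split(','))

assert pes_idxs == {0: (0, 1), 2: (1,)}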
def extract_task(tsk, tsk_lst): ' Searches for a task in the task lst and if found:\n the corresponding keywords and values will be returned\n\n Function only works if task is present in the list one time.\n\n :param tsk: task to extract information for\n :type tsk: str\n :param tsk_lst: list of tasks to run for some driver\n :type tsk_lst: tuple(tuple(str/dict))\n :rtype: tuple(str/dict)\n ' tsk_inf = None for _tsk_inf in tsk_lst: if any(((x == tsk) for x in _tsk_inf)): tsk_inf = _tsk_inf break return tsk_inf
5,581,091,852,028,295,000
Searches for a task in the task lst and if found: the corresponding keywords and values will be returned Function only works if task is present in the list one time. :param tsk: task to extract information for :type tsk: str :param tsk_lst: list of tasks to run for some driver :type tsk_lst: tuple(tuple(str/dict)) :rtype: tuple(str/dict)
mechlib/amech_io/parser/run.py
extract_task
Auto-Mech/moldriver
python
def extract_task(tsk, tsk_lst): ' Searches for a task in the task lst and if found:\n the corresponding keywords and values will be returned\n\n Function only works if task is present in the list one time.\n\n :param tsk: task to extract information for\n :type tsk: str\n :param tsk_lst: list of tasks to run for some driver\n :type tsk_lst: tuple(tuple(str/dict))\n :rtype: tuple(str/dict)\n ' tsk_inf = None for _tsk_inf in tsk_lst: if any(((x == tsk) for x in _tsk_inf)): tsk_inf = _tsk_inf break return tsk_inf
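A small usage sketch of extract_task on the [obj, task, keyword_dct] entries this parser produces (the task names are hypothetical; the function body is condensed from the record above):

def extract_task(tsk, tsk_lst):
    for _tsk_inf in tsk_lst:
        if any(x == tsk for x in _tsk_inf):
            return _tsk_inf   # first match only, per the docstring caveat
    return None

tsk_lst = [
    ['spc', 'init_geom', {'runlvl': 'lvl1'}],
    ['ts', 'conf_energy', {'runlvl': 'lvl2'}],
]
assert extract_task('conf_energy', tsk_lst) == ['ts', 'conf_energy', {'runlvl': 'lvl2'}]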
def tasks(run_str, thy_dct): ' runstr\n ' es_block = ioformat.ptt.end_block(run_str, 'els', footer='els') trans_block = ioformat.ptt.end_block(run_str, 'trans', footer='trans') therm_block = ioformat.ptt.end_block(run_str, 'thermo', footer='thermo') ktp_block = ioformat.ptt.end_block(run_str, 'ktp', footer='ktp') proc_block = ioformat.ptt.end_block(run_str, 'proc', footer='proc') es_tsks = _tsk_lst(es_block, 3) therm_tsks = _tsk_lst(therm_block, 2) ktp_tsks = _tsk_lst(ktp_block, 2) trans_tsks = _tsk_lst(trans_block, 3) proc_tsks = _tsk_lst(proc_block, 3) es_tsks = _tsk_defaults(es_tsks) therm_tsks = _tsk_defaults(therm_tsks) ktp_tsks = _tsk_defaults(ktp_tsks) trans_tsks = _tsk_defaults(trans_tsks) proc_tsks = _tsk_defaults(proc_tsks) _check_tsks(es_tsks, thy_dct) _check_tsks(therm_tsks, thy_dct) _check_tsks(ktp_tsks, thy_dct) _check_tsks(trans_tsks, thy_dct) _check_tsks(proc_tsks, thy_dct) tsk_dct = {'es': es_tsks, 'thermo': therm_tsks, 'ktp': ktp_tsks, 'trans': trans_tsks, 'proc': proc_tsks} return tsk_dct
-5,821,437,077,531,401,000
Parse the task blocks (els, thermo, ktp, trans, proc) from the run.dat string, apply default keyword values, validate them against the theory dictionary, and build the driver task dictionary.
mechlib/amech_io/parser/run.py
tasks
Auto-Mech/moldriver
python
def tasks(run_str, thy_dct): ' \n ' es_block = ioformat.ptt.end_block(run_str, 'els', footer='els') trans_block = ioformat.ptt.end_block(run_str, 'trans', footer='trans') therm_block = ioformat.ptt.end_block(run_str, 'thermo', footer='thermo') ktp_block = ioformat.ptt.end_block(run_str, 'ktp', footer='ktp') proc_block = ioformat.ptt.end_block(run_str, 'proc', footer='proc') es_tsks = _tsk_lst(es_block, 3) therm_tsks = _tsk_lst(therm_block, 2) ktp_tsks = _tsk_lst(ktp_block, 2) trans_tsks = _tsk_lst(trans_block, 3) proc_tsks = _tsk_lst(proc_block, 3) es_tsks = _tsk_defaults(es_tsks) therm_tsks = _tsk_defaults(therm_tsks) ktp_tsks = _tsk_defaults(ktp_tsks) trans_tsks = _tsk_defaults(trans_tsks) proc_tsks = _tsk_defaults(proc_tsks) _check_tsks(es_tsks, thy_dct) _check_tsks(therm_tsks, thy_dct) _check_tsks(ktp_tsks, thy_dct) _check_tsks(trans_tsks, thy_dct) _check_tsks(proc_tsks, thy_dct) tsk_dct = {'es': es_tsks, 'thermo': therm_tsks, 'ktp': ktp_tsks, 'trans': trans_tsks, 'proc': proc_tsks} return tsk_dct
def _tsk_lst(tsk_str, num): ' Set the sequence of electronic structure tasks for a given\n species or PESs\n ' if (tsk_str is not None): tsks = [] tsk_str = ioformat.remove_whitespace_from_string(tsk_str) for line in tsk_str.splitlines(): _tsk = _split_line(line, num) tsks.append(_tsk) mod_tsks = tsks else: mod_tsks = None return mod_tsks
8,422,428,378,274,636,000
Set the sequence of electronic structure tasks for a given species or PES
mechlib/amech_io/parser/run.py
_tsk_lst
Auto-Mech/moldriver
python
def _tsk_lst(tsk_str, num): ' Set the sequence of electronic structure tasks for a given\n species or PESs\n ' if (tsk_str is not None): tsks = [] tsk_str = ioformat.remove_whitespace_from_string(tsk_str) for line in tsk_str.splitlines(): _tsk = _split_line(line, num) tsks.append(_tsk) mod_tsks = tsks else: mod_tsks = None return mod_tsks
def _expand_tsks(tsks_lst): ' Loops over the driver task list and checks if each task is a\n macro-task that should be expanded into sub-tasks.\n\n Right now, it splits all obj tasks into spc and ts\n\n :param tsk_lst: list of tasks to run for some driver\n :type tsk_lst: tuple(tuple(str/dict))\n :rtype: tuple(str/dict)\n ' mod_tsks_lst = [] for tsk_lst in tsks_lst: [obj, tsk, dct] = tsk_lst objs = (['spc', 'ts'] if (obj == 'all') else [obj]) for obj in objs: mod_tsks_lst.append([obj, tsk, dct]) return mod_tsks_lst
8,819,405,878,303,621,000
Loops over the driver task list and checks if each task is a macro-task that should be expanded into sub-tasks. Right now, it splits all obj tasks into spc and ts :param tsk_lst: list of tasks to run for some driver :type tsk_lst: tuple(tuple(str/dict)) :rtype: tuple(str/dict)
mechlib/amech_io/parser/run.py
_expand_tsks
Auto-Mech/moldriver
python
def _expand_tsks(tsks_lst): ' Loops over the driver task list and checks if each task is a\n macro-task that should be expanded into sub-tasks.\n\n Right now, it splits all obj tasks into spc and ts\n\n :param tsk_lst: list of tasks to run for some driver\n :type tsk_lst: tuple(tuple(str/dict))\n :rtype: tuple(str/dict)\n ' mod_tsks_lst = [] for tsk_lst in tsks_lst: [obj, tsk, dct] = tsk_lst objs = (['spc', 'ts'] if (obj == 'all') else [obj]) for obj in objs: mod_tsks_lst.append([obj, tsk, dct]) return mod_tsks_lst
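The 'all' macro-object expansion in _expand_tsks, traced on a hypothetical task list:

tsks_lst = [['all', 'conf_samp', {'retryfail': True}]]

mod = []
for obj, tsk, dct in tsks_lst:
    # 'all' fans out into one entry per supported object type
    for o in (['spc', 'ts'] if obj == 'all' else [obj]):
        mod.append([o, tsk, dct])

assert mod == [['spc', 'conf_samp', {'retryfail': True}],
               ['ts', 'conf_samp', {'retryfail': True}]]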
def _tsk_defaults(tsk_lst): ' Fill out the keyword dictionaries for various task lists with\n default values\n ' if (tsk_lst is not None): mod_tsk_lst = [] for _tsk_lst in tsk_lst: keyword_dct = _tsk_lst[(- 1)] tsk = _tsk_lst[:(- 1)][(- 1)] default_dct = defaults_from_key_val_dcts(tsk, TSK_KEY_DCT, TSK_VAL_DCT) new_key_dct = automol.util.dict_.right_update(default_dct, keyword_dct) mod_lst = (_tsk_lst[:(- 1)] + [new_key_dct]) mod_tsk_lst.append(mod_lst) else: mod_tsk_lst = None return mod_tsk_lst
-5,588,439,981,594,771,000
Fill out the keyword dictionaries for various task lists with default values
mechlib/amech_io/parser/run.py
_tsk_defaults
Auto-Mech/moldriver
python
def _tsk_defaults(tsk_lst): ' Fill out the keyword dictionaries for various task lists with\n default values\n ' if (tsk_lst is not None): mod_tsk_lst = [] for _tsk_lst in tsk_lst: keyword_dct = _tsk_lst[(- 1)] tsk = _tsk_lst[:(- 1)][(- 1)] default_dct = defaults_from_key_val_dcts(tsk, TSK_KEY_DCT, TSK_VAL_DCT) new_key_dct = automol.util.dict_.right_update(default_dct, keyword_dct) mod_lst = (_tsk_lst[:(- 1)] + [new_key_dct]) mod_tsk_lst.append(mod_lst) else: mod_tsk_lst = None return mod_tsk_lst
def _check_tsks(tsk_lsts, thy_dct): ' Loop over all of the tasks, add default keywords and parameters\n and assesses if all the input is valid\n ' if (tsk_lsts is not None): for tsk_lst in tsk_lsts: _tsk = tsk_lst[:(- 1)] if (len(_tsk) == 2): (obj, tsk) = (_tsk[0], _tsk[1]) else: (obj, tsk) = (None, _tsk[0]) key_dct = tsk_lst[(- 1)] if (obj is not None): obj_lst = (SUPP_OBJS if (obj == 'all') else (obj,)) for _obj in obj_lst: if (_obj not in TSK_KEY_DCT[tsk][0]): error_message(f'obj {obj}, not allowed for {tsk}') sys.exit() check_dct1(key_dct, TSK_VAL_DCT, (), 'Task') check_thy_lvls(key_dct, thy_dct)
-1,710,753,616,298,786,600
Loop over all of the tasks, add default keywords and parameters, and assess whether all of the input is valid
mechlib/amech_io/parser/run.py
_check_tsks
Auto-Mech/moldriver
python
def _check_tsks(tsk_lsts, thy_dct): ' Loop over all of the tasks, add default keywords and parameters\n and assesses if all the input is valid\n ' if (tsk_lsts is not None): for tsk_lst in tsk_lsts: _tsk = tsk_lst[:(- 1)] if (len(_tsk) == 2): (obj, tsk) = (_tsk[0], _tsk[1]) else: (obj, tsk) = (None, _tsk[0]) key_dct = tsk_lst[(- 1)] if (obj is not None): obj_lst = (SUPP_OBJS if (obj == 'all') else (obj,)) for _obj in obj_lst: if (_obj not in TSK_KEY_DCT[tsk][0]): error_message(f'obj {obj}, not allowed for {tsk}') sys.exit() check_dct1(key_dct, TSK_VAL_DCT, (), 'Task') check_thy_lvls(key_dct, thy_dct)
def _split_line(line, num): ' Split a line\n ' line = line.split() if (num == 3): (tsk, key_lst) = (line[:2], line[2:]) elif (num == 2): (tsk, key_lst) = (line[:1], line[1:]) key_dct = ioformat.ptt.keyword_dct_from_block('\n'.join(key_lst)) return (tsk + [key_dct])
-5,998,307,887,310,728,000
Split a line
mechlib/amech_io/parser/run.py
_split_line
Auto-Mech/moldriver
python
def _split_line(line, num): ' \n ' line = line.split() if (num == 3): (tsk, key_lst) = (line[:2], line[2:]) elif (num == 2): (tsk, key_lst) = (line[:1], line[1:]) key_dct = ioformat.ptt.keyword_dct_from_block('\n'.join(key_lst)) return (tsk + [key_dct])
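How _split_line divides a task line: with num=3 the first two tokens name the object and the task and the remaining tokens are keywords; with num=2 only the first token is the task. A hedged trace (the key=value keyword syntax is an assumption about what keyword_dct_from_block accepts):

line = 'spc conf_samp runlvl=lvl1 retryfail=False'

tokens = line.split()
tsk, key_lst = tokens[:2], tokens[2:]          # the num == 3 branch
# keyword_dct_from_block would then parse the joined keyword tokens into a
# dict, e.g. {'runlvl': 'lvl1', 'retryfail': False}
print(tsk)      # ['spc', 'conf_samp']
print(key_lst)  # ['runlvl=lvl1', 'retryfail=False']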
def check_inputs(tsk_dct, pes_dct, pes_mod_dct, spc_mod_dct): ' Check if inputs placed that is required\n ' if (tsk_dct['ktp'] or tsk_dct['thermo']): if (pes_mod_dct is None): error_message('kTPDriver or Thermo Requested. \n However no kin model provided in models.dat\n Exiting MechDriver...') sys.exit() if (spc_mod_dct is None): error_message('kTPDriver or Thermo Requested. \n However no spc model provided in models.dat\n Exiting MechDriver...') sys.exit() if tsk_dct['ktp']: if (pes_dct is None): error_message('kTPDriver Requested. \n However no reaction channels provided in mechanism.dat\n Exiting MechDriver...') sys.exit()
6,198,580,794,665,911,000
Check that all required inputs have been provided
mechlib/amech_io/parser/run.py
check_inputs
Auto-Mech/moldriver
python
def check_inputs(tsk_dct, pes_dct, pes_mod_dct, spc_mod_dct): ' \n ' if (tsk_dct['ktp'] or tsk_dct['thermo']): if (pes_mod_dct is None): error_message('kTPDriver or Thermo Requested. \n However no kin model provided in models.dat\n Exiting MechDriver...') sys.exit() if (spc_mod_dct is None): error_message('kTPDriver or Thermo Requested. \n However no spc model provided in models.dat\n Exiting MechDriver...') sys.exit() if tsk_dct['ktp']: if (pes_dct is None): error_message('kTPDriver Requested. \n However no reaction channels provided in mechanism.dat\n Exiting MechDriver...') sys.exit()
def ngram_processor(items, ngram_len): '\n Given a sequence or iterable of arbitrary items, return an iterator of\n item ngrams tuples of length ngram_len. Buffers at most ngram_len iterable\n items.\n\n For example::\n\n >>> list(ngram_processor([1, 2, 3, 4, 5], ngram_len=3))\n [(1, 2, 3), (2, 3, 4), (3, 4, 5)]\n ' ngram = deque() current_len = 0 for item in items: if (current_len == ngram_len): (yield tuple(ngram)) ngram.popleft() current_len -= 1 ngram.append(item) current_len += 1 (yield tuple(ngram))
179,317,752,332,103,300
Given a sequence or iterable of arbitrary items, return an iterator of item ngrams tuples of length ngram_len. Buffers at most ngram_len iterable items. For example:: >>> list(ngram_processor([1, 2, 3, 4, 5], ngram_len=3)) [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
tests/textcode/test_analysis.py
ngram_processor
pombredanne/scancode-toolkit
python
def ngram_processor(items, ngram_len): '\n Given a sequence or iterable of arbitrary items, return an iterator of\n item ngrams tuples of length ngram_len. Buffers at most ngram_len iterable\n items.\n\n For example::\n\n >>> list(ngram_processor([1, 2, 3, 4, 5], ngram_len=3))\n [(1, 2, 3), (2, 3, 4), (3, 4, 5)]\n ' ngram = deque() current_len = 0 for item in items: if (current_len == ngram_len): (yield tuple(ngram)) ngram.popleft() current_len -= 1 ngram.append(item) current_len += 1 (yield tuple(ngram))
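ngram_processor's final yield has a subtle consequence: a sequence shorter than ngram_len still produces one partial tuple instead of nothing. Restating the generator with its required import and checking both behaviors:

from collections import deque

def ngram_processor(items, ngram_len):
    ngram = deque()
    current_len = 0
    for item in items:
        if current_len == ngram_len:
            yield tuple(ngram)     # emit the full window, then slide it
            ngram.popleft()
            current_len -= 1
        ngram.append(item)
        current_len += 1
    yield tuple(ngram)             # trailing yield: last window, or a partial one

assert list(ngram_processor([1, 2, 3, 4, 5], ngram_len=3)) == \
    [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
# Input shorter than ngram_len yields one (partial) tuple, not zero:
assert list(ngram_processor([1, 2], ngram_len=3)) == [(1, 2)]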
def ignore(x): 'Method to indicate bypassing property validation' return x
857,614,166,513,775,100
Method to indicate bypassing property validation
troposphere/validators.py
ignore
akerbergen/troposphere
python
def ignore(x): return x
def defer(x): 'Method to indicate deferring property validation' return x
2,087,068,956,255,348,000
Method to indicate deferring property validation
troposphere/validators.py
defer
akerbergen/troposphere
python
def defer(x): return x
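ignore and defer are identity functions used as sentinels: assigning one as a property's validator signals "skip validation" (or "defer it") while still satisfying code that expects a callable. A hedged sketch of that pattern (the Prop class below is illustrative, not troposphere's actual machinery):

def ignore(x):
    return x

class Prop:
    def __init__(self, validator):
        self.validator = validator

    def set(self, value):
        # The validator is always callable; an identity validator
        # effectively bypasses any checking.
        return self.validator(value)

unchecked = Prop(validator=ignore)
assert unchecked.set('anything goes') == 'anything goes'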
def __init__(self, assets_dir=None, physics_backend='BulletPhysics', time_step=0.001, gravity=[0, 0, (- 9.8)], worker_id=0, use_visualizer=False): 'Initialize the simulator.\n\n Args:\n assets_dir: The assets directory.\n physics_backend: Name of the physics engine backend.\n time_step: Time step of the simulation.\n gravity: The gravity as a 3-dimensional vector.\n worker_id: The id of the multi-threaded simulation.\n use_visualizer: Render the simulation use the debugging visualizer\n if True.\n ' self._assets_dir = os.path.abspath((assets_dir or './')) self._gravity = gravity physics_class = getattr(physics, physics_backend) self._physics = physics_class(time_step=time_step, use_visualizer=use_visualizer, worker_id=worker_id) self._num_steps = 0
-3,805,035,608,652,995,600
Initialize the simulator. Args: assets_dir: The assets directory. physics_backend: Name of the physics engine backend. time_step: Time step of the simulation. gravity: The gravity as a 3-dimensional vector. worker_id: The id of the multi-threaded simulation. use_visualizer: Render the simulation using the debugging visualizer if True.
robovat/simulation/simulator.py
__init__
StanfordVL/robovat
python
def __init__(self, assets_dir=None, physics_backend='BulletPhysics', time_step=0.001, gravity=[0, 0, (- 9.8)], worker_id=0, use_visualizer=False): 'Initialize the simulator.\n\n Args:\n assets_dir: The assets directory.\n physics_backend: Name of the physics engine backend.\n time_step: Time step of the simulation.\n gravity: The gravity as a 3-dimensional vector.\n worker_id: The id of the multi-threaded simulation.\n use_visualizer: Render the simulation use the debugging visualizer\n if True.\n ' self._assets_dir = os.path.abspath((assets_dir or './')) self._gravity = gravity physics_class = getattr(physics, physics_backend) self._physics = physics_class(time_step=time_step, use_visualizer=use_visualizer, worker_id=worker_id) self._num_steps = 0
def __del__(self): 'Delete the simulator.' del self._physics
-1,470,313,404,570,486,500
Delete the simulator.
robovat/simulation/simulator.py
__del__
StanfordVL/robovat
python
def __del__(self): del self._physics
def reset(self): 'Reset the simulation.' self.physics.reset() self.physics.set_gravity(self._gravity) self._bodies = dict() self._constraints = dict() self._num_steps = 0
8,568,562,312,400,552,000
Reset the simulation.
robovat/simulation/simulator.py
reset
StanfordVL/robovat
python
def reset(self): self.physics.reset() self.physics.set_gravity(self._gravity) self._bodies = dict() self._constraints = dict() self._num_steps = 0
def start(self): 'Start the simulation.' self.physics.start() self._num_steps = 0
-3,988,843,183,428,890,000
Start the simulation.
robovat/simulation/simulator.py
start
StanfordVL/robovat
python
def start(self): self.physics.start() self._num_steps = 0
def step(self): 'Take a simulation step.' for body in self.bodies.values(): body.update() for constraint in self.constraints.values(): constraint.update() self.physics.step() self._num_steps += 1
5,010,890,271,759,245,000
Take a simulation step.
robovat/simulation/simulator.py
step
StanfordVL/robovat
python
def step(self): for body in self.bodies.values(): body.update() for constraint in self.constraints.values(): constraint.update() self.physics.step() self._num_steps += 1
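Taken together, the Simulator methods above follow a conventional reset/start/step lifecycle. A hedged usage sketch (the import path is inferred from the record's path field, the constructor arguments come from the __init__ docstring, and the loop length is arbitrary):

from robovat.simulation.simulator import Simulator  # assumed import path

sim = Simulator(assets_dir='./assets',
                physics_backend='BulletPhysics',
                time_step=0.001,
                use_visualizer=False)

sim.reset()   # clears bodies/constraints and re-applies gravity
sim.start()

for _ in range(1000):
    sim.step()   # updates every body and constraint, then steps physics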
def add_body(self, filename, pose=None, scale=1.0, is_static=False, is_controllable=False, name=None): 'Add a body to the simulation.\n\n Args:\n filename: The path to the URDF file to be loaded. If the path is\n not absolute path, it will be joined with the assets directory.\n pose: The initial pose as an instance of Pose.\n scale: The scaling factor of the body.\n is_static: If True, set the base of the body to be static.\n is_controllable: If True, the body can apply motor controls.\n name: Used as a reference of the body in this Simulator instance.\n\n Returns:\n An instance of Body.\n ' if os.path.isabs(filename): path = filename else: path = os.path.join(self._assets_dir, filename) if (pose is None): pose = [[0, 0, 0], [0, 0, 0]] if is_controllable: body = ControllableBody(simulator=self, filename=path, pose=pose, scale=scale, is_static=is_static, name=name) else: body = Body(simulator=self, filename=path, pose=pose, scale=scale, is_static=is_static, name=name) self._bodies[body.name] = body return body
-787,321,298,949,126,800
Add a body to the simulation. Args: filename: The path to the URDF file to be loaded. If the path is not an absolute path, it will be joined with the assets directory. pose: The initial pose as an instance of Pose. scale: The scaling factor of the body. is_static: If True, set the base of the body to be static. is_controllable: If True, the body can apply motor controls. name: Used as a reference to the body in this Simulator instance. Returns: An instance of Body.
robovat/simulation/simulator.py
add_body
StanfordVL/robovat
python
def add_body(self, filename, pose=None, scale=1.0, is_static=False, is_controllable=False, name=None): 'Add a body to the simulation.\n\n Args:\n filename: The path to the URDF file to be loaded. If the path is\n not absolute path, it will be joined with the assets directory.\n pose: The initial pose as an instance of Pose.\n scale: The scaling factor of the body.\n is_static: If True, set the base of the body to be static.\n is_controllable: If True, the body can apply motor controls.\n name: Used as a reference of the body in this Simulator instance.\n\n Returns:\n An instance of Body.\n ' if os.path.isabs(filename): path = filename else: path = os.path.join(self._assets_dir, filename) if (pose is None): pose = [[0, 0, 0], [0, 0, 0]] if is_controllable: body = ControllableBody(simulator=self, filename=path, pose=pose, scale=scale, is_static=is_static, name=name) else: body = Body(simulator=self, filename=path, pose=pose, scale=scale, is_static=is_static, name=name) self._bodies[body.name] = body return body
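Continuing the usage sketch above, add_body registers each body by name for later lookup and removal (the URDF filenames and pose values here are hypothetical; pose defaults to [position, orientation] per the body):

plane = sim.add_body('planes/plane.urdf', is_static=True, name='ground')
arm = sim.add_body('robots/arm.urdf',
                   pose=[[0, 0, 0.1], [0, 0, 0]],   # [position, orientation]
                   is_controllable=True,
                   name='arm')
# Bodies are kept in a name-keyed dict, so removal is by name:
sim.remove_body('ground')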
def remove_body(self, name): 'Remove the body.\n\n Args:\n body: An instance of Body.\n ' self.physics.remove_body(self._bodies[name].uid) del self._bodies[name]
-7,921,277,802,025,198,000
Remove the body. Args: name: The name of the body to remove.
robovat/simulation/simulator.py
remove_body
StanfordVL/robovat
python
def remove_body(self, name): 'Remove the body.\n\n Args:\n body: An instance of Body.\n ' self.physics.remove_body(self._bodies[name].uid) del self._bodies[name]
def add_constraint(self, parent, child, joint_type='fixed', joint_axis=[0, 0, 0], parent_frame_pose=None, child_frame_pose=None, max_force=None, max_linear_velocity=None, max_angular_velocity=None, is_controllable=False, name=None): 'Add a constraint to the simulation.\n\n Args:\n parent: The parent entity as an instance of Entity.\n child: The child entity as an instance of Entity.\n joint_type: The type of the joint.\n joint_axis: The axis of the joint.\n parent_frame_pose: The pose of the joint in the parent frame.\n child_frame_pose: The pose of the joint in the child frame.\n max_force: Max force the constraint can apply.\n max_linear_velocity: Maximum linear velocity.\n max_angular_velocity: Max angular velocity.\n is_controllable: If True, the constraint can apply motor controls.\n\n Returns:\n An instance of Constraint.\n ' if is_controllable: constraint = ControllableConstraint(parent, child, joint_type, joint_axis, parent_frame_pose, child_frame_pose, max_force=max_force, max_linear_velocity=max_linear_velocity, max_angular_velocity=max_angular_velocity, name=name) else: assert (max_linear_velocity is None) assert (max_angular_velocity is None) constraint = Constraint(parent, child, joint_type, joint_axis, parent_frame_pose, child_frame_pose, max_force=max_force, name=name) self._constraints[constraint.name] = constraint return constraint
-7,531,566,976,571,550,000
Add a constraint to the simulation. Args: parent: The parent entity as an instance of Entity. child: The child entity as an instance of Entity. joint_type: The type of the joint. joint_axis: The axis of the joint. parent_frame_pose: The pose of the joint in the parent frame. child_frame_pose: The pose of the joint in the child frame. max_force: Max force the constraint can apply. max_linear_velocity: Maximum linear velocity. max_angular_velocity: Max angular velocity. is_controllable: If True, the constraint can apply motor controls. Returns: An instance of Constraint.
robovat/simulation/simulator.py
add_constraint
StanfordVL/robovat
python
def add_constraint(self, parent, child, joint_type='fixed', joint_axis=[0, 0, 0], parent_frame_pose=None, child_frame_pose=None, max_force=None, max_linear_velocity=None, max_angular_velocity=None, is_controllable=False, name=None): 'Add a constraint to the simulation.\n\n Args:\n parent: The parent entity as an instance of Entity.\n child: The child entity as an instance of Entity.\n joint_type: The type of the joint.\n joint_axis: The axis of the joint.\n parent_frame_pose: The pose of the joint in the parent frame.\n child_frame_pose: The pose of the joint in the child frame.\n max_force: Max force the constraint can apply.\n max_linear_velocity: Maximum linear velocity.\n max_angular_velocity: Max angular velocity.\n is_controllable: If True, the constraint can apply motor controls.\n\n Returns:\n An instance of Constraint.\n ' if is_controllable: constraint = ControllableConstraint(parent, child, joint_type, joint_axis, parent_frame_pose, child_frame_pose, max_force=max_force, max_linear_velocity=max_linear_velocity, max_angular_velocity=max_angular_velocity, name=name) else: assert (max_linear_velocity is None) assert (max_angular_velocity is None) constraint = Constraint(parent, child, joint_type, joint_axis, parent_frame_pose, child_frame_pose, max_force=max_force, name=name) self._constraints[constraint.name] = constraint return constraint
def receive_robot_commands(self, robot_command, component_type='body'): "Receive a robot command.\n\n Args:\n robot_command: An instance of RobotCommand.\n component_type: Either 'body' or 'constraint'.\n " if (component_type == 'body'): component = self._bodies[robot_command.component] elif (component_type == 'constraint'): component = self._constraints[robot_command.component] else: raise ValueError(('Unrecognized component type: %r' % component_type)) command_method = getattr(component, robot_command.command_type) command_method(**robot_command.arguments)
-704,132,049,575,272,300
Receive a robot command. Args: robot_command: An instance of RobotCommand. component_type: Either 'body' or 'constraint'.
robovat/simulation/simulator.py
receive_robot_commands
StanfordVL/robovat
python
def receive_robot_commands(self, robot_command, component_type='body'): "Receive a robot command.\n\n Args:\n robot_command: An instance of RobotCommand.\n component_type: Either 'body' or 'constraint'.\n " if (component_type == 'body'): component = self._bodies[robot_command.component] elif (component_type == 'constraint'): component = self._constraints[robot_command.component] else: raise ValueError(('Unrecognized component type: %r' % component_type)) command_method = getattr(component, robot_command.command_type) command_method(**robot_command.arguments)
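receive_robot_commands is a small command-dispatch pattern: the command's type names a method on the target component, and its arguments become keyword arguments. The core idiom, stripped of the simulator specifics (the RobotCommand fields are assumed from the docstring; the Gripper class is illustrative):

class Gripper:
    def grip(self, force):
        print(f'gripping with force {force}')

command_type, arguments = 'grip', {'force': 20}
component = Gripper()

# getattr turns the string into a bound method; ** unpacks the arguments.
getattr(component, command_type)(**arguments)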
def check_contact(self, entity_a, entity_b=None): 'Check if the loaded object is stable.\n\n Args:\n entity_a: The first entity.\n entity_b: The second entity, None for any entities.\n\n Returns:\n True if they have contacts, False otherwise.\n ' def _check_contact(entity_a, entity_b=None): a_uid = entity_a.uid if (entity_b is None): b_uid = None else: b_uid = entity_b.uid contact_points = self._physics.get_contact_points(a_uid, b_uid) has_contact = (len(contact_points) > 0) return has_contact if (not isinstance(entity_a, (list, tuple))): entities_a = [entity_a] else: entities_a = entity_a if (not isinstance(entity_b, (list, tuple))): entities_b = [entity_b] else: entities_b = entity_b has_contact = False for a in entities_a: for b in entities_b: if _check_contact(a, b): has_contact = True break return has_contact
-2,138,105,238,576,339,000
Check whether entity_a and entity_b are in contact. Args: entity_a: The first entity. entity_b: The second entity, None for any entities. Returns: True if they have contacts, False otherwise.
robovat/simulation/simulator.py
check_contact
StanfordVL/robovat
python
def check_contact(self, entity_a, entity_b=None): 'Check if the loaded object is stable.\n\n Args:\n entity_a: The first entity.\n entity_b: The second entity, None for any entities.\n\n Returns:\n True if they have contacts, False otherwise.\n ' def _check_contact(entity_a, entity_b=None): a_uid = entity_a.uid if (entity_b is None): b_uid = None else: b_uid = entity_b.uid contact_points = self._physics.get_contact_points(a_uid, b_uid) has_contact = (len(contact_points) > 0) return has_contact if (not isinstance(entity_a, (list, tuple))): entities_a = [entity_a] else: entities_a = entity_a if (not isinstance(entity_b, (list, tuple))): entities_b = [entity_b] else: entities_b = entity_b has_contact = False for a in entities_a: for b in entities_b: if _check_contact(a, b): has_contact = True break return has_contact
def check_stable(self, body, linear_velocity_threshold, angular_velocity_threshold): 'Check if the loaded object is stable.\n\n Args:\n body: An instance of body or a list of bodies.\n linear_velocity_threshold: Linear velocity threshold of being\n stable.\n angular_velocity_threshold: Angular velocity threshold of being\n stable.\n\n Returns:\n is_stable: True if the linear velocity and the angular velocity are\n almost zero; False otherwise.\n ' linear_velocity = np.linalg.norm(body.linear_velocity) angular_velocity = np.linalg.norm(body.angular_velocity) if (linear_velocity_threshold is None): has_linear_velocity = False else: has_linear_velocity = (linear_velocity >= linear_velocity_threshold) if (angular_velocity_threshold is None): has_angular_velocity = False else: has_angular_velocity = (angular_velocity >= angular_velocity_threshold) is_stable = ((not has_linear_velocity) and (not has_angular_velocity)) return is_stable
7,935,277,843,279,572,000
Check if the loaded object is stable. Args: body: An instance of body or a list of bodies. linear_velocity_threshold: Linear velocity threshold of being stable. angular_velocity_threshold: Angular velocity threshold of being stable. Returns: is_stable: True if the linear velocity and the angular velocity are almost zero; False otherwise.
robovat/simulation/simulator.py
check_stable
StanfordVL/robovat
python
def check_stable(self, body, linear_velocity_threshold, angular_velocity_threshold): 'Check if the loaded object is stable.\n\n Args:\n body: An instance of body or a list of bodies.\n linear_velocity_threshold: Linear velocity threshold of being\n stable.\n angular_velocity_threshold: Angular velocity threshold of being\n stable.\n\n Returns:\n is_stable: True if the linear velocity and the angular velocity are\n almost zero; False otherwise.\n ' linear_velocity = np.linalg.norm(body.linear_velocity) angular_velocity = np.linalg.norm(body.angular_velocity) if (linear_velocity_threshold is None): has_linear_velocity = False else: has_linear_velocity = (linear_velocity >= linear_velocity_threshold) if (angular_velocity_threshold is None): has_angular_velocity = False else: has_angular_velocity = (angular_velocity >= angular_velocity_threshold) is_stable = ((not has_linear_velocity) and (not has_angular_velocity)) return is_stable
def wait_until_stable(self, body, linear_velocity_threshold=0.005, angular_velocity_threshold=0.005, check_after_steps=100, min_stable_steps=100, max_steps=2000): 'Wait until the objects are stable.\n\n Args:\n body: An instance of body or a list of bodies.\n linear_velocity_threshold: Linear velocity threshold of being\n stable.\n angular_velocity_threshold: Angular velocity threshold of being\n stable.\n check_after_steps: Number of steps before checking.\n min_stable_steps: Minimum number of steps required to be stable.\n max_steps: Maximum steps to wait for objects being stable.\n ' if isinstance(body, (list, tuple)): body_list = body else: body_list = [body] num_steps = 0 num_stable_steps = 0 while 1: self.step() num_steps += 1 if (num_steps < check_after_steps): continue all_stable = True for b in body_list: is_stable = self.check_stable(b, linear_velocity_threshold, angular_velocity_threshold) if (not is_stable): all_stable = False break if all_stable: num_stable_steps += 1 if ((num_stable_steps >= min_stable_steps) or (num_steps >= max_steps)): break
731,992,707,841,130,100
Wait until the objects are stable. Args: body: An instance of body or a list of bodies. linear_velocity_threshold: Linear velocity threshold of being stable. angular_velocity_threshold: Angular velocity threshold of being stable. check_after_steps: Number of steps before checking. min_stable_steps: Minimum number of steps required to be stable. max_steps: Maximum steps to wait for objects being stable.
robovat/simulation/simulator.py
wait_until_stable
StanfordVL/robovat
python
def wait_until_stable(self, body, linear_velocity_threshold=0.005, angular_velocity_threshold=0.005, check_after_steps=100, min_stable_steps=100, max_steps=2000): 'Wait until the objects are stable.\n\n Args:\n body: An instance of body or a list of bodies.\n linear_velocity_threshold: Linear velocity threshold of being\n stable.\n angular_velocity_threshold: Angular velocity threshold of being\n stable.\n check_after_steps: Number of steps before checking.\n min_stable_steps: Minimum number of steps required to be stable.\n max_steps: Maximum steps to wait for objects being stable.\n ' if isinstance(body, (list, tuple)): body_list = body else: body_list = [body] num_steps = 0 num_stable_steps = 0 while 1: self.step() num_steps += 1 if (num_steps < check_after_steps): continue all_stable = True for b in body_list: is_stable = self.check_stable(b, linear_velocity_threshold, angular_velocity_threshold) if (not is_stable): all_stable = False break if all_stable: num_stable_steps += 1 if ((num_stable_steps >= min_stable_steps) or (num_steps >= max_steps)): break
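A usage sketch for wait_until_stable, continuing the simulator example above: drop an object and block until its velocities settle (the URDF filename, pose, and thresholds are illustrative):

box = sim.add_body('objects/box.urdf', pose=[[0, 0, 0.5], [0, 0, 0]])
sim.wait_until_stable(box,
                      linear_velocity_threshold=0.005,
                      angular_velocity_threshold=0.005,
                      max_steps=2000)
# Returns once the body has stayed below both thresholds for
# min_stable_steps consecutive steps, or when max_steps is exhausted.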
def plot_pose(self, pose, axis_length=1.0, text=None, text_size=1.0, text_color=[0, 0, 0]): 'Plot a 6-DoF pose or a frame in the debugging visualizer.\n\n Args:\n pose: The pose to be plot.\n axis_length: The length of the axes.\n text: Text showing up next to the frame.\n text_size: Size of the text.\n text_color: Color of the text.\n ' if (not isinstance(pose, Pose)): pose = Pose(pose) origin = pose.position x_end = (origin + np.dot([axis_length, 0, 0], pose.matrix3.T)) y_end = (origin + np.dot([0, axis_length, 0], pose.matrix3.T)) z_end = (origin + np.dot([0, 0, axis_length], pose.matrix3.T)) pybullet.addUserDebugLine(origin, x_end, lineColorRGB=[1, 0, 0], lineWidth=2) pybullet.addUserDebugLine(origin, y_end, lineColorRGB=[0, 1, 0], lineWidth=2) pybullet.addUserDebugLine(origin, z_end, lineColorRGB=[0, 0, 1], lineWidth=2) if (text is not None): pybullet.addUserDebugText(text, origin, text_color, text_size)
-8,379,458,917,217,585,000
Plot a 6-DoF pose or a frame in the debugging visualizer. Args: pose: The pose to be plotted. axis_length: The length of the axes. text: Text showing up next to the frame. text_size: Size of the text. text_color: Color of the text.
robovat/simulation/simulator.py
plot_pose
StanfordVL/robovat
python
def plot_pose(self, pose, axis_length=1.0, text=None, text_size=1.0, text_color=[0, 0, 0]): 'Plot a 6-DoF pose or a frame in the debugging visualizer.\n\n Args:\n pose: The pose to be plot.\n axis_length: The length of the axes.\n text: Text showing up next to the frame.\n text_size: Size of the text.\n text_color: Color of the text.\n ' if (not isinstance(pose, Pose)): pose = Pose(pose) origin = pose.position x_end = (origin + np.dot([axis_length, 0, 0], pose.matrix3.T)) y_end = (origin + np.dot([0, axis_length, 0], pose.matrix3.T)) z_end = (origin + np.dot([0, 0, axis_length], pose.matrix3.T)) pybullet.addUserDebugLine(origin, x_end, lineColorRGB=[1, 0, 0], lineWidth=2) pybullet.addUserDebugLine(origin, y_end, lineColorRGB=[0, 1, 0], lineWidth=2) pybullet.addUserDebugLine(origin, z_end, lineColorRGB=[0, 0, 1], lineWidth=2) if (text is not None): pybullet.addUserDebugText(text, origin, text_color, text_size)
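The axis endpoints in plot_pose are the frame origin plus each unit axis rotated into the world frame: x_end = origin + R [L, 0, 0]^T, written in the code as a row-vector product with R^T. A tiny numeric check with a 90-degree yaw (numpy only; no simulator needed):

import numpy as np

L = 1.0
origin = np.array([0.0, 0.0, 0.0])
# Rotation of 90 degrees about z: the body x axis maps to world y.
R = np.array([[0.0, -1.0, 0.0],
              [1.0,  0.0, 0.0],
              [0.0,  0.0, 1.0]])

x_end = origin + np.dot([L, 0, 0], R.T)   # same form as in plot_pose
assert np.allclose(x_end, [0.0, 1.0, 0.0])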
def plot_line(self, start, end, line_color=[0, 0, 0], line_width=1): 'Plot a pose or a frame in the debugging visualizer.\n\n Args:\n start: Starting point of the line.\n end: Ending point of the line.\n line_color: Color of the line.\n line_width: Width of the line.\n ' pybullet.addUserDebugLine(start, end, lineColorRGB=line_color, lineWidth=line_width)
8,925,551,087,014,710,000
Plot a line in the debugging visualizer. Args: start: Starting point of the line. end: Ending point of the line. line_color: Color of the line. line_width: Width of the line.
robovat/simulation/simulator.py
plot_line
StanfordVL/robovat
python
def plot_line(self, start, end, line_color=[0, 0, 0], line_width=1): 'Plot a pose or a frame in the debugging visualizer.\n\n Args:\n start: Starting point of the line.\n end: Ending point of the line.\n line_color: Color of the line.\n line_width: Width of the line.\n ' pybullet.addUserDebugLine(start, end, lineColorRGB=line_color, lineWidth=line_width)
def clear_visualization(self): 'Clear all visualization items.' pybullet.removeAllUserDebugItems()
5,383,127,262,027,536,000
Clear all visualization items.
robovat/simulation/simulator.py
clear_visualization
StanfordVL/robovat
python
def clear_visualization(self): pybullet.removeAllUserDebugItems()
def __init__(self): 'Default constructor' self.rgb_image_topic = rospy.get_param('~rgb_image_topic', '/camera/rgb/image_raw') self.camera_publisher = rospy.Publisher(self.rgb_image_topic, sensor_msgs.msg.Image, queue_size=1) self.camera_pub_frequency = rospy.get_param('~camera_pub_frequency', 20) self.bridge = CvBridge() self.camera_info_topic = rospy.get_param('~camera_info_topic', '/camera/rgb/camera_info') self.camera_info = sensor_msgs.msg.CameraInfo() self.camera_info_publisher = rospy.Publisher(self.camera_info_topic, sensor_msgs.msg.CameraInfo, queue_size=1) self.camera_frame_id = rospy.get_param('~camera_frame_id', 'camera_link') self.camera_info.header.frame_id = self.camera_frame_id self.capture = cv2.VideoCapture(0) (ok, frame) = self.capture.read() (width, height, _) = frame.shape focal_length = height center = ((height / 2), (width / 2)) camera_matrix = np.array([[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]], dtype='double') P_matrix = np.array([[focal_length, 0, center[0], 0], [0, focal_length, center[1], 0], [0, 0, 1, 0]], dtype='double') dist_coeffs = np.zeros((4, 1)) self.camera_info.D = list(dist_coeffs) self.camera_info.K = list(camera_matrix.flatten()) self.camera_info.P = list(P_matrix.flatten()) self.timer = rospy.Timer(rospy.Duration((1.0 / self.camera_pub_frequency)), self.timer_callback) rospy.loginfo('Camera publisher ready !') while (not rospy.is_shutdown()): rospy.spin() self.capture.release()
7,922,984,320,217,438,000
Default constructor
scripts/camera_publisher_node.py
__init__
Alexandre-Bonneau/uwds3_perception
python
def __init__(self): self.rgb_image_topic = rospy.get_param('~rgb_image_topic', '/camera/rgb/image_raw') self.camera_publisher = rospy.Publisher(self.rgb_image_topic, sensor_msgs.msg.Image, queue_size=1) self.camera_pub_frequency = rospy.get_param('~camera_pub_frequency', 20) self.bridge = CvBridge() self.camera_info_topic = rospy.get_param('~camera_info_topic', '/camera/rgb/camera_info') self.camera_info = sensor_msgs.msg.CameraInfo() self.camera_info_publisher = rospy.Publisher(self.camera_info_topic, sensor_msgs.msg.CameraInfo, queue_size=1) self.camera_frame_id = rospy.get_param('~camera_frame_id', 'camera_link') self.camera_info.header.frame_id = self.camera_frame_id self.capture = cv2.VideoCapture(0) (ok, frame) = self.capture.read() (width, height, _) = frame.shape focal_length = height center = ((height / 2), (width / 2)) camera_matrix = np.array([[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]], dtype='double') P_matrix = np.array([[focal_length, 0, center[0], 0], [0, focal_length, center[1], 0], [0, 0, 1, 0]], dtype='double') dist_coeffs = np.zeros((4, 1)) self.camera_info.D = list(dist_coeffs) self.camera_info.K = list(camera_matrix.flatten()) self.camera_info.P = list(P_matrix.flatten()) self.timer = rospy.Timer(rospy.Duration((1.0 / self.camera_pub_frequency)), self.timer_callback) rospy.loginfo('Camera publisher ready !') while (not rospy.is_shutdown()): rospy.spin() self.capture.release()
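A subtlety in the constructor above: OpenCV's frame.shape is (height, width, channels), so the unpacking (width, height, _) = frame.shape swaps the names. The later swapped uses happen to cancel, leaving a consistent intrinsic matrix (f = image width, principal point at the image center), but the naming invites bugs. A clearer equivalent of that fragment, variable names only, behavior unchanged:

ok, frame = capture.read()
height, width, _ = frame.shape          # OpenCV frames are (rows, cols, channels)
focal_length = width                    # same value the original ends up using
center = (width / 2.0, height / 2.0)    # principal point (cx, cy)
# Note: CameraInfo.D likely expects plain floats, so
# list(dist_coeffs.flatten()) would be safer than list(dist_coeffs).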
def test_get_time_range() -> None: '\n Test finding the time range of a query.\n ' body = {'selected_columns': ['event_id'], 'conditions': [('timestamp', '>=', '2019-09-18T10:00:00'), ('timestamp', '>=', '2000-09-18T10:00:00'), ('timestamp', '<', '2019-09-19T12:00:00'), [('timestamp', '<', '2019-09-18T12:00:00'), ('project_id', 'IN', [1])], ('project_id', 'IN', [1])]} events = get_dataset('events') query = parse_query(body, events) processors = events.get_default_entity().get_query_processors() for processor in processors: if isinstance(processor, TimeSeriesProcessor): processor.process_query(query, HTTPRequestSettings()) (from_date_ast, to_date_ast) = get_time_range(identity_translate(query), 'timestamp') assert ((from_date_ast is not None) and isinstance(from_date_ast, datetime) and (from_date_ast.isoformat() == '2019-09-18T10:00:00')) assert ((to_date_ast is not None) and isinstance(to_date_ast, datetime) and (to_date_ast.isoformat() == '2019-09-19T12:00:00'))
-293,156,988,971,422,200
Test finding the time range of a query.
tests/clickhouse/query_dsl/test_time_range.py
test_get_time_range
fpacifici/snuba
python
def test_get_time_range() -> None: '\n \n ' body = {'selected_columns': ['event_id'], 'conditions': [('timestamp', '>=', '2019-09-18T10:00:00'), ('timestamp', '>=', '2000-09-18T10:00:00'), ('timestamp', '<', '2019-09-19T12:00:00'), [('timestamp', '<', '2019-09-18T12:00:00'), ('project_id', 'IN', [1])], ('project_id', 'IN', [1])]} events = get_dataset('events') query = parse_query(body, events) processors = events.get_default_entity().get_query_processors() for processor in processors: if isinstance(processor, TimeSeriesProcessor): processor.process_query(query, HTTPRequestSettings()) (from_date_ast, to_date_ast) = get_time_range(identity_translate(query), 'timestamp') assert ((from_date_ast is not None) and isinstance(from_date_ast, datetime) and (from_date_ast.isoformat() == '2019-09-18T10:00:00')) assert ((to_date_ast is not None) and isinstance(to_date_ast, datetime) and (to_date_ast.isoformat() == '2019-09-19T12:00:00'))
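As this test exercises it, get_time_range intersects the top-level timestamp bounds: the tightest lower bound wins (the max of the '>=' literals, so the 2019 bound beats the 2000 one) and the tightest upper bound wins (the min of the '<' literals), while the bracketed pair nested inside an OR is ignored because it does not constrain every row. The arithmetic of that intersection, in plain Python:

from datetime import datetime

lower_bounds = [datetime(2019, 9, 18, 10), datetime(2000, 9, 18, 10)]
upper_bounds = [datetime(2019, 9, 19, 12)]

from_date = max(lower_bounds)   # tightest lower bound
to_date = min(upper_bounds)     # tightest upper bound
assert from_date.isoformat() == '2019-09-18T10:00:00'
assert to_date.isoformat() == '2019-09-19T12:00:00'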