# Replica-management functions from lib/rucio/core/replica.py (repository: bari12/rucio, Python).
@read_session
def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None):
    """
    List the bad file replicas history. Method only used by necromancer.

    :param limit: The maximum number of replicas returned.
    :param thread: The assigned thread for this necromancer.
    :param total_threads: The total number of threads of all necromancers.
    :param session: The database session in use.
    """
    query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\
        filter(models.BadReplicas.state == BadFilesStatus.BAD)
    query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name')
    query = query.limit(limit)
    bad_replicas = {}
    for scope, name, rse_id in query.yield_per(1000):
        if rse_id not in bad_replicas:
            bad_replicas[rse_id] = []
        bad_replicas[rse_id].append({'scope': scope, 'name': name})
    return bad_replicas

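
# A minimal, self-contained sketch (not part of the source) of the grouping
# pattern used in list_bad_replicas_history: rows of (scope, name, rse_id)
# are bucketed into {rse_id: [{'scope': ..., 'name': ...}]}. The rows below
# are invented placeholders, not real Rucio data.
def _group_by_rse_sketch(rows):
    grouped = {}
    for scope, name, rse_id in rows:
        grouped.setdefault(rse_id, []).append({'scope': scope, 'name': name})
    return grouped

assert _group_by_rse_sketch([('mc16', 'f1', 'rse-a'), ('mc16', 'f2', 'rse-a')]) == \
    {'rse-a': [{'scope': 'mc16', 'name': 'f1'}, {'scope': 'mc16', 'name': 'f2'}]}
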
@transactional_session
def update_bad_replicas_history(dids, rse_id, session=None):
    """
    Update the bad file replicas history. Method only used by necromancer.

    :param dids: The list of DIDs.
    :param rse_id: The rse_id.
    :param session: The database session in use.
    """
    for did in dids:
        try:
            # Check whether the replica row still exists and in which state it is.
            result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one()
            state = result.state
            if state == ReplicaState.AVAILABLE:
                # The replica recovered: close the BAD history entry.
                query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
                query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False)
            elif state != ReplicaState.BAD:
                # Intermediate state: mark the replica BAD again unless an
                # AVAILABLE copy exists, and drop it from the source table.
                try:
                    session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one()
                except NoResultFound:
                    update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session)
                    session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False)
            else:
                # Replica is still BAD: nothing to update.
                pass
        except NoResultFound:
            # The replica row is gone: derive the final status from the DID availability.
            try:
                result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one()
                state = result.availability
                final_state = None
                if state == DIDAvailability.LOST:
                    final_state = BadFilesStatus.LOST
                elif state == DIDAvailability.DELETED:
                    final_state = BadFilesStatus.DELETED
                elif state == DIDAvailability.AVAILABLE:
                    final_state = BadFilesStatus.DELETED
                else:
                    print('Houston we have a problem.')
                    final_state = BadFilesStatus.DELETED
                query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
                query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False)
            except NoResultFound:
                # The DID itself is gone as well: flag the history entry as LOST.
                query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
                query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False)

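
# Illustration (an assumption-labeled sketch, not the Rucio API) of the
# availability-to-final-status mapping applied above once the replica row is
# gone: LOST availability maps to LOST, every other value collapses to
# DELETED, matching the fallback branch.
def _final_bad_status_sketch(availability):
    return 'LOST' if availability == 'LOST' else 'DELETED'

assert _final_bad_status_sketch('LOST') == 'LOST'
assert _final_bad_status_sketch('AVAILABLE') == 'DELETED'
assert _final_bad_status_sketch('DELETED') == 'DELETED'
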
@transactional_session
def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None):
    """
    Declare a list of bad replicas.

    :param pfns: The list of PFNs.
    :param rse_id: The RSE id.
    :param reason: The reason of the loss.
    :param issuer: The issuer account.
    :param status: Either BAD or SUSPICIOUS.
    :param scheme: The scheme of the PFNs.
    :param session: The database session in use.
    """
    unknown_replicas = []
    declared_replicas = []
    rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session)
    replicas = []
    proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme)
    if rse_info['deterministic']:
        # On deterministic RSEs the scope and name can be derived from the path.
        parsed_pfn = proto.parse_pfns(pfns=pfns)
        for pfn in parsed_pfn:
            path = parsed_pfn[pfn]['path']
            if path.startswith('/user') or path.startswith('/group'):
                scope = '%s.%s' % (path.split('/')[1], path.split('/')[2])
                name = parsed_pfn[pfn]['name']
            elif path.startswith('/'):
                scope = path.split('/')[1]
                name = parsed_pfn[pfn]['name']
            else:
                scope = path.split('/')[0]
                name = parsed_pfn[pfn]['name']
            scope = InternalScope(scope, vo=issuer.vo)
            __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session)
            if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS):
                replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD})
                new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size)
                new_bad_replica.save(session=session, flush=False)
                session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False)
                declared_replicas.append(pfn)
            elif already_declared:
                unknown_replicas.append('%s %s' % (pfn, 'Already declared'))
            else:
                no_hidden_char = True
                for char in str(pfn):
                    if not isprint(char):
                        unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars'))
                        no_hidden_char = False
                        break
                if no_hidden_char:
                    unknown_replicas.append('%s %s' % (pfn, 'Unknown replica'))
        if status == BadFilesStatus.BAD:
            # For BAD files the replica state is modified; not for SUSPICIOUS.
            try:
                update_replicas_states(replicas, session=session)
            except exception.UnsupportedOperation:
                raise exception.ReplicaNotFound("One or several replicas don't exist.")
    else:
        # On non-deterministic RSEs the replica is looked up by its full path.
        path_clause = []
        parsed_pfn = proto.parse_pfns(pfns=pfns)
        for pfn in parsed_pfn:
            path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name'])
            __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session)
            if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS):
                replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD})
                new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size)
                new_bad_replica.save(session=session, flush=False)
                session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False)
                declared_replicas.append(pfn)
                path_clause.append(models.RSEFileAssociation.path == path)
                if path.startswith('/'):
                    path_clause.append(models.RSEFileAssociation.path == path[1:])
                else:
                    path_clause.append(models.RSEFileAssociation.path == '/%s' % path)
            elif already_declared:
                unknown_replicas.append('%s %s' % (pfn, 'Already declared'))
            else:
                no_hidden_char = True
                for char in str(pfn):
                    if not isprint(char):
                        unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars'))
                        no_hidden_char = False
                        break
                if no_hidden_char:
                    unknown_replicas.append('%s %s' % (pfn, 'Unknown replica'))
        if status == BadFilesStatus.BAD and declared_replicas != []:
            query = session.query(models.RSEFileAssociation).\
                with_hint(models.RSEFileAssociation, '+ index(replicas REPLICAS_PATH_IDX)', 'oracle').\
                filter(models.RSEFileAssociation.rse_id == rse_id).\
                filter(or_(*path_clause))
            rowcount = query.update({'state': ReplicaState.BAD})
            if rowcount != len(declared_replicas):
                print(rowcount, len(declared_replicas), declared_replicas)
                raise exception.ReplicaNotFound("One or several replicas don't exist.")
    try:
        session.flush()
    except (IntegrityError, DatabaseError, FlushError) as error:
        raise exception.RucioException(error.args)
    return unknown_replicas

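
# Stand-alone sketch of the scope convention used on deterministic RSEs in
# __declare_bad_file_replicas: '/user/<u>/...' and '/group/<g>/...' become
# 'user.<u>' / 'group.<g>', other absolute paths take their first component.
# The example paths are invented.
def _scope_from_path_sketch(path):
    parts = path.split('/')
    if path.startswith('/user') or path.startswith('/group'):
        return '%s.%s' % (parts[1], parts[2])
    if path.startswith('/'):
        return parts[1]
    return parts[0]

assert _scope_from_path_sketch('/user/jdoe/data/file1') == 'user.jdoe'
assert _scope_from_path_sketch('/mc16/ab/cd/file2') == 'mc16'
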
@transactional_session
def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None):
    """
    Declare a list of bad replicas.

    :param dids: The list of DIDs.
    :param rse_id: The RSE id.
    :param reason: The reason of the loss.
    :param issuer: The issuer account.
    :param state: BadFilesStatus.BAD
    :param session: The database session in use.
    """
    unknown_replicas = []
    replicas_for_update = []
    for did in dids:
        scope = InternalScope(did['scope'], vo=issuer.vo)
        name = did['name']
        replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session)
        if replica_exists and not already_declared:
            replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD})
            new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size)
            new_bad_replica.save(session=session, flush=False)
            session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False)
        elif already_declared:
            unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared'))
        else:
            unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica'))
    if state == BadFilesStatus.BAD:
        try:
            update_replicas_states(replicas_for_update, session=session)
        except exception.UnsupportedOperation:
            raise exception.ReplicaNotFound("One or several replicas don't exist.")
    try:
        session.flush()
    except (IntegrityError, DatabaseError, FlushError) as error:
        raise exception.RucioException(error.args)
    return unknown_replicas

@transactional_session
def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None):
    """
    Declare a list of bad replicas.

    :param pfns: The list of PFNs.
    :param reason: The reason of the loss.
    :param issuer: The issuer account.
    :param status: The status of the file (SUSPICIOUS or BAD).
    :param session: The database session in use.
    """
    scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session)
    for rse_id in files_to_declare:
        notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session)
        if notdeclared:
            unknown_replicas[rse_id] = notdeclared
    return unknown_replicas

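
# Hypothetical call sequence for declare_bad_file_replicas (requires a
# configured Rucio database; the PFN, reason and issuer are placeholders):
#
#     unknown = declare_bad_file_replicas(
#         pfns=['srm://se.example.org/rucio/mc16/ab/cd/file1'],
#         reason='checksum mismatch',
#         issuer=issuer_account,
#         status=BadFilesStatus.BAD)
#     for rse_id, pfn_errors in unknown.items():
#         print(rse_id, pfn_errors)
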
@read_session
def get_pfn_to_rse(pfns, vo='def', session=None):
    """
    Get the RSE associated to a list of PFNs.

    :param pfns: The list of pfn.
    :param vo: The VO to find RSEs at.
    :param session: The database session in use.

    :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}.
    """
    unknown_replicas = {}
    storage_elements = []
    se_condition = []
    dict_rse = {}
    surls = clean_surls(pfns)
    scheme = surls[0].split(':')[0] if surls else None
    for surl in surls:
        if surl.split(':')[0] != scheme:
            raise exception.InvalidType('The PFNs specified must have the same protocol')
        split_se = surl.split('/')[2].split(':')
        storage_element = split_se[0]
        if storage_element not in storage_elements:
            storage_elements.append(storage_element)
            se_condition.append(models.RSEProtocols.hostname == storage_element)
    query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\
        filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).\
        filter(models.RSE.staging_area == false())
    protocols = {}
    for rse_id, protocol, hostname, port, prefix in query.yield_per(10000):
        protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix))
    hint = None
    for surl in surls:
        if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1):
            dict_rse[hint].append(surl)
        else:
            mult_rse_match = 0
            for rse_id in protocols:
                if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo:
                    mult_rse_match += 1
                    if mult_rse_match > 1:
                        print('ERROR, multiple matches : %s at %s' % (surl, rse_id))
                        raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session)))
                    hint = rse_id
                    if hint not in dict_rse:
                        dict_rse[hint] = []
                    dict_rse[hint].append(surl)
            if mult_rse_match == 0:
                if 'unknown' not in unknown_replicas:
                    unknown_replicas['unknown'] = []
                unknown_replicas['unknown'].append(surl)
    return scheme, dict_rse, unknown_replicas

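
# Self-contained sketch of the uniform-scheme check performed at the top of
# get_pfn_to_rse: every PFN must share one protocol scheme, otherwise
# InvalidType is raised (ValueError stands in for it here). URLs are invented.
def _common_scheme_sketch(pfns):
    scheme = pfns[0].split(':')[0] if pfns else None
    for pfn in pfns:
        if pfn.split(':')[0] != scheme:
            raise ValueError('The PFNs specified must have the same protocol')
    return scheme

assert _common_scheme_sketch(['srm://a.example.org/x', 'srm://b.example.org/y']) == 'srm'
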
@read_session
def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None):
    """
    List RSE file replicas in state BAD, skipping LOST DIDs.

    :param limit: The maximum number of replicas returned.
    :param thread: The assigned thread for this necromancer.
    :param total_threads: The total number of threads of all necromancers.
    :param session: The database session in use.

    :returns: a list of dictionary {'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': rse}.
    """
    schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else ''
    if session.bind.dialect.name == 'oracle':
        # Hint the REPLICAS_STATE_IDX function-based index on Oracle.
        query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\
            with_hint(models.RSEFileAssociation, '+ index(replicas REPLICAS_STATE_IDX)', 'oracle').\
            filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))).\
            filter(models.RSEFileAssociation.state == ReplicaState.BAD)
    else:
        query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\
            filter(models.RSEFileAssociation.state == ReplicaState.BAD)
    query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % schema_dot)
    query = query.join(models.DataIdentifier,
                       and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope,
                            models.DataIdentifier.name == models.RSEFileAssociation.name)).\
        filter(models.DataIdentifier.availability != DIDAvailability.LOST)
    query = query.limit(limit)
    rows = []
    for scope, name, rse_id in query.yield_per(1000):
        rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)})
    return rows

@stream_session
def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None):
    """
    Get the DIDs associated to a PFN on one given RSE.

    :param pfns: The list of PFNs.
    :param rse_id: The RSE id.
    :param vo: The VO to get DIDs from.
    :param session: The database session in use.
    :returns: A dictionary {pfn: {'scope': scope, 'name': name}}
    """
    dict_rse = {}
    if not rse_id:
        scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session)
        if unknown_replicas:
            raise Exception
    else:
        scheme = 'srm'
        dict_rse[rse_id] = pfns
    for rse_id in dict_rse:
        pfns = dict_rse[rse_id]
        rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session)
        pfndict = {}
        proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme)
        if rse_info['deterministic']:
            parsed_pfn = proto.parse_pfns(pfns=pfns)
            for pfn in parsed_pfn:
                path = parsed_pfn[pfn]['path']
                if path.startswith('/user') or path.startswith('/group'):
                    scope = '%s.%s' % (path.split('/')[1], path.split('/')[2])
                    name = parsed_pfn[pfn]['name']
                elif path.startswith('/'):
                    scope = path.split('/')[1]
                    name = parsed_pfn[pfn]['name']
                else:
                    scope = path.split('/')[0]
                    name = parsed_pfn[pfn]['name']
                scope = InternalScope(scope, vo)
                yield {pfn: {'scope': scope, 'name': name}}
        else:
            condition = []
            parsed_pfn = proto.parse_pfns(pfns=pfns)
            for pfn in parsed_pfn:
                path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name'])
                pfndict[path] = pfn
                condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id))
            for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)):
                yield {pfndict[pfn]: {'scope': scope, 'name': name}}

def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session):
    """
    Resolve list of DIDs into a list of conditions.

    :param dids: The list of data identifiers (DIDs).
    :param unavailable: (deprecated) Also include unavailable replicas in the list.
    :param ignore_availability: Ignore the RSE blocklisting.
    :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
    :param resolve_archives: When set to true, find archives which contain the replicas.
    :param session: The database session in use.
    """
    did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], []
    files_wo_replica = []
    # Deduplicate the input DIDs before building the clauses.
    for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]:
        if ('type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value)) or \
                ('did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value)):
            files_wo_replica.append({'scope': did['scope'], 'name': did['name']})
            file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name']))
        else:
            did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name']))
    if did_clause:
        for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope,
                                                                models.DataIdentifier.name,
                                                                models.DataIdentifier.did_type,
                                                                models.DataIdentifier.constituent).\
                with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').\
                filter(or_(*did_clause)):
            if resolve_archives and constituent:
                constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope,
                                               models.ConstituentAssociation.child_name == name))
            if did_type == DIDType.FILE:
                files_wo_replica.append({'scope': scope, 'name': name})
                file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name))
            elif did_type == DIDType.DATASET:
                dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name))
            else:
                # A container: walk the hierarchy iteratively and collect all datasets.
                content_query = session.query(models.DataIdentifierAssociation.child_scope,
                                              models.DataIdentifierAssociation.child_name,
                                              models.DataIdentifierAssociation.child_type)
                content_query = content_query.with_hint(models.DataIdentifierAssociation, 'INDEX(CONTENTS CONTENTS_PK)', 'oracle')
                child_dids = [(scope, name)]
                while child_dids:
                    s, n = child_dids.pop()
                    for tmp_did in content_query.filter_by(scope=s, name=n):
                        if tmp_did.child_type == DIDType.DATASET:
                            dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope,
                                                       models.DataIdentifierAssociation.name == tmp_did.child_name))
                        else:
                            child_dids.append((tmp_did.child_scope, tmp_did.child_name))
    state_clause = None
    if not all_states:
        if not unavailable:
            state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)
        else:
            state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE,
                               models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE,
                               models.RSEFileAssociation.state == ReplicaState.COPYING)
    return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica

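
# Toy model of the container walk in _resolve_dids: a stack of (scope, name)
# pairs is expanded until only datasets remain. The 'contents' dict stands in
# for the CONTENTS table; all names are invented.
def _collect_datasets_sketch(root, contents):
    datasets, stack = [], [root]
    while stack:
        for child, child_type in contents.get(stack.pop(), []):
            if child_type == 'DATASET':
                datasets.append(child)
            else:
                stack.append(child)
    return datasets

_toy = {('s', 'cont'): [(('s', 'ds1'), 'DATASET'), (('s', 'sub'), 'CONTAINER')],
        ('s', 'sub'): [(('s', 'ds2'), 'DATASET')]}
assert sorted(_collect_datasets_sketch(('s', 'cont'), _toy)) == [('s', 'ds1'), ('s', 'ds2')]
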
def _pick_n_random(nrandom, generator):
    """
    Select n random elements from the generator.
    """
    if not nrandom:
        # No sample size requested: pass the stream through unchanged.
        yield from generator
        return
    # Reservoir sampling: keep the first n elements, then replace a random
    # reservoir slot with decreasing probability as more elements are seen.
    selected = []
    i = 0
    iterator = iter(generator)
    try:
        for _ in range(nrandom):
            selected.append(next(iterator))
            i += 1
        while True:
            element = next(iterator)
            i += 1
            # Pick a slot uniformly in [0, i); keep the new element only if
            # the slot falls inside the reservoir (probability nrandom / i).
            index_to_substitute = random.randint(0, i - 1)
            if index_to_substitute < nrandom:
                selected[index_to_substitute] = element
    except StopIteration:
        pass
    for r in selected:
        yield r

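
# Quick self-contained check of the reservoir sampling above: drawing 3 of
# 1000 integers always yields exactly 3 elements from the input range.
_sample = list(_pick_n_random(3, iter(range(1000))))
assert len(_sample) == 3 and all(0 <= x < 1000 for x in _sample)
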
def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session):
    """
    List file replicas for a list of datasets.

    :param session: The database session in use.
    """
    if not dataset_clause:
        return
    replica_query = session.query(models.DataIdentifierAssociation.child_scope,
                                  models.DataIdentifierAssociation.child_name,
                                  models.DataIdentifierAssociation.bytes,
                                  models.DataIdentifierAssociation.md5,
                                  models.DataIdentifierAssociation.adler32,
                                  models.RSEFileAssociation.path,
                                  models.RSEFileAssociation.state,
                                  models.RSE.id,
                                  models.RSE.rse,
                                  models.RSE.rse_type,
                                  models.RSE.volatile).\
        with_hint(models.RSEFileAssociation, text='INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', dialect_name='oracle').\
        outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
                                                  models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\
        join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\
        filter(models.RSE.deleted == false()).\
        filter(or_(*dataset_clause)).\
        order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name)
    if not ignore_availability:
        replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7)))
    if state_clause is not None:
        replica_query = replica_query.filter(and_(state_clause))
    if rse_clause is not None:
        replica_query = replica_query.filter(or_(*rse_clause))
    if updated_after:
        replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after)
    for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500):
        yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile

def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session):
    """
    List file replicas for archive constituents.
    """
    if not constituent_clause:
        return
    constituent_query = session.query(models.ConstituentAssociation.child_scope,
                                      models.ConstituentAssociation.child_name,
                                      models.ConstituentAssociation.scope,
                                      models.ConstituentAssociation.name,
                                      models.ConstituentAssociation.bytes,
                                      models.ConstituentAssociation.md5,
                                      models.ConstituentAssociation.adler32,
                                      models.RSEFileAssociation.path,
                                      models.RSEFileAssociation.state,
                                      models.RSE.id,
                                      models.RSE.rse,
                                      models.RSE.rse_type,
                                      models.RSE.volatile).\
        with_hint(models.RSEFileAssociation, text='INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', dialect_name='oracle').\
        with_hint(models.ConstituentAssociation, 'INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)', 'oracle').\
        outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope,
                                                  models.ConstituentAssociation.name == models.RSEFileAssociation.name)).\
        join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\
        filter(models.RSE.deleted == false()).\
        filter(or_(*constituent_clause)).\
        order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name)
    if not ignore_availability:
        constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7)))
    if state_clause is not None:
        constituent_query = constituent_query.filter(and_(state_clause))
    if rse_clause is not None:
        constituent_query = constituent_query.filter(or_(*rse_clause))
    if updated_after:
        constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after)
    for replica in constituent_query.yield_per(500):
        scope, name = replica[0], replica[1]
        # A constituent with a resolved replica is no longer "without replica".
        if {'scope': scope, 'name': name} in files_wo_replica:
            files_wo_replica.remove({'scope': scope, 'name': name})
        yield replica

def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session):
    """
    List file replicas for a list of files.

    :param session: The database session in use.
    """
    if not file_clause:
        return
    for replica_condition in chunks(file_clause, 50):
        filters = [models.RSEFileAssociation.rse_id == models.RSE.id,
                   models.RSE.deleted == false(),
                   or_(*replica_condition)]
        if not ignore_availability:
            filters.append(models.RSE.availability.in_((4, 5, 6, 7)))
        if state_clause is not None:
            filters.append(state_clause)
        if rse_clause:
            filters.append(or_(*rse_clause))
        if updated_after:
            filters.append(models.RSEFileAssociation.updated_at >= updated_after)
        replica_query = session.query(models.RSEFileAssociation.scope,
                                      models.RSEFileAssociation.name,
                                      models.RSEFileAssociation.bytes,
                                      models.RSEFileAssociation.md5,
                                      models.RSEFileAssociation.adler32,
                                      models.RSEFileAssociation.path,
                                      models.RSEFileAssociation.state,
                                      models.RSE.id,
                                      models.RSE.rse,
                                      models.RSE.rse_type,
                                      models.RSE.volatile).\
            filter(and_(*filters)).\
            order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name).\
            with_hint(models.RSEFileAssociation, text='INDEX(REPLICAS REPLICAS_PK)', dialect_name='oracle')
        for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all():
            if {'scope': scope, 'name': name} in files_wo_replica:
                files_wo_replica.remove({'scope': scope, 'name': name})
            yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile

def get_vp_endpoint():
    """
    VP endpoint is the Virtual Placement server.
    Once VP is integrated in Rucio it won't be needed.
    """
    vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='')
    return vp_endpoint

def get_multi_cache_prefix(cache_site, filename, logger=logging.log):
    """
    For a given cache site and filename, return the address of the cache node
    that should be prefixed.

    :param cache_site: Cache site
    :param filename: Filename
    """
    vp_endpoint = get_vp_endpoint()
    if not vp_endpoint:
        return ''
    x_caches = REGION.get('CacheSites')
    if x_caches is NO_VALUE:
        try:
            response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False)
            if response.ok:
                x_caches = response.json()
                REGION.set('CacheSites', x_caches)
            else:
                REGION.set('CacheSites', {'could not reload': ''})
                return ''
        except requests.exceptions.RequestException as re:
            REGION.set('CacheSites', {'could not reload': ''})
            logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Exception: {}'.format(vp_endpoint, re))
            return ''
    if cache_site not in x_caches:
        return ''
    xcache_site = x_caches[cache_site]
    # Hash the filename into [0, 1) and walk the cumulative server ranges.
    h = float(unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2 ** 64
    for irange in xcache_site['ranges']:
        if h < irange[1]:
            return xcache_site['servers'][irange[0]][0]
    return ''

should be prefixed.
:param cache_site: Cache site
:param filename: Filename | lib/rucio/core/replica.py | get_multi_cache_prefix | bari12/rucio | python | def get_multi_cache_prefix(cache_site, filename, logger=logging.log):
'\n for a givent cache site and filename, return address of the cache node that\n should be prefixed.\n\n :param cache_site: Cache site\n :param filename: Filename\n '
vp_endpoint = get_vp_endpoint()
if (not vp_endpoint):
return
x_caches = REGION.get('CacheSites')
if (x_caches is NO_VALUE):
try:
response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False)
if response.ok:
x_caches = response.json()
REGION.set('CacheSites', x_caches)
else:
REGION.set('CacheSites', {'could not reload': ''})
return ''
except requests.exceptions.RequestException as re:
REGION.set('CacheSites', {'could not reload': ''})
logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Exception: {}'.format(vp_endpoint, re))
return ''
if (cache_site not in x_caches):
return ''
xcache_site = x_caches[cache_site]
h = (float(unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / (2 ** 64))
for irange in xcache_site['ranges']:
if (h < irange[1]):
return xcache_site['servers'][irange[0]][0]
return '' |
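The hashing step in the record above maps a filename to a float in [0, 1) and returns the first cache server whose range upper bound exceeds it. A self-contained sketch; the cache-site layout is invented (real layouts come from the VP server's serverRanges response):

from hashlib import sha256
from struct import unpack

def filename_to_unit_interval(filename):
    # Same construction as the body: first 8 bytes of the SHA-256 digest,
    # read as an unsigned 64-bit integer, scaled into [0, 1).
    return float(unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / (2 ** 64)

# Hypothetical site: two cache nodes splitting the hash space in half.
xcache_site = {'servers': [['node1.example.org'], ['node2.example.org']],
               'ranges': [(0, 0.5), (1, 1.0)]}

h = filename_to_unit_interval('user.jdoe:file.root')
for server_index, upper_bound in xcache_site['ranges']:
    if h < upper_bound:
        print(xcache_site['servers'][server_index][0])
        break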
@stream_session
def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None):
"\n List file replicas for a list of data identifiers (DIDs).\n\n :param dids: The list of data identifiers (DIDs).\n :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...)\n :param unavailable: (deprecated) Also include unavailable replicas in the list.\n :param request_id: ID associated with the request for debugging.\n :param ignore_availability: Ignore the RSE blocklisting.\n :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.\n :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs.\n :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'}\n :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan']\n :param sign_urls: If set, will sign the PFNs if necessary.\n :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN.\n :param resolve_archives: When set to true, find archives which contain the replicas.\n :param resolve_parents: When set to true, find all parent datasets which contain the replicas.\n :param updated_after: datetime (UTC time), only return replicas updated after this time\n :param session: The database session in use.\n "
if dids:
filter = {'vo': dids[0]['scope'].vo}
else:
filter = {'vo': 'def'}
(file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica) = _resolve_dids(dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session)
rse_clause = []
if rse_expression:
for rse in parse_expression(expression=rse_expression, filter=filter, session=session):
rse_clause.append((models.RSEFileAssociation.rse_id == rse['id']))
(yield from _pick_n_random(nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session))) | -3,264,574,774,412,415,500 | List file replicas for a list of data identifiers (DIDs).
:param dids: The list of data identifiers (DIDs).
:param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...)
:param unavailable: (deprecated) Also include unavailable replicas in the list.
:param request_id: ID associated with the request for debugging.
:param ignore_availability: Ignore the RSE blocklisting.
:param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
:param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs.
:param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'}
:param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan']
:param sign_urls: If set, will sign the PFNs if necessary.
:param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN.
:param resolve_archives: When set to true, find archives which contain the replicas.
:param resolve_parents: When set to true, find all parent datasets which contain the replicas.
:param updated_after: datetime (UTC time), only return replicas updated after this time
:param session: The database session in use. | lib/rucio/core/replica.py | list_replicas | bari12/rucio | python | @stream_session
def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None):
"\n List file replicas for a list of data identifiers (DIDs).\n\n :param dids: The list of data identifiers (DIDs).\n :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...)\n :param unavailable: (deprecated) Also include unavailable replicas in the list.\n :param request_id: ID associated with the request for debugging.\n :param ignore_availability: Ignore the RSE blocklisting.\n :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.\n :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs.\n :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'}\n :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan']\n :param sign_urls: If set, will sign the PFNs if necessary.\n :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN.\n :param resolve_archives: When set to true, find archives which contain the replicas.\n :param resolve_parents: When set to true, find all parent datasets which contain the replicas.\n :param updated_after: datetime (UTC time), only return replicas updated after this time\n :param session: The database session in use.\n "
if dids:
filter = {'vo': dids[0]['scope'].vo}
else:
filter = {'vo': 'def'}
(file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica) = _resolve_dids(dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session)
rse_clause = []
if rse_expression:
for rse in parse_expression(expression=rse_expression, filter=filter, session=session):
rse_clause.append((models.RSEFileAssociation.rse_id == rse['id']))
(yield from _pick_n_random(nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session))) |
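A hypothetical call of list_replicas from the record above, assuming a configured Rucio database layer, an existing file DID, and an RSE matching the expression; the scope, name, and expression are invented:

from rucio.common.types import InternalScope
from rucio.core.replica import list_replicas

dids = [{'scope': InternalScope('user.jdoe'), 'name': 'file.root'}]
# list_replicas is a generator; each item is a dict carrying 'scope', 'name',
# 'bytes', checksums and, when pfns=True, a 'pfns' mapping keyed by PFN.
for replica in list_replicas(dids=dids, schemes=['root'], rse_expression='tier=1'):
    print(replica['scope'], replica['name'], list(replica.get('pfns', {})))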
@transactional_session
def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None):
'\n    Bulk add new dids.\n\n    :param files: the list of new files.\n    :param account: The account owner.\n    :param session: The database session in use.\n    :returns: True if successful.\n    '
for file in files:
new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=(file.get('account') or account), did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None)
new_did.save(session=session, flush=False)
if (('meta' in file) and file['meta']):
rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session)
if dataset_meta:
rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session)
try:
session.flush()
except IntegrityError as error:
if (match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0])):
raise exception.ScopeNotFound('Scope not found!')
raise exception.RucioException(error.args)
except DatabaseError as error:
if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]):
raise exception.ScopeNotFound('Scope not found!')
raise exception.RucioException(error.args)
except FlushError as error:
if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!')
raise exception.RucioException(error.args)
return True | 1,837,834,521,056,973,800 | Bulk add new dids.
:param files: the list of new files.
:param account: The account owner.
:param session: The database session in use.
:returns: True if successful. | lib/rucio/core/replica.py | __bulk_add_new_file_dids | bari12/rucio | python | @transactional_session
def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None):
'\n    Bulk add new dids.\n\n    :param files: the list of new files.\n    :param account: The account owner.\n    :param session: The database session in use.\n    :returns: True if successful.\n    '
for file in files:
new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=(file.get('account') or account), did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None)
new_did.save(session=session, flush=False)
if (('meta' in file) and file['meta']):
rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session)
if dataset_meta:
rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session)
try:
session.flush()
except IntegrityError as error:
if (match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0])):
raise exception.ScopeNotFound('Scope not found!')
raise exception.RucioException(error.args)
except DatabaseError as error:
if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]):
raise exception.ScopeNotFound('Scope not found!')
raise exception.RucioException(error.args)
except FlushError as error:
if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!')
raise exception.RucioException(error.args)
return True |
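The record above funnels backend-specific integrity errors into domain exceptions by matching their message text. A reduced, self-contained sketch of that dispatch pattern; the two patterns shown are a subset of those in the body:

from re import match

def classify_db_error(message):
    # Foreign-key violations mean the scope row is missing.
    if match('.*FOREIGN KEY constraint failed.*', message):
        return 'ScopeNotFound'
    # Identity-key conflicts mean the DID already exists.
    if match('New instance .* with identity key .* conflicts with persistent instance', message):
        return 'DataIdentifierAlreadyExists'
    return 'RucioException'

print(classify_db_error('IntegrityError: FOREIGN KEY constraint failed'))  # ScopeNotFound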
@transactional_session
def __bulk_add_file_dids(files, account, dataset_meta=None, session=None):
'\n    Bulk add new dids.\n\n    :param files: the list of files.\n    :param account: The account owner.\n    :param session: The database session in use.\n    :returns: True if successful.\n    '
condition = []
for f in files:
condition.append(and_((models.DataIdentifier.scope == f['scope']), (models.DataIdentifier.name == f['name']), (models.DataIdentifier.did_type == DIDType.FILE)))
q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, 'INDEX(dids DIDS_PK)', 'oracle').filter(or_(*condition))
available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q]
new_files = list()
for file in files:
found = False
for available_file in available_files:
if ((file['scope'] == available_file['scope']) and (file['name'] == available_file['name'])):
found = True
break
if (not found):
new_files.append(file)
__bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session)
return (new_files + available_files) | -8,821,624,138,230,299,000 | Bulk add new dids.
:param files: the list of files.
:param account: The account owner.
:param session: The database session in use.
:returns: True if successful. | lib/rucio/core/replica.py | __bulk_add_file_dids | bari12/rucio | python | @transactional_session
def __bulk_add_file_dids(files, account, dataset_meta=None, session=None):
'\n    Bulk add new dids.\n\n    :param files: the list of files.\n    :param account: The account owner.\n    :param session: The database session in use.\n    :returns: True if successful.\n    '
condition = []
for f in files:
condition.append(and_((models.DataIdentifier.scope == f['scope']), (models.DataIdentifier.name == f['name']), (models.DataIdentifier.did_type == DIDType.FILE)))
q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, 'INDEX(dids DIDS_PK)', 'oracle').filter(or_(*condition))
available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q]
new_files = list()
for file in files:
found = False
for available_file in available_files:
if ((file['scope'] == available_file['scope']) and (file['name'] == available_file['name'])):
found = True
break
if (not found):
new_files.append(file)
__bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session)
return (new_files + available_files) |
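The nested loop in the record above is a membership test on (scope, name) pairs. An equivalent sketch using a set for the lookup; the inputs are invented but shaped like the body's lists of dicts:

files = [{'scope': 'user.jdoe', 'name': 'a'}, {'scope': 'user.jdoe', 'name': 'b'}]
available_files = [{'scope': 'user.jdoe', 'name': 'a'}]

# Hash the identifying pair once instead of re-scanning available_files per file.
available_keys = {(f['scope'], f['name']) for f in available_files}
new_files = [f for f in files if (f['scope'], f['name']) not in available_keys]
print(new_files)  # [{'scope': 'user.jdoe', 'name': 'b'}]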
@transactional_session
def __bulk_add_replicas(rse_id, files, account, session=None):
'\n    Bulk add new file replicas.\n\n    :param rse_id: the RSE id.\n    :param files: the list of files.\n    :param account: The account owner.\n    :param session: The database session in use.\n    :returns: True if successful.\n    '
(nbfiles, bytes) = (0, 0)
condition = []
for f in files:
condition.append(and_((models.RSEFileAssociation.scope == f['scope']), (models.RSEFileAssociation.name == f['name']), (models.RSEFileAssociation.rse_id == rse_id)))
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).with_hint(models.RSEFileAssociation, text='INDEX(REPLICAS REPLICAS_PK)', dialect_name='oracle').filter(or_(*condition))
available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query]
default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None)
default_tombstone = tombstone_from_delay(default_tombstone_delay)
new_replicas = []
for file in files:
found = False
for available_replica in available_replicas:
if ((file['scope'] == available_replica['scope']) and (file['name'] == available_replica['name']) and (rse_id == available_replica['rse_id'])):
found = True
break
if (not found):
nbfiles += 1
bytes += file['bytes']
new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': (file.get('tombstone') or default_tombstone)})
try:
(new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas))
session.flush()
return (nbfiles, bytes)
except IntegrityError as error:
if (match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) or match('.*IntegrityError.*columns? rse_id.*scope.*name.*not unique.*', error.args[0]) or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0])):
raise exception.Duplicate('File replica already exists!')
raise exception.RucioException(error.args)
except DatabaseError as error:
raise exception.RucioException(error.args) | 5,013,684,583,000,202,000 | Bulk add new file replicas.
:param rse_id: the RSE id.
:param files: the list of files.
:param account: The account owner.
:param session: The database session in use.
:returns: True if successful. | lib/rucio/core/replica.py | __bulk_add_replicas | bari12/rucio | python | @transactional_session
def __bulk_add_replicas(rse_id, files, account, session=None):
'\n    Bulk add new file replicas.\n\n    :param rse_id: the RSE id.\n    :param files: the list of files.\n    :param account: The account owner.\n    :param session: The database session in use.\n    :returns: True if successful.\n    '
(nbfiles, bytes) = (0, 0)
condition = []
for f in files:
condition.append(and_((models.RSEFileAssociation.scope == f['scope']), (models.RSEFileAssociation.name == f['name']), (models.RSEFileAssociation.rse_id == rse_id)))
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).with_hint(models.RSEFileAssociation, text='INDEX(REPLICAS REPLICAS_PK)', dialect_name='oracle').filter(or_(*condition))
available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query]
default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None)
default_tombstone = tombstone_from_delay(default_tombstone_delay)
new_replicas = []
for file in files:
found = False
for available_replica in available_replicas:
if ((file['scope'] == available_replica['scope']) and (file['name'] == available_replica['name']) and (rse_id == available_replica['rse_id'])):
found = True
break
if (not found):
nbfiles += 1
bytes += file['bytes']
new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': (file.get('tombstone') or default_tombstone)})
try:
(new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas))
session.flush()
return (nbfiles, bytes)
except IntegrityError as error:
if (match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) or match('.*IntegrityError.*columns? rse_id.*scope.*name.*not unique.*', error.args[0]) or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0])):
raise exception.Duplicate('File replica already exists!')
raise exception.RucioException(error.args)
except DatabaseError as error:
raise exception.RucioException(error.args) |
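A sketch of the tombstone defaulting in the record above: a per-RSE delay attribute becomes an absolute timestamp, and an explicit per-file tombstone takes precedence. The helper below is a stand-in for the tombstone_from_delay utility the body calls, not its actual implementation:

from datetime import datetime, timedelta

def tombstone_from_delay_sketch(delay_seconds):
    # None means the RSE defines no default tombstone.
    if delay_seconds is None:
        return None
    return datetime.utcnow() + timedelta(seconds=int(delay_seconds))

default_tombstone = tombstone_from_delay_sketch(7200)  # hypothetical RSE attribute value
file = {'scope': 'user.jdoe', 'name': 'a', 'bytes': 1024}
print(file.get('tombstone') or default_tombstone)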
@transactional_session
def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None):
'\n    Bulk add file replicas.\n\n    :param rse_id: The RSE id.\n    :param files: The list of files.\n    :param account: The account owner.\n    :param ignore_availability: Ignore the RSE blocklisting.\n    :param session: The database session in use.\n\n    :returns: True if successful.\n    '
def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None):
p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr)
expected_pfns = p.lfns2pfns(lfns)
return clean_surls(expected_pfns.values())
replica_rse = get_rse(rse_id=rse_id, session=session)
if (replica_rse.volatile is True):
raise exception.UnsupportedOperation(('Cannot add replicas on volatile RSE %s ' % replica_rse.rse))
if ((not (replica_rse.availability & 2)) and (not ignore_availability)):
raise exception.ResourceTemporaryUnavailable(('%s is temporarily unavailable for writing' % replica_rse.rse))
replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session)
(pfns, scheme) = ({}, None)
for file in files:
if ('pfn' not in file):
if (not replica_rse.deterministic):
raise exception.UnsupportedOperation(('PFN needed for this (non deterministic) RSE %s ' % replica_rse.rse))
else:
scheme = file['pfn'].split(':')[0]
pfns.setdefault(scheme, []).append(file['pfn'])
if pfns:
rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session)
for scheme in pfns.keys():
if (not replica_rse.deterministic):
p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme)
pfns[scheme] = p.parse_pfns(pfns=pfns[scheme])
for file in files:
if file['pfn'].startswith(scheme):
tmp = pfns[scheme][file['pfn']]
file['path'] = ''.join([tmp['path'], tmp['name']])
else:
lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)]
pfns[scheme] = clean_surls(pfns[scheme])
found_on_wan = False
available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan')
expected_pfns_wan = None
for protocol_attr in available_wan_protocols:
pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr)
if ((not expected_pfns_wan) and pfns_wan_buffer):
expected_pfns_wan = pfns_wan_buffer
found_on_wan = (found_on_wan or (pfns_wan_buffer == pfns[scheme]))
if found_on_wan:
break
if (not found_on_wan):
found_on_lan = False
available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan')
for protocol_attr in available_lan_protocols:
pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr)
found_on_lan = (found_on_lan or (pfns_lan_buffer == pfns[scheme]))
if found_on_lan:
break
if found_on_lan:
pfns[scheme] = expected_pfns_wan
else:
raise exception.InvalidPath(('One of the PFNs provided does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))))
(nbfiles, bytes) = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session)
increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session)
return replicas | -7,440,819,356,795,058,000 | Bulk add file replicas.
:param rse_id: The RSE id.
:param files: The list of files.
:param account: The account owner.
:param ignore_availability: Ignore the RSE blocklisting.
:param session: The database session in use.
:returns: True if successful. | lib/rucio/core/replica.py | add_replicas | bari12/rucio | python | @transactional_session
def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None):
'\n    Bulk add file replicas.\n\n    :param rse_id: The RSE id.\n    :param files: The list of files.\n    :param account: The account owner.\n    :param ignore_availability: Ignore the RSE blocklisting.\n    :param session: The database session in use.\n\n    :returns: True if successful.\n    '
def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None):
p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr)
expected_pfns = p.lfns2pfns(lfns)
return clean_surls(expected_pfns.values())
replica_rse = get_rse(rse_id=rse_id, session=session)
if (replica_rse.volatile is True):
raise exception.UnsupportedOperation(('Cannot add replicas on volatile RSE %s ' % replica_rse.rse))
if ((not (replica_rse.availability & 2)) and (not ignore_availability)):
raise exception.ResourceTemporaryUnavailable(('%s is temporarily unavailable for writing' % replica_rse.rse))
replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session)
(pfns, scheme) = ({}, None)
for file in files:
if ('pfn' not in file):
if (not replica_rse.deterministic):
raise exception.UnsupportedOperation(('PFN needed for this (non deterministic) RSE %s ' % replica_rse.rse))
else:
scheme = file['pfn'].split(':')[0]
pfns.setdefault(scheme, []).append(file['pfn'])
if pfns:
rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session)
for scheme in pfns.keys():
if (not replica_rse.deterministic):
p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme)
pfns[scheme] = p.parse_pfns(pfns=pfns[scheme])
for file in files:
if file['pfn'].startswith(scheme):
tmp = pfns[scheme][file['pfn']]
file['path'] = ''.join([tmp['path'], tmp['name']])
else:
lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)]
pfns[scheme] = clean_surls(pfns[scheme])
found_on_wan = False
available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan')
expected_pfns_wan = None
for protocol_attr in available_wan_protocols:
pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr)
if ((not expected_pfns_wan) and pfns_wan_buffer):
expected_pfns_wan = pfns_wan_buffer
found_on_wan = (found_on_wan or (pfns_wan_buffer == pfns[scheme]))
if found_on_wan:
break
if (not found_on_wan):
found_on_lan = False
available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan')
for protocol_attr in available_lan_protocols:
pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr)
found_on_lan = (found_on_lan or (pfns_lan_buffer == pfns[scheme]))
if found_on_lan:
break
if found_on_lan:
pfns[scheme] = expected_pfns_wan
else:
raise exception.InvalidPath(('One of the PFNs provided does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))))
(nbfiles, bytes) = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session)
increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session)
return replicas |
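A hypothetical call of add_replicas from the record above, assuming a configured database and an existing deterministic RSE; every identifier below is invented:

from rucio.common.types import InternalAccount, InternalScope
from rucio.core.replica import add_replicas

files = [{'scope': InternalScope('user.jdoe'), 'name': 'file.root',
          'bytes': 1024, 'adler32': '0cc737eb'}]  # 'pfn' may be omitted on deterministic RSEs
add_replicas(rse_id='9c9c9c9c9c9c4c9c8c9c9c9c9c9c9c9c',  # placeholder RSE UUID
             files=files, account=InternalAccount('root'))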
@transactional_session
def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None):
"\n Add File replica.\n\n :param rse_id: the rse id.\n :param scope: the scope name.\n :param name: The data identifier name.\n :param bytes: the size of the file.\n :param account: The account owner.\n :param md5: The md5 checksum.\n :param adler32: The adler32 checksum.\n :param pfn: Physical file name (for nondeterministic rse).\n :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary.\n :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].\n :param tombstone: If True, create replica with a tombstone.\n :param session: The database session in use.\n\n :returns: True is successful.\n "
if (meta is None):
meta = {}
file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone}
if pfn:
file['pfn'] = pfn
return add_replicas(rse_id=rse_id, files=[file], account=account, session=session) | -2,775,978,389,477,913,600 | Add File replica.
:param rse_id: the rse id.
:param scope: the scope name.
:param name: The data identifier name.
:param bytes: the size of the file.
:param account: The account owner.
:param md5: The md5 checksum.
:param adler32: The adler32 checksum.
:param pfn: Physical file name (for nondeterministic rse).
:param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary.
:param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].
:param tombstone: If True, create replica with a tombstone.
:param session: The database session in use.
:returns: True if successful. | lib/rucio/core/replica.py | add_replica | bari12/rucio | python | @transactional_session
def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None):
"\n Add File replica.\n\n :param rse_id: the rse id.\n :param scope: the scope name.\n :param name: The data identifier name.\n :param bytes: the size of the file.\n :param account: The account owner.\n :param md5: The md5 checksum.\n :param adler32: The adler32 checksum.\n :param pfn: Physical file name (for nondeterministic rse).\n :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary.\n :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].\n :param tombstone: If True, create replica with a tombstone.\n :param session: The database session in use.\n\n :returns: True is successful.\n "
if (meta is None):
meta = {}
file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone}
if pfn:
file['pfn'] = pfn
return add_replicas(rse_id=rse_id, files=[file], account=account, session=session) |
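add_replica is the single-file convenience wrapper over add_replicas; a hypothetical call under the same assumptions as the sketch above:

from rucio.common.types import InternalAccount, InternalScope
from rucio.core.replica import add_replica

add_replica(rse_id='9c9c9c9c9c9c4c9c8c9c9c9c9c9c9c9c',  # placeholder RSE UUID
            scope=InternalScope('user.jdoe'), name='file.root', bytes=1024,
            account=InternalAccount('root'), adler32='0cc737eb')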
@transactional_session
def delete_replicas(rse_id, files, ignore_availability=True, session=None):
'\n Delete file replicas.\n\n :param rse_id: the rse id.\n :param files: the list of files to delete.\n :param ignore_availability: Ignore the RSE blocklisting.\n :param session: The database session in use.\n '
replica_rse = get_rse(rse_id=rse_id, session=session)
if ((not (replica_rse.availability & 1)) and (not ignore_availability)):
raise exception.ResourceTemporaryUnavailable(('%s is temporarily unavailable for deleting' % replica_rse.rse))
(replica_condition, src_condition) = ([], [])
for file in files:
replica_condition.append(and_((models.RSEFileAssociation.scope == file['scope']), (models.RSEFileAssociation.name == file['name'])))
src_condition.append(and_((models.Source.scope == file['scope']), (models.Source.name == file['name']), (models.Source.rse_id == rse_id)))
(delta, bytes, rowcount) = (0, 0, 0)
for chunk in chunks(src_condition, 10):
rowcount = session.query(models.Source).filter(or_(*chunk)).delete(synchronize_session=False)
rowcount = 0
for chunk in chunks(replica_condition, 10):
for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).with_hint(models.RSEFileAssociation, 'INDEX(REPLICAS REPLICAS_PK)', 'oracle').filter((models.RSEFileAssociation.rse_id == rse_id)).filter(or_(*chunk)):
bytes += replica_bytes
delta += 1
rowcount += session.query(models.RSEFileAssociation).filter((models.RSEFileAssociation.rse_id == rse_id)).filter(or_(*chunk)).delete(synchronize_session=False)
if (rowcount != len(files)):
raise exception.ReplicaNotFound("One or several replicas don't exist.")
__cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session)
decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) | -6,768,273,747,281,366,000 | Delete file replicas.
:param rse_id: the rse id.
:param files: the list of files to delete.
:param ignore_availability: Ignore the RSE blocklisting.
:param session: The database session in use. | lib/rucio/core/replica.py | delete_replicas | bari12/rucio | python | @transactional_session
def delete_replicas(rse_id, files, ignore_availability=True, session=None):
'\n Delete file replicas.\n\n :param rse_id: the rse id.\n :param files: the list of files to delete.\n :param ignore_availability: Ignore the RSE blocklisting.\n :param session: The database session in use.\n '
replica_rse = get_rse(rse_id=rse_id, session=session)
if ((not (replica_rse.availability & 1)) and (not ignore_availability)):
raise exception.ResourceTemporaryUnavailable(('%s is temporarily unavailable for deleting' % replica_rse.rse))
(replica_condition, src_condition) = ([], [])
for file in files:
replica_condition.append(and_((models.RSEFileAssociation.scope == file['scope']), (models.RSEFileAssociation.name == file['name'])))
src_condition.append(and_((models.Source.scope == file['scope']), (models.Source.name == file['name']), (models.Source.rse_id == rse_id)))
(delta, bytes, rowcount) = (0, 0, 0)
for chunk in chunks(src_condition, 10):
rowcount = session.query(models.Source).filter(or_(*chunk)).delete(synchronize_session=False)
rowcount = 0
for chunk in chunks(replica_condition, 10):
for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).with_hint(models.RSEFileAssociation, 'INDEX(REPLICAS REPLICAS_PK)', 'oracle').filter((models.RSEFileAssociation.rse_id == rse_id)).filter(or_(*chunk)):
bytes += replica_bytes
delta += 1
rowcount += session.query(models.RSEFileAssociation).filter((models.RSEFileAssociation.rse_id == rse_id)).filter(or_(*chunk)).delete(synchronize_session=False)
if (rowcount != len(files)):
raise exception.ReplicaNotFound("One or several replicas don't exist.")
__cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session)
decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) |
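A hypothetical call of delete_replicas from the record above. It raises ReplicaNotFound unless every listed file has a replica on the RSE, so callers pass exactly the files known to exist there; identifiers are invented:

from rucio.common.types import InternalScope
from rucio.core.replica import delete_replicas

delete_replicas(rse_id='9c9c9c9c9c9c4c9c8c9c9c9c9c9c9c9c',  # placeholder RSE UUID
                files=[{'scope': InternalScope('user.jdoe'), 'name': 'file.root'}])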
@transactional_session
def __cleanup_after_replica_deletion(rse_id, files, session=None):
'\n Perform update of collections/archive associations/dids after the removal of their replicas\n :param rse_id: the rse id\n :param files: list of files whose replica got deleted\n :param session: The database session in use.\n '
(parent_condition, did_condition) = ([], [])
(clt_replica_condition, dst_replica_condition) = ([], [])
(incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition) = ([], [], [], [])
for file in files:
dst_replica_condition.append(and_((models.DataIdentifierAssociation.child_scope == file['scope']), (models.DataIdentifierAssociation.child_name == file['name']), exists(select([1]).prefix_with('/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */', dialect='oracle')).where(and_((models.CollectionReplica.scope == models.DataIdentifierAssociation.scope), (models.CollectionReplica.name == models.DataIdentifierAssociation.name), (models.CollectionReplica.rse_id == rse_id)))))
parent_condition.append(and_((models.DataIdentifierAssociation.child_scope == file['scope']), (models.DataIdentifierAssociation.child_name == file['name']), (~ exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifier.scope == file['scope']), (models.DataIdentifier.name == file['name']), (models.DataIdentifier.availability == DIDAvailability.LOST)))), (~ exists(select([1]).prefix_with('/*+ INDEX(REPLICAS REPLICAS_PK) */', dialect='oracle')).where(and_((models.RSEFileAssociation.scope == file['scope']), (models.RSEFileAssociation.name == file['name'])))), (~ exists(select([1]).prefix_with('/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */', dialect='oracle')).where(and_((models.ConstituentAssociation.child_scope == file['scope']), (models.ConstituentAssociation.child_name == file['name']))))))
did_condition.append(and_((models.DataIdentifier.scope == file['scope']), (models.DataIdentifier.name == file['name']), (models.DataIdentifier.availability != DIDAvailability.LOST), (~ exists(select([1]).prefix_with('/*+ INDEX(REPLICAS REPLICAS_PK) */', dialect='oracle')).where(and_((models.RSEFileAssociation.scope == file['scope']), (models.RSEFileAssociation.name == file['name'])))), (~ exists(select([1]).prefix_with('/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */', dialect='oracle')).where(and_((models.ConstituentAssociation.child_scope == file['scope']), (models.ConstituentAssociation.child_name == file['name']))))))
archive_contents_condition.append(and_((models.ConstituentAssociation.scope == file['scope']), (models.ConstituentAssociation.name == file['name']), (~ exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifier.scope == file['scope']), (models.DataIdentifier.name == file['name']), (models.DataIdentifier.availability == DIDAvailability.LOST)))), (~ exists(select([1]).prefix_with('/*+ INDEX(REPLICAS REPLICAS_PK) */', dialect='oracle')).where(and_((models.RSEFileAssociation.scope == file['scope']), (models.RSEFileAssociation.name == file['name']))))))
if dst_replica_condition:
for chunk in chunks(dst_replica_condition, 10):
query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter(or_(*chunk)).distinct()
for (parent_scope, parent_name) in query:
models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).save(session=session, flush=False)
while parent_condition:
(child_did_condition, tmp_parent_condition) = ([], [])
for chunk in chunks(parent_condition, 10):
query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).filter(or_(*chunk))
for (parent_scope, parent_name, did_type, child_scope, child_name) in query:
child_did_condition.append(and_((models.DataIdentifierAssociation.scope == parent_scope), (models.DataIdentifierAssociation.name == parent_name), (models.DataIdentifierAssociation.child_scope == child_scope), (models.DataIdentifierAssociation.child_name == child_name)))
clt_is_not_archive_condition.append(and_((models.DataIdentifierAssociation.scope == parent_scope), (models.DataIdentifierAssociation.name == parent_name), exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifier.scope == models.DataIdentifierAssociation.scope), (models.DataIdentifier.name == models.DataIdentifierAssociation.name), (models.DataIdentifier.is_archive == true()))), (~ exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope), (models.DataIdentifier.name == models.DataIdentifierAssociation.child_name), (models.DataIdentifier.is_archive == true()))))))
clt_replica_condition.append(and_((models.CollectionReplica.scope == parent_scope), (models.CollectionReplica.name == parent_name), exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifier.scope == parent_scope), (models.DataIdentifier.name == parent_name), (models.DataIdentifier.is_open == False))), (~ exists(select([1]).prefix_with('/*+ INDEX(CONTENTS CONTENTS_PK) */', dialect='oracle')).where(and_((models.DataIdentifierAssociation.scope == parent_scope), (models.DataIdentifierAssociation.name == parent_name))))))
tmp_parent_condition.append(and_((models.DataIdentifierAssociation.child_scope == parent_scope), (models.DataIdentifierAssociation.child_name == parent_name), (~ exists(select([1]).prefix_with('/*+ INDEX(CONTENTS CONTENTS_PK) */', dialect='oracle')).where(and_((models.DataIdentifierAssociation.scope == parent_scope), (models.DataIdentifierAssociation.name == parent_name))))))
did_condition.append(and_((models.DataIdentifier.scope == parent_scope), (models.DataIdentifier.name == parent_name), (models.DataIdentifier.is_open == False), (~ exists([1]).where(and_((models.DataIdentifierAssociation.child_scope == parent_scope), (models.DataIdentifierAssociation.child_name == parent_name)))), (~ exists([1]).where(and_((models.DataIdentifierAssociation.scope == parent_scope), (models.DataIdentifierAssociation.name == parent_name))))))
if child_did_condition:
for chunk in chunks(child_did_condition, 10):
modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).distinct().with_hint(models.DataIdentifierAssociation, 'INDEX(CONTENTS CONTENTS_PK)', 'oracle').filter(or_(*chunk)).filter(exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifierAssociation.scope == models.DataIdentifier.scope), (models.DataIdentifierAssociation.name == models.DataIdentifier.name), or_((models.DataIdentifier.complete == true()), models.DataIdentifier.complete.is_(None)))))
for (parent_scope, parent_name, parent_did_type) in modifieds:
message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'}
if (message not in messages):
messages.append(message)
incomplete_condition.append(and_((models.DataIdentifier.scope == parent_scope), (models.DataIdentifier.name == parent_name), (models.DataIdentifier.did_type == parent_did_type)))
for chunk in chunks(child_did_condition, 10):
rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session)
session.query(models.DataIdentifierAssociation).filter(or_(*chunk)).delete(synchronize_session=False)
parent_condition = tmp_parent_condition
for chunk in chunks(clt_replica_condition, 10):
session.query(models.CollectionReplica).filter(or_(*chunk)).delete(synchronize_session=False)
for chunk in chunks(incomplete_condition, 10):
session.query(models.DataIdentifier).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').filter(or_(*chunk)).filter((models.DataIdentifier.complete != false())).update({'complete': False}, synchronize_session=False)
(messages, deleted_dids, deleted_rules, deleted_did_meta) = ([], [], [], [])
for chunk in chunks(did_condition, 100):
query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').filter(or_(*chunk))
for (scope, name, did_type) in query:
if (did_type == DIDType.DATASET):
messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})})
deleted_rules.append(and_((models.ReplicationRule.scope == scope), (models.ReplicationRule.name == name)))
deleted_dids.append(and_((models.DataIdentifier.scope == scope), (models.DataIdentifier.name == name)))
if (session.bind.dialect.name == 'oracle'):
oracle_version = int(session.connection().connection.version.split('.')[0])
if (oracle_version >= 12):
deleted_did_meta.append(and_((models.DidMeta.scope == scope), (models.DidMeta.name == name)))
else:
deleted_did_meta.append(and_((models.DidMeta.scope == scope), (models.DidMeta.name == name)))
removed_constituents = []
constituents_to_delete_condition = []
for chunk in chunks(archive_contents_condition, 30):
query = session.query(models.ConstituentAssociation).with_hint(models.ConstituentAssociation, 'INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)', 'oracle').filter(or_(*chunk))
for constituent in query:
removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name})
constituents_to_delete_condition.append(and_((models.ConstituentAssociation.scope == constituent.scope), (models.ConstituentAssociation.name == constituent.name), (models.ConstituentAssociation.child_scope == constituent.child_scope), (models.ConstituentAssociation.child_name == constituent.child_name)))
models.ConstituentAssociationHistory(child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at).save(session=session, flush=False)
if (len(constituents_to_delete_condition) > 200):
session.query(models.ConstituentAssociation).with_hint(models.ConstituentAssociation, 'INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)', 'oracle').filter(or_(*constituents_to_delete_condition)).delete(synchronize_session=False)
constituents_to_delete_condition.clear()
__cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session)
removed_constituents.clear()
if constituents_to_delete_condition:
session.query(models.ConstituentAssociation).with_hint(models.ConstituentAssociation, 'INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)', 'oracle').filter(or_(*constituents_to_delete_condition)).delete(synchronize_session=False)
__cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session)
for chunk in chunks(deleted_rules, 100):
session.query(models.ReplicationRule).with_hint(models.ReplicationRule, 'INDEX(RULES RULES_SCOPE_NAME_IDX)', 'oracle').filter(or_(*chunk)).filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).delete(synchronize_session=False)
for chunk in chunks(deleted_did_meta, 100):
session.query(models.DidMeta).filter(or_(*chunk)).delete(synchronize_session=False)
for chunk in chunks(messages, 100):
session.bulk_insert_mappings(models.Message, chunk)
for chunk in chunks(deleted_dids, 100):
session.query(models.DataIdentifier).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').filter(or_(*chunk)).delete(synchronize_session=False)
if (session.bind.dialect.name != 'oracle'):
rucio.core.did.insert_deleted_dids(chunk, session=session)
for chunk in chunks(clt_is_not_archive_condition, 100):
clt_to_update = list(session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).with_hint(models.DataIdentifierAssociation, 'INDEX(CONTENTS CONTENTS_PK)', 'oracle').filter(or_(*chunk)))
if clt_to_update:
session.query(models.DataIdentifier).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').filter(or_((and_((models.DataIdentifier.scope == scope), (models.DataIdentifier.name == name), (models.DataIdentifier.is_archive == true())) for (scope, name) in clt_to_update))).update({'is_archive': False}, synchronize_session=False) | -9,200,459,903,563,746,000 | Perform update of collections/archive associations/dids after the removal of their replicas
:param rse_id: the rse id
:param files: list of files whose replica got deleted
:param session: The database session in use. | lib/rucio/core/replica.py | __cleanup_after_replica_deletion | bari12/rucio | python | @transactional_session
def __cleanup_after_replica_deletion(rse_id, files, session=None):
'\n Perform update of collections/archive associations/dids after the removal of their replicas\n :param rse_id: the rse id\n :param files: list of files whose replica got deleted\n :param session: The database session in use.\n '
(parent_condition, did_condition) = ([], [])
(clt_replica_condition, dst_replica_condition) = ([], [])
(incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition) = ([], [], [], [])
for file in files:
dst_replica_condition.append(and_((models.DataIdentifierAssociation.child_scope == file['scope']), (models.DataIdentifierAssociation.child_name == file['name']), exists(select([1]).prefix_with('/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */', dialect='oracle')).where(and_((models.CollectionReplica.scope == models.DataIdentifierAssociation.scope), (models.CollectionReplica.name == models.DataIdentifierAssociation.name), (models.CollectionReplica.rse_id == rse_id)))))
parent_condition.append(and_((models.DataIdentifierAssociation.child_scope == file['scope']), (models.DataIdentifierAssociation.child_name == file['name']), (~ exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifier.scope == file['scope']), (models.DataIdentifier.name == file['name']), (models.DataIdentifier.availability == DIDAvailability.LOST)))), (~ exists(select([1]).prefix_with('/*+ INDEX(REPLICAS REPLICAS_PK) */', dialect='oracle')).where(and_((models.RSEFileAssociation.scope == file['scope']), (models.RSEFileAssociation.name == file['name'])))), (~ exists(select([1]).prefix_with('/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */', dialect='oracle')).where(and_((models.ConstituentAssociation.child_scope == file['scope']), (models.ConstituentAssociation.child_name == file['name']))))))
did_condition.append(and_((models.DataIdentifier.scope == file['scope']), (models.DataIdentifier.name == file['name']), (models.DataIdentifier.availability != DIDAvailability.LOST), (~ exists(select([1]).prefix_with('/*+ INDEX(REPLICAS REPLICAS_PK) */', dialect='oracle')).where(and_((models.RSEFileAssociation.scope == file['scope']), (models.RSEFileAssociation.name == file['name'])))), (~ exists(select([1]).prefix_with('/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */', dialect='oracle')).where(and_((models.ConstituentAssociation.child_scope == file['scope']), (models.ConstituentAssociation.child_name == file['name']))))))
archive_contents_condition.append(and_((models.ConstituentAssociation.scope == file['scope']), (models.ConstituentAssociation.name == file['name']), (~ exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifier.scope == file['scope']), (models.DataIdentifier.name == file['name']), (models.DataIdentifier.availability == DIDAvailability.LOST)))), (~ exists(select([1]).prefix_with('/*+ INDEX(REPLICAS REPLICAS_PK) */', dialect='oracle')).where(and_((models.RSEFileAssociation.scope == file['scope']), (models.RSEFileAssociation.name == file['name']))))))
if dst_replica_condition:
for chunk in chunks(dst_replica_condition, 10):
query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter(or_(*chunk)).distinct()
for (parent_scope, parent_name) in query:
models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).save(session=session, flush=False)
while parent_condition:
(child_did_condition, tmp_parent_condition) = ([], [])
for chunk in chunks(parent_condition, 10):
query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).filter(or_(*chunk))
for (parent_scope, parent_name, did_type, child_scope, child_name) in query:
child_did_condition.append(and_((models.DataIdentifierAssociation.scope == parent_scope), (models.DataIdentifierAssociation.name == parent_name), (models.DataIdentifierAssociation.child_scope == child_scope), (models.DataIdentifierAssociation.child_name == child_name)))
clt_is_not_archive_condition.append(and_((models.DataIdentifierAssociation.scope == parent_scope), (models.DataIdentifierAssociation.name == parent_name), exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifier.scope == models.DataIdentifierAssociation.scope), (models.DataIdentifier.name == models.DataIdentifierAssociation.name), (models.DataIdentifier.is_archive == true()))), (~ exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope), (models.DataIdentifier.name == models.DataIdentifierAssociation.child_name), (models.DataIdentifier.is_archive == true()))))))
clt_replica_condition.append(and_((models.CollectionReplica.scope == parent_scope), (models.CollectionReplica.name == parent_name), exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifier.scope == parent_scope), (models.DataIdentifier.name == parent_name), (models.DataIdentifier.is_open == False))), (~ exists(select([1]).prefix_with('/*+ INDEX(CONTENTS CONTENTS_PK) */', dialect='oracle')).where(and_((models.DataIdentifierAssociation.scope == parent_scope), (models.DataIdentifierAssociation.name == parent_name))))))
tmp_parent_condition.append(and_((models.DataIdentifierAssociation.child_scope == parent_scope), (models.DataIdentifierAssociation.child_name == parent_name), (~ exists(select([1]).prefix_with('/*+ INDEX(CONTENTS CONTENTS_PK) */', dialect='oracle')).where(and_((models.DataIdentifierAssociation.scope == parent_scope), (models.DataIdentifierAssociation.name == parent_name))))))
did_condition.append(and_((models.DataIdentifier.scope == parent_scope), (models.DataIdentifier.name == parent_name), (models.DataIdentifier.is_open == False), (~ exists([1]).where(and_((models.DataIdentifierAssociation.child_scope == parent_scope), (models.DataIdentifierAssociation.child_name == parent_name)))), (~ exists([1]).where(and_((models.DataIdentifierAssociation.scope == parent_scope), (models.DataIdentifierAssociation.name == parent_name))))))
if child_did_condition:
for chunk in chunks(child_did_condition, 10):
modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).distinct().with_hint(models.DataIdentifierAssociation, 'INDEX(CONTENTS CONTENTS_PK)', 'oracle').filter(or_(*chunk)).filter(exists(select([1]).prefix_with('/*+ INDEX(DIDS DIDS_PK) */', dialect='oracle')).where(and_((models.DataIdentifierAssociation.scope == models.DataIdentifier.scope), (models.DataIdentifierAssociation.name == models.DataIdentifier.name), or_((models.DataIdentifier.complete == true()), models.DataIdentifier.complete.is_(None)))))
for (parent_scope, parent_name, parent_did_type) in modifieds:
message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'}
if (message not in messages):
messages.append(message)
incomplete_condition.append(and_((models.DataIdentifier.scope == parent_scope), (models.DataIdentifier.name == parent_name), (models.DataIdentifier.did_type == parent_did_type)))
for chunk in chunks(child_did_condition, 10):
rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session)
session.query(models.DataIdentifierAssociation).filter(or_(*chunk)).delete(synchronize_session=False)
parent_condition = tmp_parent_condition
for chunk in chunks(clt_replica_condition, 10):
session.query(models.CollectionReplica).filter(or_(*chunk)).delete(synchronize_session=False)
for chunk in chunks(incomplete_condition, 10):
session.query(models.DataIdentifier).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').filter(or_(*chunk)).filter((models.DataIdentifier.complete != false())).update({'complete': False}, synchronize_session=False)
(messages, deleted_dids, deleted_rules, deleted_did_meta) = ([], [], [], [])
for chunk in chunks(did_condition, 100):
query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').filter(or_(*chunk))
for (scope, name, did_type) in query:
if (did_type == DIDType.DATASET):
messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})})
deleted_rules.append(and_((models.ReplicationRule.scope == scope), (models.ReplicationRule.name == name)))
deleted_dids.append(and_((models.DataIdentifier.scope == scope), (models.DataIdentifier.name == name)))
if (session.bind.dialect.name == 'oracle'):
oracle_version = int(session.connection().connection.version.split('.')[0])
if (oracle_version >= 12):
deleted_did_meta.append(and_((models.DidMeta.scope == scope), (models.DidMeta.name == name)))
else:
deleted_did_meta.append(and_((models.DidMeta.scope == scope), (models.DidMeta.name == name)))
removed_constituents = []
constituents_to_delete_condition = []
for chunk in chunks(archive_contents_condition, 30):
query = session.query(models.ConstituentAssociation).with_hint(models.ConstituentAssociation, 'INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)', 'oracle').filter(or_(*chunk))
for constituent in query:
removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name})
constituents_to_delete_condition.append(and_((models.ConstituentAssociation.scope == constituent.scope), (models.ConstituentAssociation.name == constituent.name), (models.ConstituentAssociation.child_scope == constituent.child_scope), (models.ConstituentAssociation.child_name == constituent.child_name)))
models.ConstituentAssociationHistory(child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at).save(session=session, flush=False)
if (len(constituents_to_delete_condition) > 200):
session.query(models.ConstituentAssociation).with_hint(models.ConstituentAssociation, 'INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)', 'oracle').filter(or_(*constituents_to_delete_condition)).delete(synchronize_session=False)
constituents_to_delete_condition.clear()
__cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session)
removed_constituents.clear()
if constituents_to_delete_condition:
session.query(models.ConstituentAssociation).with_hint(models.ConstituentAssociation, 'INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)', 'oracle').filter(or_(*constituents_to_delete_condition)).delete(synchronize_session=False)
__cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session)
for chunk in chunks(deleted_rules, 100):
session.query(models.ReplicationRule).with_hint(models.ReplicationRule, 'INDEX(RULES RULES_SCOPE_NAME_IDX)', 'oracle').filter(or_(*chunk)).filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).delete(synchronize_session=False)
for chunk in chunks(deleted_did_meta, 100):
session.query(models.DidMeta).filter(or_(*chunk)).delete(synchronize_session=False)
for chunk in chunks(messages, 100):
session.bulk_insert_mappings(models.Message, chunk)
for chunk in chunks(deleted_dids, 100):
session.query(models.DataIdentifier).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').filter(or_(*chunk)).delete(synchronize_session=False)
if (session.bind.dialect.name != 'oracle'):
rucio.core.did.insert_deleted_dids(chunk, session=session)
for chunk in chunks(clt_is_not_archive_condition, 100):
clt_to_update = list(session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).with_hint(models.DataIdentifierAssociation, 'INDEX(CONTENTS CONTENTS_PK)', 'oracle').filter(or_(*chunk)))
if clt_to_update:
session.query(models.DataIdentifier).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').filter(or_((and_((models.DataIdentifier.scope == scope), (models.DataIdentifier.name == name), (models.DataIdentifier.is_archive == true())) for (scope, name) in clt_to_update))).update({'is_archive': False}, synchronize_session=False) |
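The repeated pattern above, collecting per-row conditions and deleting them in bounded chunks, keeps each DELETE statement within database limits on the size of an OR expression. A minimal sketch of that pattern, assuming a condition list built like deleted_dids above; the helper name is hypothetical:

# Sketch of the chunked OR-clause delete used throughout this module.
from sqlalchemy import or_
from rucio.common.utils import chunks

def delete_in_chunks(session, model, conditions, chunk_size=100):
    # Each chunk becomes one DELETE ... WHERE c1 OR c2 OR ... statement.
    for chunk in chunks(conditions, chunk_size):
        session.query(model).filter(or_(*chunk)).delete(synchronize_session=False)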
@transactional_session
def get_replica(rse_id, scope, name, session=None):
'\n Get File replica.\n\n :param rse_id: The RSE Id.\n :param scope: the scope name.\n :param name: The data identifier name.\n :param session: The database session in use.\n\n :returns: A dictionary of replica attributes.\n '
try:
row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one()
result = {}
for column in row.__table__.columns:
result[column.name] = getattr(row, column.name)
return result
except NoResultFound:
raise exception.ReplicaNotFound(('No row found for scope: %s name: %s rse: %s' % (scope, name, get_rse_name(rse_id=rse_id, session=session)))) | 867,883,035,569,628,500 | Get File replica.
:param rse_id: The RSE Id.
:param scope: the scope name.
:param name: The data identifier name.
:param session: The database session in use.
:returns: A dictionary of replica attributes. | lib/rucio/core/replica.py | get_replica | bari12/rucio | python | @transactional_session
def get_replica(rse_id, scope, name, session=None):
'\n Get File replica.\n\n :param rse_id: The RSE Id.\n :param scope: the scope name.\n :param name: The data identifier name.\n :param session: The database session in use.\n\n :returns: A dictionary of replica attributes.\n '
try:
row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one()
result = {}
for column in row.__table__.columns:
result[column.name] = getattr(row, column.name)
return result
except NoResultFound:
raise exception.ReplicaNotFound(('No row found for scope: %s name: %s rse: %s' % (scope, name, get_rse_name(rse_id=rse_id, session=session)))) |
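A hedged usage sketch for get_replica; the RSE name, scope and file name below are placeholders, and the @transactional_session decorator opens a session when none is passed:

# Hypothetical lookup of one replica row as a plain dictionary.
from rucio.common.types import InternalScope
from rucio.core.rse import get_rse_id
from rucio.core.replica import get_replica

rse_id = get_rse_id(rse='MOCK')    # placeholder RSE
scope = InternalScope('mock')      # placeholder scope
replica = get_replica(rse_id=rse_id, scope=scope, name='file_1')
print(replica['state'], replica['bytes'])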
@transactional_session
def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None):
'\n List RSE File replicas with no locks.\n\n :param limit: Number of replicas returned.\n :param bytes: The number of bytes needed.\n :param rse_id: The rse_id.\n :param delay_seconds: The delay to query replicas in BEING_DELETED state\n :param only_delete_obsolete: If set to True, will only return the replicas with EPOCH tombstone\n :param session: The database session in use.\n\n :returns: a list of replica dictionaries.\n '
none_value = None
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).with_hint(models.RSEFileAssociation, 'INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)', 'oracle').filter((models.RSEFileAssociation.tombstone < datetime.utcnow())).filter((models.RSEFileAssociation.lock_cnt == 0)).filter((case([((models.RSEFileAssociation.tombstone != none_value), models.RSEFileAssociation.rse_id)]) == rse_id)).filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_((models.RSEFileAssociation.state == ReplicaState.BEING_DELETED), (models.RSEFileAssociation.updated_at < (datetime.utcnow() - timedelta(seconds=delay_seconds)))))).filter((~ exists(select([1]).prefix_with('/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */', dialect='oracle').where(and_((models.RSEFileAssociation.scope == models.Source.scope), (models.RSEFileAssociation.name == models.Source.name), (models.RSEFileAssociation.rse_id == models.Source.rse_id)))))).with_for_update(skip_locked=True).order_by(models.RSEFileAssociation.tombstone)
needed_space = bytes
(total_bytes, total_files) = (0, 0)
rows = []
replica_clause = []
for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000):
replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).with_hint(models.RSEFileAssociation, 'index(REPLICAS REPLICAS_PK)', 'oracle').filter(and_((models.RSEFileAssociation.scope == scope), (models.RSEFileAssociation.name == name), (models.RSEFileAssociation.rse_id != rse_id))).one()
if (replica_cnt[0] > 1):
if (state != ReplicaState.UNAVAILABLE):
if (tombstone != OBSOLETE):
if only_delete_obsolete:
break
if ((needed_space is not None) and (total_bytes > needed_space)):
break
total_bytes += bytes
total_files += 1
if (total_files > limit):
break
rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state})
replica_clause.append(and_((models.RSEFileAssociation.scope == scope), (models.RSEFileAssociation.name == name), (models.RSEFileAssociation.rse_id == rse_id)))
else:
request_cnt = session.query(func.count()).with_hint(models.Request, 'INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)', 'oracle').filter(and_((models.Request.scope == scope), (models.Request.name == name))).one()
if (request_cnt[0] == 0):
if (tombstone != OBSOLETE):
if only_delete_obsolete:
break
if ((needed_space is not None) and (total_bytes > needed_space)):
break
total_bytes += bytes
total_files += 1
if (total_files > limit):
break
rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state})
replica_clause.append(and_((models.RSEFileAssociation.scope == scope), (models.RSEFileAssociation.name == name), (models.RSEFileAssociation.rse_id == rse_id)))
for chunk in chunks(replica_clause, 100):
session.query(models.RSEFileAssociation).filter(or_(*chunk)).with_hint(models.RSEFileAssociation, text='INDEX(REPLICAS REPLICAS_PK)', dialect_name='oracle').update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False)
return rows | 8,043,752,399,939,749,000 | List RSE File replicas with no locks.
:param limit: Number of replicas returned.
:param bytes: The number of bytes needed.
:param rse_id: The rse_id.
:param delay_seconds: The delay to query replicas in BEING_DELETED state
:param only_delete_obsolete: If set to True, will only return the replicas with EPOCH tombstone
:param session: The database session in use.
:returns: a list of replica dictionaries. | lib/rucio/core/replica.py | list_and_mark_unlocked_replicas | bari12/rucio | python | @transactional_session
def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None):
'\n List RSE File replicas with no locks.\n\n :param limit: Number of replicas returned.\n :param bytes: The number of bytes needed.\n :param rse_id: The rse_id.\n :param delay_seconds: The delay to query replicas in BEING_DELETED state\n :param only_delete_obsolete: If set to True, will only return the replicas with EPOCH tombstone\n :param session: The database session in use.\n\n :returns: a list of replica dictionaries.\n '
none_value = None
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).with_hint(models.RSEFileAssociation, 'INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)', 'oracle').filter((models.RSEFileAssociation.tombstone < datetime.utcnow())).filter((models.RSEFileAssociation.lock_cnt == 0)).filter((case([((models.RSEFileAssociation.tombstone != none_value), models.RSEFileAssociation.rse_id)]) == rse_id)).filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_((models.RSEFileAssociation.state == ReplicaState.BEING_DELETED), (models.RSEFileAssociation.updated_at < (datetime.utcnow() - timedelta(seconds=delay_seconds)))))).filter((~ exists(select([1]).prefix_with('/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */', dialect='oracle').where(and_((models.RSEFileAssociation.scope == models.Source.scope), (models.RSEFileAssociation.name == models.Source.name), (models.RSEFileAssociation.rse_id == models.Source.rse_id)))))).with_for_update(skip_locked=True).order_by(models.RSEFileAssociation.tombstone)
needed_space = bytes
(total_bytes, total_files) = (0, 0)
rows = []
replica_clause = []
for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000):
replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).with_hint(models.RSEFileAssociation, 'index(REPLICAS REPLICAS_PK)', 'oracle').filter(and_((models.RSEFileAssociation.scope == scope), (models.RSEFileAssociation.name == name), (models.RSEFileAssociation.rse_id != rse_id))).one()
if (replica_cnt[0] > 1):
if (state != ReplicaState.UNAVAILABLE):
if (tombstone != OBSOLETE):
if only_delete_obsolete:
break
if ((needed_space is not None) and (total_bytes > needed_space)):
break
total_bytes += bytes
total_files += 1
if (total_files > limit):
break
rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state})
replica_clause.append(and_((models.RSEFileAssociation.scope == scope), (models.RSEFileAssociation.name == name), (models.RSEFileAssociation.rse_id == rse_id)))
else:
request_cnt = session.query(func.count()).with_hint(models.Request, 'INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)', 'oracle').filter(and_((models.Request.scope == scope), (models.Request.name == name))).one()
if (request_cnt[0] == 0):
if (tombstone != OBSOLETE):
if only_delete_obsolete:
break
if ((needed_space is not None) and (total_bytes > needed_space)):
break
total_bytes += bytes
total_files += 1
if (total_files > limit):
break
rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state})
replica_clause.append(and_((models.RSEFileAssociation.scope == scope), (models.RSEFileAssociation.name == name), (models.RSEFileAssociation.rse_id == rse_id)))
for chunk in chunks(replica_clause, 100):
session.query(models.RSEFileAssociation).filter(or_(*chunk)).with_hint(models.RSEFileAssociation, text='INDEX(REPLICAS REPLICAS_PK)', dialect_name='oracle').update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False)
return rows |
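A reaper-style sketch: request up to 100 unlocked replicas (or enough to cover 10 GB), which the function atomically flips to BEING_DELETED with an epoch tombstone before returning. All identifiers and sizes are placeholders:

# Hypothetical reaper invocation; limit and bytes are illustrative.
from rucio.core.rse import get_rse_id
from rucio.core.replica import list_and_mark_unlocked_replicas

rse_id = get_rse_id(rse='MOCK')    # placeholder RSE
candidates = list_and_mark_unlocked_replicas(limit=100, bytes=10 * 1024**3,
                                             rse_id=rse_id, delay_seconds=600)
for replica in candidates:
    print(replica['scope'], replica['name'], replica['bytes'])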
@transactional_session
def update_replicas_states(replicas, nowait=False, session=None):
'\n Update File replica information and state.\n\n :param replicas: The list of replicas.\n :param nowait: Nowait parameter for the for_update queries.\n :param session: The database session in use.\n '
for replica in replicas:
query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name'])
try:
if nowait:
query.with_for_update(nowait=True).one()
except NoResultFound:
raise exception.ReplicaNotFound(('No row found for scope: %s name: %s rse: %s' % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))))
if isinstance(replica['state'], string_types):
replica['state'] = ReplicaState(replica['state'])
values = {'state': replica['state']}
if (replica['state'] == ReplicaState.BEING_DELETED):
query = query.filter_by(lock_cnt=0)
stmt = exists([1]).where(and_((models.RSEFileAssociation.scope == models.Source.scope), (models.RSEFileAssociation.name == models.Source.name), (models.RSEFileAssociation.rse_id == models.Source.rse_id)))
query = query.filter(not_(stmt))
values['tombstone'] = OBSOLETE
elif (replica['state'] == ReplicaState.AVAILABLE):
rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session)
elif (replica['state'] == ReplicaState.UNAVAILABLE):
rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session)
elif (replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE):
query = query.filter(or_((models.RSEFileAssociation.state == ReplicaState.AVAILABLE), (models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)))
if (('path' in replica) and replica['path']):
values['path'] = replica['path']
if (not query.update(values, synchronize_session=False)):
if ('rse' not in replica):
replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session)
raise exception.UnsupportedOperation(('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica))
return True | 4,675,642,054,458,534,000 | Update File replica information and state.
:param replicas: The list of replicas.
:param nowait: Nowait parameter for the for_update queries.
:param session: The database session in use. | lib/rucio/core/replica.py | update_replicas_states | bari12/rucio | python | @transactional_session
def update_replicas_states(replicas, nowait=False, session=None):
'\n Update File replica information and state.\n\n :param replicas: The list of replicas.\n :param nowait: Nowait parameter for the for_update queries.\n :param session: The database session in use.\n '
for replica in replicas:
query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name'])
try:
if nowait:
query.with_for_update(nowait=True).one()
except NoResultFound:
raise exception.ReplicaNotFound(('No row found for scope: %s name: %s rse: %s' % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))))
if isinstance(replica['state'], string_types):
replica['state'] = ReplicaState(replica['state'])
values = {'state': replica['state']}
if (replica['state'] == ReplicaState.BEING_DELETED):
query = query.filter_by(lock_cnt=0)
stmt = exists([1]).where(and_((models.RSEFileAssociation.scope == models.Source.scope), (models.RSEFileAssociation.name == models.Source.name), (models.RSEFileAssociation.rse_id == models.Source.rse_id)))
query = query.filter(not_(stmt))
values['tombstone'] = OBSOLETE
elif (replica['state'] == ReplicaState.AVAILABLE):
rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session)
elif (replica['state'] == ReplicaState.UNAVAILABLE):
rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session)
elif (replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE):
query = query.filter(or_((models.RSEFileAssociation.state == ReplicaState.AVAILABLE), (models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)))
if (('path' in replica) and replica['path']):
values['path'] = replica['path']
if (not query.update(values, synchronize_session=False)):
if ('rse' not in replica):
replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session)
raise exception.UnsupportedOperation(('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica))
return True |
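A hedged sketch of flagging one replica AVAILABLE after a transfer, which also triggers the successful_transfer lock bookkeeping shown above; the scope, name and RSE are placeholders:

# Hypothetical state change for one replica.
from rucio.common.types import InternalScope
from rucio.core.rse import get_rse_id
from rucio.db.sqla.constants import ReplicaState
from rucio.core.replica import update_replicas_states

scope, rse_id = InternalScope('mock'), get_rse_id(rse='MOCK')  # placeholders
update_replicas_states([{'scope': scope, 'name': 'file_1',
                         'rse_id': rse_id, 'state': ReplicaState.AVAILABLE}])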
@transactional_session
def touch_replica(replica, session=None):
"\n Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked.\n\n :param replica: a dictionary with the information of the affected replica.\n :param session: The database session in use.\n\n :returns: True, if successful, False otherwise.\n "
try:
(accessed_at, none_value) = ((replica.get('accessed_at') or datetime.utcnow()), None)
session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).with_hint(models.RSEFileAssociation, 'index(REPLICAS REPLICAS_PK)', 'oracle').with_for_update(nowait=True).one()
session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).with_hint(models.RSEFileAssociation, 'index(REPLICAS REPLICAS_PK)', 'oracle').update({'accessed_at': accessed_at, 'tombstone': case([(and_((models.RSEFileAssociation.tombstone != none_value), (models.RSEFileAssociation.tombstone != OBSOLETE)), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False)
session.query(models.DataIdentifier).filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').with_for_update(nowait=True).one()
session.query(models.DataIdentifier).filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').update({'accessed_at': accessed_at}, synchronize_session=False)
except DatabaseError:
return False
except NoResultFound:
return True
return True | -6,751,399,618,882,462,000 | Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked.
:param replica: a dictionary with the information of the affected replica.
:param session: The database session in use.
:returns: True, if successful, False otherwise. | lib/rucio/core/replica.py | touch_replica | bari12/rucio | python | @transactional_session
def touch_replica(replica, session=None):
"\n Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked.\n\n :param replica: a dictionary with the information of the affected replica.\n :param session: The database session in use.\n\n :returns: True, if successful, False otherwise.\n "
try:
(accessed_at, none_value) = ((replica.get('accessed_at') or datetime.utcnow()), None)
session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).with_hint(models.RSEFileAssociation, 'index(REPLICAS REPLICAS_PK)', 'oracle').with_for_update(nowait=True).one()
session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).with_hint(models.RSEFileAssociation, 'index(REPLICAS REPLICAS_PK)', 'oracle').update({'accessed_at': accessed_at, 'tombstone': case([(and_((models.RSEFileAssociation.tombstone != none_value), (models.RSEFileAssociation.tombstone != OBSOLETE)), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False)
session.query(models.DataIdentifier).filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').with_for_update(nowait=True).one()
session.query(models.DataIdentifier).filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).with_hint(models.DataIdentifier, 'INDEX(DIDS DIDS_PK)', 'oracle').update({'accessed_at': accessed_at}, synchronize_session=False)
except DatabaseError:
return False
except NoResultFound:
return True
return True |
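A sketch of recording an access, with placeholder identifiers; omitting 'accessed_at' falls back to utcnow(), and a False return means the row was locked by another transaction:

# Hypothetical access-time update; returns False on a DatabaseError.
from datetime import datetime
from rucio.common.types import InternalScope
from rucio.core.rse import get_rse_id
from rucio.core.replica import touch_replica

scope, rse_id = InternalScope('mock'), get_rse_id(rse='MOCK')  # placeholders
ok = touch_replica({'scope': scope, 'name': 'file_1', 'rse_id': rse_id,
                    'accessed_at': datetime.utcnow()})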
@transactional_session
def update_replica_state(rse_id, scope, name, state, session=None):
'\n Update File replica information and state.\n\n :param rse_id: the rse id.\n :param scope: the scope name.\n :param name: The data identifier name.\n :param state: The state.\n :param session: The database session in use.\n '
return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) | -7,170,540,824,155,534,000 | Update File replica information and state.
:param rse_id: the rse id.
:param scope: the scope name.
:param name: The data identifier name.
:param state: The state.
:param session: The database session in use. | lib/rucio/core/replica.py | update_replica_state | bari12/rucio | python | @transactional_session
def update_replica_state(rse_id, scope, name, state, session=None):
'\n Update File replica information and state.\n\n :param rse_id: the rse id.\n :param scope: the scope name.\n :param name: The data identifier name.\n :param state: The state.\n :param session: The database session in use.\n '
return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) |
@transactional_session
def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None):
'\n Get file replicas for a specific scope:name.\n\n :param scope: The scope of the did.\n :param name: The name of the did.\n :param nowait: Nowait parameter for the FOR UPDATE statement\n :param restrict_rses: Possible RSE_ids to filter on.\n :param session: The db session in use.\n :returns: List of SQLAlchemy Replica Objects\n '
query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter((models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))
if (restrict_rses is not None):
if (len(restrict_rses) < 10):
rse_clause = []
for rse_id in restrict_rses:
rse_clause.append((models.RSEFileAssociation.rse_id == rse_id))
if rse_clause:
query = query.filter(or_(*rse_clause))
return query.with_for_update(nowait=nowait).all() | 3,420,387,852,648,353,300 | Get file replicas for a specific scope:name.
:param scope: The scope of the did.
:param name: The name of the did.
:param nowait: Nowait parameter for the FOR UPDATE statement
:param restrict_rses: Possible RSE_ids to filter on.
:param session: The db session in use.
:returns: List of SQLAlchemy Replica Objects | lib/rucio/core/replica.py | get_and_lock_file_replicas | bari12/rucio | python | @transactional_session
def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None):
'\n Get file replicas for a specific scope:name.\n\n :param scope: The scope of the did.\n :param name: The name of the did.\n :param nowait: Nowait parameter for the FOR UPDATE statement\n :param restrict_rses: Possible RSE_ids to filter on.\n :param session: The db session in use.\n :returns: List of SQLAlchemy Replica Objects\n '
query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter((models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))
if (restrict_rses is not None):
if (len(restrict_rses) < 10):
rse_clause = []
for rse_id in restrict_rses:
rse_clause.append((models.RSEFileAssociation.rse_id == rse_id))
if rse_clause:
query = query.filter(or_(*rse_clause))
return query.with_for_update(nowait=nowait).all() |
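A sketch of locking one file's replica rows during rule evaluation; nowait=True makes the FOR UPDATE fail immediately instead of blocking when another worker already holds the rows. Identifiers are placeholders:

# Hypothetical row-locking call inside an open transaction.
from rucio.common.types import InternalScope
from rucio.core.rse import get_rse_id
from rucio.core.replica import get_and_lock_file_replicas

scope, rse_id = InternalScope('mock'), get_rse_id(rse='MOCK')  # placeholders
locked = get_and_lock_file_replicas(scope=scope, name='file_1',
                                    nowait=True, restrict_rses=[rse_id])
for replica in locked:
    print(replica.rse_id, replica.state)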
@transactional_session
def get_source_replicas(scope, name, source_rses=None, session=None):
'\n Get source replicas for a specific scope:name.\n\n :param scope: The scope of the did.\n :param name: The name of the did.\n :param source_rses: Possible RSE_ids to filter on.\n :param session: The db session in use.\n :returns: List of rse_ids hosting an AVAILABLE replica\n '
query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter((models.RSEFileAssociation.state == ReplicaState.AVAILABLE))
if source_rses:
if (len(source_rses) < 10):
rse_clause = []
for rse_id in source_rses:
rse_clause.append((models.RSEFileAssociation.rse_id == rse_id))
if rse_clause:
query = query.filter(or_(*rse_clause))
return [a[0] for a in query.all()] | -6,186,249,393,269,582,000 | Get source replicas for a specific scope:name.
:param scope: The scope of the did.
:param name: The name of the did.
:param source_rses: Possible RSE_ids to filter on.
:param session: The db session in use.
:returns: List of rse_ids hosting an AVAILABLE replica | lib/rucio/core/replica.py | get_source_replicas | bari12/rucio | python | @transactional_session
def get_source_replicas(scope, name, source_rses=None, session=None):
'\n Get source replicas for a specific scope:name.\n\n :param scope: The scope of the did.\n :param name: The name of the did.\n :param source_rses: Possible RSE_ids to filter on.\n :param session: The db session in use.\n :returns: List of rse_ids hosting an AVAILABLE replica\n '
query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter((models.RSEFileAssociation.state == ReplicaState.AVAILABLE))
if source_rses:
if (len(source_rses) < 10):
rse_clause = []
for rse_id in source_rses:
rse_clause.append((models.RSEFileAssociation.rse_id == rse_id))
if rse_clause:
query = query.filter(or_(*rse_clause))
return [a[0] for a in query.all()] |
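A sketch of listing transfer sources for a single file, optionally restricted to candidate RSEs; identifiers are placeholders:

# Hypothetical source lookup: rse_ids holding an AVAILABLE copy.
from rucio.common.types import InternalScope
from rucio.core.rse import get_rse_id
from rucio.core.replica import get_source_replicas

scope, rse_id = InternalScope('mock'), get_rse_id(rse='MOCK')  # placeholders
source_rse_ids = get_source_replicas(scope=scope, name='file_1',
                                     source_rses=[rse_id])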
@transactional_session
def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None):
'\n Get file replicas for all files of a dataset.\n\n :param scope: The scope of the dataset.\n :param name: The name of the dataset.\n :param nowait: Nowait parameter for the FOR UPDATE statement\n :param restrict_rses: Possible RSE_ids to filter on.\n :param total_threads: Total threads\n :param thread_id: This thread\n :param session: The db session in use.\n :returns: (files in dataset, replicas in dataset)\n '
(files, replicas) = ({}, {})
if (session.bind.dialect.name == 'postgresql'):
content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if (total_threads and (total_threads > 1)):
content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name')
for (child_scope, child_name, bytes, md5, adler32) in content_query.yield_per(1000):
files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32}
replicas[(child_scope, child_name)] = []
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').filter(and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if (restrict_rses is not None):
if (len(restrict_rses) < 10):
rse_clause = []
for rse_id in restrict_rses:
rse_clause.append((models.RSEFileAssociation.rse_id == rse_id))
if rse_clause:
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').filter(and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state != ReplicaState.BEING_DELETED), or_(*rse_clause))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
else:
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').with_hint(models.RSEFileAssociation, 'INDEX(REPLICAS REPLICAS_PK)', 'oracle').outerjoin(models.RSEFileAssociation, and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if (restrict_rses is not None):
if (len(restrict_rses) < 10):
rse_clause = []
for rse_id in restrict_rses:
rse_clause.append((models.RSEFileAssociation.rse_id == rse_id))
if rse_clause:
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').outerjoin(models.RSEFileAssociation, and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state != ReplicaState.BEING_DELETED), or_(*rse_clause))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if (total_threads and (total_threads > 1)):
query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name')
query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt)
for (child_scope, child_name, bytes, md5, adler32, replica) in query.yield_per(1000):
if ((child_scope, child_name) not in files):
files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32}
if ((child_scope, child_name) in replicas):
if (replica is not None):
replicas[(child_scope, child_name)].append(replica)
else:
replicas[(child_scope, child_name)] = []
if (replica is not None):
replicas[(child_scope, child_name)].append(replica)
return (list(files.values()), replicas) | 3,723,481,605,392,194,600 | Get file replicas for all files of a dataset.
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param nowait: Nowait parameter for the FOR UPDATE statement
:param restrict_rses: Possible RSE_ids to filter on.
:param total_threads: Total threads
:param thread_id: This thread
:param session: The db session in use.
:returns: (files in dataset, replicas in dataset) | lib/rucio/core/replica.py | get_and_lock_file_replicas_for_dataset | bari12/rucio | python | @transactional_session
def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None):
'\n Get file replicas for all files of a dataset.\n\n :param scope: The scope of the dataset.\n :param name: The name of the dataset.\n :param nowait: Nowait parameter for the FOR UPDATE statement\n :param restrict_rses: Possible RSE_ids to filter on.\n :param total_threads: Total threads\n :param thread_id: This thread\n :param session: The db session in use.\n :returns: (files in dataset, replicas in dataset)\n '
(files, replicas) = ({}, {})
if (session.bind.dialect.name == 'postgresql'):
content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if (total_threads and (total_threads > 1)):
content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name')
for (child_scope, child_name, bytes, md5, adler32) in content_query.yield_per(1000):
files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32}
replicas[(child_scope, child_name)] = []
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').filter(and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if (restrict_rses is not None):
if (len(restrict_rses) < 10):
rse_clause = []
for rse_id in restrict_rses:
rse_clause.append((models.RSEFileAssociation.rse_id == rse_id))
if rse_clause:
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').filter(and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state != ReplicaState.BEING_DELETED), or_(*rse_clause))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
else:
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').with_hint(models.RSEFileAssociation, 'INDEX(REPLICAS REPLICAS_PK)', 'oracle').outerjoin(models.RSEFileAssociation, and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if (restrict_rses is not None):
if (len(restrict_rses) < 10):
rse_clause = []
for rse_id in restrict_rses:
rse_clause.append((models.RSEFileAssociation.rse_id == rse_id))
if rse_clause:
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').outerjoin(models.RSEFileAssociation, and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state != ReplicaState.BEING_DELETED), or_(*rse_clause))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if (total_threads and (total_threads > 1)):
query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name')
query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt)
for (child_scope, child_name, bytes, md5, adler32, replica) in query.yield_per(1000):
if ((child_scope, child_name) not in files):
files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32}
if ((child_scope, child_name) in replicas):
if (replica is not None):
replicas[(child_scope, child_name)].append(replica)
else:
replicas[(child_scope, child_name)] = []
if (replica is not None):
replicas[(child_scope, child_name)].append(replica)
return (list(files.values()), replicas) |
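A sketch of sharding a large dataset across evaluator threads, assuming four workers; each thread receives a disjoint hash-based slice of the files together with their locked replicas. The scope and dataset name are placeholders:

# Hypothetical sharded evaluation; thread 0 of 4 shown.
from rucio.common.types import InternalScope
from rucio.core.replica import get_and_lock_file_replicas_for_dataset

scope = InternalScope('mock')  # placeholder scope
files, replicas = get_and_lock_file_replicas_for_dataset(
    scope=scope, name='dataset_1', nowait=True, total_threads=4, thread_id=0)
for f in files:
    print(f['name'], len(replicas[(f['scope'], f['name'])]))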
@transactional_session
def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None):
'\n Get source replicas for all files of a dataset.\n\n :param scope: The scope of the dataset.\n :param name: The name of the dataset.\n :param source_rses: Possible source RSE_ids to filter on.\n :param total_threads: Total threads\n :param thread_id: This thread\n :param session: The db session in use.\n :returns: A dictionary mapping each file in the dataset to its list of source rse_ids\n '
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').outerjoin(models.RSEFileAssociation, and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state == ReplicaState.AVAILABLE))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if source_rses:
if (len(source_rses) < 10):
rse_clause = []
for rse_id in source_rses:
rse_clause.append((models.RSEFileAssociation.rse_id == rse_id))
if rse_clause:
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').outerjoin(models.RSEFileAssociation, and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state == ReplicaState.AVAILABLE), or_(*rse_clause))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if (total_threads and (total_threads > 1)):
query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name')
replicas = {}
for (child_scope, child_name, rse_id) in query:
if ((child_scope, child_name) in replicas):
if rse_id:
replicas[(child_scope, child_name)].append(rse_id)
else:
replicas[(child_scope, child_name)] = []
if rse_id:
replicas[(child_scope, child_name)].append(rse_id)
return replicas | 4,626,561,482,023,698,000 | Get source replicas for all files of a dataset.
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param source_rses: Possible source RSE_ids to filter on.
:param total_threads: Total threads
:param thread_id: This thread
:param session: The db session in use.
:returns: A dictionary mapping each file in the dataset to its list of source rse_ids | lib/rucio/core/replica.py | get_source_replicas_for_dataset | bari12/rucio | python | @transactional_session
def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None):
'\n Get source replicas for all files of a dataset.\n\n :param scope: The scope of the dataset.\n :param name: The name of the dataset.\n :param source_rses: Possible source RSE_ids to filter on.\n :param total_threads: Total threads\n :param thread_id: This thread\n :param session: The db session in use.\n :returns: A dictionary mapping each file in the dataset to its list of source rse_ids\n '
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').outerjoin(models.RSEFileAssociation, and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state == ReplicaState.AVAILABLE))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if source_rses:
if (len(source_rses) < 10):
rse_clause = []
for rse_id in source_rses:
rse_clause.append((models.RSEFileAssociation.rse_id == rse_id))
if rse_clause:
query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').outerjoin(models.RSEFileAssociation, and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.RSEFileAssociation.state == ReplicaState.AVAILABLE), or_(*rse_clause))).filter((models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name))
if (total_threads and (total_threads > 1)):
query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name')
replicas = {}
for (child_scope, child_name, rse_id) in query:
if ((child_scope, child_name) in replicas):
if rse_id:
replicas[(child_scope, child_name)].append(rse_id)
else:
replicas[(child_scope, child_name)] = []
if rse_id:
replicas[(child_scope, child_name)].append(rse_id)
return replicas |
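A sketch of mapping every file of a dataset to its possible sources; files without an AVAILABLE copy map to an empty list, which makes unsourceable transfers easy to spot. Names are placeholders:

# Hypothetical bulk source lookup for one dataset.
from rucio.common.types import InternalScope
from rucio.core.replica import get_source_replicas_for_dataset

scope = InternalScope('mock')  # placeholder scope
sources = get_source_replicas_for_dataset(scope=scope, name='dataset_1')
unsourced = [did for did, rse_ids in sources.items() if not rse_ids]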
@read_session
def get_replica_atime(replica, session=None):
'\n Get the accessed_at timestamp for a replica. Just for testing.\n :param replica: A dictionary {scope, name, rse_id, path}\n :param session: Database session to use.\n\n :returns: A datetime timestamp with the last access time.\n '
return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).with_hint(models.RSEFileAssociation, text='INDEX(REPLICAS REPLICAS_PK)', dialect_name='oracle').one()[0] | -2,012,700,357,308,795,600 | Get the accessed_at timestamp for a replica. Just for testing.
:param replica: A dictionary {scope, name, rse_id, path}
:param session: Database session to use.
:returns: A datetime timestamp with the last access time. | lib/rucio/core/replica.py | get_replica_atime | bari12/rucio | python | @read_session
def get_replica_atime(replica, session=None):
'\n Get the accessed_at timestamp for a replica. Just for testing.\n :param replica: A dictionary {scope, name, rse_id, path}\n :param session: Database session to use.\n\n :returns: A datetime timestamp with the last access time.\n '
return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).with_hint(models.RSEFileAssociation, text='INDEX(REPLICAS REPLICAS_PK)', dialect_name='oracle').one()[0] |
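The docstring marks this helper as test-only; a sketch of the intended round trip, with placeholder identifiers:

# Hypothetical test assertion: touch a replica, then read the time back.
from rucio.common.types import InternalScope
from rucio.core.rse import get_rse_id
from rucio.core.replica import touch_replica, get_replica_atime

scope, rse_id = InternalScope('mock'), get_rse_id(rse='MOCK')  # placeholders
touch_replica({'scope': scope, 'name': 'file_1', 'rse_id': rse_id})
atime = get_replica_atime({'scope': scope, 'name': 'file_1', 'rse_id': rse_id})
assert atime is not None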
@transactional_session
def touch_collection_replicas(collection_replicas, session=None):
'\n Update the accessed_at timestamp of the given collection replicas.\n\n :param collection_replicas: the list of collection replicas.\n :param session: The database session in use.\n\n :returns: True, if successful, False otherwise.\n '
now = datetime.utcnow()
for collection_replica in collection_replicas:
try:
session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).update({'accessed_at': (collection_replica.get('accessed_at') or now)}, synchronize_session=False)
except DatabaseError:
return False
return True | -1,533,478,739,449,387,300 | Update the accessed_at timestamp of the given collection replicas.
:param collection_replicas: the list of collection replicas.
:param session: The database session in use.
:returns: True, if successful, False otherwise. | lib/rucio/core/replica.py | touch_collection_replicas | bari12/rucio | python | @transactional_session
def touch_collection_replicas(collection_replicas, session=None):
'\n Update the accessed_at timestamp of the given collection replicas.\n\n :param collection_replicas: the list of collection replicas.\n :param session: The database session in use.\n\n :returns: True, if successful, False otherwise.\n '
now = datetime.utcnow()
for collection_replica in collection_replicas:
try:
session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).update({'accessed_at': (collection_replica.get('accessed_at') or now)}, synchronize_session=False)
except DatabaseError:
return False
return True |
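A sketch of a bulk access-time update for two dataset replicas; entries without 'accessed_at' share the single utcnow() taken at call time. Names are placeholders:

# Hypothetical bulk touch of collection replicas.
from datetime import datetime
from rucio.common.types import InternalScope
from rucio.core.rse import get_rse_id
from rucio.core.replica import touch_collection_replicas

scope, rse_id = InternalScope('mock'), get_rse_id(rse='MOCK')  # placeholders
touch_collection_replicas([
    {'scope': scope, 'name': 'dataset_1', 'rse_id': rse_id},
    {'scope': scope, 'name': 'dataset_2', 'rse_id': rse_id,
     'accessed_at': datetime(2020, 1, 1)}])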
@stream_session
def list_dataset_replicas(scope, name, deep=False, session=None):
'\n :param scope: The scope of the dataset.\n :param name: The name of the dataset.\n :param deep: Lookup at the file level.\n :param session: Database session to use.\n\n :returns: A list of dictionaries containing the dataset replicas\n with associated metrics and timestamps\n '
if (not deep):
query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label('available_length'), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at).filter_by(scope=scope, name=name, did_type=DIDType.DATASET).filter((models.CollectionReplica.rse_id == models.RSE.id)).filter((models.RSE.deleted == false()))
for row in query:
(yield row._asdict())
else:
content_query = session.query(func.sum(models.DataIdentifierAssociation.bytes).label('bytes'), func.count().label('length')).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').filter((models.DataIdentifierAssociation.scope == scope)).filter((models.DataIdentifierAssociation.name == name))
(bytes, length) = (0, 0)
for row in content_query:
(bytes, length) = (row.bytes, row.length)
sub_query_archives = session.query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at).filter((models.DataIdentifierAssociation.scope == scope)).filter((models.DataIdentifierAssociation.name == name)).filter((models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)).filter((models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)).filter((models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)).filter((models.ConstituentAssociation.name == models.RSEFileAssociation.name)).filter((models.RSEFileAssociation.rse_id == models.RSE.id)).filter((models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).filter((models.RSE.deleted == false())).subquery()
group_query_archives = session.query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at')).group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse).subquery()
full_query_archives = session.query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at')).group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse)
sub_query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label('available_bytes'), func.count().label('available_length'), func.min(models.RSEFileAssociation.created_at).label('created_at'), func.max(models.RSEFileAssociation.updated_at).label('updated_at'), func.max(models.RSEFileAssociation.accessed_at).label('accessed_at')).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').filter((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)).filter((models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).filter((models.DataIdentifierAssociation.scope == scope)).filter((models.DataIdentifierAssociation.name == name)).filter((models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id).subquery()
query = session.query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at).filter((models.RSE.id == sub_query.c.rse_id)).filter((models.RSE.deleted == false()))
final_query = query.union_all(full_query_archives)
for row in final_query.all():
replica = row._asdict()
(replica['length'], replica['bytes']) = (length, bytes)
if (replica['length'] == row.available_length):
replica['state'] = ReplicaState.AVAILABLE
else:
replica['state'] = ReplicaState.UNAVAILABLE
(yield replica) | -6,567,382,506,538,051,000 | :param scope: The scope of the dataset.
:param name: The name of the dataset.
:param deep: Lookup at the file level.
:param session: Database session to use.
:returns: A list of dictionaries containing the dataset replicas
with associated metrics and timestamps | lib/rucio/core/replica.py | list_dataset_replicas | bari12/rucio | python | @stream_session
def list_dataset_replicas(scope, name, deep=False, session=None):
'\n :param scope: The scope of the dataset.\n :param name: The name of the dataset.\n :param deep: Lookup at the file level.\n :param session: Database session to use.\n\n :returns: A list of dictionaries containing the dataset replicas\n with associated metrics and timestamps\n '
if (not deep):
query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label('available_length'), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at).filter_by(scope=scope, name=name, did_type=DIDType.DATASET).filter((models.CollectionReplica.rse_id == models.RSE.id)).filter((models.RSE.deleted == false()))
for row in query:
(yield row._asdict())
else:
content_query = session.query(func.sum(models.DataIdentifierAssociation.bytes).label('bytes'), func.count().label('length')).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').filter((models.DataIdentifierAssociation.scope == scope)).filter((models.DataIdentifierAssociation.name == name))
(bytes, length) = (0, 0)
for row in content_query:
(bytes, length) = (row.bytes, row.length)
sub_query_archives = session.query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at).filter((models.DataIdentifierAssociation.scope == scope)).filter((models.DataIdentifierAssociation.name == name)).filter((models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)).filter((models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)).filter((models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)).filter((models.ConstituentAssociation.name == models.RSEFileAssociation.name)).filter((models.RSEFileAssociation.rse_id == models.RSE.id)).filter((models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).filter((models.RSE.deleted == false())).subquery()
group_query_archives = session.query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at')).group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse).subquery()
full_query_archives = session.query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at')).group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse)
sub_query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label('available_bytes'), func.count().label('available_length'), func.min(models.RSEFileAssociation.created_at).label('created_at'), func.max(models.RSEFileAssociation.updated_at).label('updated_at'), func.max(models.RSEFileAssociation.accessed_at).label('accessed_at')).with_hint(models.DataIdentifierAssociation, 'INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)', 'oracle').filter((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)).filter((models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).filter((models.DataIdentifierAssociation.scope == scope)).filter((models.DataIdentifierAssociation.name == name)).filter((models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id).subquery()
query = session.query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at).filter((models.RSE.id == sub_query.c.rse_id)).filter((models.RSE.deleted == false()))
final_query = query.union_all(full_query_archives)
for row in final_query.all():
replica = row._asdict()
(replica['length'], replica['bytes']) = (length, bytes)
if (replica['length'] == row.available_length):
replica['state'] = ReplicaState.AVAILABLE
else:
replica['state'] = ReplicaState.UNAVAILABLE
(yield replica) |
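The tail above shows the two paths of list_dataset_replicas: one query reads the precomputed collection_replicas rows, while the else branch recomputes per-RSE byte and length counts from the dataset contents, including files stored inside archives. A minimal usage sketch, assuming InternalScope from rucio.common.types and that the decorator injects the database session when none is passed; the scope and dataset names are hypothetical:

from rucio.common.types import InternalScope
from rucio.core.replica import list_dataset_replicas

scope = InternalScope('user.jdoe')  # hypothetical scope name

# Shallow lookup: served from the precomputed collection_replicas table.
for rep in list_dataset_replicas(scope=scope, name='dataset_a', deep=False):
    print(rep['rse'], rep['available_length'], '/', rep['length'], rep['state'])

# Deep lookup: recomputed from file replicas and archive constituents.
for rep in list_dataset_replicas(scope=scope, name='dataset_a', deep=True):
    print(rep['rse'], rep['available_bytes'], 'of', rep['bytes'], 'bytes')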
@stream_session
def list_dataset_replicas_bulk(names_by_intscope, session=None):
'\n :param names_by_intscope: The dictionary of internal scopes pointing at the list of names.\n :param session: Database session to use.\n\n :returns: A list of dictionaries containing the dataset replicas\n with associated metrics and timestamps\n '
condition = []
for scope in names_by_intscope:
condition.append(and_((models.CollectionReplica.scope == scope), models.CollectionReplica.name.in_(names_by_intscope[scope])))
try:
for chunk in chunks(condition, 10):
query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label('available_length'), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at).filter((models.CollectionReplica.did_type == DIDType.DATASET)).filter((models.CollectionReplica.rse_id == models.RSE.id)).filter(or_(*chunk)).filter((models.RSE.deleted == false()))
for row in query:
(yield row._asdict())
except NoResultFound:
raise exception.DataIdentifierNotFound('No Data Identifiers found') | 2,755,515,626,914,224,000 | :param names_by_intscope: The dictionary of internal scopes pointing at the list of names.
:param session: Database session to use.
:returns: A list of dictionaries containing the dataset replicas
with associated metrics and timestamps | lib/rucio/core/replica.py | list_dataset_replicas_bulk | bari12/rucio | python | @stream_session
def list_dataset_replicas_bulk(names_by_intscope, session=None):
'\n :param names_by_intscope: The dictionary of internal scopes pointing at the list of names.\n :param session: Database session to use.\n\n :returns: A list of dictionaries containing the dataset replicas\n with associated metrics and timestamps\n '
condition = []
for scope in names_by_intscope:
condition.append(and_((models.CollectionReplica.scope == scope), models.CollectionReplica.name.in_(names_by_intscope[scope])))
try:
for chunk in chunks(condition, 10):
query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label('available_length'), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at).filter((models.CollectionReplica.did_type == DIDType.DATASET)).filter((models.CollectionReplica.rse_id == models.RSE.id)).filter(or_(*chunk)).filter((models.RSE.deleted == false()))
for row in query:
(yield row._asdict())
except NoResultFound:
raise exception.DataIdentifierNotFound('No Data Identifiers found') |
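A usage sketch for list_dataset_replicas_bulk; the InternalScope construction is an assumption, only the {scope: [names]} shape of names_by_intscope is given by the docstring:

from rucio.common.types import InternalScope
from rucio.core.replica import list_dataset_replicas_bulk

scope = InternalScope('user.jdoe')  # hypothetical scope
names_by_intscope = {scope: ['dataset_a', 'dataset_b']}  # hypothetical names

# The function is a generator; the per-scope conditions are OR-ed together
# in chunks of 10.
for rep in list_dataset_replicas_bulk(names_by_intscope):
    print(rep['scope'], rep['name'], rep['rse'], rep['state'])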
@stream_session
def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log):
'\n List dataset replicas for a DID (scope:name) using the\n Virtual Placement service.\n\n NOTICE: This is an RnD function and might change or go away at any time.\n\n :param scope: The scope of the dataset.\n :param name: The name of the dataset.\n :param deep: Lookup at the file level.\n :param session: Database session to use.\n\n :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites\n '
vp_endpoint = get_vp_endpoint()
vp_replies = ['other']
nr_replies = 5
if (not vp_endpoint):
return vp_replies
try:
vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1)
if (vp_replies.status_code == 200):
vp_replies = vp_replies.json()
else:
vp_replies = ['other']
except requests.exceptions.RequestException as re:
logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re))
vp_replies = ['other']
if (vp_replies != ['other']):
accessible_replica_exists = False
for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session):
rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session)
if (rse_info['rse_type'] == 'TAPE'):
continue
for prot in rse_info['protocols']:
if ((prot['scheme'] == 'root') and prot['domains']['wan']['read']):
accessible_replica_exists = True
break
if (accessible_replica_exists is True):
break
if (accessible_replica_exists is True):
for vp_reply in vp_replies:
(yield {'vp': True, 'site': vp_reply}) | 6,437,509,760,921,229,000 | List dataset replicas for a DID (scope:name) using the
Virtual Placement service.
NOTICE: This is an RnD function and might change or go away at any time.
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param deep: Lookup at the file level.
:param session: Database session to use.
:returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites | lib/rucio/core/replica.py | list_dataset_replicas_vp | bari12/rucio | python | @stream_session
def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log):
'\n List dataset replicas for a DID (scope:name) using the\n Virtual Placement service.\n\n NOTICE: This is an RnD function and might change or go away at any time.\n\n :param scope: The scope of the dataset.\n :param name: The name of the dataset.\n :param deep: Lookup at the file level.\n :param session: Database session to use.\n\n :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites\n '
vp_endpoint = get_vp_endpoint()
vp_replies = ['other']
nr_replies = 5
if (not vp_endpoint):
return vp_replies
try:
vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1)
if (vp_replies.status_code == 200):
vp_replies = vp_replies.json()
else:
vp_replies = ['other']
except requests.exceptions.RequestException as re:
logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re))
vp_replies = ['other']
if (vp_replies != ['other']):
accessible_replica_exists = False
for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session):
rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session)
if (rse_info['rse_type'] == 'TAPE'):
continue
for prot in rse_info['protocols']:
if ((prot['scheme'] == 'root') and prot['domains']['wan']['read']):
accessible_replica_exists = True
break
if (accessible_replica_exists is True):
break
if (accessible_replica_exists is True):
for vp_reply in vp_replies:
(yield {'vp': True, 'site': vp_reply}) |
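A consumption sketch for list_dataset_replicas_vp. One subtlety worth noting: the function is a generator, so when no VP endpoint is configured the bare return simply ends the iteration and the caller sees an empty stream rather than the ['other'] fallback. Names are hypothetical:

from rucio.common.types import InternalScope
from rucio.core.replica import list_dataset_replicas_vp

scope = InternalScope('user.jdoe')  # hypothetical scope
# Yields {'vp': True, 'site': ...} only when the VP service answered and a
# non-TAPE replica with a WAN-readable 'root' protocol exists.
sites = [reply['site'] for reply in list_dataset_replicas_vp(scope=scope, name='dataset_a')]
if not sites:
    pass  # fall back to a plain replica lookup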
@stream_session
def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None):
'\n List datasets at a RSE.\n\n :param rse_id: the rse id.\n :param filters: dictionary of attributes by which the results should be filtered.\n :param limit: maximum number of results to return.\n :param session: Database session to use.\n\n :returns: A list of dict dataset replicas\n '
query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label('available_length'), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at).filter_by(did_type=DIDType.DATASET).filter((models.CollectionReplica.rse_id == models.RSE.id)).filter((models.RSE.id == rse_id)).filter((models.RSE.deleted == false()))
for (k, v) in ((filters and filters.items()) or []):
if ((k == 'name') or (k == 'scope')):
v_str = (v if (k != 'scope') else v.internal)
if (('*' in v_str) or ('%' in v_str)):
if (session.bind.dialect.name == 'postgresql'):
query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%')))
else:
query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\'))
else:
query = query.filter((getattr(models.CollectionReplica, k) == v))
elif (k == 'created_before'):
created_before = str_to_date(v)
query = query.filter((models.CollectionReplica.created_at <= created_before))
elif (k == 'created_after'):
created_after = str_to_date(v)
query = query.filter((models.CollectionReplica.created_at >= created_after))
else:
query = query.filter((getattr(models.CollectionReplica, k) == v))
if limit:
query = query.limit(limit)
for row in query:
(yield row._asdict()) | -8,708,544,273,493,562,000 | List datasets at a RSE.
:param rse_id: the rse id.
:param filters: dictionary of attributes by which the results should be filtered.
:param limit: maximum number of results to return.
:param session: Database session to use.
:returns: A list of dict dataset replicas | lib/rucio/core/replica.py | list_datasets_per_rse | bari12/rucio | python | @stream_session
def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None):
'\n List datasets at a RSE.\n\n :param rse_id: the rse id.\n :param filters: dictionary of attributes by which the results should be filtered.\n :param limit: maximum number of results to return.\n :param session: Database session to use.\n\n :returns: A list of dict dataset replicas\n '
query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label('available_length'), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at).filter_by(did_type=DIDType.DATASET).filter((models.CollectionReplica.rse_id == models.RSE.id)).filter((models.RSE.id == rse_id)).filter((models.RSE.deleted == false()))
for (k, v) in ((filters and filters.items()) or []):
if ((k == 'name') or (k == 'scope')):
v_str = (v if (k != 'scope') else v.internal)
if (('*' in v_str) or ('%' in v_str)):
if (session.bind.dialect.name == 'postgresql'):
query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%')))
else:
query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\'))
else:
query = query.filter((getattr(models.CollectionReplica, k) == v))
elif (k == 'created_before'):
created_before = str_to_date(v)
query = query.filter((models.CollectionReplica.created_at <= created_before))
elif (k == 'created_after'):
created_after = str_to_date(v)
query = query.filter((models.CollectionReplica.created_at >= created_after))
else:
query = query.filter((getattr(models.CollectionReplica, k) == v))
if limit:
query = query.limit(limit)
for row in query:
(yield row._asdict()) |
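A filtering sketch for list_datasets_per_rse; the rse_id value and the date-string format accepted by str_to_date are assumptions:

from rucio.core.replica import list_datasets_per_rse

rse_id = 'deadbeefdeadbeefdeadbeefdeadbeef'  # hypothetical RSE id
# '*' wildcards in the name/scope filters are rewritten to SQL LIKE '%' patterns.
filters = {'name': 'data16*', 'created_after': '2021-01-01T00:00:00'}
for ds in list_datasets_per_rse(rse_id=rse_id, filters=filters, limit=100):
    print(ds['name'], ds['available_length'], '/', ds['length'])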
@transactional_session
def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None):
'\n Get update request for collection replicas.\n :param total_workers: Number of total workers.\n :param worker_number: id of the executing worker.\n :param limit: Maximum number of requests to return.\n :param session: Database session in use.\n :returns: List of update requests for collection replicas.\n '
session.query(models.UpdatedCollectionReplica).filter((models.UpdatedCollectionReplica.rse_id.is_(None) & (~ exists().where(and_((models.CollectionReplica.name == models.UpdatedCollectionReplica.name), (models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope)))))).delete(synchronize_session=False)
session.query(models.UpdatedCollectionReplica).filter((models.UpdatedCollectionReplica.rse_id.isnot(None) & (~ exists().where(and_((models.CollectionReplica.name == models.UpdatedCollectionReplica.name), (models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope), (models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id)))))).delete(synchronize_session=False)
if (session.bind.dialect.name == 'oracle'):
schema = ''
if BASE.metadata.schema:
schema = (BASE.metadata.schema + '.')
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema))
elif (session.bind.dialect.name == 'mysql'):
subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery()
subquery2 = session.query(subquery1.c.max_id).subquery()
session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False)
else:
replica_update_requests = session.query(models.UpdatedCollectionReplica)
update_requests_with_rse_id = []
update_requests_without_rse_id = []
duplicate_request_ids = []
for update_request in replica_update_requests.all():
if (update_request.rse_id is not None):
small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id}
if (small_request not in update_requests_with_rse_id):
update_requests_with_rse_id.append(small_request)
else:
duplicate_request_ids.append(update_request.id)
continue
else:
small_request = {'name': update_request.name, 'scope': update_request.scope}
if (small_request not in update_requests_without_rse_id):
update_requests_without_rse_id.append(small_request)
else:
duplicate_request_ids.append(update_request.id)
continue
for chunk in chunks(duplicate_request_ids, 100):
session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False)
query = session.query(models.UpdatedCollectionReplica)
if limit:
query = query.limit(limit)
return [update_request.to_dict() for update_request in query.all()] | -3,051,547,904,663,297,500 | Get update request for collection replicas.
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param limit: Maximum number of requests to return.
:param session: Database session in use.
:returns: List of update requests for collection replicas. | lib/rucio/core/replica.py | get_cleaned_updated_collection_replicas | bari12/rucio | python | @transactional_session
def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None):
'\n Get update request for collection replicas.\n :param total_workers: Number of total workers.\n :param worker_number: id of the executing worker.\n :param limit: Maximum number of requests to return.\n :param session: Database session in use.\n :returns: List of update requests for collection replicas.\n '
session.query(models.UpdatedCollectionReplica).filter((models.UpdatedCollectionReplica.rse_id.is_(None) & (~ exists().where(and_((models.CollectionReplica.name == models.UpdatedCollectionReplica.name), (models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope)))))).delete(synchronize_session=False)
session.query(models.UpdatedCollectionReplica).filter((models.UpdatedCollectionReplica.rse_id.isnot(None) & (~ exists().where(and_((models.CollectionReplica.name == models.UpdatedCollectionReplica.name), (models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope), (models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id)))))).delete(synchronize_session=False)
if (session.bind.dialect.name == 'oracle'):
schema = ''
if BASE.metadata.schema:
schema = (BASE.metadata.schema + '.')
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema))
elif (session.bind.dialect.name == 'mysql'):
subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery()
subquery2 = session.query(subquery1.c.max_id).subquery()
session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False)
else:
replica_update_requests = session.query(models.UpdatedCollectionReplica)
update_requests_with_rse_id = []
update_requests_without_rse_id = []
duplicate_request_ids = []
for update_request in replica_update_requests.all():
if (update_request.rse_id is not None):
small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id}
if (small_request not in update_requests_with_rse_id):
update_requests_with_rse_id.append(small_request)
else:
duplicate_request_ids.append(update_request.id)
continue
else:
small_request = {'name': update_request.name, 'scope': update_request.scope}
if (small_request not in update_requests_without_rse_id):
update_requests_without_rse_id.append(small_request)
else:
duplicate_request_ids.append(update_request.id)
continue
for chunk in chunks(duplicate_request_ids, 100):
session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False)
query = session.query(models.UpdatedCollectionReplica)
if limit:
query = query.limit(limit)
return [update_request.to_dict() for update_request in query.all()] |
@transactional_session
def update_collection_replica(update_request, session=None):
'\n Update a collection replica.\n :param update_request: update request from the updated_col_rep table.\n '
if (update_request['rse_id'] is not None):
ds_length = 0
old_available_replicas = 0
ds_bytes = 0
ds_replica_state = None
ds_available_bytes = 0
available_replicas = 0
try:
collection_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id']).one()
ds_length = collection_replica.length
old_available_replicas = collection_replica.available_replicas_cnt
ds_bytes = collection_replica.bytes
except NoResultFound:
pass
try:
file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation).filter((models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope), (models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name), (models.DataIdentifierAssociation.name == update_request['name']), (models.RSEFileAssociation.rse_id == update_request['rse_id']), (models.RSEFileAssociation.state == ReplicaState.AVAILABLE), (update_request['scope'] == models.DataIdentifierAssociation.scope)).with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count())).one()
available_replicas = file_replica.available_replicas
ds_available_bytes = file_replica.ds_available_bytes
except NoResultFound:
pass
if (available_replicas >= ds_length):
ds_replica_state = ReplicaState.AVAILABLE
else:
ds_replica_state = ReplicaState.UNAVAILABLE
if ((old_available_replicas > 0) and (available_replicas == 0)):
session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id']).delete()
else:
updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id']).one()
updated_replica.state = ds_replica_state
updated_replica.available_replicas_cnt = available_replicas
updated_replica.length = ds_length
updated_replica.bytes = ds_bytes
updated_replica.available_bytes = ds_available_bytes
else:
association = session.query(models.DataIdentifierAssociation).filter_by(scope=update_request['scope'], name=update_request['name']).with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes))).one()
ds_length = association.ds_length
ds_bytes = association.ds_bytes
ds_replica_state = None
collection_replicas = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name']).all()
for collection_replica in collection_replicas:
if ds_length:
collection_replica.length = ds_length
else:
collection_replica.length = 0
if ds_bytes:
collection_replica.bytes = ds_bytes
else:
collection_replica.bytes = 0
file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation).filter((models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope), (models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name), (models.DataIdentifierAssociation.name == update_request['name']), (models.RSEFileAssociation.state == ReplicaState.AVAILABLE), (update_request['scope'] == models.DataIdentifierAssociation.scope)).with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count())).group_by(models.RSEFileAssociation.rse_id).all()
for file_replica in file_replicas:
if (file_replica.available_replicas >= ds_length):
ds_replica_state = ReplicaState.AVAILABLE
else:
ds_replica_state = ReplicaState.UNAVAILABLE
collection_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id).first()
if collection_replica:
collection_replica.state = ds_replica_state
collection_replica.available_replicas_cnt = file_replica.available_replicas
collection_replica.available_bytes = file_replica.ds_available_bytes
session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() | 4,138,301,468,626,472,000 | Update a collection replica.
:param update_request: update request from the updated_col_rep table. | lib/rucio/core/replica.py | update_collection_replica | bari12/rucio | python | @transactional_session
def update_collection_replica(update_request, session=None):
'\n Update a collection replica.\n :param update_request: update request from the updated_col_rep table.\n '
if (update_request['rse_id'] is not None):
ds_length = 0
old_available_replicas = 0
ds_bytes = 0
ds_replica_state = None
ds_available_bytes = 0
available_replicas = 0
try:
collection_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id']).one()
ds_length = collection_replica.length
old_available_replicas = collection_replica.available_replicas_cnt
ds_bytes = collection_replica.bytes
except NoResultFound:
pass
try:
file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation).filter((models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope), (models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name), (models.DataIdentifierAssociation.name == update_request['name']), (models.RSEFileAssociation.rse_id == update_request['rse_id']), (models.RSEFileAssociation.state == ReplicaState.AVAILABLE), (update_request['scope'] == models.DataIdentifierAssociation.scope)).with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count())).one()
available_replicas = file_replica.available_replicas
ds_available_bytes = file_replica.ds_available_bytes
except NoResultFound:
pass
if (available_replicas >= ds_length):
ds_replica_state = ReplicaState.AVAILABLE
else:
ds_replica_state = ReplicaState.UNAVAILABLE
if ((old_available_replicas > 0) and (available_replicas == 0)):
session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id']).delete()
else:
updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id']).one()
updated_replica.state = ds_replica_state
updated_replica.available_replicas_cnt = available_replicas
updated_replica.length = ds_length
updated_replica.bytes = ds_bytes
updated_replica.available_bytes = ds_available_bytes
else:
association = session.query(models.DataIdentifierAssociation).filter_by(scope=update_request['scope'], name=update_request['name']).with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes))).one()
ds_length = association.ds_length
ds_bytes = association.ds_bytes
ds_replica_state = None
collection_replicas = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name']).all()
for collection_replica in collection_replicas:
if ds_length:
collection_replica.length = ds_length
else:
collection_replica.length = 0
if ds_bytes:
collection_replica.bytes = ds_bytes
else:
collection_replica.bytes = 0
file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation).filter((models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope), (models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name), (models.DataIdentifierAssociation.name == update_request['name']), (models.RSEFileAssociation.state == ReplicaState.AVAILABLE), (update_request['scope'] == models.DataIdentifierAssociation.scope)).with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count())).group_by(models.RSEFileAssociation.rse_id).all()
for file_replica in file_replicas:
if (file_replica.available_replicas >= ds_length):
ds_replica_state = ReplicaState.AVAILABLE
else:
ds_replica_state = ReplicaState.UNAVAILABLE
collection_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id).first()
if collection_replica:
collection_replica.state = ds_replica_state
collection_replica.available_replicas_cnt = file_replica.available_replicas
collection_replica.available_bytes = file_replica.ds_available_bytes
session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() |
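A sketch of how a daemon might chain the two records above; that a single process handles all pending updates is an assumption:

from rucio.core.replica import (get_cleaned_updated_collection_replicas,
                                update_collection_replica)

# Deduplicate the pending update requests, then apply each one; every
# applied request deletes its own updated_col_rep row at the end of the call.
for request in get_cleaned_updated_collection_replicas(total_workers=1, worker_number=0, limit=1000):
    update_collection_replica(request)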
@read_session
def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None):
"\n Returns a list of bad PFNs\n\n :param limit: The maximum number of replicas returned.\n :param thread: The assigned thread for this minos instance.\n :param total_threads: The total number of minos threads.\n :param session: The database session in use.\n\n returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}\n "
result = []
query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at)
query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path')
query = query.order_by(models.BadPFNs.created_at)
query = query.limit(limit)
for (path, state, reason, account, expires_at) in query.yield_per(1000):
result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at})
return result | -8,924,492,310,788,700,000 | Returns a list of bad PFNs
:param limit: The maximum number of PFNs returned.
:param thread: The assigned thread for this minos instance.
:param total_threads: The total number of minos threads.
:param session: The database session in use.
:returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} | lib/rucio/core/replica.py | get_bad_pfns | bari12/rucio | python | @read_session
def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None):
"\n Returns a list of bad PFNs\n\n :param limit: The maximum number of replicas returned.\n :param thread: The assigned thread for this minos instance.\n :param total_threads: The total number of minos threads.\n :param session: The database session in use.\n\n returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}\n "
result = []
query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at)
query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path')
query = query.order_by(models.BadPFNs.created_at)
query = query.limit(limit)
for (path, state, reason, account, expires_at) in query.yield_per(1000):
result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at})
return result |
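A minos-style consumer, assuming four threads that split the workload by the hash of the path:

from rucio.core.replica import get_bad_pfns

for entry in get_bad_pfns(limit=1000, thread=0, total_threads=4):
    # Each entry carries 'pfn', 'state', 'reason', 'account' and 'expires_at'.
    print(entry['pfn'], entry['state'], entry['expires_at'])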
@transactional_session
def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None):
'\n Bulk add new bad replicas.\n\n :param replicas: the list of bad replicas.\n :param account: The account who declared the bad replicas.\n :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE).\n :param session: The database session in use.\n\n :returns: True if successful.\n '
for replica in replicas:
insert_new_row = True
if (state == BadFilesStatus.TEMPORARY_UNAVAILABLE):
query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state)
if query.count():
query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False)
insert_new_row = False
if insert_new_row:
new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at)
new_bad_replica.save(session=session, flush=False)
try:
session.flush()
except IntegrityError as error:
raise exception.RucioException(error.args)
except DatabaseError as error:
raise exception.RucioException(error.args)
except FlushError as error:
if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!')
raise exception.RucioException(error.args)
return True | 4,175,332,033,690,639,000 | Bulk add new bad replicas.
:param replicas: the list of bad replicas.
:param account: The account who declared the bad replicas.
:param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE).
:param session: The database session in use.
:returns: True if successful. | lib/rucio/core/replica.py | bulk_add_bad_replicas | bari12/rucio | python | @transactional_session
def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None):
'\n Bulk add new bad replicas.\n\n :param replicas: the list of bad replicas.\n :param account: The account who declared the bad replicas.\n :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE).\n :param session: The database session in use.\n\n :returns: True if successful.\n '
for replica in replicas:
insert_new_row = True
if (state == BadFilesStatus.TEMPORARY_UNAVAILABLE):
query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state)
if query.count():
query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False)
insert_new_row = False
if insert_new_row:
new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at)
new_bad_replica.save(session=session, flush=False)
try:
session.flush()
except IntegrityError as error:
raise exception.RucioException(error.args)
except DatabaseError as error:
raise exception.RucioException(error.args)
except FlushError as error:
if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!')
raise exception.RucioException(error.args)
return True |
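A declaration sketch for bulk_add_bad_replicas. The scope, account and rse_id values are hypothetical, and the import path for BadFilesStatus mirrors the constants used throughout this module:

from datetime import datetime, timedelta

from rucio.common.types import InternalAccount, InternalScope
from rucio.core.replica import bulk_add_bad_replicas
from rucio.db.sqla.constants import BadFilesStatus

scope = InternalScope('user.jdoe')           # hypothetical scope
account = InternalAccount('root')            # hypothetical account
rse_id = 'deadbeefdeadbeefdeadbeefdeadbeef'  # hypothetical RSE id

replicas = [{'scope': scope, 'name': 'file_1', 'rse_id': rse_id}]
# An existing TEMPORARY_UNAVAILABLE row is refreshed in place; otherwise a
# new bad_replicas row is inserted.
bulk_add_bad_replicas(replicas, account=account,
                      state=BadFilesStatus.TEMPORARY_UNAVAILABLE,
                      reason='disk server drained',
                      expires_at=datetime.utcnow() + timedelta(days=1))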
@transactional_session
def bulk_delete_bad_pfns(pfns, session=None):
'\n Bulk delete bad PFNs.\n\n :param pfns: the list of bad PFNs to delete.\n :param session: The database session in use.\n\n :returns: True if successful.\n '
pfn_clause = []
for pfn in pfns:
pfn_clause.append((models.BadPFNs.path == pfn))
for chunk in chunks(pfn_clause, 100):
query = session.query(models.BadPFNs).filter(or_(*chunk))
query.delete(synchronize_session=False)
return True | -2,324,208,928,254,189,600 | Bulk delete bad PFNs.
:param pfns: the list of bad PFNs to delete.
:param session: The database session in use.
:returns: True if successful. | lib/rucio/core/replica.py | bulk_delete_bad_pfns | bari12/rucio | python | @transactional_session
def bulk_delete_bad_pfns(pfns, session=None):
'\n Bulk delete bad PFNs.\n\n :param pfns: the list of bad PFNs to delete.\n :param session: The database session in use.\n\n :returns: True if successful.\n '
pfn_clause = []
for pfn in pfns:
pfn_clause.append((models.BadPFNs.path == pfn))
for chunk in chunks(pfn_clause, 100):
query = session.query(models.BadPFNs).filter(or_(*chunk))
query.delete(synchronize_session=False)
return True |
@transactional_session
def bulk_delete_bad_replicas(bad_replicas, session=None):
'\n Bulk delete bad replicas.\n\n :param bad_replicas: The list of bad replicas to delete (Dictionaries).\n :param session: The database session in use.\n\n :returns: True if successful.\n '
replica_clause = []
for replica in bad_replicas:
replica_clause.append(and_((models.BadReplicas.scope == replica['scope']), (models.BadReplicas.name == replica['name']), (models.BadReplicas.rse_id == replica['rse_id']), (models.BadReplicas.state == replica['state'])))
for chunk in chunks(replica_clause, 100):
session.query(models.BadReplicas).filter(or_(*chunk)).delete(synchronize_session=False)
return True | 2,723,186,127,305,752,600 | Bulk delete bad replicas.
:param bad_replicas: The list of bad replicas to delete (Dictionaries).
:param session: The database session in use.
:returns: True if successful. | lib/rucio/core/replica.py | bulk_delete_bad_replicas | bari12/rucio | python | @transactional_session
def bulk_delete_bad_replicas(bad_replicas, session=None):
'\n Bulk delete bad replicas.\n\n :param bad_replicas: The list of bad replicas to delete (Dictionaries).\n :param session: The database session in use.\n\n :returns: True if successful.\n '
replica_clause = []
for replica in bad_replicas:
replica_clause.append(and_((models.BadReplicas.scope == replica['scope']), (models.BadReplicas.name == replica['name']), (models.BadReplicas.rse_id == replica['rse_id']), (models.BadReplicas.state == replica['state'])))
for chunk in chunks(replica_clause, 100):
session.query(models.BadReplicas).filter(or_(*chunk)).delete(synchronize_session=False)
return True |
@transactional_session
def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None):
'\n Add bad PFNs.\n\n :param pfns: the list of new files.\n :param account: The account who declared the bad replicas.\n :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE.\n :param reason: A string describing the reason of the loss.\n :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files.\n :param session: The database session in use.\n\n :returns: True if successful.\n '
if isinstance(state, string_types):
rep_state = BadPFNStatus[state]
else:
rep_state = state
pfns = clean_surls(pfns)
for pfn in pfns:
new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at)
new_pfn = session.merge(new_pfn)
new_pfn.save(session=session, flush=False)
try:
session.flush()
except IntegrityError as error:
raise exception.RucioException(error.args)
except DatabaseError as error:
raise exception.RucioException(error.args)
except FlushError as error:
if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
raise exception.Duplicate('One PFN already exists!')
raise exception.RucioException(error.args)
return True | 1,059,017,633,819,548,800 | Add bad PFNs.
:param pfns: the list of new files.
:param account: The account who declared the bad replicas.
:param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE.
:param reason: A string describing the reason of the loss.
:param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files.
:param session: The database session in use.
:returns: True if successful. | lib/rucio/core/replica.py | add_bad_pfns | bari12/rucio | python | @transactional_session
def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None):
'\n Add bad PFNs.\n\n :param pfns: the list of new files.\n :param account: The account who declared the bad replicas.\n :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE.\n :param reason: A string describing the reason of the loss.\n :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files.\n :param session: The database session in use.\n\n :returns: True if successful.\n '
if isinstance(state, string_types):
rep_state = BadPFNStatus[state]
else:
rep_state = state
pfns = clean_surls(pfns)
for pfn in pfns:
new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at)
new_pfn = session.merge(new_pfn)
new_pfn.save(session=session, flush=False)
try:
session.flush()
except IntegrityError as error:
raise exception.RucioException(error.args)
except DatabaseError as error:
raise exception.RucioException(error.args)
except FlushError as error:
if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
raise exception.Duplicate('One PFN already exists!')
raise exception.RucioException(error.args)
return True |
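A PFN-level declaration sketch; the PFN and account are hypothetical. The state may be passed as a string, which the function maps through BadPFNStatus[state]:

from datetime import datetime, timedelta

from rucio.common.types import InternalAccount
from rucio.core.replica import add_bad_pfns

account = InternalAccount('root')  # hypothetical account
pfns = ['root://xrootd.example.org:1094//path/file_1']  # hypothetical PFN
add_bad_pfns(pfns=pfns, account=account, state='TEMPORARY_UNAVAILABLE',
             reason='storage intervention',
             expires_at=datetime.utcnow() + timedelta(hours=12))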
@read_session
def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None):
'\n List the expired temporary unavailable replicas\n\n :param total_workers: Number of total workers.\n :param worker_number: id of the executing worker.\n :param limit: The maximum number of replicas returned.\n :param session: The database session in use.\n '
query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).filter((models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE)).filter((models.BadReplicas.expires_at < datetime.utcnow())).with_hint(models.BadReplicas, 'index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)', 'oracle').order_by(models.BadReplicas.expires_at)
query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name')
query = query.limit(limit)
return query.all() | 4,703,407,185,827,953,000 | List the expired temporary unavailable replicas
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param limit: The maximum number of replicas returned.
:param session: The database session in use. | lib/rucio/core/replica.py | list_expired_temporary_unavailable_replicas | bari12/rucio | python | @read_session
def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None):
'\n List the expired temporary unavailable replicas\n\n :param total_workers: Number of total workers.\n :param worker_number: id of the executing worker.\n :param limit: The maximum number of replicas returned.\n :param session: The database session in use.\n '
query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).filter((models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE)).filter((models.BadReplicas.expires_at < datetime.utcnow())).with_hint(models.BadReplicas, 'index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)', 'oracle').order_by(models.BadReplicas.expires_at)
query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name')
query = query.limit(limit)
return query.all() |
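A drain sketch; feeding the expired entries into bulk_delete_bad_replicas is an assumption about the surrounding daemon, not something this record states:

from rucio.core.replica import (bulk_delete_bad_replicas,
                                list_expired_temporary_unavailable_replicas)
from rucio.db.sqla.constants import BadFilesStatus

expired = list_expired_temporary_unavailable_replicas(total_workers=1, worker_number=0, limit=1000)
# Each row is a (scope, name, rse_id) tuple.
bad = [{'scope': s, 'name': n, 'rse_id': r, 'state': BadFilesStatus.TEMPORARY_UNAVAILABLE}
       for s, n, r in expired]
bulk_delete_bad_replicas(bad)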
@read_session
def get_replicas_state(scope=None, name=None, session=None):
'\n Method used by the necromancer to get all the replicas of a DID.\n :param scope: The scope of the file.\n :param name: The name of the file.\n :param session: The database session in use.\n\n :returns: A dictionary with the list of states as keys and the rse_ids as values\n '
query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name)
states = {}
for res in query.all():
(rse_id, state) = res
if (state not in states):
states[state] = []
states[state].append(rse_id)
return states | -4,963,462,164,844,283,000 | Method used by the necromancer to get all the replicas of a DID.
:param scope: The scope of the file.
:param name: The name of the file.
:param session: The database session in use.
:returns: A dictionary with the list of states as keys and the rse_ids as values | lib/rucio/core/replica.py | get_replicas_state | bari12/rucio | python | @read_session
def get_replicas_state(scope=None, name=None, session=None):
'\n Method used by the necromancer to get all the replicas of a DIDs\n :param scope: The scope of the file.\n :param name: The name of the file.\n :param session: The database session in use.\n\n :returns: A dictionary with the list of states as keys and the rse_ids as value\n '
query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name)
states = {}
for res in query.all():
(rse_id, state) = res
if (state not in states):
states[state] = []
states[state].append(rse_id)
return states |
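A consumption sketch for the returned state map; the scope construction is an assumption:

from rucio.common.types import InternalScope
from rucio.core.replica import get_replicas_state

scope = InternalScope('user.jdoe')  # hypothetical scope
states = get_replicas_state(scope=scope, name='file_1')
# Keys are ReplicaState values, values are lists of RSE ids.
for state, rse_ids in states.items():
    print(state, '->', len(rse_ids), 'RSE(s)')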
@read_session
def get_suspicious_files(rse_expression, filter=None, **kwargs):
"\n Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date,\n present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list.\n Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or\n be declared as <is_suspicious> in the bad_replicas table.\n Keyword Arguments:\n :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago.\n :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0.\n :param rse_expression: The RSE expression where the replicas are located.\n :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True}\n :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list\n was declared for a replica since younger_than date. Allowed values\n = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS').\n :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE\n than the one in the bad_replicas table will be taken into account. Default value = False.\n :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False.\n :param session: The database session in use. Default value = None.\n\n :returns: a list of replicas:\n [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...]\n "
younger_than = kwargs.get('younger_than', (datetime.now() - timedelta(days=10)))
nattempts = kwargs.get('nattempts', 0)
session = kwargs.get('session', None)
exclude_states = kwargs.get('exclude_states', ['B', 'R', 'D'])
available_elsewhere = kwargs.get('available_elsewhere', False)
is_suspicious = kwargs.get('is_suspicious', False)
if (not isinstance(nattempts, int)):
nattempts = 0
if (not isinstance(younger_than, datetime)):
younger_than = (datetime.now() - timedelta(days=10))
exclude_states_clause = []
for state in exclude_states:
exclude_states_clause.append(BadFilesStatus(state))
bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias')
replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias')
rse_clause = []
if rse_expression:
parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session)
for rse in parsedexp:
rse_clause.append((models.RSEFileAssociation.rse_id == rse['id']))
query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at)).filter((models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id), (models.RSEFileAssociation.scope == bad_replicas_alias.scope), (models.RSEFileAssociation.name == bad_replicas_alias.name), (bad_replicas_alias.created_at >= younger_than))
if is_suspicious:
query = query.filter((bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS))
if rse_clause:
query = query.filter(or_(*rse_clause))
if available_elsewhere:
available_replica = exists(select([1]).where(and_((replicas_alias.state == ReplicaState.AVAILABLE), (replicas_alias.scope == bad_replicas_alias.scope), (replicas_alias.name == bad_replicas_alias.name), (replicas_alias.rse_id != bad_replicas_alias.rse_id))))
query = query.filter(available_replica)
other_states_present = exists(select([1]).where(and_((models.BadReplicas.scope == bad_replicas_alias.scope), (models.BadReplicas.name == bad_replicas_alias.name), (models.BadReplicas.created_at >= younger_than), (models.BadReplicas.rse_id == bad_replicas_alias.rse_id), models.BadReplicas.state.in_(exclude_states_clause))))
query = query.filter(not_(other_states_present))
query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having((func.count() > nattempts)).all()
result = []
rses = {}
for (cnt, scope, name, rse_id, created_at) in query_result:
if (rse_id not in rses):
rse = get_rse_name(rse_id=rse_id, session=session)
rses[rse_id] = rse
result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at})
return result | 7,221,747,538,460,163,000 | Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date,
present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list.
Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or
be declared as <is_suspicious> in the bad_replicas table.
Keyword Arguments:
:param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago.
:param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0.
:param rse_expression: The RSE expression where the replicas are located.
:param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True}
:param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list
was declared for a replica since younger_than date. Allowed values
= ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS').
:param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE
than the one in the bad_replicas table will be taken into account. Default value = False.
:param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False.
:param session: The database session in use. Default value = None.
:returns: a list of replicas:
[{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] | lib/rucio/core/replica.py | get_suspicious_files | bari12/rucio | python | @read_session
def get_suspicious_files(rse_expression, filter=None, **kwargs):
"\n Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date,\n present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list.\n Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or\n be declared as <is_suspicious> in the bad_replicas table.\n Keyword Arguments:\n :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago.\n :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0.\n :param rse_expression: The RSE expression where the replicas are located.\n :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True}\n :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list\n was declared for a replica since younger_than date. Allowed values\n = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS').\n :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE\n than the one in the bad_replicas table will be taken into account. Default value = False.\n :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False.\n :param session: The database session in use. Default value = None.\n\n :returns: a list of replicas:\n [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...]\n "
younger_than = kwargs.get('younger_than', (datetime.now() - timedelta(days=10)))
nattempts = kwargs.get('nattempts', 0)
session = kwargs.get('session', None)
exclude_states = kwargs.get('exclude_states', ['B', 'R', 'D'])
available_elsewhere = kwargs.get('available_elsewhere', False)
is_suspicious = kwargs.get('is_suspicious', False)
if (not isinstance(nattempts, int)):
nattempts = 0
if (not isinstance(younger_than, datetime)):
younger_than = (datetime.now() - timedelta(days=10))
exclude_states_clause = []
for state in exclude_states:
exclude_states_clause.append(BadFilesStatus(state))
bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias')
replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias')
rse_clause = []
if rse_expression:
parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session)
for rse in parsedexp:
rse_clause.append((models.RSEFileAssociation.rse_id == rse['id']))
query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at)).filter((models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id), (models.RSEFileAssociation.scope == bad_replicas_alias.scope), (models.RSEFileAssociation.name == bad_replicas_alias.name), (bad_replicas_alias.created_at >= younger_than))
if is_suspicious:
query = query.filter((bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS))
if rse_clause:
query = query.filter(or_(*rse_clause))
if available_elsewhere:
available_replica = exists(select([1]).where(and_((replicas_alias.state == ReplicaState.AVAILABLE), (replicas_alias.scope == bad_replicas_alias.scope), (replicas_alias.name == bad_replicas_alias.name), (replicas_alias.rse_id != bad_replicas_alias.rse_id))))
query = query.filter(available_replica)
other_states_present = exists(select([1]).where(and_((models.BadReplicas.scope == bad_replicas_alias.scope), (models.BadReplicas.name == bad_replicas_alias.name), (models.BadReplicas.created_at >= younger_than), (models.BadReplicas.rse_id == bad_replicas_alias.rse_id), models.BadReplicas.state.in_(exclude_states_clause))))
query = query.filter(not_(other_states_present))
query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having((func.count() > nattempts)).all()
result = []
rses = {}
for (cnt, scope, name, rse_id, created_at) in query_result:
if (rse_id not in rses):
rse = get_rse_name(rse_id=rse_id, session=session)
rses[rse_id] = rse
result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at})
return result |
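A query sketch using the documented keyword arguments; the RSE expression is hypothetical:

from datetime import datetime, timedelta

from rucio.core.replica import get_suspicious_files

suspicious = get_suspicious_files(rse_expression='tier=1',  # hypothetical expression
                                  younger_than=datetime.now() - timedelta(days=3),
                                  nattempts=2,
                                  exclude_states=['B', 'R', 'D'],
                                  available_elsewhere=True)
for rep in suspicious:
    print(rep['scope'], rep['name'], rep['rse'], rep['cnt'], rep['created_at'])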
@transactional_session
def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None):
'\n Sets a tombstone on a replica.\n\n :param rse_id: ID of RSE.\n :param scope: scope of the replica DID.\n :param name: name of the replica DID.\n :param tombstone: the tombstone to set. Default is OBSOLETE\n :param session: database session in use.\n '
rowcount = session.query(models.RSEFileAssociation).filter(and_((models.RSEFileAssociation.rse_id == rse_id), (models.RSEFileAssociation.name == name), (models.RSEFileAssociation.scope == scope), (~ exists().where(and_((models.ReplicaLock.rse_id == rse_id), (models.ReplicaLock.name == name), (models.ReplicaLock.scope == scope)))))).with_hint(models.RSEFileAssociation, 'index(REPLICAS REPLICAS_PK)', 'oracle').update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False)
if (rowcount == 0):
try:
session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one()
raise exception.ReplicaIsLocked(('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))))
except NoResultFound:
raise exception.ReplicaNotFound(('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session)))) | -6,601,409,596,648,705,000 | Sets a tombstone on a replica.
:param rse_id: ID of RSE.
:param scope: scope of the replica DID.
:param name: name of the replica DID.
:param tombstone: the tombstone to set. Default is OBSOLETE
:param session: database session in use. | lib/rucio/core/replica.py | set_tombstone | bari12/rucio | python | @transactional_session
def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None):
'\n Sets a tombstone on a replica.\n\n :param rse_id: ID of RSE.\n :param scope: scope of the replica DID.\n :param name: name of the replica DID.\n :param tombstone: the tombstone to set. Default is OBSOLETE\n :param session: database session in use.\n '
rowcount = session.query(models.RSEFileAssociation).filter(and_((models.RSEFileAssociation.rse_id == rse_id), (models.RSEFileAssociation.name == name), (models.RSEFileAssociation.scope == scope), (~ exists().where(and_((models.ReplicaLock.rse_id == rse_id), (models.ReplicaLock.name == name), (models.ReplicaLock.scope == scope)))))).with_hint(models.RSEFileAssociation, 'index(REPLICAS REPLICAS_PK)', 'oracle').update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False)
if (rowcount == 0):
try:
session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one()
raise exception.ReplicaIsLocked(('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))))
except NoResultFound:
raise exception.ReplicaNotFound(('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session)))) |
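A minimal usage sketch for the set_tombstone row above, assuming a deployed Rucio instance with a configured database session; the wrapper name try_tombstone and its return strings are illustrative, not part of Rucio:

from rucio.common import exception
from rucio.core.replica import set_tombstone

def try_tombstone(rse_id, scope, name):
    # Locked and missing replicas surface as typed exceptions, as shown
    # in the function body above, rather than failing silently.
    try:
        set_tombstone(rse_id=rse_id, scope=scope, name=name)
        return 'tombstoned'
    except exception.ReplicaIsLocked:
        return 'locked'
    except exception.ReplicaNotFound:
        return 'missing'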
@read_session
def get_RSEcoverage_of_dataset(scope, name, session=None):
'\n Get total bytes present on RSEs\n\n :param scope: Scope of the dataset\n :param name: Name of the dataset\n :param session: The db session.\n :return: Dictionary { rse_id : <total bytes present at rse_id> }\n '
query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes))
query = query.filter(and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name), (models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)))
query = query.group_by(models.RSEFileAssociation.rse_id)
result = {}
for (rse_id, total) in query:
if total:
result[rse_id] = total
return result | 4,917,449,103,829,381,000 | Get total bytes present on RSEs
:param scope: Scope of the dataset
:param name: Name of the dataset
:param session: The db session.
:return: Dictionary { rse_id : <total bytes present at rse_id> } | lib/rucio/core/replica.py | get_RSEcoverage_of_dataset | bari12/rucio | python | @read_session
def get_RSEcoverage_of_dataset(scope, name, session=None):
'\n Get total bytes present on RSEs\n\n :param scope: Scope of the dataset\n :param name: Name of the dataset\n :param session: The db session.\n :return: Dictionary { rse_id : <total bytes present at rse_id> }\n '
query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes))
query = query.filter(and_((models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope), (models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name), (models.DataIdentifierAssociation.scope == scope), (models.DataIdentifierAssociation.name == name), (models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)))
query = query.group_by(models.RSEFileAssociation.rse_id)
result = {}
for (rse_id, total) in query:
if total:
result[rse_id] = total
return result |
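A short consumption sketch for the coverage dictionary returned above, again assuming a configured Rucio database; get_rse_name is assumed importable from rucio.core.rse as used elsewhere in this module, and the scope/name values are placeholders:

from rucio.core.replica import get_RSEcoverage_of_dataset
from rucio.core.rse import get_rse_name

coverage = get_RSEcoverage_of_dataset(scope='user.jdoe', name='dataset_1')
# Print the RSEs holding the dataset, largest byte count first.
for rse_id, nbytes in sorted(coverage.items(), key=lambda kv: -kv[1]):
    print(get_rse_name(rse_id=rse_id), nbytes)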
def __init__(self, errors=None):
'ErrorList - a model defined in Swagger'
self._errors = None
self.discriminator = None
if (errors is not None):
self.errors = errors | -8,961,291,312,829,862,000 | ErrorList - a model defined in Swagger | controlm_py/models/error_list.py | __init__ | dcompane/controlm_py | python | def __init__(self, errors=None):
self._errors = None
self.discriminator = None
if (errors is not None):
self.errors = errors |
@property
def errors(self):
'Gets the errors of this ErrorList. # noqa: E501\n\n\n :return: The errors of this ErrorList. # noqa: E501\n :rtype: list[ErrorData]\n '
return self._errors | 5,822,717,732,938,109,000 | Gets the errors of this ErrorList. # noqa: E501
:return: The errors of this ErrorList. # noqa: E501
:rtype: list[ErrorData] | controlm_py/models/error_list.py | errors | dcompane/controlm_py | python | @property
def errors(self):
'Gets the errors of this ErrorList. # noqa: E501\n\n\n :return: The errors of this ErrorList. # noqa: E501\n :rtype: list[ErrorData]\n '
return self._errors |
@errors.setter
def errors(self, errors):
'Sets the errors of this ErrorList.\n\n\n :param errors: The errors of this ErrorList. # noqa: E501\n :type: list[ErrorData]\n '
self._errors = errors | -2,227,954,922,095,561,200 | Sets the errors of this ErrorList.
:param errors: The errors of this ErrorList. # noqa: E501
:type: list[ErrorData] | controlm_py/models/error_list.py | errors | dcompane/controlm_py | python | @errors.setter
def errors(self, errors):
'Sets the errors of this ErrorList.\n\n\n :param errors: The errors of this ErrorList. # noqa: E501\n :type: list[ErrorData]\n '
self._errors = errors |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(ErrorList, dict):
for (key, value) in self.items():
result[key] = value
return result | 3,112,151,779,715,025,400 | Returns the model properties as a dict | controlm_py/models/error_list.py | to_dict | dcompane/controlm_py | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(ErrorList, dict):
for (key, value) in self.items():
result[key] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | controlm_py/models/error_list.py | to_str | dcompane/controlm_py | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | controlm_py/models/error_list.py | __repr__ | dcompane/controlm_py | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, ErrorList)):
return False
return (self.__dict__ == other.__dict__) | -3,619,122,469,386,630,000 | Returns true if both objects are equal | controlm_py/models/error_list.py | __eq__ | dcompane/controlm_py | python | def __eq__(self, other):
if (not isinstance(other, ErrorList)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | controlm_py/models/error_list.py | __ne__ | dcompane/controlm_py | python | def __ne__(self, other):
return (not (self == other)) |
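A sketch exercising the generated ErrorList model from the rows above, assuming the standard swagger-codegen swagger_types class attribute (declaring the single errors field) that falls outside this excerpt:

from controlm_py.models.error_list import ErrorList

a = ErrorList(errors=[])
b = ErrorList(errors=[])
assert a == b                       # __eq__ compares instance __dict__
assert a != ErrorList()             # errors stays None when not passed
assert a.to_dict() == {'errors': []}
print(a)                            # __repr__ delegates to to_str(), i.e. pprint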
def __call__(self, p):
'\n Return the value of the link function. This is just a placeholder.\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n g(p) : array_like\n The value of the link function g(p) = z\n '
return NotImplementedError | 3,439,035,475,943,891,500 | Return the value of the link function. This is just a placeholder.
Parameters
----------
p : array_like
Probabilities
Returns
-------
g(p) : array_like
The value of the link function g(p) = z | statsmodels/genmod/families/links.py | __call__ | BioGeneTools/statsmodels | python | def __call__(self, p):
'\n Return the value of the link function. This is just a placeholder.\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n g(p) : array_like\n The value of the link function g(p) = z\n '
return NotImplementedError |
def inverse(self, z):
'\n Inverse of the link function. Just a placeholder.\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor of the transformed variable\n in the IRLS algorithm for GLM.\n\n Returns\n -------\n g^(-1)(z) : ndarray\n The value of the inverse of the link function g^(-1)(z) = p\n '
return NotImplementedError | 589,133,332,174,300,900 | Inverse of the link function. Just a placeholder.
Parameters
----------
z : array_like
`z` is usually the linear predictor of the transformed variable
in the IRLS algorithm for GLM.
Returns
-------
g^(-1)(z) : ndarray
The value of the inverse of the link function g^(-1)(z) = p | statsmodels/genmod/families/links.py | inverse | BioGeneTools/statsmodels | python | def inverse(self, z):
'\n Inverse of the link function. Just a placeholder.\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor of the transformed variable\n in the IRLS algorithm for GLM.\n\n Returns\n -------\n g^(-1)(z) : ndarray\n The value of the inverse of the link function g^(-1)(z) = p\n '
return NotImplementedError |
def deriv(self, p):
"\n Derivative of the link function g'(p). Just a placeholder.\n\n Parameters\n ----------\n p : array_like\n\n Returns\n -------\n g'(p) : ndarray\n The value of the derivative of the link function g'(p)\n "
return NotImplementedError | -7,053,478,128,397,519,000 | Derivative of the link function g'(p). Just a placeholder.
Parameters
----------
p : array_like
Returns
-------
g'(p) : ndarray
The value of the derivative of the link function g'(p) | statsmodels/genmod/families/links.py | deriv | BioGeneTools/statsmodels | python | def deriv(self, p):
"\n Derivative of the link function g'(p). Just a placeholder.\n\n Parameters\n ----------\n p : array_like\n\n Returns\n -------\n g'(p) : ndarray\n The value of the derivative of the link function g'(p)\n "
return NotImplementedError |
def deriv2(self, p):
"Second derivative of the link function g''(p)\n\n implemented through numerical differentiation\n "
from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
return _approx_fprime_cs_scalar(p, self.deriv) | 8,712,612,717,014,624,000 | Second derivative of the link function g''(p)
implemented through numerical differentiation | statsmodels/genmod/families/links.py | deriv2 | BioGeneTools/statsmodels | python | def deriv2(self, p):
"Second derivative of the link function g(p)\n\n implemented through numerical differentiation\n "
from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
return _approx_fprime_cs_scalar(p, self.deriv) |
def inverse_deriv(self, z):
"\n Derivative of the inverse link function g^(-1)(z).\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g'^(-1)(z) : ndarray\n The value of the derivative of the inverse of the link function\n\n Notes\n -----\n This reference implementation gives the correct result but is\n inefficient, so it can be overridden in subclasses.\n "
return (1 / self.deriv(self.inverse(z))) | 9,001,918,539,944,982,000 | Derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the derivative of the inverse of the link function
Notes
-----
This reference implementation gives the correct result but is
inefficient, so it can be overridden in subclasses. | statsmodels/genmod/families/links.py | inverse_deriv | BioGeneTools/statsmodels | python | def inverse_deriv(self, z):
"\n Derivative of the inverse link function g^(-1)(z).\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g'^(-1)(z) : ndarray\n The value of the derivative of the inverse of the link function\n\n Notes\n -----\n This reference implementation gives the correct result but is\n inefficient, so it can be overridden in subclasses.\n "
return (1 / self.deriv(self.inverse(z))) |
def inverse_deriv2(self, z):
"\n Second derivative of the inverse link function g^(-1)(z).\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g'^(-1)(z) : ndarray\n The value of the second derivative of the inverse of the link\n function\n\n Notes\n -----\n This reference implementation gives the correct result but is\n inefficient, so it can be overridden in subclasses.\n "
iz = self.inverse(z)
return ((- self.deriv2(iz)) / (self.deriv(iz) ** 3)) | -3,968,055,418,984,185,300 | Second derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the second derivative of the inverse of the link
function
Notes
-----
This reference implementation gives the correct result but is
inefficient, so it can be overridden in subclasses. | statsmodels/genmod/families/links.py | inverse_deriv2 | BioGeneTools/statsmodels | python | def inverse_deriv2(self, z):
"\n Second derivative of the inverse link function g^(-1)(z).\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g'^(-1)(z) : ndarray\n The value of the second derivative of the inverse of the link\n function\n\n Notes\n -----\n This reference implementation gives the correct result but is\n inefficient, so it can be overridden in subclasses.\n "
iz = self.inverse(z)
return ((- self.deriv2(iz)) / (self.deriv(iz) ** 3)) |
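The placeholder methods above define the contract for concrete links; the sketch below, with an illustrative class name that is not part of statsmodels, overrides only __call__, inverse and deriv and relies on the inherited chain-rule fallback for inverse_deriv:

import numpy as np
from statsmodels.genmod.families.links import Link

class CubeRoot(Link):
    # g(p) = p ** (1/3) for p > 0
    def __call__(self, p):
        return np.power(p, 1.0 / 3)

    def inverse(self, z):
        return np.power(z, 3.0)

    def deriv(self, p):
        return np.power(p, -2.0 / 3) / 3.0

g = CubeRoot()
p = np.array([0.5, 2.0])
# Inherited fallback 1 / g'(g^{-1}(z)) matches the analytic 3 * z**2 = 3 * p**(2/3).
assert np.allclose(g.inverse_deriv(g(p)), 3 * p ** (2.0 / 3))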
def _clean(self, p):
'\n Clip logistic values to range (eps, 1-eps)\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n pclip : ndarray\n Clipped probabilities\n '
return np.clip(p, FLOAT_EPS, (1.0 - FLOAT_EPS)) | -3,440,027,265,344,145,400 | Clip logistic values to range (eps, 1-eps)
Parameters
----------
p : array_like
Probabilities
Returns
-------
pclip : ndarray
Clipped probabilities | statsmodels/genmod/families/links.py | _clean | BioGeneTools/statsmodels | python | def _clean(self, p):
'\n Clip logistic values to range (eps, 1-eps)\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n pclip : ndarray\n Clipped probabilities\n '
return np.clip(p, FLOAT_EPS, (1.0 - FLOAT_EPS)) |
def __call__(self, p):
'\n The logit transform\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n z : ndarray\n Logit transform of `p`\n\n Notes\n -----\n g(p) = log(p / (1 - p))\n '
p = self._clean(p)
return np.log((p / (1.0 - p))) | 240,232,997,412,097,760 | The logit transform
Parameters
----------
p : array_like
Probabilities
Returns
-------
z : ndarray
Logit transform of `p`
Notes
-----
g(p) = log(p / (1 - p)) | statsmodels/genmod/families/links.py | __call__ | BioGeneTools/statsmodels | python | def __call__(self, p):
'\n The logit transform\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n z : ndarray\n Logit transform of `p`\n\n Notes\n -----\n g(p) = log(p / (1 - p))\n '
p = self._clean(p)
return np.log((p / (1.0 - p))) |
def inverse(self, z):
'\n Inverse of the logit transform\n\n Parameters\n ----------\n z : array_like\n The value of the logit transform at `p`\n\n Returns\n -------\n p : ndarray\n Probabilities\n\n Notes\n -----\n g^(-1)(z) = exp(z)/(1+exp(z))\n '
z = np.asarray(z)
t = np.exp((- z))
return (1.0 / (1.0 + t)) | 5,521,868,919,720,613,000 | Inverse of the logit transform
Parameters
----------
z : array_like
The value of the logit transform at `p`
Returns
-------
p : ndarray
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z)) | statsmodels/genmod/families/links.py | inverse | BioGeneTools/statsmodels | python | def inverse(self, z):
'\n Inverse of the logit transform\n\n Parameters\n ----------\n z : array_like\n The value of the logit transform at `p`\n\n Returns\n -------\n p : ndarray\n Probabilities\n\n Notes\n -----\n g^(-1)(z) = exp(z)/(1+exp(z))\n '
z = np.asarray(z)
t = np.exp((- z))
return (1.0 / (1.0 + t)) |
def deriv(self, p):
"\n Derivative of the logit transform\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n g'(p) : ndarray\n Value of the derivative of logit transform at `p`\n\n Notes\n -----\n g'(p) = 1 / (p * (1 - p))\n\n Alias for `Logit`:\n logit = Logit()\n "
p = self._clean(p)
return (1.0 / (p * (1 - p))) | -3,496,373,472,863,382,500 | Derivative of the logit transform
Parameters
----------
p : array_like
Probabilities
Returns
-------
g'(p) : ndarray
Value of the derivative of logit transform at `p`
Notes
-----
g'(p) = 1 / (p * (1 - p))
Alias for `Logit`:
logit = Logit() | statsmodels/genmod/families/links.py | deriv | BioGeneTools/statsmodels | python | def deriv(self, p):
"\n Derivative of the logit transform\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n g'(p) : ndarray\n Value of the derivative of logit transform at `p`\n\n Notes\n -----\n g'(p) = 1 / (p * (1 - p))\n\n Alias for `Logit`:\n logit = Logit()\n "
p = self._clean(p)
return (1.0 / (p * (1 - p))) |
def inverse_deriv(self, z):
"\n Derivative of the inverse of the logit transform\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g'^(-1)(z) : ndarray\n The value of the derivative of the inverse of the logit function\n "
t = np.exp(z)
return (t / ((1 + t) ** 2)) | -8,336,254,912,463,244,000 | Derivative of the inverse of the logit transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the derivative of the inverse of the logit function | statsmodels/genmod/families/links.py | inverse_deriv | BioGeneTools/statsmodels | python | def inverse_deriv(self, z):
"\n Derivative of the inverse of the logit transform\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g'^(-1)(z) : ndarray\n The value of the derivative of the inverse of the logit function\n "
t = np.exp(z)
return (t / ((1 + t) ** 2)) |
def deriv2(self, p):
"\n Second derivative of the logit function.\n\n Parameters\n ----------\n p : array_like\n probabilities\n\n Returns\n -------\n g''(z) : ndarray\n The value of the second derivative of the logit function\n "
v = (p * (1 - p))
return (((2 * p) - 1) / (v ** 2)) | -3,825,766,102,883,364,400 | Second derivative of the logit function.
Parameters
----------
p : array_like
probabilities
Returns
-------
g''(z) : ndarray
The value of the second derivative of the logit function | statsmodels/genmod/families/links.py | deriv2 | BioGeneTools/statsmodels | python | def deriv2(self, p):
"\n Second derivative of the logit function.\n\n Parameters\n ----------\n p : array_like\n probabilities\n\n Returns\n -------\n g(z) : ndarray\n The value of the second derivative of the logit function\n "
v = (p * (1 - p))
return (((2 * p) - 1) / (v ** 2)) |
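A quick numerical check of the Logit identities documented above; scipy is assumed available, as statsmodels already depends on it:

import numpy as np
from scipy.special import expit
from statsmodels.genmod.families.links import Logit

logit = Logit()
p = np.linspace(0.05, 0.95, 5)
z = logit(p)
assert np.allclose(logit.inverse(z), p)                          # round trip
assert np.allclose(logit.inverse_deriv(z), expit(z) * (1 - expit(z)))
assert np.allclose(logit.deriv(p) * logit.inverse_deriv(z), 1)   # inverse function rule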
def __call__(self, p):
'\n Power transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n z : array_like\n Power transform of x\n\n Notes\n -----\n g(p) = x**self.power\n '
if (self.power == 1):
return p
else:
return np.power(p, self.power) | 6,490,001,479,540,673,000 | Power transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : array_like
Power transform of x
Notes
-----
g(p) = x**self.power | statsmodels/genmod/families/links.py | __call__ | BioGeneTools/statsmodels | python | def __call__(self, p):
'\n Power transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n z : array_like\n Power transform of x\n\n Notes\n -----\n g(p) = x**self.power\n '
if (self.power == 1):
return p
else:
return np.power(p, self.power) |
def inverse(self, z):
'\n Inverse of the power transform link function\n\n Parameters\n ----------\n `z` : array_like\n Value of the transformed mean parameters at `p`\n\n Returns\n -------\n `p` : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(z`) = `z`**(1/`power`)\n '
if (self.power == 1):
return z
else:
return np.power(z, (1.0 / self.power)) | 766,649,145,307,345,200 | Inverse of the power transform link function
Parameters
----------
`z` : array_like
Value of the transformed mean parameters at `p`
Returns
-------
`p` : ndarray
Mean parameters
Notes
-----
g^(-1)(z`) = `z`**(1/`power`) | statsmodels/genmod/families/links.py | inverse | BioGeneTools/statsmodels | python | def inverse(self, z):
'\n Inverse of the power transform link function\n\n Parameters\n ----------\n `z` : array_like\n Value of the transformed mean parameters at `p`\n\n Returns\n -------\n `p` : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(z`) = `z`**(1/`power`)\n '
if (self.power == 1):
return z
else:
return np.power(z, (1.0 / self.power)) |
def deriv(self, p):
"\n Derivative of the power transform\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n Derivative of power transform of `p`\n\n Notes\n -----\n g'(`p`) = `power` * `p`**(`power` - 1)\n "
if (self.power == 1):
return np.ones_like(p)
else:
return (self.power * np.power(p, (self.power - 1))) | -4,093,614,081,125,798,000 | Derivative of the power transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
Derivative of power transform of `p`
Notes
-----
g'(`p`) = `power` * `p`**(`power` - 1) | statsmodels/genmod/families/links.py | deriv | BioGeneTools/statsmodels | python | def deriv(self, p):
"\n Derivative of the power transform\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n Derivative of power transform of `p`\n\n Notes\n -----\n g'(`p`) = `power` * `p`**(`power` - 1)\n "
if (self.power == 1):
return np.ones_like(p)
else:
return (self.power * np.power(p, (self.power - 1))) |
def deriv2(self, p):
"\n Second derivative of the power transform\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g''(p) : ndarray\n Second derivative of the power transform of `p`\n\n Notes\n -----\n g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2)\n "
if (self.power == 1):
return np.zeros_like(p)
else:
return ((self.power * (self.power - 1)) * np.power(p, (self.power - 2))) | -2,521,458,613,253,394,400 | Second derivative of the power transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
Second derivative of the power transform of `p`
Notes
-----
g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2) | statsmodels/genmod/families/links.py | deriv2 | BioGeneTools/statsmodels | python | def deriv2(self, p):
"\n Second derivative of the power transform\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g(p) : ndarray\n Second derivative of the power transform of `p`\n\n Notes\n -----\n g(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2)\n "
if (self.power == 1):
return np.zeros_like(p)
else:
return ((self.power * (self.power - 1)) * np.power(p, (self.power - 2))) |
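A small check of the Power link formulas above, using the square-root link as a concrete case:

import numpy as np
from statsmodels.genmod.families.links import Power

sqrt_link = Power(power=0.5)          # g(p) = sqrt(p)
p = np.array([0.25, 1.0, 4.0])
assert np.allclose(sqrt_link.inverse(sqrt_link(p)), p)
assert np.allclose(sqrt_link.deriv(p), 0.5 / np.sqrt(p))     # power * p**(power - 1)
assert np.allclose(sqrt_link.deriv2(p), -0.25 * p ** -1.5)   # power * (power-1) * p**(power-2)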
def inverse_deriv(self, z):
"\n Derivative of the inverse of the power transform\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the power transform\n function\n "
if (self.power == 1):
return np.ones_like(z)
else:
return (np.power(z, ((1 - self.power) / self.power)) / self.power) | 8,179,986,447,003,525,000 | Derivative of the inverse of the power transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the power transform
function | statsmodels/genmod/families/links.py | inverse_deriv | BioGeneTools/statsmodels | python | def inverse_deriv(self, z):
"\n Derivative of the inverse of the power transform\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the power transform\n function\n "
if (self.power == 1):
return np.ones_like(z)
else:
return (np.power(z, ((1 - self.power) / self.power)) / self.power) |
def inverse_deriv2(self, z):
"\n Second derivative of the inverse of the power transform\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the power transform\n function\n "
if (self.power == 1):
return np.zeros_like(z)
else:
return (((1 - self.power) * np.power(z, ((1 - (2 * self.power)) / self.power))) / (self.power ** 2)) | 4,198,307,413,071,056,000 | Second derivative of the inverse of the power transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the power transform
function | statsmodels/genmod/families/links.py | inverse_deriv2 | BioGeneTools/statsmodels | python | def inverse_deriv2(self, z):
"\n Second derivative of the inverse of the power transform\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the power transform\n function\n "
if (self.power == 1):
return np.zeros_like(z)
else:
return (((1 - self.power) * np.power(z, ((1 - (2 * self.power)) / self.power))) / (self.power ** 2)) |
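The closed form above can be sanity-checked against the analytic second derivative of the inverse; for power=2 the inverse is sqrt(z), whose second derivative is -z**-1.5 / 4:

import numpy as np
from statsmodels.genmod.families.links import Power

link = Power(power=2.0)               # g(p) = p**2, so g^{-1}(z) = sqrt(z)
z = np.array([0.5, 2.0, 9.0])
assert np.allclose(link.inverse_deriv2(z), -0.25 * z ** -1.5)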
def __call__(self, p, **extra):
'\n Log transform link function\n\n Parameters\n ----------\n x : array_like\n Mean parameters\n\n Returns\n -------\n z : ndarray\n log(x)\n\n Notes\n -----\n g(p) = log(p)\n '
x = self._clean(p)
return np.log(x) | -9,197,943,716,248,332,000 | Log transform link function
Parameters
----------
x : array_like
Mean parameters
Returns
-------
z : ndarray
log(x)
Notes
-----
g(p) = log(p) | statsmodels/genmod/families/links.py | __call__ | BioGeneTools/statsmodels | python | def __call__(self, p, **extra):
'\n Log transform link function\n\n Parameters\n ----------\n x : array_like\n Mean parameters\n\n Returns\n -------\n z : ndarray\n log(x)\n\n Notes\n -----\n g(p) = log(p)\n '
x = self._clean(p)
return np.log(x) |
def inverse(self, z):
'\n Inverse of log transform link function\n\n Parameters\n ----------\n z : ndarray\n The inverse of the link function at `p`\n\n Returns\n -------\n p : ndarray\n The mean probabilities given the value of the inverse `z`\n\n Notes\n -----\n g^{-1}(z) = exp(z)\n '
return np.exp(z) | -7,004,327,152,976,738,000 | Inverse of log transform link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
p : ndarray
The mean probabilities given the value of the inverse `z`
Notes
-----
g^{-1}(z) = exp(z) | statsmodels/genmod/families/links.py | inverse | BioGeneTools/statsmodels | python | def inverse(self, z):
'\n Inverse of log transform link function\n\n Parameters\n ----------\n z : ndarray\n The inverse of the link function at `p`\n\n Returns\n -------\n p : ndarray\n The mean probabilities given the value of the inverse `z`\n\n Notes\n -----\n g^{-1}(z) = exp(z)\n '
return np.exp(z) |
def deriv(self, p):
"\n Derivative of log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n derivative of log transform of x\n\n Notes\n -----\n g'(x) = 1/x\n "
p = self._clean(p)
return (1.0 / p) | 4,113,575,122,088,470,500 | Derivative of log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
derivative of log transform of x
Notes
-----
g'(x) = 1/x | statsmodels/genmod/families/links.py | deriv | BioGeneTools/statsmodels | python | def deriv(self, p):
"\n Derivative of log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n derivative of log transform of x\n\n Notes\n -----\n g'(x) = 1/x\n "
p = self._clean(p)
return (1.0 / p) |
def deriv2(self, p):
"\n Second derivative of the log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g''(p) : ndarray\n Second derivative of log transform of x\n\n Notes\n -----\n g''(x) = -1/x^2\n "
p = self._clean(p)
return ((- 1.0) / (p ** 2)) | 5,554,803,495,933,919,000 | Second derivative of the log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
Second derivative of log transform of x
Notes
-----
g''(x) = -1/x^2 | statsmodels/genmod/families/links.py | deriv2 | BioGeneTools/statsmodels | python | def deriv2(self, p):
"\n Second derivative of the log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g(p) : ndarray\n Second derivative of log transform of x\n\n Notes\n -----\n g(x) = -1/x^2\n "
p = self._clean(p)
return ((- 1.0) / (p ** 2)) |
def inverse_deriv(self, z):
"\n Derivative of the inverse of the log transform link function\n\n Parameters\n ----------\n z : ndarray\n The inverse of the link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the log function,\n the exponential function\n "
return np.exp(z) | -7,386,214,934,868,334,000 | Derivative of the inverse of the log transform link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the log function,
the exponential function | statsmodels/genmod/families/links.py | inverse_deriv | BioGeneTools/statsmodels | python | def inverse_deriv(self, z):
"\n Derivative of the inverse of the log transform link function\n\n Parameters\n ----------\n z : ndarray\n The inverse of the link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the log function,\n the exponential function\n "
return np.exp(z) |
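A round-trip and derivative check for the Log link methods above:

import numpy as np
from statsmodels.genmod.families.links import Log

log_link = Log()
mu = np.array([0.5, 1.0, 3.0])
z = log_link(mu)
assert np.allclose(log_link.inverse(z), mu)
assert np.allclose(log_link.deriv(mu), 1 / mu)
assert np.allclose(log_link.deriv2(mu), -1 / mu ** 2)
assert np.allclose(log_link.inverse_deriv(z), mu)   # d/dz exp(z) = exp(z) = mu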
def __call__(self, p):
'\n CDF link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n z : ndarray\n (ppf) inverse of CDF transform of p\n\n Notes\n -----\n g(`p`) = `dbn`.ppf(`p`)\n '
p = self._clean(p)
return self.dbn.ppf(p) | -3,896,929,312,686,040,600 | CDF link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : ndarray
(ppf) inverse of CDF transform of p
Notes
-----
g(`p`) = `dbn`.ppf(`p`) | statsmodels/genmod/families/links.py | __call__ | BioGeneTools/statsmodels | python | def __call__(self, p):
'\n CDF link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n z : ndarray\n (ppf) inverse of CDF transform of p\n\n Notes\n -----\n g(`p`) = `dbn`.ppf(`p`)\n '
p = self._clean(p)
return self.dbn.ppf(p) |
def inverse(self, z):
'\n The inverse of the CDF link\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the link function at `p`\n\n Returns\n -------\n p : ndarray\n Mean probabilities. The value of the inverse of CDF link of `z`\n\n Notes\n -----\n g^(-1)(`z`) = `dbn`.cdf(`z`)\n '
return self.dbn.cdf(z) | -6,597,804,467,319,204,000 | The inverse of the CDF link
Parameters
----------
z : array_like
The value of the inverse of the link function at `p`
Returns
-------
p : ndarray
Mean probabilities. The value of the inverse of CDF link of `z`
Notes
-----
g^(-1)(`z`) = `dbn`.cdf(`z`) | statsmodels/genmod/families/links.py | inverse | BioGeneTools/statsmodels | python | def inverse(self, z):
'\n The inverse of the CDF link\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the link function at `p`\n\n Returns\n -------\n p : ndarray\n Mean probabilities. The value of the inverse of CDF link of `z`\n\n Notes\n -----\n g^(-1)(`z`) = `dbn`.cdf(`z`)\n '
return self.dbn.cdf(z) |
def deriv(self, p):
"\n Derivative of CDF link\n\n Parameters\n ----------\n p : array_like\n mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of CDF transform at `p`\n\n Notes\n -----\n g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`))\n "
p = self._clean(p)
return (1.0 / self.dbn.pdf(self.dbn.ppf(p))) | -9,205,382,292,249,141,000 | Derivative of CDF link
Parameters
----------
p : array_like
mean parameters
Returns
-------
g'(p) : ndarray
The derivative of CDF transform at `p`
Notes
-----
g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`)) | statsmodels/genmod/families/links.py | deriv | BioGeneTools/statsmodels | python | def deriv(self, p):
"\n Derivative of CDF link\n\n Parameters\n ----------\n p : array_like\n mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of CDF transform at `p`\n\n Notes\n -----\n g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`))\n "
p = self._clean(p)
return (1.0 / self.dbn.pdf(self.dbn.ppf(p))) |
def deriv2(self, p):
"\n Second derivative of the link function g''(p)\n\n implemented through numerical differentiation\n "
p = self._clean(p)
linpred = self.dbn.ppf(p)
return ((- self.inverse_deriv2(linpred)) / (self.dbn.pdf(linpred) ** 3)) | 424,918,090,788,082,240 | Second derivative of the link function g''(p)
implemented through numerical differentiation | statsmodels/genmod/families/links.py | deriv2 | BioGeneTools/statsmodels | python | def deriv2(self, p):
"\n Second derivative of the link function g(p)\n\n implemented through numerical differentiation\n "
p = self._clean(p)
linpred = self.dbn.ppf(p)
return ((- self.inverse_deriv2(linpred)) / (self.dbn.pdf(linpred) ** 3)) |
def deriv2_numdiff(self, p):
"\n Second derivative of the link function g''(p)\n\n implemented through numerical differentiation\n "
from statsmodels.tools.numdiff import _approx_fprime_scalar
p = np.atleast_1d(p)
return _approx_fprime_scalar(p, self.deriv, centered=True) | -7,543,261,515,321,102,000 | Second derivative of the link function g''(p)
implemented through numerical differentiation | statsmodels/genmod/families/links.py | deriv2_numdiff | BioGeneTools/statsmodels | python | def deriv2_numdiff(self, p):
"\n Second derivative of the link function g(p)\n\n implemented through numerical differentiation\n "
from statsmodels.tools.numdiff import _approx_fprime_scalar
p = np.atleast_1d(p)
return _approx_fprime_scalar(p, self.deriv, centered=True) |
def inverse_deriv(self, z):
"\n Derivative of the inverse link function\n\n Parameters\n ----------\n z : ndarray\n The inverse of the link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the logit function.\n This is just the pdf in a CDFLink,\n "
return self.dbn.pdf(z) | -7,022,887,122,084,683,000 | Derivative of the inverse link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the logit function.
This is just the pdf in a CDFLink, | statsmodels/genmod/families/links.py | inverse_deriv | BioGeneTools/statsmodels | python | def inverse_deriv(self, z):
"\n Derivative of the inverse link function\n\n Parameters\n ----------\n z : ndarray\n The inverse of the link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The value of the derivative of the inverse of the logit function.\n This is just the pdf in a CDFLink,\n "
return self.dbn.pdf(z) |
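Instantiated with its default distribution (scipy.stats.norm in statsmodels), CDFLink is the probit link; the sketch below checks that deriv and inverse_deriv reduce to the normal pdf relations documented above:

import numpy as np
from scipy import stats
from statsmodels.genmod.families.links import CDFLink

probit = CDFLink()                    # dbn defaults to scipy.stats.norm
p = np.array([0.1, 0.5, 0.9])
z = probit(p)                         # norm.ppf(p)
assert np.allclose(probit.inverse(z), p)
assert np.allclose(probit.inverse_deriv(z), stats.norm.pdf(z))
assert np.allclose(probit.deriv(p), 1 / stats.norm.pdf(z))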
def inverse_deriv2(self, z):
"\n Second derivative of the inverse link function g^(-1)(z).\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g^(-1)''(z) : ndarray\n The value of the second derivative of the inverse of the link\n function\n\n Notes\n -----\n This method should be overwritten by subclasses.\n\n The inherited method is implemented through numerical differentiation.\n "
from statsmodels.tools.numdiff import _approx_fprime_scalar
z = np.atleast_1d(z)
return _approx_fprime_scalar(z, self.inverse_deriv, centered=True) | -7,714,157,241,861,151,000 | Second derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)''(z) : ndarray
The value of the second derivative of the inverse of the link
function
Notes
-----
This method should be overwritten by subclasses.
The inherited method is implemented through numerical differentiation. | statsmodels/genmod/families/links.py | inverse_deriv2 | BioGeneTools/statsmodels | python | def inverse_deriv2(self, z):
"\n Second derivative of the inverse link function g^(-1)(z).\n\n Parameters\n ----------\n z : array_like\n `z` is usually the linear predictor for a GLM or GEE model.\n\n Returns\n -------\n g^(-1)(z) : ndarray\n The value of the second derivative of the inverse of the link\n function\n\n Notes\n -----\n This method should be overwritten by subclasses.\n\n The inherited method is implemented through numerical differentiation.\n "
from statsmodels.tools.numdiff import _approx_fprime_scalar
z = np.atleast_1d(z)
return _approx_fprime_scalar(z, self.inverse_deriv, centered=True) |
def inverse_deriv2(self, z):
'\n Second derivative of the inverse link function\n\n This is the derivative of the pdf in a CDFLink\n\n '
return ((- z) * self.dbn.pdf(z)) | -2,904,610,982,194,967,600 | Second derivative of the inverse link function
This is the derivative of the pdf in a CDFLink | statsmodels/genmod/families/links.py | inverse_deriv2 | BioGeneTools/statsmodels | python | def inverse_deriv2(self, z):
'\n Second derivative of the inverse link function\n\n This is the derivative of the pdf in a CDFLink\n\n '
return ((- z) * self.dbn.pdf(z)) |
def deriv2(self, p):
"\n Second derivative of the link function g''(p)\n\n "
p = self._clean(p)
linpred = self.dbn.ppf(p)
return (linpred / (self.dbn.pdf(linpred) ** 2)) | -5,048,292,477,726,417,000 | Second derivative of the link function g''(p) | statsmodels/genmod/families/links.py | deriv2 | BioGeneTools/statsmodels | python | def deriv2(self, p):
"\n \n\n "
p = self._clean(p)
linpred = self.dbn.ppf(p)
return (linpred / (self.dbn.pdf(linpred) ** 2)) |
def deriv2(self, p):
"\n Second derivative of the Cauchy link function.\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n g''(p) : ndarray\n Value of the second derivative of Cauchy link function at `p`\n "
p = self._clean(p)
a = (np.pi * (p - 0.5))
d2 = (((2 * (np.pi ** 2)) * np.sin(a)) / (np.cos(a) ** 3))
return d2 | 2,386,467,962,168,369,000 | Second derivative of the Cauchy link function.
Parameters
----------
p : array_like
Probabilities
Returns
-------
g''(p) : ndarray
Value of the second derivative of Cauchy link function at `p` | statsmodels/genmod/families/links.py | deriv2 | BioGeneTools/statsmodels | python | def deriv2(self, p):
"\n Second derivative of the Cauchy link function.\n\n Parameters\n ----------\n p : array_like\n Probabilities\n\n Returns\n -------\n g(p) : ndarray\n Value of the second derivative of Cauchy link function at `p`\n "
p = self._clean(p)
a = (np.pi * (p - 0.5))
d2 = (((2 * (np.pi ** 2)) * np.sin(a)) / (np.cos(a) ** 3))
return d2 |
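The closed form above can be compared against the generic numerical fallback inherited from CDFLink (the deriv2_numdiff helper shown earlier); points away from p = 0.5 avoid comparing values near zero:

import numpy as np
from statsmodels.genmod.families.links import Cauchy

cauchy = Cauchy()
p = np.array([0.2, 0.4, 0.8])
assert np.allclose(cauchy.deriv2(p), cauchy.deriv2_numdiff(p), rtol=1e-5)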
def __call__(self, p):
'\n C-Log-Log transform link function\n\n Parameters\n ----------\n p : ndarray\n Mean parameters\n\n Returns\n -------\n z : ndarray\n The CLogLog transform of `p`\n\n Notes\n -----\n g(p) = log(-log(1-p))\n '
p = self._clean(p)
return np.log((- np.log((1 - p)))) | 1,949,863,504,250,344,400 | C-Log-Log transform link function
Parameters
----------
p : ndarray
Mean parameters
Returns
-------
z : ndarray
The CLogLog transform of `p`
Notes
-----
g(p) = log(-log(1-p)) | statsmodels/genmod/families/links.py | __call__ | BioGeneTools/statsmodels | python | def __call__(self, p):
'\n C-Log-Log transform link function\n\n Parameters\n ----------\n p : ndarray\n Mean parameters\n\n Returns\n -------\n z : ndarray\n The CLogLog transform of `p`\n\n Notes\n -----\n g(p) = log(-log(1-p))\n '
p = self._clean(p)
return np.log((- np.log((1 - p)))) |
def inverse(self, z):
'\n Inverse of C-Log-Log transform link function\n\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the CLogLog link function at `p`\n\n Returns\n -------\n p : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(`z`) = 1-exp(-exp(`z`))\n '
return (1 - np.exp((- np.exp(z)))) | 1,582,492,503,606,269,000 | Inverse of C-Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(`z`) = 1-exp(-exp(`z`)) | statsmodels/genmod/families/links.py | inverse | BioGeneTools/statsmodels | python | def inverse(self, z):
'\n Inverse of C-Log-Log transform link function\n\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the CLogLog link function at `p`\n\n Returns\n -------\n p : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(`z`) = 1-exp(-exp(`z`))\n '
return (1 - np.exp((- np.exp(z)))) |
def deriv(self, p):
"\n Derivative of C-Log-Log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of the CLogLog transform link function\n\n Notes\n -----\n g'(p) = - 1 / ((p-1)*log(1-p))\n "
p = self._clean(p)
return (1.0 / ((p - 1) * np.log((1 - p)))) | -2,819,306,609,872,138,000 | Derivative of C-Log-Log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the CLogLog transform link function
Notes
-----
g'(p) = - 1 / ((p-1)*log(1-p)) | statsmodels/genmod/families/links.py | deriv | BioGeneTools/statsmodels | python | def deriv(self, p):
"\n Derivative of C-Log-Log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of the CLogLog transform link function\n\n Notes\n -----\n g'(p) = - 1 / ((p-1)*log(1-p))\n "
p = self._clean(p)
return (1.0 / ((p - 1) * np.log((1 - p)))) |
def deriv2(self, p):
"\n Second derivative of the C-Log-Log ink function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g''(p) : ndarray\n The second derivative of the CLogLog link function\n "
p = self._clean(p)
fl = np.log((1 - p))
d2 = ((- 1) / (((1 - p) ** 2) * fl))
d2 *= (1 + (1 / fl))
return d2 | 1,051,177,207,821,956,500 | Second derivative of the C-Log-Log ink function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the CLogLog link function | statsmodels/genmod/families/links.py | deriv2 | BioGeneTools/statsmodels | python | def deriv2(self, p):
"\n Second derivative of the C-Log-Log ink function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g(p) : ndarray\n The second derivative of the CLogLog link function\n "
p = self._clean(p)
fl = np.log((1 - p))
d2 = ((- 1) / (((1 - p) ** 2) * fl))
d2 *= (1 + (1 / fl))
return d2 |
def inverse_deriv(self, z):
"\n Derivative of the inverse of the C-Log-Log transform link function\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the CLogLog link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The derivative of the inverse of the CLogLog link function\n "
return np.exp((z - np.exp(z))) | 1,415,103,664,460,894,500 | Derivative of the inverse of the C-Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The derivative of the inverse of the CLogLog link function | statsmodels/genmod/families/links.py | inverse_deriv | BioGeneTools/statsmodels | python | def inverse_deriv(self, z):
"\n Derivative of the inverse of the C-Log-Log transform link function\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the CLogLog link function at `p`\n\n Returns\n -------\n g^(-1)'(z) : ndarray\n The derivative of the inverse of the CLogLog link function\n "
return np.exp((z - np.exp(z))) |
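A check of the CLogLog relations above, including the inverse-function rule g'(p) * (g^{-1})'(g(p)) = 1:

import numpy as np
from statsmodels.genmod.families.links import CLogLog

cloglog = CLogLog()
p = np.array([0.1, 0.5, 0.9])
z = cloglog(p)                        # log(-log(1 - p))
assert np.allclose(cloglog.inverse(z), p)
assert np.allclose(cloglog.deriv(p) * cloglog.inverse_deriv(z), 1.0)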
def __call__(self, p):
'\n Log-Log transform link function\n\n Parameters\n ----------\n p : ndarray\n Mean parameters\n\n Returns\n -------\n z : ndarray\n The LogLog transform of `p`\n\n Notes\n -----\n g(p) = -log(-log(p))\n '
p = self._clean(p)
return (- np.log((- np.log(p)))) | -5,263,666,381,483,923,000 | Log-Log transform link function
Parameters
----------
p : ndarray
Mean parameters
Returns
-------
z : ndarray
The LogLog transform of `p`
Notes
-----
g(p) = -log(-log(p)) | statsmodels/genmod/families/links.py | __call__ | BioGeneTools/statsmodels | python | def __call__(self, p):
'\n Log-Log transform link function\n\n Parameters\n ----------\n p : ndarray\n Mean parameters\n\n Returns\n -------\n z : ndarray\n The LogLog transform of `p`\n\n Notes\n -----\n g(p) = -log(-log(p))\n '
p = self._clean(p)
return (- np.log((- np.log(p)))) |
def inverse(self, z):
'\n Inverse of Log-Log transform link function\n\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the LogLog link function at `p`\n\n Returns\n -------\n p : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(`z`) = exp(-exp(-`z`))\n '
return np.exp((- np.exp((- z)))) | 455,481,010,670,740,100 | Inverse of Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(`z`) = exp(-exp(-`z`)) | statsmodels/genmod/families/links.py | inverse | BioGeneTools/statsmodels | python | def inverse(self, z):
'\n Inverse of Log-Log transform link function\n\n\n Parameters\n ----------\n z : array_like\n The value of the inverse of the LogLog link function at `p`\n\n Returns\n -------\n p : ndarray\n Mean parameters\n\n Notes\n -----\n g^(-1)(`z`) = exp(-exp(-`z`))\n '
return np.exp((- np.exp((- z)))) |
def deriv(self, p):
"\n Derivative of Log-Log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of the LogLog transform link function\n\n Notes\n -----\n g'(p) = - 1 /(p * log(p))\n "
p = self._clean(p)
return ((- 1.0) / (p * np.log(p))) | 2,950,546,271,047,227,400 | Derivative of Log-Log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the LogLog transform link function
Notes
-----
g'(p) = - 1 /(p * log(p)) | statsmodels/genmod/families/links.py | deriv | BioGeneTools/statsmodels | python | def deriv(self, p):
"\n Derivative of Log-Log transform link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g'(p) : ndarray\n The derivative of the LogLog transform link function\n\n Notes\n -----\n g'(p) = - 1 /(p * log(p))\n "
p = self._clean(p)
return ((- 1.0) / (p * np.log(p))) |
def deriv2(self, p):
"\n Second derivative of the Log-Log link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g''(p) : ndarray\n The second derivative of the LogLog link function\n "
p = self._clean(p)
d2 = ((1 + np.log(p)) / ((p * np.log(p)) ** 2))
return d2 | 8,769,241,807,912,562,000 | Second derivative of the Log-Log link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the LogLog link function | statsmodels/genmod/families/links.py | deriv2 | BioGeneTools/statsmodels | python | def deriv2(self, p):
"\n Second derivative of the Log-Log link function\n\n Parameters\n ----------\n p : array_like\n Mean parameters\n\n Returns\n -------\n g(p) : ndarray\n The second derivative of the LogLog link function\n "
p = self._clean(p)
d2 = ((1 + np.log(p)) / ((p * np.log(p)) ** 2))
return d2 |
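A closing check of the LogLog formulas above against their stated closed forms:

import numpy as np
from statsmodels.genmod.families.links import LogLog

loglog = LogLog()
p = np.array([0.1, 0.5, 0.9])
z = loglog(p)                         # -log(-log(p))
assert np.allclose(loglog.inverse(z), p)
assert np.allclose(loglog.deriv(p), -1 / (p * np.log(p)))
assert np.allclose(loglog.deriv2(p), (1 + np.log(p)) / (p * np.log(p)) ** 2)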