from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """
    Viterbi Algorithm: finds the most likely sequence of hidden states given
    the observations. https://en.wikipedia.org/wiki/Viterbi_algorithm
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
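
# --- Added usage sketch (not part of the original module) ---
# A minimal two-state HMM in the dict shapes the validators above expect;
# the numbers are the classic Wikipedia example and are illustrative only.
#
#   observations = ["normal", "cold", "dizzy"]
#   states = ["healthy", "sick"]
#   initial = {"healthy": 0.6, "sick": 0.4}
#   transition = {
#       "healthy": {"healthy": 0.7, "sick": 0.3},
#       "sick": {"healthy": 0.4, "sick": 0.6},
#   }
#   emission = {
#       "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#       "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#   }
#   viterbi(observations, states, initial, transition, emission)
#   # -> ["healthy", "healthy", "sick"] for this example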
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the API URL redirects to the real download location."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
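
# --- Added invocation sketch (not part of the original script) ---
# The script filename, run id and paths below are placeholder assumptions:
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 123456789 \
#       --output_dir ci_errors \
#       --token "$GITHUB_TOKEN"
#
# It writes job_links.json, artifacts.json, errors.json plus the two markdown
# tables (reduced_by_error.txt, reduced_by_model.txt) into --output_dir.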
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
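
# --- Added usage sketch (not part of the original module) ---
# Loading one of the checkpoints mapped above (network access assumed):
#
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   encoded = tokenizer("ELECTRA detects replaced tokens.", return_tensors="pt")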
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    r"""
    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
    """
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
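
# --- Added usage sketch (not part of the original module) ---
# How the processor is typically wired up; the checkpoint name and the silent
# one-second clip are illustrative assumptions:
#
#   import numpy as np
#   from transformers import ClapProcessor
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(
#       text=["a dog barking"],
#       audios=[np.zeros(48_000, dtype=np.float32)],
#       sampling_rate=48_000,
#       return_tensors="pt",
#   )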
import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
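
# --- Added usage sketch (not part of the original module) ---
# Building a randomly initialized DETR model from this config; the override
# is illustrative:
#
#   from transformers import DetrConfig, DetrModel
#
#   config = DetrConfig(num_queries=50)
#   model = DetrModel(config)  # random weights, not the pretrained checkpoint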
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq HuBERT checkpoint into the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
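
# --- Added invocation sketch (not part of the original script) ---
# The script filename and all paths are placeholder assumptions; fairseq must
# be installed. `--not_finetuned` selects the plain HubertModel branch above:
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path /path/to/hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned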
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
# Algorithm for the pigeonhole sorting


def pigeonhole_sort(a):
    """
    >>> a = [8, 3, 2, 7, 4, 6, 8]
    >>> b = sorted(a)
    >>> pigeonhole_sort(a)
    >>> a == b
    True
    """
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(n) for n in a]))


if __name__ == "__main__":
    main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
import torch

from diffusers import DiffusionPipeline


class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # scheduler_output - scheduler_output is always zero, so the pipeline
        # deterministically returns a tensor of ones (used as a test fixture).
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
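
# --- Added usage sketch (not part of the original file) ---
# Wiring the pipeline with a small random UNet and a DDPM scheduler; the
# component choices are illustrative assumptions:
#
#   from diffusers import UNet2DModel, DDPMScheduler
#
#   unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
#   scheduler = DDPMScheduler()
#   pipeline = CustomLocalPipeline(unet=unet, scheduler=scheduler)
#   out = pipeline()  # all-ones tensor of shape (1, 3, 32, 32) by construction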
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
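
# --- Added usage sketch (not part of the original module) ---
# Declaring a schema with the feature types re-exported above:
#
#   from datasets import Dataset, ClassLabel, Features, Value
#
#   features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
#   ds = Dataset.from_dict({"text": ["good", "bad"], "label": [1, 0]}, features=features)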
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self) -> None:
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self) -> None:
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 693 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` places when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 715 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with the shift-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Shift-and-add multiplication of a and b, reduced modulo `modulus` at every step."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
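

# Minimal usage demo (added; assumes the two helpers above). Both results should
# match Python's built-in * and % operators.
if __name__ == "__main__":
    assert binary_multiply(12, 7) == 84
    assert binary_mod_multiply(12, 7, 5) == 84 % 5
    print(binary_multiply(12, 7), binary_mod_multiply(12, 7, 5))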
| 693 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize this instance to a Python dictionary, including the nested ESMFold config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary as a tuple."""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
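

# Minimal usage sketch (illustrative, not part of the original module):
#
#   from transformers import EsmConfig
#
#   config = EsmConfig(vocab_size=33, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
#   print(config.to_dict()["hidden_size"])  # 64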
| 716 |
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self) -> str:
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Return the `order`-th derivative of `func` evaluated at `position` via dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
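    # Added check (assumes the implementation above): the second derivative of
    # f(y) = y**6 is 30 * y**4, so the printed value should be 196830.
    assert differentiate(f, 9, 2) == 30 * 9 ** 4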
| 693 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float16, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506]
        )
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 717 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 693 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
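

# Example invocation (illustrative; paths and the task name are placeholders):
#   python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag \
#     --max_seq_length 128 \
#     --do_train --do_eval \
#     --output_dir ./swag_output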
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 693 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Apply Tesseract OCR on a document image and return recognized words plus normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    """Image processor for document images: resize, rescale, normalize, optional Tesseract OCR."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale an image's pixel values by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
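

# Minimal usage sketch (illustrative, not part of the original module). With
# apply_ocr=False no Tesseract binary is required:
#
#   import numpy as np
#   from transformers import LayoutLMv3ImageProcessor
#
#   processor = LayoutLMv3ImageProcessor(apply_ocr=False)
#   image = (np.random.rand(300, 400, 3) * 255).astype(np.uint8)  # HWC uint8
#   encoding = processor(images=image, return_tensors="np")
#   print(encoding["pixel_values"].shape)  # expected (1, 3, 224, 224)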
| 719 |
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test: True iff the Mersenne number 2**p - 1 is prime."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
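    # Added demo (assumes lucas_lehmer_test above): Mersenne prime exponents up to 31.
    print([p for p in range(2, 32) if lucas_lehmer_test(p)])  # [2, 3, 5, 7, 13, 17, 19, 31]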
| 693 | 0 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string form."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print example conversions."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
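    # Added sanity check (assumes decimal_to_octal above): Python can parse the
    # returned string back with base 8.
    assert int(decimal_to_octal(216), 8) == 216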
| 720 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
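

# Minimal usage sketch (illustrative, not part of the original module):
#
#   from transformers import LlamaConfig
#
#   config = LlamaConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4,
#                        rope_scaling={"type": "linear", "factor": 2.0})
#   print(config.rope_scaling)  # a bad `type` or a factor <= 1.0 raises ValueError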
| 693 | 0 |
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: str = FLAX_MODEL_MAPPING
lowerCAmelCase : List[Any] =auto_class_update(FlaxAutoModel)
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: List[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowerCAmelCase : int =auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase : Union[str, Any] =auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: Optional[int] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase : Union[str, Any] =auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: Optional[int] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase : Dict =auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: Optional[Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase : Dict =auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowerCAmelCase : str =auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: Optional[Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCAmelCase : Dict =auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowerCAmelCase : List[str] =auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowerCAmelCase : Optional[int] =auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: Optional[int] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase : List[Any] =auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: List[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase : Dict =auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class _a ( _BaseAutoModelClass ):
_UpperCamelCase: List[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowerCAmelCase : Optional[Any] =auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
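# Usage sketch (not part of the original file): assuming this module lives inside a
# working transformers installation with Flax available, an auto class picks the
# right architecture from the checkpoint's config. The checkpoint name is illustrative.
#
#     from transformers import FlaxAutoModel
#
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")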
| 700 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
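# Usage sketch (not part of the original file): building a default Swin-Tiny style
# config and reading the attributes derived above from embed_dim and depths.
#
#     config = SwinConfig()
#     assert config.num_layers == 4
#     assert config.hidden_size == 96 * 2 ** 3  # 768, channel dim after the last stage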
| 693 | 0 |
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number, with fibonacci(1) == 0 and fibonacci(2) == 1."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Project Euler problem 25: index of the first Fibonacci term with n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
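# Worked check (not in the original): under this module's indexing (fibonacci(1) == 0),
# the first Fibonacci term with 3 digits is fibonacci(12) == 144, so:
#
#     assert solution(3) == 12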
| 701 |
# Pinned dependency table: maps each bare package name to its full version specifier.
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
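# Usage sketch (not from the original file): a table like this is typically consumed
# when assembling install requirements, e.g.:
#
#     install_requires = [deps[pkg] for pkg in ("numpy", "packaging", "tokenizers")]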
| 693 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Dict = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__lowerCamelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__lowerCamelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _snake_case ( self ) -> int:
lowerCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
lowerCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__lowerCamelCase )
lowerCAmelCase : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__lowerCamelCase )
lowerCAmelCase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
lowerCAmelCase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
lowerCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Optional[int] = self.get_tokenizer()
lowerCAmelCase : List[str] = '''Encode this sequence.'''
lowerCAmelCase : Dict = tokenizer.byte_encoder[''' '''.encode("""utf-8""" )[0]]
# Testing encoder arguments
lowerCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
lowerCAmelCase : str = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
lowerCAmelCase : Any = '''<mask>'''
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
lowerCAmelCase : Any = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
lowerCAmelCase : Optional[Any] = '''Encode <mask> sequence'''
lowerCAmelCase : Dict = '''Encode <mask>sequence'''
lowerCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase )
lowerCAmelCase : int = encoded.index(__lowerCamelCase )
lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase : List[str] = tokenizer.encode(__lowerCamelCase )
lowerCAmelCase : List[Any] = encoded.index(__lowerCamelCase )
lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self ) -> Optional[Any]:
pass
def _snake_case ( self ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
lowerCAmelCase : Any = '''A, <mask> AllenNLP sentence.'''
lowerCAmelCase : str = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
lowerCAmelCase : str = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowerCAmelCase : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowerCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__lowerCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def _snake_case ( self ) -> int:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
lowerCAmelCase : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCAmelCase : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __lowerCamelCase )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __lowerCamelCase )
self.assertEqual(post_processor_state["""trim_offsets"""] , __lowerCamelCase )
def _snake_case ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase : str = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase : List[str] = f"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
lowerCAmelCase : Dict = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
lowerCAmelCase : Any = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
lowerCAmelCase : Union[str, Any] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
lowerCAmelCase : Optional[Any] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
lowerCAmelCase : Optional[int] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
lowerCAmelCase : Optional[Any] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
lowerCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
lowerCAmelCase : int = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
lowerCAmelCase : List[str] = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
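# Usage sketch (not from the original file): with this lazy layout, importing a symbol
# only loads its submodule on first access, e.g.
#
#     from transformers.models.roformer import RoFormerConfig  # triggers the lazy import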
| 693 | 0 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Evaluate the gamma function at ``num`` by numerically integrating
    its integral definition from 0 to infinity."""
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    """Integrand of the gamma function: x**(z - 1) * e**(-x)."""
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
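# Quick check (not in the original): the gamma function interpolates the factorial,
# so gamma(5) should be very close to 4! = 24.
#
#     assert abs(gamma(5) - 24.0) < 1e-6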
| 703 |
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0, as a logical NOR gate does."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the truth table of the NOR gate."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
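# Equivalence note (not in the original): NOR is the negation of OR, so for bits a, b:
#
#     assert all(nor_gate(a, b) == int(not (a or b)) for a in (0, 1) for b in (0, 1))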
| 693 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure)
| 693 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase : Optional[Any] ="http://www.mocksite.com/file1.txt"
lowerCAmelCase : Tuple ="\"text\": [\"foo\", \"foo\"]"
lowerCAmelCase : Tuple ="6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    # Minimal stand-in for a `requests` response object.
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" ,[str, list, dict] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
import requests
monkeypatch.setattr(SCREAMING_SNAKE_CASE__ ,"""request""" ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[str] = URL
if issubclass(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Dict = url
elif issubclass(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Dict = [url]
elif issubclass(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Union[str, Any] = {"""train""": url}
lowerCAmelCase : Tuple = """dummy"""
lowerCAmelCase : Any = """downloads"""
lowerCAmelCase : Tuple = tmp_path
lowerCAmelCase : Union[str, Any] = DownloadConfig(
cache_dir=os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) ,use_etag=SCREAMING_SNAKE_CASE__ ,)
lowerCAmelCase : str = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE__ ,download_config=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Tuple = dl_manager.download(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Optional[Any] = [downloaded_paths]
lowerCAmelCase : List[str] = [urls]
elif isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
assert "train" in downloaded_paths.keys()
lowerCAmelCase : Dict = downloaded_paths.values()
lowerCAmelCase : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCAmelCase : Tuple = Path(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCAmelCase : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
lowerCAmelCase : List[Any] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
lowerCAmelCase : Optional[int] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" ,[str, list, dict] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = str(SCREAMING_SNAKE_CASE__ )
if issubclass(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = filename
elif issubclass(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Any = [filename]
elif issubclass(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[Any] = {"""train""": filename}
lowerCAmelCase : List[str] = """dummy"""
lowerCAmelCase : Tuple = xz_file.parent
lowerCAmelCase : int = """extracted"""
lowerCAmelCase : Any = DownloadConfig(
cache_dir=SCREAMING_SNAKE_CASE__ ,use_etag=SCREAMING_SNAKE_CASE__ ,)
lowerCAmelCase : List[str] = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE__ ,download_config=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = dl_manager.extract(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[Any] = [extracted_paths]
lowerCAmelCase : List[str] = [paths]
elif isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
assert "train" in extracted_paths.keys()
lowerCAmelCase : Any = extracted_paths.values()
lowerCAmelCase : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCAmelCase : Optional[int] = Path(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Dict = extracted_path.parts
assert parts[-1] == hash_url_to_filename(SCREAMING_SNAKE_CASE__ ,etag=SCREAMING_SNAKE_CASE__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCAmelCase : List[Any] = extracted_path.read_text()
lowerCAmelCase : Tuple = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" ,["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = request.getfixturevalue(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Tuple = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ) ,start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" ,["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = request.getfixturevalue(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ) ,start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ) ,start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    # `data_dir_with_hidden_files` is assumed to be a conftest fixture; the original
    # parameter name was lost in the source.
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
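# Usage sketch (not in the original): these tests are meant to run under pytest from
# the datasets repository root, where fixtures such as xz_file/text_file are defined
# in the test suite's conftest, e.g.:
#
#     pytest tests/test_download_manager.py -k "download or extract"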
| 705 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag that marks the arrow keys

KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from stdin, handling both Windows and POSIX terminals."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Get a character from the keyboard and translate it through the keymap."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_pix2struct'] = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pix2struct'] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
lowerCAmelCase : Optional[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 706 |
# Imports
import numpy as np
class IndexCalculation:
    """Compute vegetation indices from red, green, blue, red-edge and NIR band matrices."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch to the index function named by ``index`` after updating the band matrices."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
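# Usage sketch (not in the original): computing NDVI for a small patch of band data.
#
#     import numpy as np
#     bands = IndexCalculation(red=np.array([[0.20, 0.30], [0.25, 0.21]]),
#                              nir=np.array([[0.60, 0.70], [0.65, 0.66]]))
#     print(bands.calculation("NDVI"))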
| 693 | 0 |
import numpy as np
SQUARE = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class BifidCipher:
    # The row/column fractionation below is the Bifid cipher over the 5x5 Polybius
    # square defined above ("j" is merged into "i").
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based [row, column] pair of ``letter`` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based position [index1, index2] of the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Fractionate each letter into row and column numbers, read all rows then
        all columns, and regroup consecutive pairs into letters."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Reverse the Bifid encoding of ``message``."""
        message = message.lower()
        message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
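# Worked example (not in the original), computed by hand from the square above:
#
#     cipher = BifidCipher()
#     assert cipher.encode("testmessage") == "qtltbdxrxlk"
#     assert cipher.decode("qtltbdxrxlk") == "testmessage"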
| 707 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their URLs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the API URL first returns a redirect to
    the real download location."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in a directory."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error, keeping the failed tests that produced it."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None

    return model
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the error counts as a GitHub-flavored markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model error counts as a GitHub-flavored markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase : Dict =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase : Optional[int] =get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase : List[Any] ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase : str =k.find(' / ')
lowerCAmelCase : Any =k[index + len(' / ') :]
lowerCAmelCase : str =v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Any =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase : List[Any] =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase : str =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase : int =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Optional[int] =reduce_by_error(errors)
lowerCAmelCase : Tuple =reduce_by_model(errors)
lowerCAmelCase : Optional[Any] =make_github_table(reduced_by_error)
lowerCAmelCase : Union[str, Any] =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 693 | 0 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
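# Build the VideoMAEConfig for a checkpoint name, attaching the Kinetics-400 or Something-Something-v2 label mapping for fine-tuned models.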
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Any = VideoMAEConfig()
set_architecture_configs(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if "finetuned" not in model_name:
lowerCAmelCase : Optional[Any] = False
if "finetuned" in model_name:
lowerCAmelCase : Dict = """huggingface/label-files"""
if "kinetics" in model_name:
lowerCAmelCase : Union[str, Any] = 4_0_0
lowerCAmelCase : int = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
lowerCAmelCase : List[str] = 1_7_4
lowerCAmelCase : Any = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
lowerCAmelCase : Dict = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,repo_type="""dataset""" ) ,"""r""" ) )
lowerCAmelCase : Dict = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
lowerCAmelCase : Optional[Any] = idalabel
lowerCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
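# Override the default ("base") architecture hyperparameters for the "small", "large" and "huge" variants.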
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if "small" in model_name:
lowerCAmelCase : Union[str, Any] = 3_8_4
lowerCAmelCase : Optional[int] = 1_5_3_6
lowerCAmelCase : Union[str, Any] = 1_2
lowerCAmelCase : Optional[int] = 1_6
lowerCAmelCase : Optional[Any] = 1_2
lowerCAmelCase : List[Any] = 3
lowerCAmelCase : Tuple = 1_9_2
lowerCAmelCase : List[Any] = 7_6_8
elif "large" in model_name:
lowerCAmelCase : List[Any] = 1_0_2_4
lowerCAmelCase : Optional[int] = 4_0_9_6
lowerCAmelCase : Optional[int] = 2_4
lowerCAmelCase : Tuple = 1_6
lowerCAmelCase : List[Any] = 1_2
lowerCAmelCase : int = 8
lowerCAmelCase : Tuple = 5_1_2
lowerCAmelCase : Union[str, Any] = 2_0_4_8
elif "huge" in model_name:
lowerCAmelCase : Tuple = 1_2_8_0
lowerCAmelCase : Optional[Any] = 5_1_2_0
lowerCAmelCase : Dict = 3_2
lowerCAmelCase : Dict = 1_6
lowerCAmelCase : Any = 1_2
lowerCAmelCase : Tuple = 8
lowerCAmelCase : Optional[int] = 6_4_0
lowerCAmelCase : Optional[Any] = 2_5_6_0
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if "encoder." in name:
lowerCAmelCase : List[Any] = name.replace("""encoder.""" ,"""""" )
if "cls_token" in name:
lowerCAmelCase : Tuple = name.replace("""cls_token""" ,"""videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
lowerCAmelCase : List[Any] = name.replace("""decoder_pos_embed""" ,"""decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
lowerCAmelCase : Dict = name.replace("""pos_embed""" ,"""videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowerCAmelCase : str = name.replace("""patch_embed.proj""" ,"""videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowerCAmelCase : Any = name.replace("""patch_embed.norm""" ,"""videomae.embeddings.norm""" )
if "decoder.blocks" in name:
lowerCAmelCase : Union[str, Any] = name.replace("""decoder.blocks""" ,"""decoder.decoder_layers""" )
if "blocks" in name:
lowerCAmelCase : Optional[Any] = name.replace("""blocks""" ,"""videomae.encoder.layer""" )
if "attn.proj" in name:
lowerCAmelCase : Any = name.replace("""attn.proj""" ,"""attention.output.dense""" )
if "attn" in name and "bias" not in name:
lowerCAmelCase : Tuple = name.replace("""attn""" ,"""attention.self""" )
if "attn" in name:
lowerCAmelCase : Dict = name.replace("""attn""" ,"""attention.attention""" )
if "norm1" in name:
lowerCAmelCase : List[str] = name.replace("""norm1""" ,"""layernorm_before""" )
if "norm2" in name:
lowerCAmelCase : Optional[Any] = name.replace("""norm2""" ,"""layernorm_after""" )
if "mlp.fc1" in name:
lowerCAmelCase : Optional[Any] = name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase : Dict = name.replace("""mlp.fc2""" ,"""output.dense""" )
if "decoder_embed" in name:
lowerCAmelCase : Optional[Any] = name.replace("""decoder_embed""" ,"""decoder.decoder_embed""" )
if "decoder_norm" in name:
lowerCAmelCase : Union[str, Any] = name.replace("""decoder_norm""" ,"""decoder.decoder_norm""" )
if "decoder_pred" in name:
lowerCAmelCase : str = name.replace("""decoder_pred""" ,"""decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowerCAmelCase : List[str] = name.replace("""norm.weight""" ,"""videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowerCAmelCase : int = name.replace("""norm.bias""" ,"""videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
lowerCAmelCase : str = name.replace("""head""" ,"""classifier""" )
return name
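# Rename every key of the original state dict, splitting fused qkv projections into separate query/key/value weights.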
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if key.startswith("""encoder.""" ):
lowerCAmelCase : Optional[Any] = key.replace("""encoder.""" ,"""""" )
if "qkv" in key:
lowerCAmelCase : str = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
lowerCAmelCase : str = config.decoder_hidden_size
lowerCAmelCase : Dict = int(key_split[2] )
lowerCAmelCase : str = """decoder.decoder_layers."""
if "weight" in key:
lowerCAmelCase : List[Any] = val[:dim, :]
lowerCAmelCase : int = val[dim : dim * 2, :]
lowerCAmelCase : Union[str, Any] = val[-dim:, :]
else:
lowerCAmelCase : Optional[Any] = config.hidden_size
lowerCAmelCase : Dict = int(key_split[1] )
lowerCAmelCase : List[Any] = """videomae.encoder.layer."""
if "weight" in key:
lowerCAmelCase : Union[str, Any] = val[:dim, :]
lowerCAmelCase : Optional[Any] = val[dim : dim * 2, :]
lowerCAmelCase : int = val[-dim:, :]
else:
lowerCAmelCase : Optional[Any] = val
return orig_state_dict
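# Fetch the "eating spaghetti" sample clip as a list of frames, used to verify the converted model.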
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" ,filename="""eating_spaghetti.npy""" ,repo_type="""dataset""" )
lowerCAmelCase : Dict = np.load(SCREAMING_SNAKE_CASE__ )
return list(SCREAMING_SNAKE_CASE__ )
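# End-to-end conversion: download the original checkpoint from Google Drive, remap its weights, check the logits (and, for "videomae-base-short", the loss) against reference values, then optionally save and push the model.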
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = get_videomae_config(SCREAMING_SNAKE_CASE__ )
if "finetuned" in model_name:
lowerCAmelCase : Any = VideoMAEForVideoClassification(SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase : Tuple = VideoMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
# download original checkpoint, hosted on Google Drive
lowerCAmelCase : int = """pytorch_model.bin"""
gdown.cached_download(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,quiet=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Tuple = torch.load(SCREAMING_SNAKE_CASE__ ,map_location="""cpu""" )
if "model" in files:
lowerCAmelCase : Union[str, Any] = files["""model"""]
else:
lowerCAmelCase : List[Any] = files["""module"""]
lowerCAmelCase : Any = convert_state_dict(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# verify model on basic input
lowerCAmelCase : Union[str, Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
lowerCAmelCase : str = prepare_video()
lowerCAmelCase : Any = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors="""pt""" )
if "finetuned" not in model_name:
lowerCAmelCase : Tuple = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" ,filename="""bool_masked_pos.pt""" )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : str = model(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : int = outputs.logits
lowerCAmelCase : Tuple = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowerCAmelCase : Tuple = torch.Size([1, 4_0_0] )
lowerCAmelCase : List[Any] = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
lowerCAmelCase : Tuple = torch.Size([1, 1_7_4] )
lowerCAmelCase : Optional[int] = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
lowerCAmelCase : Any = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCAmelCase : List[str] = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
lowerCAmelCase : List[str] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCAmelCase : int = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowerCAmelCase : Optional[int] = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
lowerCAmelCase : List[str] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCAmelCase : Tuple = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowerCAmelCase : Any = torch.Size([1, 4_0_0] )
lowerCAmelCase : Dict = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowerCAmelCase : List[str] = torch.Size([1, 4_0_0] )
lowerCAmelCase : List[Any] = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowerCAmelCase : int = torch.Size([1, 4_0_0] )
lowerCAmelCase : Dict = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
lowerCAmelCase : Union[str, Any] = torch.Size([1, 4_0_0] )
lowerCAmelCase : List[str] = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
lowerCAmelCase : Union[str, Any] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCAmelCase : Tuple = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowerCAmelCase : str = torch.Size([1, 1_7_4] )
lowerCAmelCase : Optional[Any] = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
lowerCAmelCase : List[str] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCAmelCase : str = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowerCAmelCase : int = torch.Size([1, 1_7_4] )
lowerCAmelCase : Any = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1e-4 )
else:
print("""Logits:""" ,logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
lowerCAmelCase : List[Any] = outputs.loss
assert torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(SCREAMING_SNAKE_CASE__ ,organization="""nielsr""" )
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase : str =parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 708 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
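# Lazy module wiring for Autoformer: the configuration is always importable, the modeling classes only when torch is installed.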
lowerCAmelCase : Optional[int] ={
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =[
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 0 |
'''simple docstring'''
lowerCAmelCase : List[str] =[
(1_000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
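# Convert a Roman numeral string to an integer, handling subtractive pairs such as "IV" and "CM".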
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Any = {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0}
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Dict = 0
while place < len(lowerCAmelCase__ ):
if (place + 1 < len(lowerCAmelCase__ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
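# Convert a positive integer to a Roman numeral by greedily consuming the largest values in ROMAN.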
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = []
for arabic, roman in ROMAN:
(lowerCAmelCase) , (lowerCAmelCase) : int = divmod(lowerCAmelCase__ ,lowerCAmelCase__ )
result.append(roman * factor )
if number == 0:
break
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
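# DETR configuration: backbone choice, encoder/decoder transformer sizes, and Hungarian-matcher / loss coefficients.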
class _a ( snake_case_ ):
_UpperCamelCase: List[str] = "detr"
_UpperCamelCase: Dict = ["past_key_values"]
_UpperCamelCase: Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[int] = config_class.from_dict(lowercase_ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = None, None, None
lowerCAmelCase : Any = use_timm_backbone
lowerCAmelCase : int = backbone_config
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : Optional[Any] = num_queries
lowerCAmelCase : List[str] = d_model
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : Dict = encoder_layers
lowerCAmelCase : str = encoder_attention_heads
lowerCAmelCase : List[Any] = decoder_ffn_dim
lowerCAmelCase : List[Any] = decoder_layers
lowerCAmelCase : Union[str, Any] = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Optional[int] = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Tuple = encoder_layers
lowerCAmelCase : Optional[int] = auxiliary_loss
lowerCAmelCase : List[str] = position_embedding_type
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = use_pretrained_backbone
lowerCAmelCase : List[Any] = dilation
# Hungarian matcher
lowerCAmelCase : Tuple = class_cost
lowerCAmelCase : Union[str, Any] = bbox_cost
lowerCAmelCase : Optional[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : List[Any] = mask_loss_coefficient
lowerCAmelCase : Optional[int] = dice_loss_coefficient
lowerCAmelCase : Tuple = bbox_loss_coefficient
lowerCAmelCase : Dict = giou_loss_coefficient
lowerCAmelCase : str = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> int:
return self.encoder_attention_heads
@property
def _snake_case ( self ) -> int:
return self.d_model
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> Any:
return cls(backbone_config=lowercase_ , **lowercase_ )
def _snake_case ( self ) -> Dict[str, any]:
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : List[str] = self.backbone_config.to_dict()
lowerCAmelCase : List[Any] = self.__class__.model_type
return output
class _a ( snake_case_ ):
_UpperCamelCase: Any = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-5
@property
def _snake_case ( self ) -> int:
return 12
| 693 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
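# Copy a weight (and optional bias) array into a torch layer, asserting that the shapes match.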
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
lowerCAmelCase : Any = nn.Parameter(SCREAMING_SNAKE_CASE__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
lowerCAmelCase : Optional[Any] = nn.Parameter(SCREAMING_SNAKE_CASE__ )
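# Load trax LSH self-attention weights (shared query_key, value, output projection) into the torch attention layer.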
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : str = np.asarray(weights[0] )
lowerCAmelCase : Any = np.asarray(weights[1] )
lowerCAmelCase : Any = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key ,torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 ,2 ).contiguous().view(-1 ,SCREAMING_SNAKE_CASE__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 ,2 ).contiguous().view(-1 ,SCREAMING_SNAKE_CASE__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(SCREAMING_SNAKE_CASE__ ).view(-1 ,SCREAMING_SNAKE_CASE__ ).contiguous().transpose(0 ,1 ) ,)
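# Load trax local self-attention weights (separate query, key, value, output projection) into the torch attention layer.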
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : int = np.asarray(weights[0] )
lowerCAmelCase : Optional[Any] = np.asarray(weights[1] )
lowerCAmelCase : Any = np.asarray(weights[2] )
lowerCAmelCase : str = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query ,torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 ,2 ).contiguous().view(-1 ,SCREAMING_SNAKE_CASE__ ) ,)
set_param(
torch_layer.self_attention.key ,torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 ,2 ).contiguous().view(-1 ,SCREAMING_SNAKE_CASE__ ) ,)
set_param(
torch_layer.self_attention.value ,torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(1 ,2 ).contiguous().view(-1 ,SCREAMING_SNAKE_CASE__ ) ,)
set_param(
torch_layer.output.dense ,torch.tensor(SCREAMING_SNAKE_CASE__ ).view(-1 ,SCREAMING_SNAKE_CASE__ ).contiguous().transpose(0 ,1 ) ,)
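# Load one full trax block: the attention layer norm, the attention weights, and the chunked feed-forward sublayer.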
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = weights[0][0][0]
lowerCAmelCase : List[Any] = np.asarray(layer_norm_a[0] )
lowerCAmelCase : int = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm ,torch.tensor(SCREAMING_SNAKE_CASE__ ) ,torch.tensor(SCREAMING_SNAKE_CASE__ ) ,)
# lsh weights + output
lowerCAmelCase : Dict = weights[0][1]
if len(SCREAMING_SNAKE_CASE__ ) < 4:
set_layer_weights_in_torch_lsh(SCREAMING_SNAKE_CASE__ ,torch_block.attention ,SCREAMING_SNAKE_CASE__ )
else:
set_layer_weights_in_torch_local(SCREAMING_SNAKE_CASE__ ,torch_block.attention ,SCREAMING_SNAKE_CASE__ )
# intermediate weighs
lowerCAmelCase : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(SCREAMING_SNAKE_CASE__ ) == 4:
lowerCAmelCase : Dict = intermediate_weights[2]
# layernorm 2
lowerCAmelCase : Tuple = np.asarray(intermediate_weights[0][0] )
lowerCAmelCase : Optional[Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm ,torch.tensor(SCREAMING_SNAKE_CASE__ ) ,torch.tensor(SCREAMING_SNAKE_CASE__ ) ,)
# intermediate dense
lowerCAmelCase : Tuple = np.asarray(intermediate_weights[1][0] )
lowerCAmelCase : int = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense ,torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(SCREAMING_SNAKE_CASE__ ) ,)
# intermediate out
lowerCAmelCase : Any = np.asarray(intermediate_weights[4][0] )
lowerCAmelCase : Optional[int] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense ,torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(SCREAMING_SNAKE_CASE__ ) ,)
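# Load the entire trax model: embeddings, every encoder block, the final layer norm and the LM head.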
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = torch_model.reformer
# word embeds
lowerCAmelCase : Optional[Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings ,torch.tensor(SCREAMING_SNAKE_CASE__ ) ,)
if isinstance(weights[3] ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Dict = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCAmelCase : Union[str, Any] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F"""{position_embeddings[emb_idx]} emb does not match"""
lowerCAmelCase : int = nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase : Optional[Any] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
SCREAMING_SNAKE_CASE__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCAmelCase : List[str] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# output layer norm
lowerCAmelCase : List[Any] = np.asarray(weights[7][0] )
lowerCAmelCase : int = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm ,torch.tensor(SCREAMING_SNAKE_CASE__ ) ,torch.tensor(SCREAMING_SNAKE_CASE__ ) ,)
# output embeddings
lowerCAmelCase : Any = np.asarray(weights[9][0] )
lowerCAmelCase : Any = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder ,torch.tensor(SCREAMING_SNAKE_CASE__ ).transpose(0 ,1 ).contiguous() ,torch.tensor(SCREAMING_SNAKE_CASE__ ) ,)
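# Instantiate the PyTorch Reformer from a config file, fill it from the pickled trax weights, and save the state dict.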
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = ReformerConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(F"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase : int = ReformerModelWithLMHead(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ ,"""rb""" ) as f:
lowerCAmelCase : List[str] = pickle.load(SCREAMING_SNAKE_CASE__ )["""weights"""]
set_model_weights_in_torch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,config.hidden_size )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() ,SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase : Optional[int] =parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 710 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : int =logging.getLogger()
lowerCAmelCase : str =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
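# End-to-end fine-tuning tests for RAG: train one epoch on a tiny generated dataset and assert a minimum exact-match score, across GPU counts and retriever backends.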
class _a ( snake_case_ ):
def _snake_case ( self , lowercase_ ) -> List[Any]:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : int = {"""source""": """What is love ?""", """target""": """life"""}
lowerCAmelCase : Optional[Any] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase : Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowercase_ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = "pytorch" ) -> str:
lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """output""" )
lowerCAmelCase : Dict = os.path.join(lowercase_ , """data""" )
self._create_dummy_data(data_dir=lowercase_ )
lowerCAmelCase : str = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env() )
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """metrics.json""" )
with open(lowercase_ ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
return result
@require_torch_gpu
def _snake_case ( self ) -> Any:
lowerCAmelCase : Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 693 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not head:
return True
# split the list to two parts
lowerCAmelCase , lowerCAmelCase : Tuple = head.next, head
while fast and fast.next:
lowerCAmelCase : Optional[int] = fast.next.next
lowerCAmelCase : Any = slow.next
lowerCAmelCase : Dict = slow.next
lowerCAmelCase : Optional[int] = None # Don't forget here! But forget still works!
# reverse the second part
lowerCAmelCase : List[str] = None
while second:
lowerCAmelCase : List[str] = second.next
lowerCAmelCase : Any = node
lowerCAmelCase : Optional[Any] = second
lowerCAmelCase : str = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
lowerCAmelCase : int = node.next
lowerCAmelCase : Optional[int] = head.next
return True
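# Stack-based variant: push the values of the second half of the list, then compare them against the front while popping.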
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
lowerCAmelCase : Optional[int] = head
while fast and fast.next:
lowerCAmelCase , lowerCAmelCase : List[str] = fast.next.next, slow.next
# 2. Push the second half into the stack
lowerCAmelCase : Any = [slow.val]
while slow.next:
lowerCAmelCase : str = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
lowerCAmelCase : Tuple = cur.next
return True
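# Dictionary-based variant: record the positions of each value and check that they pair up symmetrically around the middle.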
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not head or not head.next:
return True
lowerCAmelCase : Optional[int] = {}
lowerCAmelCase : Dict = 0
while head:
if head.val in d:
d[head.val].append(SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase : Dict = [pos]
lowerCAmelCase : Union[str, Any] = head.next
pos += 1
lowerCAmelCase : Union[str, Any] = pos - 1
lowerCAmelCase : Union[str, Any] = 0
for v in d.values():
if len(SCREAMING_SNAKE_CASE__ ) % 2 != 0:
middle += 1
else:
lowerCAmelCase : Dict = 0
for i in range(0 ,len(SCREAMING_SNAKE_CASE__ ) ):
if v[i] + v[len(SCREAMING_SNAKE_CASE__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
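# Transformer-XL configuration: adaptive-softmax cutoffs, recurrence memory length, and relative-attention settings.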
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase_=267735 , lowercase_=[20000, 40000, 200000] , lowercase_=1024 , lowercase_=1024 , lowercase_=16 , lowercase_=64 , lowercase_=4096 , lowercase_=4 , lowercase_=False , lowercase_=18 , lowercase_=1600 , lowercase_=1000 , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=-1 , lowercase_=True , lowercase_=0.1 , lowercase_=0.0 , lowercase_=True , lowercase_="normal" , lowercase_=0.0_1 , lowercase_=0.0_1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=0 , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Union[str, Any] = []
self.cutoffs.extend(lowercase_ )
if proj_share_all_but_first:
lowerCAmelCase : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[str] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[Any] = d_embed
lowerCAmelCase : Union[str, Any] = d_head
lowerCAmelCase : List[Any] = d_inner
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : List[Any] = pre_lnorm
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Any = mem_len
lowerCAmelCase : Union[str, Any] = same_length
lowerCAmelCase : List[Any] = attn_type
lowerCAmelCase : int = clamp_len
lowerCAmelCase : List[str] = sample_softmax
lowerCAmelCase : Optional[int] = adaptive
lowerCAmelCase : Dict = dropout
lowerCAmelCase : Optional[Any] = dropatt
lowerCAmelCase : List[str] = untie_r
lowerCAmelCase : List[str] = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : str = proj_init_std
lowerCAmelCase : str = init_std
lowerCAmelCase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self , lowercase_ ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 693 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase : Any ={
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCAmelCase : Any ={
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCAmelCase : Optional[int] ={
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCAmelCase : List[str] ={
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowerCAmelCase : List[Any] ={
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowerCAmelCase : Tuple ={
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowerCAmelCase : int ={
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowerCAmelCase : Union[str, Any] ={
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowerCAmelCase : List[str] ={
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _a ( __UpperCAmelCase ):
_UpperCamelCase: Any = VOCAB_FILES_NAMES
_UpperCamelCase: List[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase: str = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase: Any = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _a ( __UpperCAmelCase ):
_UpperCamelCase: int = VOCAB_FILES_NAMES
_UpperCamelCase: Optional[int] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase: List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase: List[str] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase : Union[str, Any] =collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
lowerCAmelCase : Optional[int] =collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
lowerCAmelCase : Any =r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
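# Mixin providing the DPR reader encoding ([CLS] question [SEP] title [SEP] text) and the best-span decoding described in the docstring above.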
@add_start_docstrings(__UpperCAmelCase )
class _a :
def __call__( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> Dict:
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
elif titles is None or texts is None:
lowerCAmelCase : str = titles if texts is None else texts
return super().__call__(
lowerCAmelCase_ , lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowerCAmelCase : Optional[Any] = titles if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else [titles]
lowerCAmelCase : Any = texts if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else [texts]
lowerCAmelCase : Dict = len(lowerCAmelCase_ )
lowerCAmelCase : List[str] = questions if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else [questions] * n_passages
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
f"""There should be as many titles than texts but got {len(lowerCAmelCase_ )} titles and {len(lowerCAmelCase_ )} texts.""" )
lowerCAmelCase : List[str] = super().__call__(lowerCAmelCase_ , lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )["""input_ids"""]
lowerCAmelCase : int = super().__call__(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )["""input_ids"""]
lowerCAmelCase : List[str] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
}
if return_attention_mask is not False:
lowerCAmelCase : Any = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCAmelCase : int = attention_mask
return self.pad(lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = 16 , lowercase_ = 64 , lowercase_ = 4 , ) -> Optional[Any]:
lowerCAmelCase : int = reader_input["""input_ids"""]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = reader_output[:3]
lowerCAmelCase : Tuple = len(lowerCAmelCase_ )
lowerCAmelCase : Dict = sorted(range(lowerCAmelCase_ ) , reverse=lowerCAmelCase_ , key=relevance_logits.__getitem__ )
lowerCAmelCase : Tuple = []
for doc_id in sorted_docs:
lowerCAmelCase : Dict = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCAmelCase : List[Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
lowerCAmelCase : Optional[Any] = len(lowerCAmelCase_ )
lowerCAmelCase : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase_ , top_spans=lowerCAmelCase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase_ , start_index=lowerCAmelCase_ , end_index=lowerCAmelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCAmelCase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Any:
lowerCAmelCase : List[str] = []
for start_index, start_score in enumerate(lowerCAmelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCAmelCase : Tuple = sorted(lowerCAmelCase_ , key=lambda x : x[1] , reverse=True )
lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
lowerCAmelCase : Union[str, Any] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__UpperCAmelCase )
class _a ( __UpperCAmelCase , __UpperCAmelCase ):
_UpperCamelCase: Optional[int] = VOCAB_FILES_NAMES
_UpperCamelCase: List[str] = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase: Optional[int] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase: Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase: str = ["input_ids", "attention_mask"]
| 712 |
import torch
from diffusers import DiffusionPipeline
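# Minimal pipeline for testing: runs a single UNet/scheduler step and returns a tensor of ones with the same shape as the scheduler output.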
class _a ( snake_case_ ):
def __init__( self , lowercase_ , lowercase_ ) -> int:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def __call__( self ) -> List[Any]:
lowerCAmelCase : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : Dict = self.unet(lowercase_ , lowercase_ ).sample
lowerCAmelCase : str = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
lowerCAmelCase : Dict = scheduler_output - scheduler_output + torch.ones_like(lowercase_ )
return result
| 693 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
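# Image processor implementing the standard shortest-edge resize, center crop, rescale and normalization pipeline.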
class _a ( _UpperCamelCase ):
_UpperCamelCase: List[str] = ["pixel_values"]
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> Any:
super().__init__(**_UpperCAmelCase )
lowerCAmelCase : List[Any] = size if size is not None else {'''shortest_edge''': 256}
lowerCAmelCase : str = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
lowerCAmelCase : Optional[int] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowerCAmelCase : Any = get_size_dict(_UpperCAmelCase )
lowerCAmelCase : Optional[Any] = do_resize
lowerCAmelCase : List[Any] = size
lowerCAmelCase : Dict = resample
lowerCAmelCase : Tuple = do_center_crop
lowerCAmelCase : Optional[Any] = crop_size
lowerCAmelCase : Any = do_rescale
lowerCAmelCase : List[str] = rescale_factor
lowerCAmelCase : Optional[Any] = do_normalize
lowerCAmelCase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> List[Any]:
lowerCAmelCase : Optional[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCAmelCase : str = get_resize_output_image_size(_UpperCAmelCase , size=size["""shortest_edge"""] , default_to_square=_UpperCAmelCase )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> List[Any]:
lowerCAmelCase : Dict = get_size_dict(_UpperCAmelCase )
return center_crop(_UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ ) -> int:
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Tuple:
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> str:
lowerCAmelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : Optional[int] = size if size is not None else self.size
lowerCAmelCase : int = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
lowerCAmelCase : List[str] = resample if resample is not None else self.resample
lowerCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase : str = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase : List[str] = get_size_dict(_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : Dict = image_std if image_std is not None else self.image_std
lowerCAmelCase : List[str] = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase : Union[str, Any] = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
lowerCAmelCase : int = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_center_crop:
lowerCAmelCase : Optional[Any] = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase ) for image in images]
if do_rescale:
lowerCAmelCase : int = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
lowerCAmelCase : Optional[int] = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
lowerCAmelCase : List[Any] = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
lowerCAmelCase : Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
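# Hedged usage sketch (not part of the original module). `SimpleImageProcessor`
# is the neutral name assigned above, and the relative imports mean this only
# runs inside the package this file belongs to.
if __name__ == "__main__":
    from PIL import Image

    processor = SimpleImageProcessor()
    batch = processor(Image.new("RGB", (640, 480)), return_tensors="np")
    print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224) after the 224x224 center crop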
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        # HF_DATASETS_OFFLINE=1 makes datasets raise a ConnectionError before any request is sent.
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads a batch of multiple-choice inputs.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
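# Hedged usage sketch (not part of the original file). A typical invocation of
# this script looks like the following; the model name and output directory are
# illustrative placeholders:
#
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --do_train --do_eval \
#       --output_dir /tmp/swag_output \
#       --per_device_train_batch_size 16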
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Compute the expected (height, width) after resizing with a "shortest_edge" size.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
def binary_multiply(a: int, b: int) -> int:
    # Shift-and-add ("Russian peasant") multiplication: add `a` whenever the
    # current lowest bit of `b` is set, then double `a` and halve `b`.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    # Same scheme, but every partial sum is reduced modulo `modulus`, so the
    # intermediate values never grow beyond the modulus.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
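# A quick, hedged sanity check for the two helpers above (the expected values
# are ordinary integer arithmetic, not taken from the original file):
if __name__ == "__main__":
    assert binary_multiply(3, 9) == 27
    assert binary_mod_multiply(3, 9, 5) == (3 * 9) % 5
    print("binary multiplication checks passed")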
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from math import factorial
class Dual:
    # A dual number for forward-mode automatic differentiation: `real` carries the
    # value, `duals[k]` the coefficient of the (k+1)-th infinitesimal power.
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
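    # For reference (easy to verify by hand, not part of the original file):
    # f(y) = y**6, so the second derivative is 30 * y**4 and the call above
    # should print 30 * 9**4 = 196830.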
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two initializers while ignoring their names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model and saves an optimized copy next to the original.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
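# Hedged usage sketch (not in the original script); "model.onnx" is a
# placeholder path pointing at an existing ONNX file:
if __name__ == "__main__":
    optimized_path = remove_dup_initializers("model.onnx")
    print("deduplicated model written to", optimized_path)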
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    # NOTE: the dummy class name was mangled in the source; TFGPT2Tokenizer is the
    # keras-nlp-backed object transformers guards this way, restored here on that assumption.
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
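if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): requires network
    # access and the package context the relative imports above assume.
    tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
    print(tokenizer("ELECTRA tokenizers are fast.")["input_ids"])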
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 719 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
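# Lucas-Lehmer test: for an odd prime p, the Mersenne number M_p = 2**p - 1 is
# prime iff s == 0 after p - 2 iterations of s -> (s * s - 2) % M_p, starting from s = 4.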
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowerCAmelCase : List[Any] = 4
lowerCAmelCase : Tuple = (1 << p) - 1
for _ in range(p - 2 ):
lowerCAmelCase : Dict = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase : Any ={
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] =[
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 720 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
_UpperCamelCase: Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase: List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def _snake_case ( self ) -> int:
return self._get_superresolution_dummy_components()
def _snake_case ( self , lowercase_ , lowercase_=0 ) -> Optional[Any]:
if str(lowercase_ ).startswith("""mps""" ):
lowerCAmelCase : Any = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case ( self ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1e-1 )
def _snake_case ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case ( self ) -> Any:
self._test_save_load_local()
def _snake_case ( self ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 693 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase : Any ={
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Optional[int] = """albert"""
def __init__( self , lowercase_=30000 , lowercase_=128 , lowercase_=4096 , lowercase_=12 , lowercase_=1 , lowercase_=64 , lowercase_=16384 , lowercase_=1 , lowercase_="gelu_new" , lowercase_=0 , lowercase_=0 , lowercase_=512 , lowercase_=2 , lowercase_=0.0_2 , lowercase_=1e-12 , lowercase_=0.1 , lowercase_="absolute" , lowercase_=0 , lowercase_=2 , lowercase_=3 , **lowercase_ , ) -> int:
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
lowerCAmelCase : Optional[int] = vocab_size
lowerCAmelCase : Any = embedding_size
lowerCAmelCase : int = hidden_size
lowerCAmelCase : Optional[int] = num_hidden_layers
lowerCAmelCase : str = num_hidden_groups
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : int = inner_group_num
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : Dict = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = max_position_embeddings
lowerCAmelCase : List[str] = type_vocab_size
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : str = classifier_dropout_prob
lowerCAmelCase : Optional[int] = position_embedding_type
class _a ( snake_case_ ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase : Optional[int] = {0: """batch""", 1: """sequence"""}
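# Marking axes as dynamic lets the exported ONNX graph accept variable batch and sequence sizes.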
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 721 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "llama"
_UpperCamelCase: List[str] = ["past_key_values"]
def __init__( self , lowercase_=32000 , lowercase_=4096 , lowercase_=11008 , lowercase_=32 , lowercase_=32 , lowercase_=None , lowercase_="silu" , lowercase_=2048 , lowercase_=0.0_2 , lowercase_=1e-6 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=1 , lowercase_=False , lowercase_=None , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = hidden_size
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Any = num_key_value_heads
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : int = pretraining_tp
lowerCAmelCase : int = use_cache
lowerCAmelCase : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def _snake_case ( self ) -> Dict:
if self.rope_scaling is None:
return
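# Expected format, for example: {"type": "linear", "factor": 2.0}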
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
lowerCAmelCase : Union[str, Any] = self.rope_scaling.get("""type""" , lowercase_ )
lowerCAmelCase : Dict = self.rope_scaling.get("""factor""" , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 693 | 0 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase : Optional[Any] =get_logger(__name__)
class _a :
def __init__( self , lowercase_ = None ) -> Tuple:
lowerCAmelCase : Dict = (
os.path.join(lowercase_ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCAmelCase : Dict = Extractor
def _snake_case ( self , lowercase_ ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
lowerCAmelCase : Any = os.path.abspath(lowercase_ )
return os.path.join(self.extract_dir , hash_url_to_filename(lowercase_ ) )
def _snake_case ( self , lowercase_ , lowercase_ ) -> bool:
return force_extract or (
not os.path.isfile(lowercase_ ) and not (os.path.isdir(lowercase_ ) and os.listdir(lowercase_ ))
)
def _snake_case ( self , lowercase_ , lowercase_ = False ) -> str:
lowerCAmelCase : Union[str, Any] = self.extractor.infer_extractor_format(lowercase_ )
if not extractor_format:
return input_path
lowerCAmelCase : List[str] = self._get_output_path(lowercase_ )
if self._do_extract(lowercase_ , lowercase_ ):
self.extractor.extract(lowercase_ , lowercase_ , lowercase_ )
return output_path
class _a ( snake_case_ ):
@classmethod
@abstractmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> bool:
...
@staticmethod
@abstractmethod
def _snake_case ( lowercase_ , lowercase_ ) -> None:
...
class _a ( snake_case_ , snake_case_ ):
_UpperCamelCase: List[bytes] = []
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> Optional[int]:
with open(lowercase_ , """rb""" ) as f:
return f.read(lowercase_ )
@classmethod
def _snake_case ( cls , lowercase_ , lowercase_ = b"" ) -> bool:
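# A file is considered extractable when its leading bytes start with one of the class's magic numbers, e.g. gzip streams begin with b"\x1F\x8B".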
if not magic_number:
lowerCAmelCase : str = max(len(lowercase_ ) for cls_magic_number in cls.magic_numbers )
try:
lowerCAmelCase : List[str] = cls.read_magic_number(lowercase_ , lowercase_ )
except OSError:
return False
return any(magic_number.startswith(lowercase_ ) for cls_magic_number in cls.magic_numbers )
class _a ( snake_case_ ):
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> bool:
return tarfile.is_tarfile(lowercase_ )
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> Any:
def resolved(lowercase_ ) -> str:
return os.path.realpath(os.path.abspath(lowercase_ ) )
def badpath(lowercase_ , lowercase_ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowercase_ , lowercase_ ) ).startswith(lowercase_ )
def badlink(lowercase_ , lowercase_ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCAmelCase : List[Any] = resolved(os.path.join(lowercase_ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowercase_ )
lowerCAmelCase : str = resolved(lowercase_ )
for finfo in members:
if badpath(finfo.name , lowercase_ ):
logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(lowercase_ , lowercase_ ):
logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(lowercase_ , lowercase_ ):
logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> None:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : Optional[int] = tarfile.open(lowercase_ )
tar_file.extractall(lowercase_ , members=TarExtractor.safemembers(lowercase_ , lowercase_ ) )
tar_file.close()
class _a ( snake_case_ ):
_UpperCamelCase: List[Any] = [b"\x1F\x8B"]
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> None:
with gzip.open(lowercase_ , """rb""" ) as gzip_file:
with open(lowercase_ , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase_ , lowercase_ )
class _a ( snake_case_ ):
_UpperCamelCase: List[str] = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def _snake_case ( cls , lowercase_ , lowercase_ = b"" ) -> bool:
if super().is_extractable(lowercase_ , magic_number=lowercase_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowercase_ , """rb""" ) as fp:
lowerCAmelCase : Union[str, Any] = _EndRecData(lowercase_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCAmelCase : Union[str, Any] = fp.read(lowercase_ ) # CD is where we expect it to be
if len(lowercase_ ) == sizeCentralDir:
lowerCAmelCase : Any = struct.unpack(lowercase_ , lowercase_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> None:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with zipfile.ZipFile(lowercase_ , """r""" ) as zip_file:
zip_file.extractall(lowercase_ )
zip_file.close()
class _a ( snake_case_ ):
_UpperCamelCase: Dict = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> None:
with lzma.open(lowercase_ ) as compressed_file:
with open(lowercase_ , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase_ , lowercase_ )
class _a ( snake_case_ ):
_UpperCamelCase: List[Any] = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : Optional[Any] = rarfile.RarFile(lowercase_ )
rf.extractall(lowercase_ )
rf.close()
class _a ( snake_case_ ):
_UpperCamelCase: Dict = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCAmelCase : List[Any] = zstd.ZstdDecompressor()
with open(lowercase_ , """rb""" ) as ifh, open(lowercase_ , """wb""" ) as ofh:
dctx.copy_stream(lowercase_ , lowercase_ )
class _a ( snake_case_ ):
_UpperCamelCase: List[Any] = [b"\x42\x5A\x68"]
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> None:
with bz2.open(lowercase_ , """rb""" ) as compressed_file:
with open(lowercase_ , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase_ , lowercase_ )
class _a ( snake_case_ ):
_UpperCamelCase: Optional[int] = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import py7zr
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with py7zr.SevenZipFile(lowercase_ , """r""" ) as archive:
archive.extractall(lowercase_ )
class _a ( snake_case_ ):
_UpperCamelCase: Optional[Any] = [b"\x04\x22\x4D\x18"]
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lz4.frame
with lz4.frame.open(lowercase_ , """rb""" ) as compressed_file:
with open(lowercase_ , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase_ , lowercase_ )
class _a :
# Put zip last: tar or gzip files can be wrongly detected as zip, so those formats must be tried first.
_UpperCamelCase: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _snake_case ( cls ) -> List[Any]:
return max(
len(lowercase_ )
for extractor in cls.extractors.values()
if issubclass(lowercase_ , lowercase_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _snake_case ( lowercase_ , lowercase_ ) -> Dict:
try:
return MagicNumberBaseExtractor.read_magic_number(lowercase_ , magic_number_length=lowercase_ )
except OSError:
return b""
@classmethod
def _snake_case ( cls , lowercase_ , lowercase_ = False ) -> bool:
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=lowercase_ , )
lowerCAmelCase : int = cls.infer_extractor_format(lowercase_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _snake_case ( cls , lowercase_ ) -> str: # <Added version="2.4.0"/>
lowerCAmelCase : Optional[Any] = cls._get_magic_number_max_length()
lowerCAmelCase : Optional[int] = cls._read_magic_number(lowercase_ , lowercase_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowercase_ , magic_number=lowercase_ ):
return extractor_format
@classmethod
def _snake_case ( cls , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(lowercase_ ) , exist_ok=lowercase_ )
# Prevent parallel extractions
lowerCAmelCase : Tuple = str(Path(lowercase_ ).with_suffix(""".lock""" ) )
with FileLock(lowercase_ ):
shutil.rmtree(lowercase_ , ignore_errors=lowercase_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowercase_ , lowercase_ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=lowercase_ , )
lowerCAmelCase : Optional[Any] = extractor if extractor != """deprecated""" else extractor_format
else:
lowerCAmelCase : Union[str, Any] = cls.extractors[extractor_format]
return extractor.extract(lowercase_ , lowercase_ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=lowercase_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowercase_ ):
return extractor.extract(lowercase_ , lowercase_ )
| 700 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _a ( snake_case_ , snake_case_ ):
_UpperCamelCase: int = "swin"
_UpperCamelCase: str = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , lowercase_=224 , lowercase_=4 , lowercase_=3 , lowercase_=96 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 12, 24] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=32 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple:
super().__init__(**lowercase_ )
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : List[Any] = embed_dim
lowerCAmelCase : str = depths
lowerCAmelCase : List[str] = len(lowercase_ )
lowerCAmelCase : Any = num_heads
lowerCAmelCase : str = window_size
lowerCAmelCase : List[str] = mlp_ratio
lowerCAmelCase : List[Any] = qkv_bias
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = drop_path_rate
lowerCAmelCase : int = hidden_act
lowerCAmelCase : int = use_absolute_embeddings
lowerCAmelCase : Dict = layer_norm_eps
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Any = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase : Dict = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Optional[Any] = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
class _a ( snake_case_ ):
_UpperCamelCase: int = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-4
| 693 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
lowerCAmelCase : Tuple = parser.parse_args()
lowerCAmelCase : Optional[Any] = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 701 |
lowerCAmelCase : str ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 693 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase : List[Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase : List[str] ='\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=8 ):
'''simple docstring'''
lowerCAmelCase : Any = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowerCAmelCase : Any = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
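# Example for the function above: (768, 768) with scale_factor=8 maps to (96, 96); non-aligned sizes round up, e.g. (100, 100) maps to (16, 16).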
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=5_1_2 ,SCREAMING_SNAKE_CASE__=5_1_2 ):
'''simple docstring'''
lowerCAmelCase : List[Any] = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
lowerCAmelCase : List[str] = arr.astype(np.float32 ) / 127.5 - 1
lowerCAmelCase : Optional[int] = np.transpose(SCREAMING_SNAKE_CASE__ ,[2, 0, 1] )
lowerCAmelCase : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
return image
class _a ( snake_case_ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , ) -> int:
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
lowerCAmelCase : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> str:
# get the original timestep using init_timestep
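# strength in [0, 1]: higher values keep more denoising steps, so the result departs further from the input image.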
lowerCAmelCase : Dict = min(int(num_inference_steps * strength ) , lowercase_ )
lowerCAmelCase : Optional[int] = max(num_inference_steps - init_timestep , 0 )
lowerCAmelCase : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Dict:
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" )
lowerCAmelCase : List[Any] = image.to(device=lowercase_ , dtype=lowercase_ )
lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowerCAmelCase : int = image
else:
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
]
lowerCAmelCase : List[str] = torch.cat(lowercase_ , dim=0 )
else:
lowerCAmelCase : List[str] = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ )
lowerCAmelCase : Tuple = self.movq.config.scaling_factor * init_latents
lowerCAmelCase : Optional[int] = torch.cat([init_latents] , dim=0 )
lowerCAmelCase : Union[str, Any] = init_latents.shape
lowerCAmelCase : Union[str, Any] = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
lowerCAmelCase : Tuple = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : Optional[int] = init_latents
return latents
def _snake_case ( self , lowercase_=0 ) -> Optional[int]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowerCAmelCase : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
lowerCAmelCase : str = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def _snake_case ( self , lowercase_=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
lowerCAmelCase : int = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCAmelCase : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCAmelCase : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
lowerCAmelCase : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _snake_case ( self ) -> int:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 0.3 , lowercase_ = 1 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Optional[Any]:
lowerCAmelCase : Optional[int] = self._execution_device
lowerCAmelCase : Dict = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : str = torch.cat(lowercase_ , dim=0 )
lowerCAmelCase : List[str] = image_embeds.shape[0]
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : str = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase : Union[str, Any] = image_embeds.repeat_interleave(lowercase_ , dim=0 )
lowerCAmelCase : Tuple = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Optional[int] = [image]
if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowerCAmelCase : Any = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 )
lowerCAmelCase : Optional[int] = image.to(dtype=image_embeds.dtype , device=lowercase_ )
lowerCAmelCase : List[Any] = self.movq.encode(lowercase_ )["""latents"""]
lowerCAmelCase : int = latents.repeat_interleave(lowercase_ , dim=0 )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
lowerCAmelCase : Optional[Any] = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowerCAmelCase : List[str] = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
lowerCAmelCase : List[Any] = self.prepare_latents(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase : int = {"""image_embeds""": image_embeds}
lowerCAmelCase : Tuple = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
lowerCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
lowerCAmelCase : Any = noise_pred.chunk(2 )
lowerCAmelCase : List[Any] = variance_pred.chunk(2 )
lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCAmelCase : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase : Any = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
lowerCAmelCase : Optional[int] = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCAmelCase : Optional[Any] = image * 0.5 + 0.5
lowerCAmelCase : Dict = image.clamp(0 , 1 )
lowerCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase : str = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 0 |
from collections import defaultdict
from math import gcd
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ = 1_5_0_0_0_0_0 ):
'''simple docstring'''
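# Project Euler 75: count perimeters <= limit that admit exactly one integer right
# triangle. Euclid's formula yields every primitive triple (m**2 - n**2, 2*m*n,
# m**2 + n**2) for coprime m > n of opposite parity, with perimeter 2*m*(m + n);
# the inner range() also counts every multiple of that primitive perimeter.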
lowerCAmelCase : defaultdict = defaultdict(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : str = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 ,SCREAMING_SNAKE_CASE__ ,2 ):
if gcd(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) > 1:
continue
lowerCAmelCase : str = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(SCREAMING_SNAKE_CASE__ ,limit + 1 ,SCREAMING_SNAKE_CASE__ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 703 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
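# NOR returns 1 only when both inputs are 0; any high input forces the output to 0.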
return int(input_a == input_a == 0 )
def _UpperCAmelCase ( ):
'''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"""| 0 | 0 | {nor_gate(0 ,0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 ,1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 ,0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 ,1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowerCAmelCase : int ={
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] =[
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase : int ={
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] =['PoolFormerFeatureExtractor']
lowerCAmelCase : List[str] =['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure)
| 693 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ = 1_0_0_0_0_0_0 ):
'''simple docstring'''
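# Project Euler 135: with x > y > z in arithmetic progression (difference d),
# x**2 - y**2 - z**2 = (y + d)**2 - y**2 - (y - d)**2 = y * (4*d - y),
# so n = a * (4*d - a) with a = first_term and d = (a + n / a) / 4.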
lowerCAmelCase : Optional[Any] = limit + 1
lowerCAmelCase : Dict = [0] * limit
for first_term in range(1 ,SCREAMING_SNAKE_CASE__ ):
for n in range(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Tuple = first_term + n / first_term
if common_difference % 4: # a + n/a must be divisible by 4 for the common difference d to be an integer
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # z > 0 requires a > d, and n > 0 requires a < 4d
lowerCAmelCase : List[str] = sum(1 for x in frequency[1:limit] if x == 1_0 )
return count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 705 |
import os
import string
import sys
lowerCAmelCase : Optional[int] =1 << 8
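# Arrow keys are stored with bit 8 set so their codes cannot collide with single-byte characters.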
lowerCAmelCase : List[Any] ={
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
# These dict entries are read back below when translating escape sequences.
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
lowerCAmelCase : Dict =[]
lowerCAmelCase : int ={
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
lowerCAmelCase : Optional[Any] =ord(str(i))
def _UpperCAmelCase ( ):
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCAmelCase : Any = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(SCREAMING_SNAKE_CASE__ ) == 0:
# Read the keystroke
lowerCAmelCase : int = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase : Tuple = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase : str = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(SCREAMING_SNAKE_CASE__ )
if ord(SCREAMING_SNAKE_CASE__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_2_6 ) )
lowerCAmelCase : Optional[Any] = chr(KEYMAP["""esc"""] )
except KeyError:
lowerCAmelCase : Optional[int] = cha[1]
else:
lowerCAmelCase : Any = ch.decode(SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase : Optional[int] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase : List[Any] = sys.stdin.fileno()
lowerCAmelCase : str = termios.tcgetattr(SCREAMING_SNAKE_CASE__ )
try:
tty.setraw(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(SCREAMING_SNAKE_CASE__ ,termios.TCSADRAIN ,SCREAMING_SNAKE_CASE__ )
return ch
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Any = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(SCREAMING_SNAKE_CASE__ ) == KEYMAP["esc"]:
lowerCAmelCase : int = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE__ ) == KEYMAP["mod_int"]:
lowerCAmelCase : Tuple = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(SCREAMING_SNAKE_CASE__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(SCREAMING_SNAKE_CASE__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 693 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _a :
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.0_2 , lowercase_=3 , lowercase_=None , lowercase_=2 , ) -> Optional[Any]:
lowerCAmelCase : Dict = parent
lowerCAmelCase : Union[str, Any] = batch_size
lowerCAmelCase : List[str] = image_size
lowerCAmelCase : Dict = patch_size
lowerCAmelCase : str = num_channels
lowerCAmelCase : Tuple = is_training
lowerCAmelCase : Dict = use_labels
lowerCAmelCase : Union[str, Any] = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Dict = num_attention_heads
lowerCAmelCase : str = intermediate_size
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : Any = type_sequence_label_size
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : Tuple = scope
lowerCAmelCase : Dict = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase : Any = (image_size // patch_size) ** 2
lowerCAmelCase : Tuple = num_patches + 2
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : int = None
if self.use_labels:
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ) -> Optional[Any]:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : Tuple = TFDeiTModel(config=lowercase_ )
lowerCAmelCase : Dict = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
lowerCAmelCase : List[str] = TFDeiTForMaskedImageModeling(config=lowercase_ )
lowerCAmelCase : str = model(lowercase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase : int = 1
lowerCAmelCase : Any = TFDeiTForMaskedImageModeling(lowercase_ )
lowerCAmelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
lowerCAmelCase : Optional[int] = self.type_sequence_label_size
lowerCAmelCase : Tuple = TFDeiTForImageClassification(lowercase_ )
lowerCAmelCase : int = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase : List[str] = 1
lowerCAmelCase : Union[str, Any] = TFDeiTForImageClassification(lowercase_ )
lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : str = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase : int = config_and_inputs
lowerCAmelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: int = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_UpperCamelCase: Dict = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_UpperCamelCase: List[Any] = False
_UpperCamelCase: Optional[Any] = False
_UpperCamelCase: Tuple = False
_UpperCamelCase: Union[str, Any] = False
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Optional[Any] = TFDeiTModelTester(self )
lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def _snake_case ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _snake_case ( self ) -> Dict:
pass
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : int = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , tf.keras.layers.Dense ) )
def _snake_case ( self ) -> int:
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : str = model_class(lowercase_ )
lowerCAmelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def _snake_case ( self ) -> int:
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self ) -> str:
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_=False ) -> List[str]:
lowerCAmelCase : int = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _snake_case ( self ) -> int:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Optional[Any] = TFDeiTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> Tuple:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : str = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
lowerCAmelCase : List[Any] = self.default_image_processor
lowerCAmelCase : Tuple = prepare_img()
lowerCAmelCase : Tuple = image_processor(images=lowercase_ , return_tensors="""tf""" )
# forward pass
lowerCAmelCase : Any = model(**lowercase_ )
# verify the logits
lowerCAmelCase : List[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowerCAmelCase : Optional[int] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
| 706 |
# Imports
import numpy as np
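# Computes common vegetation indices (NDVI, EVI, SAVI variants, hue, etc.) from
# per-band numpy arrays supplied at construction time or per calculation call.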
class _a :
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matrices(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False
    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi(self):
        return self.nir * (self.red / (self.green**2))
    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
        return (self.nir / self.green) - 1
    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1
    def ci(self):
        return (self.red - self.blue) / self.red
    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
        return self.nir - self.green
    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )
    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)
    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
        return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
        return self.nir / self.red
    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)
    def ri(self):
        return (self.red - self.green) / (self.red + self.green)
    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value
    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
        return self.nir / self.red
    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
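# Usage sketch (hypothetical band arrays; values are illustrative):
# bands = _a(red=np.array([[1.0]]), green=np.array([[1.0]]), blue=np.array([[1.0]]), nir=np.array([[2.0]]))
# print(bands.calculation("NDVI"))  # -> [[0.33333333]]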
| 693 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
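# Unit tests for Dataset.from_list: column inference from the first record, parity
# with Dataset.from_dict, and edge cases (missing columns, empty list inputs).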
class _a ( snake_case_ ):
def _snake_case ( self ) -> int:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : int = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowercase_ )
def _snake_case ( self ) -> Any:
lowerCAmelCase : Union[str, Any] = self._create_example_records()
lowerCAmelCase : Optional[int] = Dataset.from_list(lowercase_ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowercase_ ):
self.assertDictEqual(lowercase_ , example_records[i] )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : List[Any] = self._create_example_records()
lowerCAmelCase : Optional[Any] = Dataset.from_list(lowercase_ )
lowerCAmelCase : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def _snake_case ( self ) -> List[Any]: # checks what happens with missing columns
lowerCAmelCase : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
lowerCAmelCase : List[Any] = Dataset.from_list(lowercase_ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def _snake_case ( self ) -> List[Any]: # checks if the type can be inferred from the second record
lowerCAmelCase : Union[str, Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
lowerCAmelCase : Optional[Any] = Dataset.from_list(lowercase_ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = Dataset.from_list([] )
self.assertEqual(len(lowercase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 707 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
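# Aggregates failed-test statistics from the artifacts of a GitHub Actions workflow
# run and renders them as Markdown tables, grouped per error and per model.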
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact zip, following the redirect URL first."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in a directory."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error and keep the tests where it occurred."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Group errors by model and count them."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error counts as a Markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model counts as a Markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase : Dict =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_job_links = get_job_links(args.workflow_run_id, token=args.token)
job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
index = k.find(' / ')
k = k[index + len(' / ') :]
job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
reduced_by_error = reduce_by_error(errors)
reduced_by_model = reduce_by_model(errors)
s1 = make_github_table(reduced_by_error)
s2 = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
    fp.write(s1)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
    fp.write(s2)
| 693 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
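# Lazy import structure: the vision- and torch-dependent submodules are only imported
# when one of their symbols is first accessed.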
lowerCAmelCase : int ={
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] =['PoolFormerFeatureExtractor']
lowerCAmelCase : List[str] =['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure)
| 708 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
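# Same lazy-module pattern: the torch-dependent modeling symbols resolve on first access.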
lowerCAmelCase : Optional[int] ={
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =[
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase : Dict ={
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _a ( snake_case_ ):
_UpperCamelCase: List[str] = "wavlm"
def __init__( self , lowercase_=32 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_="group" , lowercase_="gelu" , lowercase_=(512, 512, 512, 512, 512, 512, 512) , lowercase_=(5, 2, 2, 2, 2, 2, 2) , lowercase_=(10, 3, 3, 3, 3, 2, 2) , lowercase_=False , lowercase_=128 , lowercase_=16 , lowercase_=320 , lowercase_=800 , lowercase_=False , lowercase_=True , lowercase_=0.0_5 , lowercase_=10 , lowercase_=2 , lowercase_=0.0 , lowercase_=10 , lowercase_=320 , lowercase_=2 , lowercase_=0.1 , lowercase_=100 , lowercase_=256 , lowercase_=256 , lowercase_=0.1 , lowercase_="mean" , lowercase_=False , lowercase_=False , lowercase_=256 , lowercase_=(512, 512, 512, 512, 1500) , lowercase_=(5, 3, 3, 1, 1) , lowercase_=(1, 2, 3, 1, 1) , lowercase_=512 , lowercase_=80 , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=False , lowercase_=3 , lowercase_=2 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> str:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowerCAmelCase : List[str] = hidden_size
lowerCAmelCase : Union[str, Any] = feat_extract_norm
lowerCAmelCase : Union[str, Any] = feat_extract_activation
lowerCAmelCase : Any = list(lowercase_ )
lowerCAmelCase : Optional[Any] = list(lowercase_ )
lowerCAmelCase : Dict = list(lowercase_ )
lowerCAmelCase : List[str] = conv_bias
lowerCAmelCase : Union[str, Any] = num_buckets
lowerCAmelCase : List[str] = max_bucket_distance
lowerCAmelCase : Union[str, Any] = num_conv_pos_embeddings
lowerCAmelCase : Any = num_conv_pos_embedding_groups
lowerCAmelCase : Union[str, Any] = len(self.conv_dim )
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : Tuple = intermediate_size
lowerCAmelCase : Tuple = hidden_act
lowerCAmelCase : Tuple = num_attention_heads
lowerCAmelCase : str = hidden_dropout
lowerCAmelCase : List[Any] = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : List[Any] = feat_proj_dropout
lowerCAmelCase : Any = final_dropout
lowerCAmelCase : Optional[Any] = layerdrop
lowerCAmelCase : Tuple = layer_norm_eps
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Optional[Any] = num_ctc_classes
lowerCAmelCase : int = vocab_size
lowerCAmelCase : List[Any] = do_stable_layer_norm
lowerCAmelCase : int = use_weighted_layer_sum
lowerCAmelCase : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase : Any = apply_spec_augment
lowerCAmelCase : List[str] = mask_time_prob
lowerCAmelCase : List[str] = mask_time_length
lowerCAmelCase : int = mask_time_min_masks
lowerCAmelCase : str = mask_feature_prob
lowerCAmelCase : int = mask_feature_length
# parameters for pretraining with codevector quantized representations
lowerCAmelCase : Dict = num_codevectors_per_group
lowerCAmelCase : int = num_codevector_groups
lowerCAmelCase : int = contrastive_logits_temperature
lowerCAmelCase : Optional[Any] = num_negatives
lowerCAmelCase : Optional[Any] = codevector_dim
lowerCAmelCase : Dict = proj_codevector_dim
lowerCAmelCase : Optional[Any] = diversity_loss_weight
# ctc loss
lowerCAmelCase : Optional[Any] = ctc_loss_reduction
lowerCAmelCase : Optional[int] = ctc_zero_infinity
# adapter
lowerCAmelCase : Optional[Any] = add_adapter
lowerCAmelCase : str = adapter_kernel_size
lowerCAmelCase : Optional[Any] = adapter_stride
lowerCAmelCase : Optional[Any] = num_adapter_layers
lowerCAmelCase : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase : Tuple = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase : Any = list(lowercase_ )
lowerCAmelCase : Union[str, Any] = list(lowercase_ )
lowerCAmelCase : List[Any] = list(lowercase_ )
lowerCAmelCase : Tuple = xvector_output_dim
@property
def _snake_case ( self ) -> List[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 709 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _a ( snake_case_ ):
_UpperCamelCase: List[str] = "detr"
_UpperCamelCase: Dict = ["past_key_values"]
_UpperCamelCase: Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
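    # Defaults below yield a configuration similar to facebook/detr-resnet-50.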
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[int] = config_class.from_dict(lowercase_ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = None, None, None
lowerCAmelCase : Any = use_timm_backbone
lowerCAmelCase : int = backbone_config
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : Optional[Any] = num_queries
lowerCAmelCase : List[str] = d_model
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : Dict = encoder_layers
lowerCAmelCase : str = encoder_attention_heads
lowerCAmelCase : List[Any] = decoder_ffn_dim
lowerCAmelCase : List[Any] = decoder_layers
lowerCAmelCase : Union[str, Any] = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Optional[int] = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Tuple = encoder_layers
lowerCAmelCase : Optional[int] = auxiliary_loss
lowerCAmelCase : List[str] = position_embedding_type
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = use_pretrained_backbone
lowerCAmelCase : List[Any] = dilation
# Hungarian matcher
lowerCAmelCase : Tuple = class_cost
lowerCAmelCase : Union[str, Any] = bbox_cost
lowerCAmelCase : Optional[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : List[Any] = mask_loss_coefficient
lowerCAmelCase : Optional[int] = dice_loss_coefficient
lowerCAmelCase : Tuple = bbox_loss_coefficient
lowerCAmelCase : Dict = giou_loss_coefficient
lowerCAmelCase : str = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> int:
return self.encoder_attention_heads
@property
def _snake_case ( self ) -> int:
return self.d_model
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> Any:
return cls(backbone_config=lowercase_ , **lowercase_ )
def _snake_case ( self ) -> Dict[str, any]:
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : List[str] = self.backbone_config.to_dict()
lowerCAmelCase : List[Any] = self.__class__.model_type
return output
class _a ( snake_case_ ):
_UpperCamelCase: Any = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-5
@property
def _snake_case ( self ) -> int:
return 12
| 693 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
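# Exercises SamProcessor: save/load round-trips, preprocessing parity with
# SamImageProcessor, and post_process_masks behaviour across NumPy, PyTorch and TF.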
@require_vision
@require_torchvision
class _a ( unittest.TestCase ):
def _snake_case ( self ) -> List[str]:
lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase : Optional[Any] = SamImageProcessor()
lowerCAmelCase : Union[str, Any] = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self , **lowercase_ ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor
def _snake_case ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : int = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Dict = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : List[str] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
lowerCAmelCase : Dict = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def _snake_case ( self ) -> str:
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : Any = image_processor(lowercase_ , return_tensors="""np""" )
lowerCAmelCase : Optional[int] = processor(images=lowercase_ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : str = self.get_image_processor()
lowerCAmelCase : List[str] = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : str = [torch.ones((1, 3, 5, 5) )]
lowerCAmelCase : str = [[1764, 2646]]
lowerCAmelCase : Optional[int] = [[683, 1024]]
lowerCAmelCase : Optional[Any] = processor.post_process_masks(lowercase_ , lowercase_ , lowercase_ )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase : Optional[int] = processor.post_process_masks(
lowercase_ , torch.tensor(lowercase_ ) , torch.tensor(lowercase_ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
lowerCAmelCase : Tuple = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase : Dict = processor.post_process_masks(lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase : Optional[Any] = [[1, 0], [0, 1]]
with self.assertRaises(lowercase_ ):
lowerCAmelCase : Dict = processor.post_process_masks(lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) )
@require_vision
@require_tf
class _a ( unittest.TestCase ):
def _snake_case ( self ) -> Any:
lowerCAmelCase : Dict = tempfile.mkdtemp()
lowerCAmelCase : Optional[Any] = SamImageProcessor()
lowerCAmelCase : List[str] = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self , **lowercase_ ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor
def _snake_case ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> Any:
lowerCAmelCase : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ) -> int:
lowerCAmelCase : List[str] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
lowerCAmelCase : List[str] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def _snake_case ( self ) -> str:
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Tuple = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : str = self.prepare_image_inputs()
lowerCAmelCase : List[Any] = image_processor(lowercase_ , return_tensors="""np""" )
lowerCAmelCase : Optional[Any] = processor(images=lowercase_ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : List[str] = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : Optional[int] = [tf.ones((1, 3, 5, 5) )]
lowerCAmelCase : Optional[Any] = [[1764, 2646]]
lowerCAmelCase : List[str] = [[683, 1024]]
lowerCAmelCase : List[Any] = processor.post_process_masks(lowercase_ , lowercase_ , lowercase_ , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase : Any = processor.post_process_masks(
lowercase_ , tf.convert_to_tensor(lowercase_ ) , tf.convert_to_tensor(lowercase_ ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
lowerCAmelCase : List[Any] = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase : List[Any] = processor.post_process_masks(
lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase : str = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
lowerCAmelCase : Union[str, Any] = processor.post_process_masks(
lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class _a ( unittest.TestCase ):
def _snake_case ( self ) -> int:
lowerCAmelCase : Dict = tempfile.mkdtemp()
lowerCAmelCase : List[Any] = SamImageProcessor()
lowerCAmelCase : int = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self , **lowercase_ ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor
def _snake_case ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : str = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
lowerCAmelCase : str = [tf.convert_to_tensor(lowercase_ )]
lowerCAmelCase : str = [torch.tensor(lowercase_ )]
lowerCAmelCase : Union[str, Any] = [[1764, 2646]]
lowerCAmelCase : int = [[683, 1024]]
lowerCAmelCase : List[str] = processor.post_process_masks(
lowercase_ , lowercase_ , lowercase_ , return_tensors="""tf""" )
lowerCAmelCase : Optional[int] = processor.post_process_masks(
lowercase_ , lowercase_ , lowercase_ , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def _snake_case ( self ) -> List[str]:
lowerCAmelCase : Dict = self.get_image_processor()
lowerCAmelCase : str = SamProcessor(image_processor=lowercase_ )
lowerCAmelCase : Any = self.prepare_image_inputs()
lowerCAmelCase : Dict = image_processor(lowercase_ , return_tensors="""pt""" )["""pixel_values"""].numpy()
lowerCAmelCase : Tuple = processor(images=lowercase_ , return_tensors="""pt""" )["""pixel_values"""].numpy()
lowerCAmelCase : Union[str, Any] = image_processor(lowercase_ , return_tensors="""tf""" )["""pixel_values"""].numpy()
lowerCAmelCase : Any = processor(images=lowercase_ , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
| 710 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : int =logging.getLogger()
lowerCAmelCase : str =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
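# End-to-end smoke tests: fine-tune facebook/rag-sequence-base on a tiny synthetic
# dataset and assert a minimum exact-match score across GPU counts and retrievers.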
class _a ( snake_case_ ):
def _snake_case ( self , lowercase_ ) -> List[Any]:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : int = {"""source""": """What is love ?""", """target""": """life"""}
lowerCAmelCase : Optional[Any] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase : Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowercase_ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = "pytorch" ) -> str:
lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """output""" )
lowerCAmelCase : Dict = os.path.join(lowercase_ , """data""" )
self._create_dummy_data(data_dir=lowercase_ )
lowerCAmelCase : str = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env() )
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """metrics.json""" )
with open(lowercase_ ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
return result
@require_torch_gpu
def _snake_case ( self ) -> Any:
lowerCAmelCase : Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 693 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
lowerCAmelCase : str =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ='Hello, World!'
lowerCAmelCase : Tuple ='en_XX'
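# Converts a fairseq X-MOD checkpoint to the transformers format, copying weights
# tensor by tensor and verifying that both models produce matching outputs.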
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = Path("""data_bin""" )
lowerCAmelCase : Optional[Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE__ ).parent ) ,checkpoint_file=Path(SCREAMING_SNAKE_CASE__ ).name ,_name="""xmod_base""" ,arch="""xmod_base""" ,task="""multilingual_masked_lm""" ,data_name_or_path=str(SCREAMING_SNAKE_CASE__ ) ,bpe="""sentencepiece""" ,sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE__ ).parent / """sentencepiece.bpe.model""" ) ,src_dict=str(data_dir / """dict.txt""" ) ,)
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = xmod.model.encoder.sentence_encoder
lowerCAmelCase : int = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings ,hidden_size=xmod.cfg.model.encoder_embed_dim ,num_hidden_layers=xmod.cfg.model.encoder_layers ,num_attention_heads=xmod.cfg.model.encoder_attention_heads ,intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=5_1_4 ,type_vocab_size=1 ,layer_norm_eps=1e-5 ,pre_norm=xmod.cfg.model.encoder_normalize_before ,adapter_reduction_factor=getattr(xmod.cfg.model ,"""bottleneck""" ,2 ) ,adapter_layer_norm=xmod.cfg.model.adapter_layer_norm ,adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm ,ln_before_adapter=xmod.cfg.model.ln_before_adapter ,languages=xmod.cfg.model.languages ,)
if classification_head:
lowerCAmelCase : Any = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : str = XmodForSequenceClassification(SCREAMING_SNAKE_CASE__ ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCAmelCase : str = xmod_sent_encoder.embed_tokens.weight
lowerCAmelCase : Any = xmod_sent_encoder.embed_positions.weight
lowerCAmelCase : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCAmelCase : List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCAmelCase : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCAmelCase : List[Any] = model.roberta.encoder.layer[i]
lowerCAmelCase : Dict = xmod_sent_encoder.layers[i]
# self attention
lowerCAmelCase : Tuple = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
lowerCAmelCase : str = xmod_layer.self_attn.q_proj.weight
lowerCAmelCase : Any = xmod_layer.self_attn.q_proj.bias
lowerCAmelCase : Tuple = xmod_layer.self_attn.k_proj.weight
lowerCAmelCase : Dict = xmod_layer.self_attn.k_proj.bias
lowerCAmelCase : Dict = xmod_layer.self_attn.v_proj.weight
lowerCAmelCase : int = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCAmelCase : List[str] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
lowerCAmelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
lowerCAmelCase : str = xmod_layer.self_attn.out_proj.bias
lowerCAmelCase : Optional[int] = xmod_layer.self_attn_layer_norm.weight
lowerCAmelCase : int = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCAmelCase : int = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
lowerCAmelCase : Tuple = xmod_layer.fca.weight
lowerCAmelCase : str = xmod_layer.fca.bias
# output
lowerCAmelCase : Any = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
lowerCAmelCase : Any = xmod_layer.fca.weight
lowerCAmelCase : Optional[int] = xmod_layer.fca.bias
lowerCAmelCase : Tuple = xmod_layer.final_layer_norm.weight
lowerCAmelCase : Tuple = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCAmelCase : Union[str, Any] = xmod_layer.adapter_layer_norm.weight
lowerCAmelCase : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            lowerCAmelCase : Any = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            lowerCAmelCase : Tuple = from_adapter.fc1.weight
            lowerCAmelCase : Optional[int] = from_adapter.fc1.bias
            lowerCAmelCase : List[Any] = from_adapter.fc2.weight
            lowerCAmelCase : Any = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCAmelCase : Union[str, Any] = xmod_sent_encoder.layer_norm.weight
lowerCAmelCase : Tuple = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCAmelCase : List[str] = xmod.model.classification_heads["""mnli"""].dense.weight
lowerCAmelCase : Any = xmod.model.classification_heads["""mnli"""].dense.bias
lowerCAmelCase : List[Any] = xmod.model.classification_heads["""mnli"""].out_proj.weight
lowerCAmelCase : int = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
lowerCAmelCase : Any = xmod.model.encoder.lm_head.dense.weight
lowerCAmelCase : Union[str, Any] = xmod.model.encoder.lm_head.dense.bias
lowerCAmelCase : int = xmod.model.encoder.lm_head.layer_norm.weight
lowerCAmelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCAmelCase : Optional[int] = xmod.model.encoder.lm_head.weight
lowerCAmelCase : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCAmelCase : Any = xmod.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Tuple = model(SCREAMING_SNAKE_CASE__ )[0]
if classification_head:
lowerCAmelCase : Tuple = xmod.model.classification_heads["""mnli"""](xmod.extract_features(SCREAMING_SNAKE_CASE__ ) )
else:
lowerCAmelCase : Optional[Any] = xmod.model(SCREAMING_SNAKE_CASE__ ,lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape ,their_output.shape )
lowerCAmelCase : List[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCAmelCase : Tuple = torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-3 )
print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(parents=SCREAMING_SNAKE_CASE__ ,exist_ok=SCREAMING_SNAKE_CASE__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
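# Editorial sketch (not part of the original script): the parity check above can be
# factored into a reusable helper. The helper name and signature are hypothetical.
def check_conversion_parity(our_output, their_output, atol=1e-3):
    # The max absolute difference should be tiny (~1e-7) when every tensor in the
    # source checkpoint was mapped onto the right target parameter.
    max_abs_diff = torch.max(torch.abs(our_output - their_output)).item()
    return torch.allclose(our_output, their_output, atol=atol), max_abs_diff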
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--xmod_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
lowerCAmelCase : int =parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=267735 , cutoffs=[20000, 40000, 200000] , d_model=1024 , d_embed=1024 , n_head=16 , d_head=64 , d_inner=4096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1600 , clamp_len=1000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ) -> int:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> None:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 693 | 0 |
def simplify ( current_set ):
    '''simple docstring'''
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        saved_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0 ,current_first_column[i] )
        resultant.insert(0 ,saved_first_row )
        final_set = resultant
    return final_set
def solve_simultaneous ( equations ):
    '''simple docstring'''
    if len(equations ) == 0:
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
    for row in equations:
        if any(not isinstance(column ,(int, float) ) for column in row ):
            raise ValueError("""solve_simultaneous() requires lists of integers""" )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
        data_set.insert(0 ,full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions : list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item ,5 ) ) )
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : Dict =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
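# Editorial worked example: each equation above is x_i + (x_1+...+x_5) = b_i,
# so summing all five rows gives 6*(x_1+...+x_5) = 30, i.e. the variables sum
# to 5 and x_i = b_i - 5. Hence solve_simultaneous(eq) should print
# [-1.0, 0.0, 1.0, 2.0, 3.0], and solve_simultaneous([[4, 2]]) prints [0.5].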
| 712 |
import torch
from diffusers import DiffusionPipeline
class _a ( snake_case_ ):
def __init__( self , lowercase_ , lowercase_ ) -> int:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
    def __call__( self ) -> List[Any]:
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        unet_output = self.unet(noise , timestep ).sample
        scheduler_output = self.scheduler.step(unet_output , timestep , noise ).prev_sample
        # Deterministic placeholder output: subtracting scheduler_output from itself
        # and adding ones yields an all-ones tensor with the same shape/dtype/device.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
| 693 | 0 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_ ( state_dict ):
    '''simple docstring'''
    ignore_keys = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
        state_dict.pop(k ,None )
def make_linear_from_emb ( emb ):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size ,emb_size ,bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
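# Editorial note: make_linear_from_emb ties an output projection to an embedding
# matrix, the usual trick for weight-tied LM heads. Hypothetical usage:
#   emb = nn.Embedding(256206, 1024)
#   lm_head = make_linear_from_emb(emb)   # maps 1024 -> 256206, no bias
#   assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # shared storage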
def rename_fairseq_keys ( state_dict ,expert_idx=None ):
    '''simple docstring'''
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("""moe_layer.experts.0""" ,F"""ffn.experts.expert_{expert_idx}""" )
            else:
                key = key.replace("""moe_layer.experts.""" ,"""ffn.experts.expert_""" )
        if "gate" in key:
            key = key.replace(""".moe_layer.gate.wg""" ,""".ffn.router.classifier""" )
        # Membership must be tested on both substrings; `"fc2" and "experts" not in key`
        # would silently skip the "fc2" check because of operator precedence.
        if "fc2" in key and "experts" not in key:
            key = key.replace(""".fc2.""" ,""".ffn.fc2.""" )
        if "fc1" in key and "experts" not in key:
            key = key.replace(""".fc1.""" ,""".ffn.fc1.""" )
        if ".encoder_attn." in key:
            key = key.replace(""".encoder_attn.""" ,""".cross_attention.""" )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("""encoder_attn_layer_norm""" ,"""cross_attention_layer_norm""" )
        if "final_layer_norm" in key:
            key = key.replace("""final_layer_norm""" ,"""ff_layer_norm""" )
        new_dict[key] = state_dict[old_key]
    return new_dict
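# Editorial example of the renaming above (hypothetical keys):
#   "layers.3.moe_layer.experts.0.fc1.weight" with expert_idx=7
#     -> "layers.3.ffn.experts.expert_7.fc1.weight"  (fc1 kept: "experts" in key)
#   "layers.3.fc2.weight" (no experts) -> "layers.3.ffn.fc2.weight"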
def shard_on_the_fly ( switch_checkpoint_path ,dump_path ,num_experts ,dtype ,weights_name = WEIGHTS_NAME ):
    '''simple docstring'''
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path ,exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F"""-rank-{expert}.pt"""
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )["""model"""]
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state ,expert )
            save_path = os.path.join(
                dump_path ,weights_name.replace(""".bin""" ,F"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            torch.save(expert_state ,save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path ,weights_name.replace(""".bin""" ,F"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    shared_weights = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights ,None )
    shared_weights["""shared.weight"""] = shared_weights["""decoder.embed_tokens.weight"""]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path ,weights_name )
        torch.save(shared_weights ,save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights ,save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(""".bin""" ,F"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(dump_path ,weights_name.replace(""".bin""" ,F"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename ,os.path.join(dump_path ,shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"""total_size""": total_size}
    index = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(dump_path ,WEIGHTS_INDEX_NAME ) ,"""w""" ,encoding="""utf-8""" ) as f:
        content = json.dumps(index ,indent=2 ,sort_keys=True ) + """\n"""
        f.write(content )
    return metadata, index
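# Editorial sketch of the index built above (values are illustrative only):
# {
#   "metadata": {"total_size": 214748364800},
#   "weight_map": {"decoder.embed_tokens.weight": "pytorch_model-00129-of-00129.bin", ...}
# }
# Loaders consult weight_map to fetch only the shard that owns a given parameter.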
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
lowerCAmelCase : Dict =parser.parse_args()
lowerCAmelCase : List[str] =shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
lowerCAmelCase : Any =NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowerCAmelCase : int =NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 713 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def test_offline_with_datasets_offline_mode_enabled():
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" )
| 693 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _a ( unittest.TestCase ):
    def get_file_format( self , seed , shape ) -> str:
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""
def _snake_case ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _snake_case ( self , lowercase_=0 , lowercase_=(4, 4, 64, 64) , lowercase_=False ) -> Optional[Any]:
lowerCAmelCase : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase : int = jnp.array(load_hf_numpy(self.get_file_format(lowercase_ , lowercase_ ) ) , dtype=lowercase_ )
return image
def _snake_case ( self , lowercase_=False , lowercase_="CompVis/stable-diffusion-v1-4" ) -> Tuple:
lowerCAmelCase : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase : Optional[Any] = """bf16""" if fpaa else None
lowerCAmelCase : Optional[Any] = FlaxUNetaDConditionModel.from_pretrained(
lowercase_ , subfolder="""unet""" , dtype=lowercase_ , revision=lowercase_ )
return model, params
def _snake_case ( self , lowercase_=0 , lowercase_=(4, 77, 768) , lowercase_=False ) -> Dict:
lowerCAmelCase : Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase : Any = jnp.array(load_hf_numpy(self.get_file_format(lowercase_ , lowercase_ ) ) , dtype=lowercase_ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> int:
lowerCAmelCase : List[Any] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=lowercase_ )
lowerCAmelCase : List[Any] = self.get_latents(lowercase_ , fpaa=lowercase_ )
lowerCAmelCase : Optional[Any] = self.get_encoder_hidden_states(lowercase_ , fpaa=lowercase_ )
lowerCAmelCase : Union[str, Any] = model.apply(
{"""params""": params} , lowercase_ , jnp.array(lowercase_ , dtype=jnp.intaa ) , encoder_hidden_states=lowercase_ , ).sample
assert sample.shape == latents.shape
lowerCAmelCase : List[str] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowerCAmelCase : Any = jnp.array(lowercase_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowercase_ , lowercase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
lowerCAmelCase : List[Any] = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=lowercase_ )
lowerCAmelCase : List[str] = self.get_latents(lowercase_ , shape=(4, 4, 96, 96) , fpaa=lowercase_ )
lowerCAmelCase : Optional[int] = self.get_encoder_hidden_states(lowercase_ , shape=(4, 77, 1024) , fpaa=lowercase_ )
lowerCAmelCase : Optional[Any] = model.apply(
{"""params""": params} , lowercase_ , jnp.array(lowercase_ , dtype=jnp.intaa ) , encoder_hidden_states=lowercase_ , ).sample
assert sample.shape == latents.shape
lowerCAmelCase : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowerCAmelCase : List[str] = jnp.array(lowercase_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase_ , lowercase_ , atol=1e-2 )
| 714 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self ) -> int:
# prepare image and target
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : str = json.loads(f.read() )
lowerCAmelCase : List[Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCAmelCase : List[str] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : List[str] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def _snake_case ( self ) -> int:
# prepare image, target and masks_path
lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : Any = json.loads(f.read() )
lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCAmelCase : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowerCAmelCase : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
| 693 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return "".join(sorted(SCREAMING_SNAKE_CASE__ ) )
def anagram ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return word_by_signature[signature(SCREAMING_SNAKE_CASE__ )]
lowerCAmelCase : str =Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
lowerCAmelCase : Union[str, Any] =sorted({word.strip().lower() for word in data.splitlines()})
lowerCAmelCase : int =collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
lowerCAmelCase : Any ={word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
| 715 |
def binary_multiply ( a ,b ):
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply ( a ,b ,c ):
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
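# Editorial worked example (function names above are editorial restorations):
# binary_multiply(13, 11) walks the set bits of 11 (0b1011) and accumulates
# 13 + 26 + 104 = 143; the modular variant keeps every partial sum reduced
# mod c, so intermediates never exceed 2*c.
if __name__ == "__main__":
    assert binary_multiply(13, 11) == 143
    assert binary_mod_multiply(13, 11, 7) == (13 * 11) % 7  # == 3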
| 693 | 0 |
def is_isogram ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''
    if not all(x.isalpha() for x in SCREAMING_SNAKE_CASE__ ):
        raise ValueError("""String must only contain alphabetic characters.""" )
    letters = sorted(SCREAMING_SNAKE_CASE__.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
lowerCAmelCase : List[str] =input('Enter a string ').strip()
lowerCAmelCase : int =is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 716 |
from math import factorial
class Dual :
    def __init__( self , real , rank ):
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__( self ) -> str:
        return (
            f"""{self.real}+"""
            f"""{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
        )
    def _snake_case ( self ):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__( self , other ):
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            # missing higher-order dual components are zero, so pad with zeros
            o_dual.extend([0] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([0] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__( self , other ):
return self + other * -1
    def __mul__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__( self , n ):
        if n < 0 or isinstance(n , float ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
        x = self
for _ in range(n - 1 ):
x *= self
return x
def differentiate ( func ,position ,order ):
    '''simple docstring'''
    if not callable(func ):
        raise ValueError("""differentiate() requires a function as input for func""" )
    if not isinstance(position ,(float, int) ):
        raise ValueError("""differentiate() requires a float as input for position""" )
    if not isinstance(order ,int ):
        raise ValueError("""differentiate() requires an int as input for order""" )
    current = Dual(position ,1 )
    result = func(current )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _UpperCAmelCase ( y ):
    '''simple docstring'''
    return y**2 * y**4
print(differentiate(_UpperCAmelCase, 9, 2))
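# Editorial check: f(y) = y**2 * y**4 = y**6, so the second derivative is
# 30*y**4; at y = 9 that is 30 * 6561 = 196830, which the call above should print.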
| 693 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
@staticmethod
def _snake_case ( *lowercase_ , **lowercase_ ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
@require_torch
class _a ( unittest.TestCase ):
_UpperCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> str:
lowerCAmelCase : Dict = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
lowerCAmelCase : Any = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def _snake_case ( self , lowercase_ , lowercase_ ) -> Dict:
lowerCAmelCase : Dict = object_detector(examples[0] , threshold=0.0 )
lowerCAmelCase : Any = len(lowercase_ )
self.assertGreater(lowercase_ , 0 )
self.assertEqual(
lowercase_ , [
{
"""score""": ANY(lowercase_ ),
"""label""": ANY(lowercase_ ),
"""box""": {"""xmin""": ANY(lowercase_ ), """ymin""": ANY(lowercase_ ), """xmax""": ANY(lowercase_ ), """ymax""": ANY(lowercase_ )},
}
for i in range(lowercase_ )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _snake_case ( self ) -> List[str]:
pass
@require_torch
def _snake_case ( self ) -> Any:
lowerCAmelCase : int = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
lowerCAmelCase : Optional[Any] = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.7_2_3_5, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_2_1_8, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_1_8_4, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6_7_4_8, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_5_6, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_1_4, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_4_5_6, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_4_2, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6_4_1_9, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
lowerCAmelCase : Tuple = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"""score""": 0.7_2_3_5, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_2_1_8, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_1_8_4, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6_7_4_8, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_5_6, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_1_4, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_4_5_6, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_4_2, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6_4_1_9, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
def _snake_case ( self ) -> Dict:
lowerCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" )
lowerCAmelCase : Union[str, Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
lowerCAmelCase : Union[str, Any] = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def _snake_case ( self ) -> Any:
pass
@require_torch
@slow
def _snake_case ( self ) -> List[str]:
lowerCAmelCase : List[str] = 0.2
lowerCAmelCase : str = pipeline("""zero-shot-object-detection""" )
lowerCAmelCase : Any = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=lowercase_ , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[int] = 2
lowerCAmelCase : Optional[int] = pipeline("""zero-shot-object-detection""" )
lowerCAmelCase : Optional[int] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=lowercase_ , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
| 717 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
_UpperCamelCase: List[Any] = ["keras_nlp"]
def __init__( self , *lowercase_ , **lowercase_ ) -> Tuple:
requires_backends(self , ["""keras_nlp"""] )
| 693 | 0 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
    def __init__( self , degree , coefficients ) -> None:
        if len(coefficients ) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""" )
        self.coefficients : list[float] = list(coefficients )
        self.degree = degree
    def __add__( self , polynomial_a ) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self , polynomial_a ) -> Polynomial:
        return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self ) -> Polynomial:
return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self , polynomial_a ) -> Polynomial:
        coefficients : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate ( self , substitution ) -> int | float:
        result : int | float = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self ) -> str:
        polynomial = """"""
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial
def __repr__( self ) -> str:
return self.__str__()
    def derivative ( self ) -> Polynomial:
        coefficients : list[float] = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral ( self , constant = 0 ) -> Polynomial:
        coefficients : list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self , polynomial_a ) -> bool:
        if not isinstance(polynomial_a , Polynomial ):
            return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
    def __ne__( self , polynomial_a ) -> bool:
        return not self.__eq__(polynomial_a )
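# Editorial usage sketch (coefficients are ordered low to high):
#   p = Polynomial(2, [1, 2, 3])   # 3x^2 + 2x + 1
#   p.evaluate(2)                   # 3*4 + 2*2 + 1 = 17
#   p.derivative()                  # 6x + 2
#   p.integral(constant=0)          # x^3 + x^2 + x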
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 693 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if n == 1 or not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
return 0
elif n == 2:
return 1
else:
lowerCAmelCase : Dict = [0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Any = 0
lowerCAmelCase : Dict = 2
while digits < n:
index += 1
lowerCAmelCase : Union[str, Any] = len(str(fibonacci(SCREAMING_SNAKE_CASE__ ) ) )
return index
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ = 1_0_0_0 ):
'''simple docstring'''
return fibonacci_digits_index(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
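# Editorial worked example: F(12) = 144 is the first Fibonacci number with 3
# digits, so fibonacci_digits_index(3) == 12; solution(1000) asks for the index
# of the first term with 1000 digits (Project Euler problem 25).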
| 719 |
def lucas_lehmer_test ( p ):
    '''simple docstring'''
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
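# Editorial check: M_7 = 2**7 - 1 = 127 is prime, so the first call prints True;
# M_11 = 2047 = 23 * 89 is composite, so the second prints False.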
| 693 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor , tokenizer ) -> Tuple:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("""You have to specify either images or text.""" )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor

    def batch_decode( self , *args , **kwargs ) -> str:
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ) -> List[str]:
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ) -> List[Any]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 720 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
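# Fast tests for the IF img2img super-resolution pipeline, built from tiny dummy components.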
@skip_mps
class IFImgaImgSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components( self ) -> int:
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ) -> Optional[Any]:
        if str(device ).startswith("""mps""" ):
            # MPS has no device-local generator here, so seed the global RNG instead.
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ) -> Optional[int]:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def test_save_load_optional_components( self ) -> int:
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_floataa( self ) -> Any:
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def test_attention_slicing_forward_pass( self ) -> int:
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def test_save_load_local( self ) -> Any:
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ) -> str:
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 693 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Optional[Any]:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> List[Any]:
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ) -> Dict:
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ) -> Tuple:
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ) -> Optional[int]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ) -> Dict:
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
| 721 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={}
class LlamaConfig( PretrainedConfig ):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.0_2 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ) -> Optional[int]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ) -> Dict:
        '''Validate that `rope_scaling` is {"type": "linear" | "dynamic", "factor": float > 1.0}.'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("""type""" , None )
        rope_scaling_factor = self.rope_scaling.get("""factor""" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 693 | 0 |
'''simple docstring'''
def combination_util( arr ,n ,r ,index ,data ,i ):
    '''Recursively fill data[] and print every combination of size r drawn from arr[0..n-1].'''
    if index == r:
        for j in range(r ):
            print(data[j] ,end=""" """ )
        print(""" """ )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr ,n ,r ,index + 1 ,data ,i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr ,n ,r ,index ,data ,i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination( arr ,n ,r ):
    '''Print all combinations of size r from the first n elements of arr.'''
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr ,n ,r ,0 ,data ,0 )
if __name__ == "__main__":
# Driver code to check the function above
    arr =[10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 700 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ) -> Tuple:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class SwinOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
| 693 | 0 |
import argparse
import json
import subprocess
def get_runner_status( target_runners ,token ):
    '''Query the GitHub API and fail loudly if any target self-hosted runner is offline.'''
    offline_runners = []
    cmd = (
        F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        """ https://api.github.com/repos/huggingface/transformers/actions/runners"""
    )
    output = subprocess.run(cmd ,shell=True ,stdout=subprocess.PIPE )
    o = output.stdout.decode("""utf-8""" )
    status = json.loads(o )
    runners = status["""runners"""]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("""offline_runners.txt""" ,"""w""" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = """\n""".join([x["""name"""] for x in offline_runners] )
        raise ValueError(F"""The following runners are offline:\n{failed}""" )


if __name__ == "__main__":

    def list_str( values ):
        '''Split a comma-separated CLI argument into a list.'''
        return values.split(""",""" )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 701 |
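# Pinned dependency table; the specifiers below appear to mirror transformers' dependency_versions_table.py.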
deps ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 693 | 0 |
import os
from pathlib import Path
def load_cuda_kernels( ):
    '''JIT-compile and import the MultiScaleDeformableAttention extension on first use.'''
    from torch.utils.cpp_extension import load

    root = Path(__file__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
    src_files = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" ,"""ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" ,"""ms_deform_attn_cuda.cu""" ),
]
]
    load(
        """MultiScaleDeformableAttention""" ,src_files ,with_cuda=True ,extra_include_paths=[str(root )] ,extra_cflags=["""-DWITH_CUDA=1"""] ,extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] ,)
import MultiScaleDeformableAttention as MSDA
return MSDA
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
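# Lazy import structure: framework-specific submodules are only imported on first attribute access.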
_import_structure ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 0 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
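# A random User-Agent header reduces the chance of the scrape request being blocked.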
headers ={'UserAgent': UserAgent().random}
def extract_user_profile( script ):
    '''Pull the embedded JSON user profile out of an Instagram page <script> tag.'''
    data = script.contents[0]
    info = json.loads(data[data.find("""{\"config\"""" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__( self , username ) -> Tuple:
        self.url = f"""https://www.instagram.com/{username}/"""
        self.user_data = self.get_json()

    def get_json( self ) -> dict:
        '''Fetch the profile page and extract the user dict from its embedded scripts.'''
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , """html.parser""" ).find_all("""script""" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )

    def __repr__( self ) -> str:
        return f"""{self.__class__.__name__}('{self.username}')"""

    def __str__( self ) -> str:
        return f"""{self.fullname} ({self.username}) is {self.biography}"""

    @property
    def username( self ) -> str:
        return self.user_data["username"]

    @property
    def fullname( self ) -> str:
        return self.user_data["full_name"]

    @property
    def biography( self ) -> str:
        return self.user_data["biography"]

    @property
    def email( self ) -> str:
        return self.user_data["business_email"]

    @property
    def website( self ) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers( self ) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings( self ) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts( self ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url( self ) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified( self ) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private( self ) -> bool:
        return self.user_data["is_private"]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ = "github" ):
'''simple docstring'''
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
lowerCAmelCase : int = InstagramUser(SCREAMING_SNAKE_CASE__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,SCREAMING_SNAKE_CASE__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user =InstagramUser('github')
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 703 |
def nor_gate( input_a ,input_b ):
    '''NOR gate: returns 1 only when both inputs are 0.'''
    return int(input_a == input_b == 0 )


def main( ):
    '''Print the NOR truth table.'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"""| 0 | 0 | {nor_gate(0 ,0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 ,1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 ,0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 ,1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 693 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
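# torch.compile/dynamo backend choices surfaced by the interactive `accelerate config` prompts.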
DYNAMO_BACKENDS =[
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field( input_text ,convert_value=None ,default=None ,error_message=None ):
    '''Prompt until the answer converts cleanly, or return the default on empty input.'''
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )


def _ask_options( input_text ,options=[] ,convert_value=None ,default_choice=0 ):
    '''Render a bullet menu and convert the selected option.'''
    menu = BulletMenu(input_text ,options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result


def _convert_compute_environment( value ):
    value = int(value )
    return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )


def _convert_distributed_mode( value ):
    value = int(value )
    return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )


def _convert_dynamo_backend( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value


def _convert_mixed_precision( value ):
    value = int(value )
    return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )


def _convert_sagemaker_distributed_mode( value ):
    value = int(value )
    return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )


def _convert_yes_no_to_bool( value ):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter( argparse.RawDescriptionHelpFormatter ):
    def _format_usage( self , usage , actions , groups , prefix ) -> List[str]:
        '''Strip the generic "<command> [<args>]" stub from subcommand usage lines.'''
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace("""<command> [<args>] """ , """""" )
        return usage
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
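# Import structure for the lazily-loaded PoolFormer submodules.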
_import_structure ={
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] =['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] =['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] =[
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] =_LazyModule(__name__, globals()['__file__'], _import_structure)
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
import os
import string
import sys
ARROW_KEY_FLAG =1 << 8
KEYMAP ={
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
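# Arrow keys carry ARROW_KEY_FLAG so their codes don't collide with printable characters.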
KEYMAP['arrow_begin'] =KEYMAP['up']
KEYMAP['arrow_end'] =KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER =[]
    WIN_KEYMAP ={
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] =ord(str(i))
def get_raw_chars( ):
    '''Read one raw keypress, handling Windows scan-code prefixes and POSIX raw mode.'''
    if os.name == "nt":
        import msvcrt

        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_2_6 ) )
                    ch = chr(KEYMAP["""esc"""] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd ,termios.TCSADRAIN ,old_settings )
    return ch
def get_character( ):
    '''Translate raw keypresses into characters or flagged arrow-key codes.'''
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 693 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase : str =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig( PretrainedConfig ):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self , vocab_size=50265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ) -> str:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class BlenderbotSmallOnnxConfig( OnnxSeqaSeqConfigWithPast ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase : Tuple = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCAmelCase : List[str] = {0: """batch"""}
lowerCAmelCase : int = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowerCAmelCase : Dict = {0: """batch""", 1: """decoder_sequence"""}
lowerCAmelCase : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase : Optional[int] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCAmelCase : Optional[Any] = self.num_layers
for i in range(lowercase_ ):
lowerCAmelCase : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCAmelCase : str = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowerCAmelCase : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase : Optional[int] = super().outputs
else:
lowerCAmelCase : int = super(lowercase_ , self ).outputs
if self.use_past:
lowerCAmelCase : List[Any] = self.num_layers
for i in range(lowercase_ ):
lowerCAmelCase : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCAmelCase : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _snake_case ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]:
lowerCAmelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Generate decoder inputs
lowerCAmelCase : str = seq_length if not self.use_past else 1
lowerCAmelCase : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : Any = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase : Optional[Any] = dict(**lowercase_ , **lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase : List[Any] = common_inputs["""input_ids"""].shape
lowerCAmelCase : List[str] = common_inputs["""decoder_input_ids"""].shape[1]
lowerCAmelCase : List[str] = self.num_attention_heads
lowerCAmelCase : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase : int = decoder_seq_length + 3
lowerCAmelCase : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase : List[str] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(lowercase_ , lowercase_ )] , dim=1 )
lowerCAmelCase : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase : Optional[Any] = self.num_layers
lowerCAmelCase : Tuple = min(lowercase_ , lowercase_ )
lowerCAmelCase : Dict = max(lowercase_ , lowercase_ ) - min_num_layers
lowerCAmelCase : Any = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowercase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
) )
# TODO: test this.
lowerCAmelCase : Optional[int] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowercase_ , lowercase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) )
return common_inputs
def _snake_case ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]:
lowerCAmelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase : int = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase : List[Any] = seqlen + 2
lowerCAmelCase : List[str] = self.num_layers
lowerCAmelCase : str = self.num_attention_heads
lowerCAmelCase : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase : int = common_inputs["""attention_mask"""].dtype
lowerCAmelCase : List[Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
lowerCAmelCase : str = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ )
]
return common_inputs
def _snake_case ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase : List[Any] = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase : Optional[Any] = tokenizer.num_special_tokens_to_add(lowercase_ )
lowerCAmelCase : List[Any] = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase : List[str] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase : Optional[Any] = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) )
return common_inputs
def _snake_case ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase : Any = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
elif self.task == "causal-lm":
lowerCAmelCase : int = self._generate_dummy_inputs_for_causal_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
else:
lowerCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
return common_inputs
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase : int = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
lowerCAmelCase : Dict = super(lowercase_ , self )._flatten_past_key_values_(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
| 706 |
# Imports
import numpy as np
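# Computes common vegetation indices (NDVI and relatives) from per-band reflectance arrays.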
class _a :
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ) -> List[Any]:
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )

    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ) -> Union[str, Any]:
if red is not None:
lowerCAmelCase : str = red
if green is not None:
lowerCAmelCase : Optional[int] = green
if blue is not None:
lowerCAmelCase : Optional[int] = blue
if red_edge is not None:
lowerCAmelCase : Tuple = red_edge
if nir is not None:
lowerCAmelCase : Union[str, Any] = nir
return True
def _snake_case ( self , lowercase_="" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[int]:
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
lowerCAmelCase : int = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def _snake_case ( self ) -> Dict:
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case ( self ) -> Optional[Any]:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case ( self ) -> List[str]:
return self.nir * (self.red / (self.green**2))
def _snake_case ( self ) -> Tuple:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case ( self ) -> Optional[int]:
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case ( self ) -> List[str]:
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case ( self ) -> int:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case ( self ) -> Optional[Any]:
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case ( self ) -> int:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case ( self ) -> List[str]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case ( self , lowercase_=0.0_8 , lowercase_=1.2_2 , lowercase_=0.0_3 ) -> int:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case ( self ) -> Optional[Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case ( self ) -> Any:
return (self.nir / self.green) - 1
def _snake_case ( self ) -> List[Any]:
return (self.nir / self.redEdge) - 1
def _snake_case ( self ) -> str:
return (self.red - self.blue) / self.red
def _snake_case ( self ) -> Optional[int]:
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case ( self ) -> Optional[Any]:
return self.nir - self.green
def _snake_case ( self ) -> int:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case ( self ) -> Optional[Any]:
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case ( self , lowercase_=0.1_6 ) -> Optional[int]:
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case ( self , lowercase_=0.5 ) -> List[str]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case ( self ) -> Any:
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case ( self , lowercase_=None , lowercase_=None ) -> List[Any]:
return (self.nir - b) / (a * self.red)
def _snake_case ( self ) -> Any:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case ( self ) -> str:
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case ( self ) -> Union[str, Any]:
return self.nir / self.red
def _snake_case ( self ) -> Tuple:
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case ( self ) -> Dict:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case ( self ) -> List[Any]:
return self.green / (self.nir + self.red + self.green)
def _snake_case ( self ) -> int:
return self.nir / (self.nir + self.red + self.green)
def _snake_case ( self ) -> Dict:
return self.red / (self.nir + self.red + self.green)
def _snake_case ( self ) -> List[Any]:
return (self.green - self.red) / (self.green + self.red)
def _snake_case ( self ) -> Optional[int]:
return (self.red - self.green) / (self.red + self.green)
def _snake_case ( self ) -> Tuple:
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case ( self ) -> int:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case ( self ) -> List[str]:
return self.nir / self.red
def _snake_case ( self ) -> int:
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case ( self ) -> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 693 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase : Union[str, Any] ={
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig( PretrainedConfig ):
    model_type = "retribert"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ) -> Tuple:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 707 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
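# Helpers to pull job links, artifact links, and error logs from a GitHub Actions workflow run.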
def get_job_links( workflow_run_id ,token=None ):
    '''Map each job name in a workflow run to its html_url, following API pagination.'''
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url ,headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"""&page={i + 2}""" ,headers=headers ).json()
            job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        return job_links
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = None
if token is not None:
lowerCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[int] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCAmelCase : Optional[int] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : int = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = None
if token is not None:
lowerCAmelCase : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : str = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = result.headers["""Location"""]
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""{artifact_name}.zip""" )
with open(SCREAMING_SNAKE_CASE__ ,"""wb""" ) as fp:
fp.write(response.content )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = []
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[int] = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
lowerCAmelCase : Optional[Any] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCAmelCase : str = line[: line.index(""": """ )]
lowerCAmelCase : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowerCAmelCase : Union[str, Any] = line[len("""FAILED """ ) :]
failed_tests.append(SCREAMING_SNAKE_CASE__ )
elif filename == "job_name.txt":
lowerCAmelCase : Union[str, Any] = line
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE__ )} for `errors` """
F"""and {len(SCREAMING_SNAKE_CASE__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
lowerCAmelCase : Optional[int] = None
if job_name and job_links:
lowerCAmelCase : Optional[int] = job_links.get(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# A list with elements of the form (line of error, error, failed test)
lowerCAmelCase : Union[str, Any] = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )]
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : str = []
lowerCAmelCase : Union[str, Any] = [os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) for p in os.listdir(SCREAMING_SNAKE_CASE__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE__ ,job_links=SCREAMING_SNAKE_CASE__ ) )
return errors
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = Counter()
counter.update([x[1] for x in logs] )
lowerCAmelCase : List[str] = counter.most_common()
lowerCAmelCase : Union[str, Any] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCAmelCase : List[Any] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    lowerCAmelCase : int = dict(sorted(r.items() ,key=lambda item : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCAmelCase : str = test.split("""/""" )[2]
else:
lowerCAmelCase : List[Any] = None
return test
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCAmelCase : int = [x for x in logs if x[2] is not None]
lowerCAmelCase : Optional[Any] = {x[2] for x in logs}
lowerCAmelCase : Dict = {}
for test in tests:
lowerCAmelCase : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCAmelCase : Tuple = counter.most_common()
lowerCAmelCase : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCAmelCase : List[Any] = sum(error_counts.values() )
if n_errors > 0:
lowerCAmelCase : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
    lowerCAmelCase : Any = dict(sorted(r.items() ,key=lambda item : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = """| no. | error | status |"""
lowerCAmelCase : List[Any] = """|-:|:-|:-|"""
lowerCAmelCase : Union[str, Any] = [header, sep]
for error in reduced_by_error:
lowerCAmelCase : List[str] = reduced_by_error[error]["""count"""]
lowerCAmelCase : Any = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : str = """| model | no. of errors | major error | count |"""
lowerCAmelCase : Any = """|-:|-:|-:|-:|"""
lowerCAmelCase : str = [header, sep]
for model in reduced_by_model:
lowerCAmelCase : Any = reduced_by_model[model]["""count"""]
lowerCAmelCase , lowerCAmelCase : Optional[int] = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCAmelCase : Optional[Any] = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase : Dict =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase : Optional[int] =get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase : List[Any] ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase : str =k.find(' / ')
lowerCAmelCase : Any =k[index + len(' / ') :]
lowerCAmelCase : str =v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Any =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase : List[Any] =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase : str =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase : int =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Optional[int] =reduce_by_error(errors)
lowerCAmelCase : Tuple =reduce_by_model(errors)
lowerCAmelCase : Optional[Any] =make_github_table(reduced_by_error)
lowerCAmelCase : Union[str, Any] =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 693 | 0 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
lowerCAmelCase : List[Any] =[
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
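# (editor note) the list above enumerates the names re-exported by the imports
# below, i.e. this module's public API surface.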
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 708 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] ={
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =[
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
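# Editor's note (illustrative): in the upstream pattern, the `_LazyModule` instance
# replaces this module in `sys.modules`, so `AutoformerModel` and friends are only
# imported from `.modeling_autoformer` on first attribute access; importing the
# package stays fast and works even when torch is absent.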
| 693 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
create_state_space_tree(SCREAMING_SNAKE_CASE__ ,[] ,0 )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if index == len(SCREAMING_SNAKE_CASE__ ):
print(SCREAMING_SNAKE_CASE__ )
return
create_state_space_tree(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,index + 1 )
current_subsequence.pop()
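# Illustrative trace (editor note): for the sequence [1, 2] the recursion above
# prints the subsequences in the order [], [2], [1], [1, 2]; at each index the
# element is first skipped, then included.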
if __name__ == "__main__":
lowerCAmelCase : list[Any] =[3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 709 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _a ( snake_case_ ):
_UpperCamelCase: List[str] = "detr"
_UpperCamelCase: Dict = ["past_key_values"]
_UpperCamelCase: Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[int] = config_class.from_dict(lowercase_ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = None, None, None
lowerCAmelCase : Any = use_timm_backbone
lowerCAmelCase : int = backbone_config
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : Optional[Any] = num_queries
lowerCAmelCase : List[str] = d_model
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : Dict = encoder_layers
lowerCAmelCase : str = encoder_attention_heads
lowerCAmelCase : List[Any] = decoder_ffn_dim
lowerCAmelCase : List[Any] = decoder_layers
lowerCAmelCase : Union[str, Any] = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Optional[int] = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Tuple = encoder_layers
lowerCAmelCase : Optional[int] = auxiliary_loss
lowerCAmelCase : List[str] = position_embedding_type
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = use_pretrained_backbone
lowerCAmelCase : List[Any] = dilation
# Hungarian matcher
lowerCAmelCase : Tuple = class_cost
lowerCAmelCase : Union[str, Any] = bbox_cost
lowerCAmelCase : Optional[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : List[Any] = mask_loss_coefficient
lowerCAmelCase : Optional[int] = dice_loss_coefficient
lowerCAmelCase : Tuple = bbox_loss_coefficient
lowerCAmelCase : Dict = giou_loss_coefficient
lowerCAmelCase : str = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> int:
return self.encoder_attention_heads
@property
def _snake_case ( self ) -> int:
return self.d_model
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> Any:
return cls(backbone_config=lowercase_ , **lowercase_ )
def _snake_case ( self ) -> Dict[str, any]:
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : List[str] = self.backbone_config.to_dict()
lowerCAmelCase : List[Any] = self.__class__.model_type
return output
class _a ( snake_case_ ):
_UpperCamelCase: Any = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-5
@property
def _snake_case ( self ) -> int:
return 12
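# (editor note) the three properties above correspond, in the upstream `OnnxConfig`
# API, to the dynamic input axes, the absolute tolerance (1e-5) used when validating
# the exported graph, and the default ONNX opset version (12).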
| 693 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _a ( snake_case_ ):
_UpperCamelCase: List[Any] = "unispeech-sat"
def __init__( self , lowercase_=32 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_="group" , lowercase_="gelu" , lowercase_=(512, 512, 512, 512, 512, 512, 512) , lowercase_=(5, 2, 2, 2, 2, 2, 2) , lowercase_=(10, 3, 3, 3, 3, 2, 2) , lowercase_=False , lowercase_=128 , lowercase_=16 , lowercase_=False , lowercase_=True , lowercase_=0.0_5 , lowercase_=10 , lowercase_=2 , lowercase_=0.0 , lowercase_=10 , lowercase_=0 , lowercase_=320 , lowercase_=2 , lowercase_=0.1 , lowercase_=100 , lowercase_=256 , lowercase_=256 , lowercase_=0.1 , lowercase_="mean" , lowercase_=False , lowercase_=False , lowercase_=256 , lowercase_=(512, 512, 512, 512, 1500) , lowercase_=(5, 3, 3, 1, 1) , lowercase_=(1, 2, 3, 1, 1) , lowercase_=512 , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=504 , **lowercase_ , ) -> Optional[int]:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : Dict = feat_extract_norm
lowerCAmelCase : Tuple = feat_extract_activation
lowerCAmelCase : List[Any] = list(lowercase_ )
lowerCAmelCase : Any = list(lowercase_ )
lowerCAmelCase : int = list(lowercase_ )
lowerCAmelCase : Tuple = conv_bias
lowerCAmelCase : List[str] = num_conv_pos_embeddings
lowerCAmelCase : Optional[int] = num_conv_pos_embedding_groups
lowerCAmelCase : Optional[int] = len(self.conv_dim )
lowerCAmelCase : Union[str, Any] = num_hidden_layers
lowerCAmelCase : int = intermediate_size
lowerCAmelCase : List[str] = hidden_act
lowerCAmelCase : Tuple = num_attention_heads
lowerCAmelCase : Dict = hidden_dropout
lowerCAmelCase : Any = attention_dropout
lowerCAmelCase : Optional[int] = activation_dropout
lowerCAmelCase : Any = feat_proj_dropout
lowerCAmelCase : Optional[Any] = final_dropout
lowerCAmelCase : Union[str, Any] = layerdrop
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Optional[int] = vocab_size
lowerCAmelCase : Union[str, Any] = num_clusters
lowerCAmelCase : Optional[int] = do_stable_layer_norm
lowerCAmelCase : Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase : List[str] = apply_spec_augment
lowerCAmelCase : List[Any] = mask_time_prob
lowerCAmelCase : Optional[Any] = mask_time_length
lowerCAmelCase : Optional[Any] = mask_time_min_masks
lowerCAmelCase : str = mask_feature_prob
lowerCAmelCase : int = mask_feature_length
lowerCAmelCase : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase : List[str] = num_codevectors_per_group
lowerCAmelCase : int = num_codevector_groups
lowerCAmelCase : Dict = contrastive_logits_temperature
lowerCAmelCase : int = feat_quantizer_dropout
lowerCAmelCase : Union[str, Any] = num_negatives
lowerCAmelCase : List[Any] = codevector_dim
lowerCAmelCase : Tuple = proj_codevector_dim
lowerCAmelCase : List[str] = diversity_loss_weight
# ctc loss
lowerCAmelCase : Optional[int] = ctc_loss_reduction
lowerCAmelCase : Any = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase : Union[str, Any] = list(lowercase_ )
lowerCAmelCase : Dict = list(lowercase_ )
lowerCAmelCase : Dict = list(lowercase_ )
lowerCAmelCase : Optional[Any] = xvector_output_dim
@property
def _snake_case ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
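    # (editor note) the property above multiplies the conv strides
    # (5, 2, 2, 2, 2, 2, 2), giving 320: the overall downsampling factor between
    # raw audio samples and feature-extractor frames.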
| 710 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : int =logging.getLogger()
lowerCAmelCase : str =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( snake_case_ ):
def _snake_case ( self , lowercase_ ) -> List[Any]:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : int = {"""source""": """What is love ?""", """target""": """life"""}
lowerCAmelCase : Optional[Any] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase : Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowercase_ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = "pytorch" ) -> str:
lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """output""" )
lowerCAmelCase : Dict = os.path.join(lowercase_ , """data""" )
self._create_dummy_data(data_dir=lowercase_ )
lowerCAmelCase : str = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env() )
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """metrics.json""" )
with open(lowercase_ ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
return result
@require_torch_gpu
def _snake_case ( self ) -> Any:
lowerCAmelCase : Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 693 | 0 |
from math import sqrt
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = 0
for i in range(1 ,int(sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) ):
if n % i == 0 and i != sqrt(SCREAMING_SNAKE_CASE__ ):
total += i + n // i
elif i == sqrt(SCREAMING_SNAKE_CASE__ ):
total += i
return total - n
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ = 1_0_0_0_0 ):
'''simple docstring'''
lowerCAmelCase : Any = sum(
i
for i in range(1 ,SCREAMING_SNAKE_CASE__ )
if sum_of_divisors(sum_of_divisors(SCREAMING_SNAKE_CASE__ ) ) == i and sum_of_divisors(SCREAMING_SNAKE_CASE__ ) != i )
return total
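# Worked example (editor note): 220 and 284 form the smallest amicable pair
# (sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220), while the `!= i`
# guard excludes perfect numbers such as 6 and 28, whose divisor sum equals the
# number itself.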
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase_=267735 , lowercase_=[20000, 40000, 200000] , lowercase_=1024 , lowercase_=1024 , lowercase_=16 , lowercase_=64 , lowercase_=4096 , lowercase_=4 , lowercase_=False , lowercase_=18 , lowercase_=1600 , lowercase_=1000 , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=-1 , lowercase_=True , lowercase_=0.1 , lowercase_=0.0 , lowercase_=True , lowercase_="normal" , lowercase_=0.0_1 , lowercase_=0.0_1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=0 , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Union[str, Any] = []
self.cutoffs.extend(lowercase_ )
if proj_share_all_but_first:
lowerCAmelCase : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[str] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[Any] = d_embed
lowerCAmelCase : Union[str, Any] = d_head
lowerCAmelCase : List[Any] = d_inner
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : List[Any] = pre_lnorm
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Any = mem_len
lowerCAmelCase : Union[str, Any] = same_length
lowerCAmelCase : List[Any] = attn_type
lowerCAmelCase : int = clamp_len
lowerCAmelCase : List[str] = sample_softmax
lowerCAmelCase : Optional[int] = adaptive
lowerCAmelCase : Dict = dropout
lowerCAmelCase : Optional[Any] = dropatt
lowerCAmelCase : List[str] = untie_r
lowerCAmelCase : List[str] = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : str = proj_init_std
lowerCAmelCase : str = init_std
lowerCAmelCase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self , lowercase_ ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 693 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _a ( nn.Module ):
def __init__( self , lowercase_ = 16 , lowercase_ = 88 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = 32 , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = "geglu" , lowercase_ = None , ) -> Optional[Any]:
super().__init__()
lowerCAmelCase : List[Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase_ , attention_head_dim=lowercase_ , in_channels=lowercase_ , num_layers=lowercase_ , dropout=lowercase_ , norm_num_groups=lowercase_ , cross_attention_dim=lowercase_ , attention_bias=lowercase_ , sample_size=lowercase_ , num_vector_embeds=lowercase_ , activation_fn=lowercase_ , num_embeds_ada_norm=lowercase_ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCAmelCase : Optional[int] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCAmelCase : Optional[int] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCAmelCase : Union[str, Any] = [1, 0]
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = True , ) -> str:
lowerCAmelCase : Dict = hidden_states
lowerCAmelCase : Union[str, Any] = []
lowerCAmelCase : Optional[int] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCAmelCase : List[str] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCAmelCase : Tuple = self.transformer_index_for_condition[i]
lowerCAmelCase : List[Any] = self.transformers[transformer_index](
lowercase_ , encoder_hidden_states=lowercase_ , timestep=lowercase_ , cross_attention_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
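            # keep only the residual (output - input); the two experts' residuals are
            # mixed below and the shared input is added back exactly once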
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCAmelCase : Dict = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCAmelCase : List[Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase_ )
| 712 |
import torch
from diffusers import DiffusionPipeline
class _a ( snake_case_ ):
def __init__( self , lowercase_ , lowercase_ ) -> int:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def __call__( self ) -> List[Any]:
lowerCAmelCase : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : Dict = self.unet(lowercase_ , lowercase_ ).sample
lowerCAmelCase : str = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
lowerCAmelCase : Dict = scheduler_output - scheduler_output + torch.ones_like(lowercase_ )
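        # (editor note) `x - x + ones_like(x)` yields a tensor of ones with the same
        # shape, dtype and device as the scheduler output; a handy trick for making
        # a pipeline's output deterministic in tests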
return result
| 693 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
lowerCAmelCase : Union[str, Any] =logging.getLogger(__name__)
@dataclass
class _a :
_UpperCamelCase: Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
_UpperCamelCase: Optional[int] = field(
default=snake_case_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_UpperCamelCase: Optional[int] = field(
default=snake_case_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
_UpperCamelCase: Optional[int] = field(
default=snake_case_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class _a :
_UpperCamelCase: str = field(
default=snake_case_ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCamelCase: str = field(
default=snake_case_ , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "Train language if it is different from the evaluation language."} )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_UpperCamelCase: Optional[bool] = field(
default=snake_case_ , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_UpperCamelCase: str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_xnli""" ,SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase : List[str] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCAmelCase : Any = load_dataset(
"""xnli""" ,model_args.language ,split="""train""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
lowerCAmelCase : Tuple = load_dataset(
"""xnli""" ,model_args.train_language ,split="""train""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase : Union[str, Any] = train_dataset.features["""label"""].names
if training_args.do_eval:
lowerCAmelCase : List[Any] = load_dataset(
"""xnli""" ,model_args.language ,split="""validation""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase : Optional[Any] = eval_dataset.features["""label"""].names
if training_args.do_predict:
lowerCAmelCase : List[Any] = load_dataset(
"""xnli""" ,model_args.language ,split="""test""" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase : Tuple = predict_dataset.features["""label"""].names
# Labels
lowerCAmelCase : Any = len(SCREAMING_SNAKE_CASE__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=SCREAMING_SNAKE_CASE__ ,idalabel={str(SCREAMING_SNAKE_CASE__ ): label for i, label in enumerate(SCREAMING_SNAKE_CASE__ )} ,labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )} ,finetuning_task="""xnli""" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,do_lower_case=model_args.do_lower_case ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCAmelCase : List[Any] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=SCREAMING_SNAKE_CASE__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase : Any = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase : Any = False
def preprocess_function(SCREAMING_SNAKE_CASE__ ):
# Tokenize the texts
return tokenizer(
examples["""premise"""] ,examples["""hypothesis"""] ,padding=SCREAMING_SNAKE_CASE__ ,max_length=data_args.max_seq_length ,truncation=SCREAMING_SNAKE_CASE__ ,)
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCAmelCase : List[Any] = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_train_samples )
lowerCAmelCase : List[str] = train_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
lowerCAmelCase : Union[str, Any] = train_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="""Running tokenizer on train dataset""" ,)
# Log a few random samples from the training set:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE__ ) ) ,3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCAmelCase : Optional[int] = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_eval_samples )
lowerCAmelCase : Union[str, Any] = eval_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
lowerCAmelCase : Union[str, Any] = eval_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="""Running tokenizer on validation dataset""" ,)
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCAmelCase : Optional[int] = min(len(SCREAMING_SNAKE_CASE__ ) ,data_args.max_predict_samples )
lowerCAmelCase : Dict = predict_dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
lowerCAmelCase : Tuple = predict_dataset.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="""Running tokenizer on prediction dataset""" ,)
# Get the metric function
lowerCAmelCase : str = evaluate.load("""xnli""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : int = p.predictions[0] if isinstance(p.predictions ,SCREAMING_SNAKE_CASE__ ) else p.predictions
lowerCAmelCase : str = np.argmax(SCREAMING_SNAKE_CASE__ ,axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE__ ,references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase : Optional[Any] = default_data_collator
elif training_args.fpaa:
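        # (editor note) padding to a multiple of 8 keeps fp16 matmul shapes
        # Tensor-Core friendly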
lowerCAmelCase : Tuple = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=8 )
else:
lowerCAmelCase : Dict = None
# Initialize our Trainer
lowerCAmelCase : List[Any] = Trainer(
model=SCREAMING_SNAKE_CASE__ ,args=SCREAMING_SNAKE_CASE__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=SCREAMING_SNAKE_CASE__ ,tokenizer=SCREAMING_SNAKE_CASE__ ,data_collator=SCREAMING_SNAKE_CASE__ ,)
# Training
if training_args.do_train:
lowerCAmelCase : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase : Optional[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase : str = last_checkpoint
lowerCAmelCase : Union[str, Any] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[Any] = train_result.metrics
lowerCAmelCase : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
lowerCAmelCase : Tuple = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("""train""" ,SCREAMING_SNAKE_CASE__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCAmelCase : Union[str, Any] = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[Any] = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics("""eval""" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("""eval""" ,SCREAMING_SNAKE_CASE__ )
# Prediction
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowerCAmelCase : List[Any] = trainer.predict(SCREAMING_SNAKE_CASE__ ,metric_key_prefix="""predict""" )
lowerCAmelCase : List[Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics("""predict""" ,SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("""predict""" ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE__ ,axis=1 )
lowerCAmelCase : Optional[int] = os.path.join(training_args.output_dir ,"""predictions.txt""" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ) as writer:
writer.write("""index\tprediction\n""" )
for index, item in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
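# Example invocation (editor sketch; flag names assume the upstream dataclass field
# names, e.g. `model_name_or_path`, which `HfArgumentParser` turns into CLI flags):
#
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en --do_train --do_eval \
#       --max_seq_length 128 --output_dir /tmp/debug_xnli/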
| 713 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
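# The three simulation modes exercised below (editor note): a socket-level timeout
# (CONNECTION_TIMES_OUT), a hard connection failure (CONNECTION_FAILS), and setting
# HF_DATASETS_OFFLINE=1, under which the HTTP helpers in `datasets` raise
# immediately, before any network I/O.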
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_head("""https://huggingface.co""" )
| 693 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Dict = str(SCREAMING_SNAKE_CASE__ )
while len(SCREAMING_SNAKE_CASE__ ) != 1:
lowerCAmelCase : Any = [int(SCREAMING_SNAKE_CASE__ ) for i in num_string]
lowerCAmelCase : Optional[int] = 1
for i in range(0 ,len(SCREAMING_SNAKE_CASE__ ) ):
total *= numbers[i]
lowerCAmelCase : int = str(SCREAMING_SNAKE_CASE__ )
steps += 1
return steps
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Union[str, Any] = str(SCREAMING_SNAKE_CASE__ )
while len(SCREAMING_SNAKE_CASE__ ) != 1:
lowerCAmelCase : List[Any] = [int(SCREAMING_SNAKE_CASE__ ) for i in num_string]
lowerCAmelCase : str = 0
for i in range(0 ,len(SCREAMING_SNAKE_CASE__ ) ):
total += numbers[i]
lowerCAmelCase : Union[str, Any] = str(SCREAMING_SNAKE_CASE__ )
steps += 1
return steps
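# Worked examples (editor note): multiplicative_persistence(39) == 3
# (39 -> 27 -> 14 -> 4) and additive_persistence(199) == 3 (199 -> 19 -> 10 -> 1).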
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
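            # replicate the processor's shortest-edge resize: scale so the shorter
            # side equals size["shortest_edge"] while preserving the aspect ratio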
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self ) -> int:
# prepare image and target
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : str = json.loads(f.read() )
lowerCAmelCase : List[Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCAmelCase : List[str] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : List[str] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def _snake_case ( self ) -> int:
# prepare image, target and masks_path
lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : Any = json.loads(f.read() )
lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCAmelCase : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowerCAmelCase : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
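    # Note on the fixtures above (added for clarity): the detection test feeds
    # COCO-style annotations, i.e. {"image_id": int, "annotations": [...]},
    # while the panoptic test feeds {"file_name": str, "image_id": int,
    # "segments_info": [...]} together with a masks_path directory containing
    # the panoptic segmentation PNGs.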
| 693 | 0 |
def sylvester(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
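    # Quick sanity check (added for illustration): the first five terms of
    # Sylvester's sequence are 2, 3, 7, 43, 1807, matching the recursion
    # a(n) = a(n-1)**2 - a(n-1) + 1 implemented above.
    assert [sylvester(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]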
| 715 |
def binary_multiply(a: int, b: int) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
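

if __name__ == "__main__":
    # Quick demonstration (added for illustration): 5 * 7 == 35 and
    # (5 * 7) % 6 == 35 % 6 == 5.
    print(binary_multiply(5, 7))  # 35
    print(binary_mod_multiply(5, 7, 6))  # 5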
| 693 | 0 |
def triangle_number_generator():
    '''simple docstring'''
    for n in range(1, 1_0_0_0_0_0_0):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    '''simple docstring'''
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    '''simple docstring'''
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_0_0)


if __name__ == "__main__":
    print(solution())
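    # Worked check (added for illustration): 28 = 2**2 * 7, so it has
    # (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14 and 28.
    assert count_divisors(28) == 6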
| 716 |
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter dual list with zeros: missing higher-order
        # components are zero.
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
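    # Worked check (added for illustration): f(y) = y**6, so f''(y) = 30 * y**4
    # and differentiate(f, 9, 2) == 30 * 9**4 == 196830.
    assert differentiate(f, 9, 2) == 30 * 9**4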
| 693 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
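

# Worked example (added for illustration): with the default scale factor of 8,
# a 768x768 request gives 768 // 8**2 == 12 with no remainder, so the latent
# grid is 12 * 8 == 96 per side.
assert downscale_height_and_width(768, 768) == (96, 96)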
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 717 |
from ..utils import DummyObject, requires_backends


class _a(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 693 | 0 |
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter dual list with zeros: missing higher-order
        # components are zero.
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 693 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowerCAmelCase : Union[str, Any] ={1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self ) -> Union[str, Any]:
return len(self.data )
    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    '''simple docstring'''
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] ,std=[0.12221994, 0.12145835, 0.14380469] ,),
] )
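

# Minimal usage sketch (added for illustration; the file name "train.jsonl"
# and the tokenizer checkpoint are assumptions, not part of the original
# script):
#
#   from torch.utils.data import DataLoader
#   from transformers import BertTokenizer
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   labels = get_mmimdb_labels()
#   dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=512)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)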
| 719 |
def lucas_lehmer_test(p: int) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
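    # Expected output (for reference): 2**7 - 1 = 127 is prime, so the first
    # call prints True; 2**11 - 1 = 2047 = 23 * 89 is composite, so the
    # second prints False.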
| 693 | 0 |
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    '''simple docstring'''
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
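    # Worked check (added for illustration): for 5x**2 + 6x + 1 the
    # discriminant is 6**2 - 4 * 5 * 1 = 16, so the roots are
    # (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0.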
| 720 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 693 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Any =logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize=True,
        size=None,
        crop_pct=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image,
        size,
        crop_pct,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        crop_pct=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
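

# Minimal usage sketch (added for illustration; the image file name is an
# assumption, not part of the library code):
#
#   from PIL import Image
#   processor = ConvNextImageProcessor(size={"shortest_edge": 384})
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   # size >= 384 takes the "warping" branch, so pixel_values has shape
#   # (1, 3, 384, 384).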
| 721 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
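

# Example (added for illustration): a valid `rope_scaling` value carries
# exactly a "type" and a "factor" entry, e.g.
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})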
| 693 | 0 |
'''simple docstring'''
def solution() -> int:
    '''simple docstring'''
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[9_9])
        * int(constant[9_9_9])
        * int(constant[9_9_9_9])
        * int(constant[9_9_9_9_9])
        * int(constant[9_9_9_9_9_9])
    )
if __name__ == "__main__":
print(solution())
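    # For reference: the concatenation starts 123456789101112..., so
    # constant[0] == "1" (the 1st digit) and constant[9] == "1" (the 10th
    # digit, i.e. the first digit of 10).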
| 700 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
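

# Example (added for illustration): with the defaults embed_dim=96 and
# depths=[2, 2, 6, 2], the channel dimension after the last stage is
# 96 * 2**3 == 768, so SwinConfig().hidden_size == 768.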
| 693 | 0 |
def nor_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return int(input_1 == input_2 == 0)


def main() -> None:
    '''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"""| 0 | 0 | {nor_gate(0 ,0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 ,1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 ,0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 ,1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 701 |
lowerCAmelCase : str ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 693 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Tuple =get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase : Union[str, Any] =250_004
lowerCAmelCase : Optional[Any] =250_020
@require_sentencepiece
@require_tokenizers
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[int] = MBartaaTokenizer
_UpperCamelCase: Union[str, Any] = MBartaaTokenizerFast
_UpperCamelCase: int = True
_UpperCamelCase: Tuple = True
def _snake_case ( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : List[str] = MBartaaTokenizer(lowercase_ , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self ) -> str:
lowerCAmelCase : Dict = """<s>"""
lowerCAmelCase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(lowercase_ ) , 1054 )
def _snake_case ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def _snake_case ( self ) -> Any:
lowerCAmelCase : List[str] = MBartaaTokenizer(lowercase_ , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=lowercase_ )
lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowercase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
lowerCAmelCase : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def _snake_case ( self ) -> Union[str, Any]:
# fmt: off
lowerCAmelCase : int = {"""input_ids""": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def _snake_case ( self ) -> List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase : Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowerCAmelCase : int = tempfile.mkdtemp()
lowerCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(lowercase_ )
lowerCAmelCase : Optional[int] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowerCAmelCase : Optional[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase : str = tokenizer_r.from_pretrained(lowercase_ )
lowerCAmelCase : str = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase : str = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase : Tuple = tokenizer_r.from_pretrained(lowercase_ )
lowerCAmelCase : str = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase : List[Any] = tempfile.mkdtemp()
lowerCAmelCase : List[str] = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
lowerCAmelCase : int = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(lowercase_ )
lowerCAmelCase : Tuple = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase ):
_UpperCamelCase: Optional[Any] = "facebook/mbart-large-50-one-to-many-mmt"
_UpperCamelCase: Optional[int] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
_UpperCamelCase: Optional[int] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
_UpperCamelCase: List[str] = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2]
@classmethod
def _snake_case ( cls ) -> Optional[int]:
lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
lowerCAmelCase : List[str] = 1
return cls
def _snake_case ( self ) -> List[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250038 )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
def _snake_case ( self ) -> int:
self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
self.assertEqual(result , expected_romanian )
self.assertNotIn(self.tokenizer.eos_token , result )
def _snake_case ( self ) -> List[str]:
src_text = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , str )
desired_max_length = 10
ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
self.assertEqual(ids[0] , EN_CODE )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(ids ) , desired_max_length )
def _snake_case ( self ) -> Optional[Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250053, 250001] )
def _snake_case ( self ) -> Union[str, Any]:
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname )
new_tok = MBartaaTokenizer.from_pretrained(tmpdirname )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def _snake_case ( self ) -> Dict:
batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _snake_case ( self ) -> Optional[Any]:
batch = self.tokenizer(
    self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
src_ids = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , src_ids )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _snake_case ( self ) -> Optional[Any]:
batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
targets = self.tokenizer(
    text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="""pt""" )
labels = targets["""input_ids"""]
batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _snake_case ( self ) -> Tuple:
inputs = self.tokenizer._build_translation_inputs(
    """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
    nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250004, 62, 3034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
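# --- Hedged addition (not upstream test code): a comment-only sketch of the
# translation workflow these tests exercise. The checkpoint name and language
# codes mirror the constants above; everything else is illustrative.
#
#   tokenizer = MBartaaTokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # input_ids start with the en_XX language code and end with </s> (id 2),
#   # matching the `expected_src_tokens` checked above.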
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
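# Hedged note (an addition): with the `_LazyModule` pattern above, importing the
# package is cheap; backend-specific symbols are only resolved on first attribute
# access, e.g.:
#
#   from transformers.models.roformer import RoFormerConfig   # no torch import yet
#   from transformers.models.roformer import RoFormerModel    # torch is imported here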
| 693 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self , unet , scheduler ) -> Dict:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 2000 , generator: Optional[torch.Generator] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
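# Hedged usage sketch (an addition, not library code). The checkpoint name is an
# assumption; any score-SDE-VE checkpoint with a compatible UNet should work:
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]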
| 703 |
def nor_gate(input_1: int , input_2: int ) -> int:
    """Return 1 only when both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0 )


def main() -> None:
    """Print the truth table of the NOR gate."""
    print("""Truth Table of NOR Gate:""" )
    print("""| Input 1 | Input 2 | Output |""" )
    print(F"""| 0 | 0 | {nor_gate(0 ,0 )} |""" )
    print(F"""| 0 | 1 | {nor_gate(0 ,1 )} |""" )
    print(F"""| 1 | 0 | {nor_gate(1 ,0 )} |""" )
    print(F"""| 1 | 1 | {nor_gate(1 ,1 )} |""" )
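# Hedged addition: NOR is functionally complete, so the other basic gates can be
# derived from nor_gate alone. The helper names below are our own.
def not_gate(input_1: int ) -> int:
    """NOT(a) == NOR(a, a)."""
    return nor_gate(input_1 , input_1 )


def or_gate(input_1: int , input_2: int ) -> int:
    """OR(a, b) == NOT(NOR(a, b))."""
    return not_gate(nor_gate(input_1 , input_2 ) )


def and_gate(input_1: int , input_2: int ) -> int:
    """AND(a, b) == NOR(NOT(a), NOT(b))."""
    return nor_gate(not_gate(input_1 ) , not_gate(input_2 ) )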
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 693 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
pipeline_class = CycleDiffusionPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ):
    torch.manual_seed(0 )
    unet = UNetaDConditionModel(
        block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
    scheduler = DDIMScheduler(
        beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
    torch.manual_seed(0 )
    vae = AutoencoderKL(
        block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
    torch.manual_seed(0 )
    text_encoder_config = CLIPTextConfig(
        bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
    text_encoder = CLIPTextModel(text_encoder_config )
    tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
    components = {
        """unet""": unet,
        """scheduler""": scheduler,
        """vae""": vae,
        """text_encoder""": text_encoder,
        """tokenizer""": tokenizer,
        """safety_checker""": None,
        """feature_extractor""": None,
    }
    return components
def get_dummy_inputs( self , device , seed=0 ):
    image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
    image = image / 2 + 0.5
    if str(device ).startswith("""mps""" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
        """prompt""": """An astronaut riding an elephant""",
        """source_prompt""": """An astronaut riding a horse""",
        """image""": image,
        """generator""": generator,
        """num_inference_steps""": 2,
        """eta""": 0.1,
        """strength""": 0.8,
        """guidance_scale""": 3,
        """source_guidance_scale""": 1,
        """output_type""": """numpy""",
    }
    return inputs
def _snake_case ( self ) -> List[str]:
device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = CycleDiffusionPipeline(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
output = pipe(**inputs )
images = output.images
image_slice = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _snake_case ( self ) -> Optional[int]:
components = self.get_dummy_components()
for name, module in components.items():
    if hasattr(module , """half""" ):
        components[name] = module.half()
pipe = CycleDiffusionPipeline(**components )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(torch_device )
output = pipe(**inputs )
images = output.images
image_slice = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _snake_case ( self ) -> Union[str, Any]:
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _snake_case ( self ) -> Tuple:
return super().test_inference_batch_single_identical()
@skip_mps
def _snake_case ( self ) -> int:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _snake_case ( self ) -> Any:
return super().test_save_load_optional_components()
@skip_mps
def _snake_case ( self ) -> Union[str, Any]:
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def _snake_case ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> Union[str, Any]:
init_image = load_image(
    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
    """/cycle-diffusion/black_colored_car.png""" )
expected_image = load_numpy(
    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
init_image = init_image.resize((512, 512) )
model_id = """CompVis/stable-diffusion-v1-4"""
scheduler = DDIMScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
pipe = CycleDiffusionPipeline.from_pretrained(
    model_id , scheduler=scheduler , safety_checker=None , torch_dtype=torch.float16 , revision="""fp16""" )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
source_prompt = """A black colored car"""
prompt = """A blue colored car"""
generator = torch.manual_seed(0 )
output = pipe(
    prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="""np""" , )
image = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def _snake_case ( self ) -> List[str]:
init_image = load_image(
    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
    """/cycle-diffusion/black_colored_car.png""" )
expected_image = load_numpy(
    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
init_image = init_image.resize((512, 512) )
model_id = """CompVis/stable-diffusion-v1-4"""
scheduler = DDIMScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
pipe = CycleDiffusionPipeline.from_pretrained(model_id , scheduler=scheduler , safety_checker=None )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
source_prompt = """A black colored car"""
prompt = """A blue colored car"""
generator = torch.manual_seed(0 )
output = pipe(
    prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="""np""" , )
image = output.images
assert np.abs(image - expected_image ).max() < 2e-2
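# Hedged usage sketch (an addition): the image-editing loop the slow tests above
# cover, in plain form. Model id and prompts mirror those tests.
#
#   scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
#   pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
#   edited = pipe(prompt="A blue colored car", source_prompt="A black colored car",
#                 image=init_image, strength=0.85, guidance_scale=3, source_guidance_scale=1).images[0]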
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from stdin (platform dependent)."""
    if os.name == "nt":
        import msvcrt

        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP["""esc"""] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    """Get a character from the keyboard and return the key code."""
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
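# Hedged addition (our own demo, not part of the original module): a tiny
# interactive loop built on the helpers above. Run in a real terminal; 'q' quits.
def _demo_key_loop():
    while True:
        key = get_character()
        if key == "q":
            break
        code = ord(key ) if isinstance(key , str ) else key
        if code in (KEYMAP["up"], KEYMAP["down"], KEYMAP["left"], KEYMAP["right"]):
            print("arrow key" )
        else:
            print(repr(key ) )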
| 693 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_encoder: bool = field(default=False , metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
    max_source_length: Optional[int] = field(
        default=1024 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None , metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None , metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None , metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split , metrics , output_dir ):
    """Log and save metrics for a given split."""
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , SCREAMING_SNAKE_CASE__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
    model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
extra_model_params = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
    if getattr(training_args , p , None ):
        assert hasattr(config , p ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
        setattr(config , p , getattr(training_args , p ) )
tokenizer = AutoTokenizer.from_pretrained(
    model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
model = AutoModelForSeqaSeqLM.from_pretrained(
    model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(model , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
    data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
    assert (
        data_args.tgt_lang is not None and data_args.src_lang is not None
    ), "mBart requires --tgt_lang and --src_lang"
    if isinstance(tokenizer , MBartTokenizer ):
        model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
    else:
        model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
    freeze_embeds(model )
if model_args.freeze_encoder:
    freeze_params(model.get_encoder() )
    assert_all_frozen(model.get_encoder() )
dataset_class = SeqaSeqDataset
# Get datasets
train_dataset = (
    dataset_class(
        tokenizer , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
    if training_args.do_train
    else None
)
eval_dataset = (
    dataset_class(
        tokenizer , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
    if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
    else None
)
test_dataset = (
    dataset_class(
        tokenizer , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
    if training_args.do_predict
    else None
)
# Initialize our Trainer
compute_metrics_fn = (
    build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
)
trainer = SeqaSeqTrainer(
    model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
        tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
all_metrics = {}
# Training
if training_args.do_train:
    logger.info("""*** Train ***""" )
    train_result = trainer.train(
        model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
    metrics = train_result.metrics
    metrics["""train_n_objs"""] = data_args.n_train
    trainer.save_model()  # this also saves the tokenizer
    if trainer.is_world_process_zero():
        handle_metrics("""train""" , metrics , training_args.output_dir )
        all_metrics.update(metrics )
        # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
        trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
    logger.info("""*** Evaluate ***""" )
    metrics = trainer.evaluate(metric_key_prefix="""val""" )
    metrics["""val_n_objs"""] = data_args.n_val
    metrics["""val_loss"""] = round(metrics["""val_loss"""] , 4 )
    if trainer.is_world_process_zero():
        handle_metrics("""val""" , metrics , training_args.output_dir )
        all_metrics.update(metrics )
if training_args.do_predict:
    logger.info("""*** Predict ***""" )
    test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="""test""" )
    metrics = test_output.metrics
    metrics["""test_n_objs"""] = data_args.n_test
    if trainer.is_world_process_zero():
        metrics["""test_loss"""] = round(metrics["""test_loss"""] , 4 )
        handle_metrics("""test""" , metrics , training_args.output_dir )
        all_metrics.update(metrics )
        if training_args.predict_with_generate:
            test_preds = tokenizer.batch_decode(
                test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            test_preds = lmap(str.strip , test_preds )
            write_txt_file(test_preds , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
    save_json(all_metrics , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
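# Hedged addition: an illustrative invocation. The flag names follow the
# dataclasses above; the script name and paths are placeholders.
#
#   python finetune_trainer.py \
#     --model_name_or_path facebook/mbart-large-cc25 \
#     --data_dir ./wmt_en_ro --task translation --src_lang en_XX --tgt_lang ro_RO \
#     --output_dir ./outputs --do_train --do_eval --predict_with_generate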
| 706 |
# Imports
import numpy as np
class _a :
def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
    self.set_matrices(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )

def set_matrices( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
    if red is not None:
        self.red = red
    if green is not None:
        self.green = green
    if blue is not None:
        self.blue = blue
    if red_edge is not None:
        self.redEdge = red_edge
    if nir is not None:
        self.nir = nir
    return True
def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
    self.set_matrices(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
    funcs = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def _snake_case ( self ) -> Dict:
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case ( self ) -> Optional[Any]:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case ( self ) -> List[str]:
return self.nir * (self.red / (self.green**2))
def _snake_case ( self ) -> Tuple:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case ( self ) -> Optional[int]:
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case ( self ) -> List[str]:
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case ( self ) -> int:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case ( self ) -> Optional[Any]:
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case ( self ) -> int:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case ( self ) -> List[str]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case ( self , lowercase_=0.0_8 , lowercase_=1.2_2 , lowercase_=0.0_3 ) -> int:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case ( self ) -> Optional[Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case ( self ) -> Any:
return (self.nir / self.green) - 1
def _snake_case ( self ) -> List[Any]:
return (self.nir / self.redEdge) - 1
def _snake_case ( self ) -> str:
return (self.red - self.blue) / self.red
def _snake_case ( self ) -> Optional[int]:
ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case ( self ) -> Optional[Any]:
return self.nir - self.green
def _snake_case ( self ) -> int:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case ( self ) -> Optional[Any]:
n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case ( self , lowercase_=0.1_6 ) -> Optional[int]:
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case ( self , lowercase_=0.5 ) -> List[str]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case ( self ) -> Any:
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case ( self , lowercase_=None , lowercase_=None ) -> List[Any]:
return (self.nir - b) / (a * self.red)
def _snake_case ( self ) -> Any:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case ( self ) -> str:
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case ( self ) -> Union[str, Any]:
return self.nir / self.red
def _snake_case ( self ) -> Tuple:
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case ( self ) -> Dict:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case ( self ) -> List[Any]:
return self.green / (self.nir + self.red + self.green)
def _snake_case ( self ) -> int:
return self.nir / (self.nir + self.red + self.green)
def _snake_case ( self ) -> Dict:
return self.red / (self.nir + self.red + self.green)
def _snake_case ( self ) -> List[Any]:
return (self.green - self.red) / (self.green + self.red)
def _snake_case ( self ) -> Optional[int]:
return (self.red - self.green) / (self.red + self.green)
def _snake_case ( self ) -> Tuple:
max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case ( self ) -> int:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case ( self ) -> List[str]:
return self.nir / self.red
def _snake_case ( self ) -> int:
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case ( self ) -> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
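# Hedged addition: the NDVI formula from above, demonstrated directly on
# synthetic bands (shapes and values are arbitrary assumptions).
if __name__ == "__main__":
    rng = np.random.default_rng(0 )
    nir = rng.random((4, 4) )
    red = rng.random((4, 4) )
    print((nir - red) / (nir + red) )  # NDVI, as computed by the class above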
| 693 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int ):
    """Estimate pi by sampling random points in the unit square."""

    def is_in_circle(x: float , y: float ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F"""The estimated value of pi is {pi_estimate}""" )
    print(F"""The known value of pi is {pi}""" )
    print(F"""The total error is {abs(pi - pi_estimate )}""" )


def area_under_curve_estimator(iterations: int , function_to_integrate: Callable[[float], float] , min_value: float = 0.0 , max_value: float = 1.0 , ) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int , min_value: float = 0.0 , max_value: float = 1.0 ):
    """Check the estimator against the known area under y=x."""

    def identity_function(x: float ) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("""******************""" )
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {expected_value}""" )
    print(F"""Total error is {abs(estimated_value - expected_value )}""" )
    print("""******************""" )


def pi_estimator_using_area_under_curve(iterations: int ):
    """Estimate pi as twice the area under the quarter circle y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float ) -> float:
        return sqrt(4.0 - x * x )

    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print("""******************""" )
    print("""Estimating pi using area_under_curve_estimator""" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {pi}""" )
    print(F"""Total error is {abs(estimated_value - pi )}""" )
    print("""******************""" )
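# Hedged addition (our own example, not part of the original module): reusing
# the estimator above for another integrand.
def gaussian_integral_estimator(iterations: int ) -> float:
    """Estimate the integral of exp(-x^2) on [0, 1]; the exact value is about 0.7468."""
    from math import exp

    return area_under_curve_estimator(iterations , lambda x: exp(-x * x ) , 0.0 , 1.0 )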
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id , token=None ):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"""&page={i + 2}""" , headers=headers ).json()
            job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        return job_links
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
def get_artifacts_links(workflow_run_id , token=None ):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"""
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"""&page={i + 2}""" , headers=headers ).json()
            artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
        return artifacts
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
def download_artifact(artifact_name , artifact_url , output_dir , token ):
    """Download a GitHub Actions artifact and save it as a zip file."""
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["""Location"""]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , F"""{artifact_name}.zip""" )
    with open(file_path , """wb""" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact(artifact_zip_path , job_links=None ):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("""UTF-8""" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(""": """ )]
                                    error = line[line.index(""": """ ) + len(""": """ ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
                                # `test` is the test method that failed
                                test = line[len("""FAILED """ ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            F"""`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` """
            F"""and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
            """ problem.""" )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors(artifact_dir , job_links=None ):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(""".zip""" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error(logs , error_filter=None ):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def get_model(test ):
    """Get the model name from a test method path."""
    test = test.split("""::""" )[0]
    if test.startswith("""tests/models/""" ):
        test = test.split("""/""" )[2]
    else:
        test = None
    return test
def reduce_by_model(logs , error_filter=None ):
    """Count the occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"""count""": n_errors, """errors""": error_counts}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def make_github_table(reduced_by_error ):
    """Render the per-error counts as a GitHub markdown table."""
    header = """| no. | error | status |"""
    sep = """|-:|:-|:-|"""
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["""count"""]
        line = F"""| {count} | {error[:100]} | |"""
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model(reduced_by_model ):
    """Render the per-model error counts as a GitHub markdown table."""
    header = """| model | no. of errors | major error | count |"""
    sep = """|-:|-:|-:|-:|"""
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["""count"""]
        error, _count = list(reduced_by_model[model]["""errors"""].items() )[0]
        line = F"""| {model} | {count} | {error[:60]} | {_count} |"""
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_job_links = get_job_links(args.workflow_run_id, token=args.token)
job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
    for k, v in _job_links.items():
        # This is how GitHub actions combine job names.
        if " / " in k:
            index = k.find(' / ')
            k = k[index + len(' / ') :]
        job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
    json.dump(job_links, fp, ensure_ascii=False, indent=4)
artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
    json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
    download_artifact(name, url, args.output_dir, args.token)
    # Be gentle to GitHub
    time.sleep(1)
errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
most_common = counter.most_common(30)
for item in most_common:
    print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
    json.dump(errors, fp, ensure_ascii=False, indent=4)
reduced_by_error = reduce_by_error(errors)
reduced_by_model = reduce_by_model(errors)
s1 = make_github_table(reduced_by_error)
s2 = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
    fp.write(s1)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
    fp.write(s2)
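# Hedged addition: an illustrative run (the script name and values are placeholders):
#
#   python get_ci_error_statistics.py --workflow_run_id 1234567890 \
#     --output_dir ./ci_artifacts --token $GITHUB_TOKEN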
| 693 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_swa'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
lowerCAmelCase : List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
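# Illustrative sketch of the lazy-import mechanism used above: _LazyModule
# defers the heavy tokenizer import until first attribute access, and a
# module-level __getattr__ (PEP 562) is the same idea in miniature. The
# example below is hypothetical and uses os.path as a stand-in heavy import:
import importlib
import types
_lazy_demo = types.ModuleType("lazy_demo")
def _lazy_getattr(name):
    if name == "path":
        return importlib.import_module("os.path")  # deferred until accessed
    raise AttributeError(name)
_lazy_demo.__getattr__ = _lazy_getattr
assert _lazy_demo.path.join("a", "b")  # triggers the deferred import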
| 708 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] ={
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =[
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowerCAmelCase : List[Any] =None
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : Tuple ='▁'
lowerCAmelCase : Optional[Any] ={'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase : str ={
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
lowerCAmelCase : Tuple ={
'google/pegasus-xsum': 512,
}
class _a ( snake_case_ ):
_UpperCamelCase: int = VOCAB_FILES_NAMES
_UpperCamelCase: Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase: Dict = PegasusTokenizer
_UpperCamelCase: Any = ["input_ids", "attention_mask"]
def __init__( self , lowercase_=None , lowercase_=None , lowercase_="<pad>" , lowercase_="</s>" , lowercase_="<unk>" , lowercase_="<mask_2>" , lowercase_="<mask_1>" , lowercase_=None , lowercase_=103 , **lowercase_ , ) -> Union[str, Any]:
lowerCAmelCase : List[str] = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError(
f"""additional_special_tokens should be of type {type(lowercase_ )}, but is"""
f""" {type(lowercase_ )}""" )
lowerCAmelCase : int = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(lowercase_ ) , self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
lowerCAmelCase : str = additional_special_tokens_extended
else:
lowerCAmelCase : str = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , pad_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
lowerCAmelCase : Tuple = vocab_file
lowerCAmelCase : Tuple = False if not self.vocab_file else True
def _snake_case ( self , lowercase_ ) -> List[str]:
lowerCAmelCase : List[str] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def _snake_case ( self , lowercase_ , lowercase_ = None , lowercase_ = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _snake_case ( self , lowercase_ , lowercase_=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _snake_case ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase : Dict = os.path.join(
lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
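# Illustrative check using only the defaults visible above: with offset=103
# and mask_token_sent "<mask_1>", the no-user-tokens branch builds
# ["<mask_1>", "<unk_2>", ..., "<unk_102>"], i.e. offset - 1 entries.
_offset = 103
_tokens = ["<mask_1>"] + [f"<unk_{i}>" for i in range(2, _offset)]
assert len(_tokens) == _offset - 1
assert _tokens[1] == "<unk_2>" and _tokens[-1] == "<unk_102>"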
| 709 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _a ( snake_case_ ):
_UpperCamelCase: List[str] = "detr"
_UpperCamelCase: Dict = ["past_key_values"]
_UpperCamelCase: Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[int] = config_class.from_dict(lowercase_ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = None, None, None
lowerCAmelCase : Any = use_timm_backbone
lowerCAmelCase : int = backbone_config
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : Optional[Any] = num_queries
lowerCAmelCase : List[str] = d_model
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : Dict = encoder_layers
lowerCAmelCase : str = encoder_attention_heads
lowerCAmelCase : List[Any] = decoder_ffn_dim
lowerCAmelCase : List[Any] = decoder_layers
lowerCAmelCase : Union[str, Any] = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Optional[int] = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Tuple = encoder_layers
lowerCAmelCase : Optional[int] = auxiliary_loss
lowerCAmelCase : List[str] = position_embedding_type
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = use_pretrained_backbone
lowerCAmelCase : List[Any] = dilation
# Hungarian matcher
lowerCAmelCase : Tuple = class_cost
lowerCAmelCase : Union[str, Any] = bbox_cost
lowerCAmelCase : Optional[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : List[Any] = mask_loss_coefficient
lowerCAmelCase : Optional[int] = dice_loss_coefficient
lowerCAmelCase : Tuple = bbox_loss_coefficient
lowerCAmelCase : Dict = giou_loss_coefficient
lowerCAmelCase : str = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> int:
return self.encoder_attention_heads
@property
def _snake_case ( self ) -> int:
return self.d_model
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> Any:
return cls(backbone_config=lowercase_ , **lowercase_ )
def _snake_case ( self ) -> Dict[str, any]:
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : List[str] = self.backbone_config.to_dict()
lowerCAmelCase : List[Any] = self.__class__.model_type
return output
class _a ( snake_case_ ):
_UpperCamelCase: Any = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-5
@property
def _snake_case ( self ) -> int:
return 12
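# Illustrative sketch (assumes transformers is installed; left commented so
# the file stays import-free): the attribute_map above lets generic names
# alias DETR-specific ones, so hidden_size resolves to d_model.
#
# from transformers import DetrConfig
# cfg = DetrConfig(d_model=128)
# assert cfg.hidden_size == 128  # routed through attribute_map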
| 693 | 0 |
lowerCAmelCase : Tuple ='\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase : Optional[int] =[{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase : Dict ={
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 710 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : int =logging.getLogger()
lowerCAmelCase : str =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( snake_case_ ):
def _snake_case ( self , lowercase_ ) -> List[Any]:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : int = {"""source""": """What is love ?""", """target""": """life"""}
lowerCAmelCase : Optional[Any] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase : Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowercase_ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = "pytorch" ) -> str:
lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """output""" )
lowerCAmelCase : Dict = os.path.join(lowercase_ , """data""" )
self._create_dummy_data(data_dir=lowercase_ )
lowerCAmelCase : str = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env() )
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """metrics.json""" )
with open(lowercase_ ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
return result
@require_torch_gpu
def _snake_case ( self ) -> Any:
lowerCAmelCase : Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
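# Illustrative restatement of the CLI-building step above: the multi-line
# f-string becomes argv tokens via str.split(), one token per flag or value,
# before being handed to the subprocess.
_demo_cli = """
    --train_batch_size 2 \
    --eval_batch_size 1 \
""".split()
assert _demo_cli == ["--train_batch_size", "2", "--eval_batch_size", "1"]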
| 693 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return "".join([hex(SCREAMING_SNAKE_CASE__ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE__ )] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if (len(SCREAMING_SNAKE_CASE__ ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE__ ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] ,1_6 ) for i in range(0 ,len(SCREAMING_SNAKE_CASE__ ) ,2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
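# Illustrative round trip of the base16 pair above, restated inline because
# both helpers share a generated name:
_encoded = "".join(hex(b)[2:].zfill(2).upper() for b in b"Hello")
assert _encoded == "48656C6C6F"
_decoded = bytes(int(_encoded[i] + _encoded[i + 1], 16) for i in range(0, len(_encoded), 2))
assert _decoded == b"Hello"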
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase_=267735 , lowercase_=[20000, 40000, 200000] , lowercase_=1024 , lowercase_=1024 , lowercase_=16 , lowercase_=64 , lowercase_=4096 , lowercase_=4 , lowercase_=False , lowercase_=18 , lowercase_=1600 , lowercase_=1000 , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=-1 , lowercase_=True , lowercase_=0.1 , lowercase_=0.0 , lowercase_=True , lowercase_="normal" , lowercase_=0.0_1 , lowercase_=0.0_1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=0 , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Union[str, Any] = []
self.cutoffs.extend(lowercase_ )
if proj_share_all_but_first:
lowerCAmelCase : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[str] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[Any] = d_embed
lowerCAmelCase : Union[str, Any] = d_head
lowerCAmelCase : List[Any] = d_inner
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : List[Any] = pre_lnorm
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Any = mem_len
lowerCAmelCase : Union[str, Any] = same_length
lowerCAmelCase : List[Any] = attn_type
lowerCAmelCase : int = clamp_len
lowerCAmelCase : List[str] = sample_softmax
lowerCAmelCase : Optional[int] = adaptive
lowerCAmelCase : Dict = dropout
lowerCAmelCase : Optional[Any] = dropatt
lowerCAmelCase : List[str] = untie_r
lowerCAmelCase : List[str] = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : str = proj_init_std
lowerCAmelCase : str = init_std
lowerCAmelCase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self , lowercase_ ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 693 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
@staticmethod
def _snake_case ( *lowercase_ , **lowercase_ ) -> Any:
pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
@require_torch
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Optional[int] = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
lowerCAmelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCAmelCase : Tuple = image_classifier(lowercase_ , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(lowercase_ ) , [
[{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """b"""}, {"""score""": 0.3_3_3, """label""": """c"""}],
[{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """c"""}, {"""score""": 0.3_3_3, """label""": """b"""}],
] , )
lowerCAmelCase : Any = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase_ ) , [
[
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
],
] , )
@require_tf
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Optional[int] = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
lowerCAmelCase : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCAmelCase : Any = image_classifier(lowercase_ , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(lowercase_ ) , [{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """b"""}, {"""score""": 0.3_3_3, """label""": """c"""}] , )
lowerCAmelCase : Optional[int] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase_ ) , [
[
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
{"""score""": 0.3_3_3, """label""": ANY(lowercase_ )},
],
] , )
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[Any] = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCAmelCase : Dict = image_classifier(lowercase_ , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(lowercase_ ) , [
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
] , )
lowerCAmelCase : Optional[int] = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase_ ) , [
[
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : int = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCAmelCase : Tuple = image_classifier(lowercase_ , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(lowercase_ ) , [
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
] , )
lowerCAmelCase : Any = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase_ ) , [
[
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
],
]
* 5 , )
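# Illustrative sketch of the output contract exercised above: the zero-shot
# pipeline yields one {"score", "label"} dict per candidate label, sorted by
# descending score (values taken from the slow test expectations):
_demo_preds = [
    {"score": 0.511, "label": "remote"},
    {"score": 0.485, "label": "cat"},
    {"score": 0.004, "label": "plane"},
]
assert _demo_preds == sorted(_demo_preds, key=lambda p: p["score"], reverse=True)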
| 712 |
import torch
from diffusers import DiffusionPipeline
class _a ( snake_case_ ):
def __init__( self , lowercase_ , lowercase_ ) -> int:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def __call__( self ) -> List[Any]:
lowerCAmelCase : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : Dict = self.unet(lowercase_ , lowercase_ ).sample
lowerCAmelCase : str = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
lowerCAmelCase : Dict = scheduler_output - scheduler_output + torch.ones_like(lowercase_ )
return result
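# Note on __call__ above: scheduler_output - scheduler_output cancels to
# zero, so the pipeline deterministically returns torch.ones_like(...); it
# exercises pipeline plumbing rather than producing real samples.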
| 693 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase_=267735 , lowercase_=[20000, 40000, 200000] , lowercase_=1024 , lowercase_=1024 , lowercase_=16 , lowercase_=64 , lowercase_=4096 , lowercase_=4 , lowercase_=False , lowercase_=18 , lowercase_=1600 , lowercase_=1000 , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=-1 , lowercase_=True , lowercase_=0.1 , lowercase_=0.0 , lowercase_=True , lowercase_="normal" , lowercase_=0.0_1 , lowercase_=0.0_1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=0 , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Union[str, Any] = []
self.cutoffs.extend(lowercase_ )
if proj_share_all_but_first:
lowerCAmelCase : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[str] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[Any] = d_embed
lowerCAmelCase : Union[str, Any] = d_head
lowerCAmelCase : List[Any] = d_inner
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : List[Any] = pre_lnorm
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Any = mem_len
lowerCAmelCase : Union[str, Any] = same_length
lowerCAmelCase : List[Any] = attn_type
lowerCAmelCase : int = clamp_len
lowerCAmelCase : List[str] = sample_softmax
lowerCAmelCase : Optional[int] = adaptive
lowerCAmelCase : Dict = dropout
lowerCAmelCase : Optional[Any] = dropatt
lowerCAmelCase : List[str] = untie_r
lowerCAmelCase : List[str] = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : str = proj_init_std
lowerCAmelCase : str = init_std
lowerCAmelCase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self , lowercase_ ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 713 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_head("""https://huggingface.co""" )
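# Illustrative, hypothetical stand-in for offline(): the simulation modes
# above amount to making network calls fail fast. Patching requests at the
# Session level approximates the CONNECTION_FAILS mode:
from unittest.mock import patch
import requests
def _refuse(*args, **kwargs):
    raise requests.exceptions.ConnectionError("offline simulation")
with patch("requests.Session.request", _refuse):
    try:
        requests.get("https://huggingface.co")
    except requests.exceptions.ConnectionError:
        pass
    else:
        raise AssertionError("expected the patched request to fail")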
| 693 | 0 |
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase : Any ='docs/source/en/_toctree.yml'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Any = defaultdict(SCREAMING_SNAKE_CASE__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowerCAmelCase : Optional[int] = [key for key, value in counts.items() if value > 1]
lowerCAmelCase : str = []
for duplicate_key in duplicates:
lowerCAmelCase : str = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
if len(SCREAMING_SNAKE_CASE__ ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
# Sort
return sorted(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__ : s["title"].lower() )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE__ ,encoding="""utf-8""" ) as f:
lowerCAmelCase : Any = yaml.safe_load(f.read() )
# Get to the API doc
lowerCAmelCase : Dict = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCAmelCase : List[str] = content[api_idx]["""sections"""]
# Then to the model doc
lowerCAmelCase : Optional[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowerCAmelCase : Union[str, Any] = api_doc[model_idx]["""sections"""]
lowerCAmelCase : Optional[Any] = [(idx, section) for idx, section in enumerate(SCREAMING_SNAKE_CASE__ ) if """sections""" in section]
lowerCAmelCase : Optional[Any] = False
for idx, modality_doc in modalities_docs:
lowerCAmelCase : int = modality_doc["""sections"""]
lowerCAmelCase : Tuple = clean_model_doc_toc(SCREAMING_SNAKE_CASE__ )
if old_modality_doc != new_modality_doc:
lowerCAmelCase : int = True
if overwrite:
lowerCAmelCase : int = new_modality_doc
if diff:
if overwrite:
lowerCAmelCase : Optional[Any] = model_doc
lowerCAmelCase : Any = api_doc
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(yaml.dump(SCREAMING_SNAKE_CASE__ ,allow_unicode=SCREAMING_SNAKE_CASE__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
lowerCAmelCase : Optional[int] =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCAmelCase : List[str] =parser.parse_args()
check_model_doc(args.fix_and_overwrite)
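# Illustrative sketch of the dedup-and-sort rule enforced above: duplicate
# "local" keys with identical titles collapse to one entry and the result is
# sorted case-insensitively by title.
_toy_doc = [
    {"local": "bert", "title": "BERT"},
    {"local": "albert", "title": "ALBERT"},
    {"local": "albert", "title": "ALBERT"},
]
_cleaned = sorted({d["local"]: d for d in _toy_doc}.values(), key=lambda d: d["title"].lower())
assert [d["local"] for d in _cleaned] == ["albert", "bert"]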
| 714 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self ) -> int:
# prepare image and target
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : str = json.loads(f.read() )
lowerCAmelCase : List[Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCAmelCase : List[str] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : List[str] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def _snake_case ( self ) -> int:
# prepare image, target and masks_path
lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : Any = json.loads(f.read() )
lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCAmelCase : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowerCAmelCase : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
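# Illustrative restatement of the shortest-edge resize rule the expected
# values above encode: the shorter side is scaled to size["shortest_edge"]
# and the longer side keeps the aspect ratio (the "longest_edge" cap is
# omitted here for brevity).
def _resize_shortest_edge(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge
assert _resize_shortest_edge(36, 18) == (36, 18)  # portrait: width -> 18
assert _resize_shortest_edge(18, 36) == (18, 36)  # landscape: height -> 18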
| 693 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Tuple ={
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
while b > 0:
if b & 1:
lowerCAmelCase : Optional[int] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
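# Readable restatement of the shift-and-add ("Russian peasant")
# multiplication above: add `a` whenever the low bit of `b` is set, then
# double `a` and shift `b` right.
def _binary_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
assert _binary_multiply(13, 7) == 91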
| 693 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : int = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=SCREAMING_SNAKE_CASE__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=SCREAMING_SNAKE_CASE__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=SCREAMING_SNAKE_CASE__ )
return parser.parse_args()
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : str = parse_args()
# Import training_script as a module.
lowerCAmelCase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowerCAmelCase : Any = script_fpath.stem
lowerCAmelCase : List[Any] = importlib.import_module(SCREAMING_SNAKE_CASE__ )
# Patch sys.argv
lowerCAmelCase : Optional[Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
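# Illustrative sketch (hypothetical values) of the sys.argv patching above:
# the launcher rewrites argv so the imported training module parses the
# arguments it expects, as if invoked directly from the command line.
import sys
_saved_argv = sys.argv[:]
sys.argv = ["training_script.py", "--tpu_num_cores", "8"]
assert sys.argv[1:] == ["--tpu_num_cores", "8"]
sys.argv = _saved_argv  # restore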
| 716 |
from math import factorial
class _a :
def __init__( self , lowercase_ , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : Union[str, Any] = real
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Tuple = [1] * rank
else:
lowerCAmelCase : Any = rank
def __repr__( self ) -> int:
return (
f"""{self.real}+"""
f"""{'+'.join(str(lowercase_ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowercase_ )
def __add__( self , lowercase_ ) -> Tuple:
if not isinstance(lowercase_ , lowercase_ ):
return Dual(self.real + other , self.duals )
lowerCAmelCase : int = self.duals.copy()
lowerCAmelCase : Tuple = other.duals.copy()
if len(lowercase_ ) > len(lowercase_ ):
o_dual.extend([1] * (len(lowercase_ ) - len(lowercase_ )) )
elif len(lowercase_ ) < len(lowercase_ ):
s_dual.extend([1] * (len(lowercase_ ) - len(lowercase_ )) )
lowerCAmelCase : List[Any] = []
for i in range(len(lowercase_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowercase_ )
_UpperCamelCase: List[Any] = __add__
def __sub__( self , lowercase_ ) -> Union[str, Any]:
return self + other * -1
def __mul__( self , lowercase_ ) -> Optional[int]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Union[str, Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowercase_ )
lowerCAmelCase : Union[str, Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowercase_ )
_UpperCamelCase: str = __mul__
def __truediv__( self , lowercase_ ) -> Optional[Any]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[str] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowercase_ )
raise ValueError
def __floordiv__( self , lowercase_ ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowercase_ )
raise ValueError
def __pow__( self , lowercase_ ) -> str:
if n < 0 or isinstance(lowercase_ , lowercase_ ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
lowerCAmelCase : int = self
for _ in range(n - 1 ):
x *= self
return x
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not callable(SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,(float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires an int as input for order""" )
lowerCAmelCase : List[Any] = Dual(SCREAMING_SNAKE_CASE__ ,1 )
lowerCAmelCase : Optional[Any] = func(SCREAMING_SNAKE_CASE__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
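# Worked check of the forward-mode differentiation implemented above, using
# plain tuples for first-order dual numbers (the Dual class generalizes this
# to higher orders): for f(x) = x**3 at x = 2, f(2 + e) = 8 + 12e, so
# f'(2) = 12.
def _dmul(p, q):
    # (a + b*e)(c + d*e) = a*c + (a*d + b*c)*e, since e**2 = 0
    return (p[0] * q[0], p[0] * q[1] + p[1] * q[0])
_x = (2.0, 1.0)  # seed: dx/dx = 1
_y = _dmul(_dmul(_x, _x), _x)  # x * x * x
assert _y == (8.0, 12.0)  # f(2) = 8, f'(2) = 3 * 2**2 = 12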
| 693 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=18 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , ) -> int:
lowerCAmelCase : Any = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase : int = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Tuple = do_resize
lowerCAmelCase : Dict = size
lowerCAmelCase : Dict = apply_ocr
def _snake_case ( self ) -> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Any = LayoutLMvaImageProcessingTester(self )
@property
def _snake_case ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> str:
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """apply_ocr""" ) )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> int:
# Initialize image_processing
lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , lowercase_ )
self.assertIsInstance(encoding.boxes , lowercase_ )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _snake_case ( self ) -> Tuple:
# Initialize image_processing
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase : Optional[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase : Optional[int] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562,
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
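
# Usage sketch for the image processor exercised above (illustrative, not part
# of the test suite). Assumptions, to be explicit: the class the tests refer to
# as `LayoutLMvaImageProcessor` corresponds to `LayoutLMv3ImageProcessor` in
# upstream transformers, a Tesseract install is available so OCR can run, and
# "document.png" is a hypothetical input scan.
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=True)
document = Image.open("document.png").convert("RGB")
features = processor(document, return_tensors="pt")
print(features.pixel_values.shape)  # torch.Size([1, 3, 224, 224]) at the default size
print(features.words[0][:5])        # first few OCR'd words of the page
print(features.boxes[0][:5])        # matching boxes, normalized to a 0-1000 grid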
from ..utils import DummyObject, requires_backends


class _a(metaclass=DummyObject):
    # placeholder dummy object: importing it is always safe, but instantiating
    # it without the keras_nlp backend raises an informative ImportError
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
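
# For context, a minimal sketch of the dummy-object mechanism used above. The
# real `DummyObject` metaclass and `requires_backends` helper live in the
# library's utils and are more featureful; this self-contained approximation
# (all names here are illustrative) only shows the idea: the placeholder class
# imports cleanly, but any real use without the backend raises ImportError.
import importlib.util


def sketch_requires_backends(obj, backends):
    # collect every backend that cannot be located in the current environment
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [backend for backend in backends if importlib.util.find_spec(backend) is None]
    if missing:
        raise ImportError(f"{name} requires backends that are not installed: {missing}")


class SketchDummyObject(type):
    # metaclass: guards attribute access on the placeholder class itself
    def __getattr__(cls, key):
        if key.startswith("_"):
            raise AttributeError(key)
        sketch_requires_backends(cls, cls._backends)
        raise AttributeError(key)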
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
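
# For context, a minimal sketch of the lazy-module pattern relied on above.
# The real `transformers.utils._LazyModule` is more featureful (module specs,
# extra objects, failed-import diagnostics); this illustrative approximation
# shows the core idea: the package module is replaced in sys.modules by an
# object whose attribute access imports the real submodule on first use.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # advertise the exported names up front so dir() keeps working
        self.__all__ = [attr for attrs in import_structure.values() for attr in attrs]

    def __getattr__(self, attr):
        for submodule, exported in self._import_structure.items():
            if attr in exported:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                value = getattr(module, attr)
                setattr(self, attr, value)  # cache so __getattr__ fires only once
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")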
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
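
# Usage sketch for the pipeline exported above (illustrative). Assumptions, to
# be explicit: the `diffusers` library provides this module, the
# "kakaobrain/karlo-v1-alpha" checkpoint is a publicly hosted unCLIP model,
# and a CUDA device is available for the fp16 weights.
import torch
from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")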