import unittest
from transformers import (
    MODEL_FOR_OBJECT_DETECTION_MAPPING,
    AutoFeatureExtractor,
    AutoModelForObjectDetection,
    ObjectDetectionPipeline,
    is_vision_available,
    pipeline,
)
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_pytesseract,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Fallback stub so the references below do not fail when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the headline counters from the worldometers coronavirus page."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        """Index of the left child of the node stored at `idx`."""
        return idx * 2

    def right(self, idx: int) -> int:
        """Index of the right child of the node stored at `idx`."""
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every element in positions [a, b] in O(log n) using lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
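
# ---------------------------------------------------------------------------------------
# Quick check (added for illustration): with the driver values above, the range maxima
# can be verified by hand: max over positions 4..6 is 7, over 7..11 is 14, and after
# update(1, 1, size, 1, 3, 111) every element in positions 1..3 becomes 111, so the
# global maximum is 111. `_sanity_check` is this appendix's own helper, not part of the
# original module.

def _sanity_check() -> None:
    a = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    tree = SegmentTree(len(a))
    tree.build(1, 1, len(a), a)
    assert tree.query(1, 1, len(a), 4, 6) == 7
    assert tree.query(1, 1, len(a), 7, 11) == 14
    tree.update(1, 1, len(a), 1, 3, 111)
    assert tree.query(1, 1, len(a), 1, 15) == 111


_sanity_check()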
def set_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` set to 1."""
    return number | (1 << position)

def clear_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` cleared to 0."""
    return number & ~(1 << position)

def flip_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` toggled."""
    return number ^ (1 << position)

def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1

def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
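
# ---------------------------------------------------------------------------------------
# Worked examples (added for illustration): the helpers above treat `position` as a
# zero-based bit index counted from the least significant bit. For 0b1101 (decimal 13):

assert set_bit(0b1101, 1) == 0b1111    # 15: bit 1 switched on
assert clear_bit(0b1101, 2) == 0b1001  # 9:  bit 2 switched off
assert flip_bit(0b1101, 0) == 0b1100   # 12: bit 0 toggled
assert is_bit_set(0b1101, 3) is True   # bit 3 is the leading 1
assert get_bit(0b1101, 1) == 0         # bit 1 of 13 is 0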
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Create and save a basic cluster config for the local machine, detecting available accelerators."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=(
            "Whether or not to use mixed precision training. "
            "Choose between FP16 and BF16 (bfloat16) training. "
            "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
        ),
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
    import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs

        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
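
# ---------------------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the tests above): outside of tests a
# RagRetriever is normally loaded from a pretrained RAG checkpoint instead of assembled
# by hand. A minimal sketch, assuming the public "facebook/rag-token-nq" checkpoint and
# its bundled dummy index as shown in the transformers documentation.

from transformers import RagRetriever as _RagRetriever

_retriever = _RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)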
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
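
# ---------------------------------------------------------------------------------------
# Usage sketch (added for illustration): a minimal way to drive the processor above,
# assuming the public "CIDAS/clipseg-rd64-refined" checkpoint; the prompts and the blank
# test image are made up for this example.

from PIL import Image

_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
_image = Image.new("RGB", (352, 352))
_inputs = _processor(text=["a cat", "a remote control"], images=[_image, _image], return_tensors="pt")
print(sorted(_inputs.keys()))  # attention_mask, input_ids, pixel_values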
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
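
# ---------------------------------------------------------------------------------------
# Worked example (added for illustration): `truncate_or_pad` behaves like a fixed-width
# window, right-padding short sequences with the pad token and truncating long ones. The
# block size of 3 below is arbitrary.

assert truncate_or_pad([1, 2], 3, 0) == [1, 2, 0]
assert truncate_or_pad([1, 2, 3, 4], 3, 0) == [1, 2, 3]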
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, or numpy arrays if numpify=True, or PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_a )
_SCREAMING_SNAKE_CASE =3
@property
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 691 | 1 |
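# Minimal sketch of the shape contract all of the tests above assert: an image
# processor returns pixel_values of shape (batch, channels, crop_h, crop_w).
# The concrete numbers here (7, 3, 18x18) are illustrative assumptions.
def expected_pixel_values_shape(batch_size, num_channels, crop_size):
    return (batch_size, num_channels, crop_size['''height'''], crop_size['''width'''])
assert expected_pixel_values_shape(7, 3, {'''height''': 18, '''width''': 18}) == (7, 3, 18, 18)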
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __UpperCamelCase ( self : Optional[Any] , _a : Any , _a : Any , _a : int = CHRF.CHAR_ORDER , _a : int = CHRF.WORD_ORDER , _a : int = CHRF.BETA , _a : bool = False , _a : bool = False , _a : bool = False , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =len(references[0] )
if any(len(_a ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_SCREAMING_SNAKE_CASE =[[refs[i] for refs in references] for i in range(_a )]
_SCREAMING_SNAKE_CASE =CHRF(_a , _a , _a , _a , _a , _a )
_SCREAMING_SNAKE_CASE =sb_chrf.corpus_score(_a , _a )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
} | 691 |
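# Sketch of the reference transposition the _compute method above performs
# before calling sacrebleu: per-prediction reference lists become
# per-position reference streams.
predictions = ['''hello there''', '''general kenobi''']
references = [['''hi there''', '''hello there'''], ['''general kenobi''', '''general kenobi''']]
references_per_prediction = len(references[0])
transformed = [[refs[i] for refs in references] for i in range(references_per_prediction)]
assert transformed == [['''hi there''', '''general kenobi'''], ['''hello there''', '''general kenobi''']]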
def and_gate(input_a, input_b):
    # output is 1 only when neither input is 0
    return int((input_a, input_b).count(0) == 0)
def test_and_gate():
assert and_gate(0 ,0) == 0
assert and_gate(0 ,1) == 0
assert and_gate(1 ,0) == 0
assert and_gate(1 ,1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 691 | 1 |
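# The tuple-count trick above generalizes to any arity: an AND over n inputs
# is simply "no zeros present". A small sketch, assuming 0/1 integer inputs.
def multi_and(*inputs):
    return int(inputs.count(0) == 0)
assert multi_and(1, 1, 1) == 1
assert multi_and(1, 0, 1) == 0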
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    # a valid IPv4 address has exactly four numeric octets, each in the range 0-255
    octets = [int(i) for i in ip_va_address.split('''.''') if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""") | 691 |
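# A few illustrative checks for the validator above: out-of-range, short, and
# non-numeric inputs all fail.
assert is_ip_va_address_valid('''192.168.0.1''')
assert not is_ip_va_address_valid('''256.1.1.1''')  # octet above 255
assert not is_ip_va_address_valid('''1.2.3''')  # too few octets
assert not is_ip_va_address_valid('''1.2.3.a''')  # non-numeric octet is dropped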
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = '''sshleifer/mar_enro_6_3_student'''
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_a , )
_SCREAMING_SNAKE_CASE =f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1 | 691 | 1 |
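# Sketch of the bash-script templating both test classes above rely on:
# placeholders in the checked-in shell script are substituted before argv is
# rebuilt for argparse. The script text and values here are illustrative.
bash_script = '''finetune.py --data_dir $ENRO_DIR --train_batch_size=$BS'''
env_vars_to_replace = {'''$ENRO_DIR''': '''/tmp/wmt_en_ro''', '''$BS''': 16}
for k, v in env_vars_to_replace.items():
    bash_script = bash_script.replace(k, str(v))
assert bash_script == '''finetune.py --data_dir /tmp/wmt_en_ro --train_batch_size=16'''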
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 7_01
a = 10_00_00_00_00
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p) | 691 |
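# Worked check of the Fermat inverse used above, reusing the definitions from
# this snippet: for prime p and b coprime to p, b**(p-2) is the modular
# inverse of b, so multiplying them gives 1 mod p.
inv_b = binary_exponentiation(b, p - 2, p)
assert (b * inv_b) % p == 1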
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = 0
UpperCAmelCase = False
UpperCAmelCase = 3.0
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_SCREAMING_SNAKE_CASE =Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_SCREAMING_SNAKE_CASE =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_00, 2_00)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 691 | 1 |
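# Sketch of the to_kwargs contract the first test above exercises: only
# fields that differ from their dataclass defaults are emitted. This emulates
# the behaviour rather than importing accelerate's KwargsHandler.
from dataclasses import dataclass, fields
@dataclass
class TinyHandler:
    a: int = 0
    b: bool = False
    def to_kwargs(self):
        # drop values equal to the declared defaults
        return {f.name: getattr(self, f.name) for f in fields(self) if getattr(self, f.name) != f.default}
assert TinyHandler().to_kwargs() == {}
assert TinyHandler(a=2).to_kwargs() == {'''a''': 2}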
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class A__ :
UpperCAmelCase = PegasusConfig
UpperCAmelCase = {}
UpperCAmelCase = "gelu"
def __init__( self : List[str] , _a : Optional[Any] , _a : Tuple=13 , _a : Tuple=7 , _a : List[Any]=True , _a : List[Any]=False , _a : List[str]=99 , _a : Any=32 , _a : Any=2 , _a : Union[str, Any]=4 , _a : List[str]=37 , _a : int=0.1 , _a : Union[str, Any]=0.1 , _a : Any=40 , _a : Dict=2 , _a : int=1 , _a : List[str]=0 , ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =seq_length
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =eos_token_id
_SCREAMING_SNAKE_CASE =pad_token_id
_SCREAMING_SNAKE_CASE =bos_token_id
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE =tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE =prepare_pegasus_inputs_dict(_a , _a , _a )
return config, inputs_dict
def __UpperCamelCase ( self : Optional[Any] , _a : int , _a : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TFPegasusModel(config=_a ).get_decoder()
_SCREAMING_SNAKE_CASE =inputs_dict['''input_ids''']
_SCREAMING_SNAKE_CASE =input_ids[:1, :]
_SCREAMING_SNAKE_CASE =inputs_dict['''attention_mask'''][:1, :]
_SCREAMING_SNAKE_CASE =inputs_dict['''head_mask''']
_SCREAMING_SNAKE_CASE =1
# first forward pass
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE =ids_tensor((self.batch_size, 3) , config.vocab_size )
        _SCREAMING_SNAKE_CASE =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_SCREAMING_SNAKE_CASE =tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a )[0]
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE =output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1E-3 )
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1,)
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TFPegasusModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_sentencepiece
@require_tokenizers
@require_tf
class A__ ( unittest.TestCase ):
UpperCAmelCase = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
UpperCAmelCase = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase = "google/pegasus-xsum"
@cached_property
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __UpperCamelCase ( self : str , **_a : Tuple ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.translate_src_text(**_a )
assert self.expected_text == generated_words
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.tokenizer(self.src_text , **_a , padding=_a , return_tensors='''tf''' )
_SCREAMING_SNAKE_CASE =self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_a , )
_SCREAMING_SNAKE_CASE =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )
return generated_words
@slow
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
self._assert_generated_batch_equal_expected() | 691 |
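import numpy as np
# Sketch of the cache-equivalence check in check_decoder_model_past_large_inputs
# above: rather than comparing full logits, a random hidden-dim slice of the
# last three positions must match within rtol=1e-3. Shapes here are illustrative.
output_from_no_past = np.random.rand(1, 10, 32)
output_from_past = output_from_no_past[:, -3:, :].copy()
random_slice_idx = np.random.randint(0, 32)
np.testing.assert_allclose(
    output_from_no_past[:, -3:, random_slice_idx],
    output_from_past[:, :, random_slice_idx],
    rtol=1e-3,
)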
class A__ :
def __init__( self : List[str] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE ={}
def __UpperCamelCase ( self : Any , _a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if vertex not in self.adjacency:
_SCREAMING_SNAKE_CASE ={}
self.num_vertices += 1
def __UpperCamelCase ( self : Optional[int] , _a : Tuple , _a : Tuple , _a : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.add_vertex(_a )
self.add_vertex(_a )
if head == tail:
return
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for i in range(len(_a ) ):
_SCREAMING_SNAKE_CASE =list(edges[i] )
edges.sort(key=lambda _a : e[2] )
for i in range(len(_a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_SCREAMING_SNAKE_CASE =edges[i][2] + 1
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __str__( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
_SCREAMING_SNAKE_CASE =self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip('''\n''' )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( _a : List[str]=None , _a : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Graph()
if vertices is None:
_SCREAMING_SNAKE_CASE =[]
if edges is None:
_SCREAMING_SNAKE_CASE =[]
for vertex in vertices:
g.add_vertex(_a )
for edge in edges:
g.add_edge(*_a )
return g
class A__ :
def __init__( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE ={}
def __len__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return len(self.parent )
def __UpperCamelCase ( self : Dict , _a : Optional[Any] ) -> int:
"""simple docstring"""
if item in self.parent:
return self.find(_a )
_SCREAMING_SNAKE_CASE =item
_SCREAMING_SNAKE_CASE =0
return item
def __UpperCamelCase ( self : str , _a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(_a )
if item != self.parent[item]:
_SCREAMING_SNAKE_CASE =self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , _a : Optional[int] , _a : List[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.find(_a )
_SCREAMING_SNAKE_CASE =self.find(_a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] < self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_SCREAMING_SNAKE_CASE =roota
return roota
return None
@staticmethod
def __UpperCamelCase ( _a : int ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =graph.num_vertices
_SCREAMING_SNAKE_CASE =Graph.UnionFind()
_SCREAMING_SNAKE_CASE =[]
while num_components > 1:
_SCREAMING_SNAKE_CASE ={}
for vertex in graph.get_vertices():
_SCREAMING_SNAKE_CASE =-1
_SCREAMING_SNAKE_CASE =graph.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =union_find.find(_a )
_SCREAMING_SNAKE_CASE =union_find.find(_a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cheap_edge[vertex]
if union_find.find(_a ) != union_find.find(_a ):
union_find.union(_a , _a )
mst_edges.append(cheap_edge[vertex] )
_SCREAMING_SNAKE_CASE =num_components - 1
_SCREAMING_SNAKE_CASE =Graph.build(edges=_a )
return mst | 691 | 1 |
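# Standalone sketch of the union-by-rank rule the UnionFind class above
# implements: the shallower tree is attached under the deeper one, and equal
# ranks bump the winner's rank. Path compression mirrors the class's find.
parent, rank = {}, {}
def find(x):
    parent.setdefault(x, x)
    rank.setdefault(x, 0)
    if parent[x] != x:
        parent[x] = find(parent[x])  # path compression
    return parent[x]
def union(x, y):
    rx, ry = find(x), find(y)
    if rx == ry:
        return
    if rank[rx] < rank[ry]:
        rx, ry = ry, rx
    parent[ry] = rx
    if rank[rx] == rank[ry]:
        rank[rx] += 1
union(1, 2)
union(2, 3)
assert find(1) == find(3)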
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = (DDPMParallelScheduler,)
def __UpperCamelCase ( self : Dict , **_a : List[str] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_a )
return config
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
self.check_over_configs(thresholding=_a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __UpperCamelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_a )
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**_a )
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =self.dummy_model()
_SCREAMING_SNAKE_CASE =self.dummy_sample_deter
_SCREAMING_SNAKE_CASE =self.dummy_sample_deter + 0.1
_SCREAMING_SNAKE_CASE =self.dummy_sample_deter - 0.1
_SCREAMING_SNAKE_CASE =samplea.shape[0]
_SCREAMING_SNAKE_CASE =torch.stack([samplea, samplea, samplea] , dim=0 )
_SCREAMING_SNAKE_CASE =torch.arange(_a )[0:3, None].repeat(1 , _a )
_SCREAMING_SNAKE_CASE =model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_SCREAMING_SNAKE_CASE =scheduler.batch_step_no_noise(_a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_SCREAMING_SNAKE_CASE =torch.sum(torch.abs(_a ) )
_SCREAMING_SNAKE_CASE =torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 11_53.18_33 ) < 1E-2
assert abs(result_mean.item() - 0.50_05 ) < 1E-3
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**_a )
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =self.dummy_model()
_SCREAMING_SNAKE_CASE =self.dummy_sample_deter
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
_SCREAMING_SNAKE_CASE =model(_a , _a )
# 2. predict previous mean of sample x_t-1
_SCREAMING_SNAKE_CASE =scheduler.step(_a , _a , _a , generator=_a ).prev_sample
_SCREAMING_SNAKE_CASE =pred_prev_sample
_SCREAMING_SNAKE_CASE =torch.sum(torch.abs(_a ) )
_SCREAMING_SNAKE_CASE =torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2
assert abs(result_mean.item() - 0.33_72 ) < 1E-3
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config(prediction_type='''v_prediction''' )
_SCREAMING_SNAKE_CASE =scheduler_class(**_a )
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =self.dummy_model()
_SCREAMING_SNAKE_CASE =self.dummy_sample_deter
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
_SCREAMING_SNAKE_CASE =model(_a , _a )
# 2. predict previous mean of sample x_t-1
_SCREAMING_SNAKE_CASE =scheduler.step(_a , _a , _a , generator=_a ).prev_sample
_SCREAMING_SNAKE_CASE =pred_prev_sample
_SCREAMING_SNAKE_CASE =torch.sum(torch.abs(_a ) )
_SCREAMING_SNAKE_CASE =torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2
assert abs(result_mean.item() - 0.26_31 ) < 1E-3
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**_a )
_SCREAMING_SNAKE_CASE =[100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_a )
_SCREAMING_SNAKE_CASE =scheduler.timesteps
for i, timestep in enumerate(_a ):
if i == len(_a ) - 1:
_SCREAMING_SNAKE_CASE =-1
else:
_SCREAMING_SNAKE_CASE =timesteps[i + 1]
_SCREAMING_SNAKE_CASE =scheduler.previous_timestep(_a )
_SCREAMING_SNAKE_CASE =prev_t.item()
self.assertEqual(_a , _a )
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**_a )
_SCREAMING_SNAKE_CASE =[100, 87, 50, 51, 0]
with self.assertRaises(_a , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**_a )
_SCREAMING_SNAKE_CASE =[100, 87, 50, 1, 0]
_SCREAMING_SNAKE_CASE =len(_a )
with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**_a )
_SCREAMING_SNAKE_CASE =[scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=_a ) | 691 |
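# Sketch of the custom-timesteps rules the last three tests above pin down:
# strictly descending order, not combined with num_inference_steps, and
# starting below num_train_timesteps. The helper name here is hypothetical.
def validate_custom_timesteps(timesteps, num_train_timesteps=1000):
    if any(timesteps[i] <= timesteps[i + 1] for i in range(len(timesteps) - 1)):
        raise ValueError('''`custom_timesteps` must be in descending order.''')
    if timesteps[0] >= num_train_timesteps:
        raise ValueError('''`timesteps` must start before `self.config.train_timesteps`.''')
validate_custom_timesteps([100, 87, 50, 1, 0])  # passes silently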
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fp16 ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args)
# Set seed
set_seed(training_args.seed)
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir,)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,)
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir,)
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,)
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,)
        if training_args.do_eval
        else None
    )
    def compute_metrics(p) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')
        if trainer.is_world_master():
            with open(output_eval_file, '''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key, value in result.items():
                    logger.info(''' %s = %s''', key, value)
                    writer.write('''%s = %s\n''' % (key, value))
            results.update(result)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 691 | 1 |
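import numpy as np
# Tiny check of the simple_accuracy metric defined above: three of four
# argmax predictions match their labels.
preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
assert simple_accuracy(preds, labels) == 0.75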
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    # extend the current path with u, then walk every unvisited edge out of u
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    # count vertices of odd degree: 0 -> Euler circuit, 2 -> Euler path, else neither
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('''graph is not Eulerian''')
        print('''no path''')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('''graph has a Euler path''')
    if check == 1:
        print('''graph has a Euler cycle''')
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main() | 691 |
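# Quick sanity check of the odd-degree classification above: a triangle has
# no odd-degree vertices, so it is reported as an Euler circuit (code 1).
assert check_circuit_or_path({1: [2, 3], 2: [1, 3], 3: [1, 2]}, 10) == (1, -1)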
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 7_01
a = 10_00_00_00_00
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p) | 691 | 1 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_24,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_40,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 14_08,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_60,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 15_36,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_00,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 17_92,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_80,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 20_48,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_56,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 23_04,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_28,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 25_60,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_00,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.4785_3944, 0.473_2864, 0.4743_4163], do_center_crop=False,)
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split('''_''')[0].split('''block''')[1] for v in original_param_names if v.startswith('''block''')]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight'''))
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight'''))
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias'''))
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean'''))
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var'''))
for b in block_names:
_SCREAMING_SNAKE_CASE =block_name_mapping[b]
rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
rename_keys.append(
(f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean"))
rename_keys.append(
(f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var"))
rename_keys.append(
(f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight"))
rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
rename_keys.append(
(f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean"))
rename_keys.append(
(f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var"))
rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
rename_keys.append(
(f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight"))
rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
rename_keys.append(
(f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean"))
rename_keys.append(
(f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var"))
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight'''))
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight'''))
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias'''))
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean'''))
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var'''))
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]

        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
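# --- Hedged usage sketch (not part of the original script) --------------------
# Reloads a converted checkpoint and runs it on the COCO test image. The dump
# folder "hf_model" matches the argparse default above; everything else in this
# helper is an illustrative assumption, not a confirmed part of the conversion
# workflow.
def sanity_check_converted_model(dump_folder="hf_model"):
    model = EfficientNetForImageClassification.from_pretrained(dump_folder).eval()
    processor = EfficientNetImageProcessor.from_pretrained(dump_folder)
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print("predicted ImageNet class:", logits.argmax(-1).item())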
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        self.model_tester = BitModelTester(self)
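# Hedged note: a test module like the one above is normally collected with
# pytest, e.g. `python -m pytest tests/models/bit/test_modeling_bit.py`; the
# path is an assumption based on the transformers repo layout, and the
# @slow-marked integration check additionally requires RUN_SLOW=1 in the
# environment.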
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset dict into features and target arrays
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
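# --- Hedged usage sketch (not in the original file) ---------------------------
# Reuses the helpers above for a single prediction; the variable names in this
# helper are illustrative.
def predict_first_sample() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    classifier = xgboost(features, targets)
    predicted = classifier.predict(features[:1])[0]
    print("predicted species:", iris["target_names"][predicted])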
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__( self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved

        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
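# --- Hedged usage sketch (not in the original file) ---------------------------
# Composes a RagConfig from two sub-model configurations; the DPR and BART
# checkpoint names below are illustrative assumptions.
def build_example_rag_config() -> RagConfig:
    from transformers import AutoConfig

    question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator_config = AutoConfig.from_pretrained("facebook/bart-large")
    return RagConfig.from_question_encoder_generator_configs(
        question_encoder_config, generator_config, n_docs=5, index_name="exact"
    )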
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
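# --- Hedged example (not in the original file) --------------------------------
# Deterministic self-check of patience_sort; the sample values are illustrative.
def demo_patience_sort() -> None:
    sample = [1, 9, 5, 21, 17, 6]
    assert patience_sort(sample) == [1, 5, 6, 9, 17, 21]
    print("patience_sort demo ok:", sample)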
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
        self.wait()
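# Hedged note: a manim scene like the one above is normally rendered from the
# shell, e.g. `manim -pql big_model_inference.py Stage5`; both the module name
# and the scene name here are assumptions, since the original class name was
# lost in this dump.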
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
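# Hedged usage note: the script expects a GITHUB_TOKEN environment variable
# with repo scope, e.g.
#   GITHUB_TOKEN=<token> python scripts/stale.py
# The scripts/stale.py path is an assumption based on the transformers repo
# layout; the token value is deliberately left as a placeholder.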
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 691 | 1 |
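# --- Hedged usage sketch (not in the original file) ---------------------------
# Round-trips a short utterance through the fast tokenizer; downloading the
# checkpoint named in the vocab map above is assumed to be possible.
def demo_blenderbot_tokenizer() -> None:
    tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    encoded = tokenizer(" Hello, how are you?")
    print(encoded.input_ids)
    print(tokenizer.decode(encoded.input_ids))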
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map, fp)
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : int , **_a : Optional[Any] ) -> Any:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]

        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        return image_inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
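# --- Hedged example (not in the original file) --------------------------------
# With a fixed seed the generator is fully deterministic; the constants are the
# same Numerical Recipes parameters used above.
def demo_lcg() -> None:
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
    print([lcg.next_number() for _ in range(3)])  # [1013904223, 1196435762, 3519870697]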
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }

    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
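# --- Hedged example (not in the original file) --------------------------------
# 5-vertex adjacency matrix, colored with at most 3 colors; the graph is
# illustrative.
def demo_color() -> None:
    graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(graph, 3))  # e.g. [0, 1, 0, 1, 0]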
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 691 | 1 |
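The `_LazyModule` pattern above defers the real submodule import until an attribute is first touched, which keeps package import cheap. A minimal standalone sketch of the same idea (my own reduction, not the transformers implementation):

# Minimal lazy-module sketch (illustrative, not the transformers implementation).
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that actually defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # The submodule is imported only now, on first attribute access.
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(module, attr)

# Typical use at the bottom of a package __init__ (as in the record above):
#   sys.modules[__name__] = LazyModule(__name__, _import_structure)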
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # Columns covered by exactly one prime implicant mark that implicant as essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily pick the implicant covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main() | 691 |
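The module above is interactive; a non-interactive smoke test makes the pipeline easier to follow. The two-minterm input is my own assumption, and note the module deliberately keeps minterms as floats, so the bit strings carry ".0" artifacts.

# Non-interactive usage sketch (assumed inputs, not from the source).
minterms = [0.0, 1.0]
binary = decimal_to_binary(3, minterms)
prime_implicants = check(binary)
chart = prime_implicant_chart(prime_implicants, binary)
print("Essential Prime Implicants:", selection(chart, prime_implicants))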
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset | 691 | 1 |
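A brief usage sketch tying the helpers together; the feature spec and temp-file handling are illustrative assumptions, not part of the source.

# Usage sketch (assumed feature spec): build and time a tiny dummy dataset.
import tempfile

features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})

@get_duration
def build(path):
    return generate_example_dataset(path, features, num_examples=50)

with tempfile.TemporaryDirectory() as tmp_dir:
    seconds = build(tmp_dir + "/dummy.arrow")
    print(f"{build.__name__} took {seconds:.3f}s")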
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex carrying the key/parent fields used by Prim's algorithm."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    # Placeholder kept from the source; the original module's doctests live here.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 691 |
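A quick usage sketch for the two MST routines above; the 4-vertex weighted graph is an assumed example, not from the source. Note that `connect()` takes 1-indexed vertex numbers.

# Usage sketch (assumed graph): a path 1-2-3-4 plus a heavy chord 1-4.
graph = [Vertex(i) for i in range(4)]
connect(graph, 1, 2, 1)
connect(graph, 2, 3, 2)
connect(graph, 3, 4, 1)
connect(graph, 1, 4, 4)
print(prim(graph[:], graph[0]))             # [(2, 1), (3, 2), (4, 3)] as (child, parent) MST edges
print(list(prim_heap(graph[:], graph[0])))  # same edges, heap-based variant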
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class BertModelWithPabee(BertModel):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any]=None , _a : Optional[int]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : str=None , _a : Any=None , _a : str=None , _a : Optional[Any]=None , _a : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase__ , )
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
def __init__( self : Optional[int] , _a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =BertModelWithPabee(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[str] , _a : Optional[Any]=None , _a : List[Any]=None , _a : Union[str, Any]=None , _a : List[str]=None , _a : Dict=None , _a : Optional[Any]=None , _a : Optional[Any]=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE =(logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE =(total_loss / total_weights,) + outputs
return outputs | 691 | 1 |
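The patience mechanism above decides when inference can stop early: once the per-layer classifier heads agree for `patience` consecutive layers, the remaining layers are skipped. A standalone sketch of just that stopping rule, independent of the model code (function and variable names are mine, not the source API):

# Standalone sketch of the PABEE stopping rule (illustrative, not the source API).
def early_exit_layer(per_layer_predictions, patience):
    """Return the 1-indexed layer at which inference would stop."""
    streak = 0
    previous = None
    for layer, prediction in enumerate(per_layer_predictions, start=1):
        streak = streak + 1 if prediction == previous else 0
        previous = prediction
        if streak == patience:
            return layer
    return len(per_layer_predictions)  # no early exit: run every layer

# With patience=2, the streak of 'B' predictions triggers an exit at layer 4.
print(early_exit_layer(["A", "B", "B", "B", "C"], patience=2))  # -> 4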
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 691 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 691 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]
def __init__( self : str , _a : AutoencoderKL , _a : UNetaDConditionModel , _a : Mel , _a : Union[DDIMScheduler, DDPMScheduler] , ) -> List[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_a , scheduler=_a , mel=_a , vqvae=_a )
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , _a ) else 1000
@torch.no_grad()
def __call__( self : Any , _a : int = 1 , _a : str = None , _a : np.ndarray = None , _a : int = 0 , _a : int = 0 , _a : int = None , _a : torch.Generator = None , _a : float = 0 , _a : float = 0 , _a : torch.Generator = None , _a : float = 0 , _a : torch.Tensor = None , _a : torch.Tensor = None , _a : Any=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
_SCREAMING_SNAKE_CASE =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_SCREAMING_SNAKE_CASE =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_a , device=self.device , )
_SCREAMING_SNAKE_CASE =noise
_SCREAMING_SNAKE_CASE =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a , _a )
_SCREAMING_SNAKE_CASE =self.mel.audio_slice_to_image(_a )
_SCREAMING_SNAKE_CASE =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_SCREAMING_SNAKE_CASE =(input_image / 255) * 2 - 1
_SCREAMING_SNAKE_CASE =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE =self.vqvae.encode(torch.unsqueeze(_a , 0 ) ).latent_dist.sample(
generator=_a )[0]
_SCREAMING_SNAKE_CASE =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE =self.scheduler.add_noise(_a , _a , self.scheduler.timesteps[start_step - 1] )
_SCREAMING_SNAKE_CASE =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE =int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE =int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE =self.scheduler.add_noise(_a , _a , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _a ):
_SCREAMING_SNAKE_CASE =self.unet(_a , _a , _a )['''sample''']
else:
_SCREAMING_SNAKE_CASE =self.unet(_a , _a )['''sample''']
if isinstance(self.scheduler , _a ):
_SCREAMING_SNAKE_CASE =self.scheduler.step(
model_output=_a , timestep=_a , sample=_a , eta=_a , generator=_a , )['''prev_sample''']
else:
_SCREAMING_SNAKE_CASE =self.scheduler.step(
model_output=_a , timestep=_a , sample=_a , generator=_a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE =mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE =1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE =self.vqvae.decode(_a )['''sample''']
_SCREAMING_SNAKE_CASE =(images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE =(images * 255).round().astype('''uint8''' )
_SCREAMING_SNAKE_CASE =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_a , mode='''RGB''' ).convert('''L''' ) for _ in images) )
_SCREAMING_SNAKE_CASE =[self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) , **ImagePipelineOutput(_a ) )
@torch.no_grad()
def __UpperCamelCase ( self : List[str] , _a : List[Image.Image] , _a : int = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , _a )
self.scheduler.set_timesteps(_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_SCREAMING_SNAKE_CASE =(sample / 255) * 2 - 1
_SCREAMING_SNAKE_CASE =torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_SCREAMING_SNAKE_CASE =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_SCREAMING_SNAKE_CASE =self.scheduler.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE =1 - alpha_prod_t
_SCREAMING_SNAKE_CASE =self.unet(_a , _a )['''sample''']
_SCREAMING_SNAKE_CASE =(1 - alpha_prod_t_prev) ** 0.5 * model_output
_SCREAMING_SNAKE_CASE =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_SCREAMING_SNAKE_CASE =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __UpperCamelCase ( _a : torch.Tensor , _a : torch.Tensor , _a : float ) -> torch.Tensor:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =acos(torch.dot(torch.flatten(_a ) , torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a ) | 691 |
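The static method at the end performs spherical interpolation (slerp) between two latent/noise tensors, which preserves their norm statistics better than a linear mix. A self-contained sketch of the same idea (my own standalone version, not the pipeline's API):

# Standalone spherical-interpolation sketch (illustrative, mirrors the static method above).
import math
import torch

def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Angle between the two tensors viewed as flat vectors.
    theta = math.acos(
        torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1))
    )
    return (math.sin((1 - alpha) * theta) * x0 + math.sin(alpha * theta) * x1) / math.sin(theta)

noise_a = torch.randn(1, 1, 64, 64)
noise_b = torch.randn(1, 1, 64, 64)
halfway = slerp(noise_a, noise_b, 0.5)  # smoothly between the two noise samples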
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(3, 32, 128)
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[Any] , **_a : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Optional[int] , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
_SCREAMING_SNAKE_CASE =Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] ) | 691 | 1 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase( a__ ,a__ ,a__ ,a__="attention"):
_SCREAMING_SNAKE_CASE =params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
_SCREAMING_SNAKE_CASE =params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
_SCREAMING_SNAKE_CASE =params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
_SCREAMING_SNAKE_CASE =params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def lowerCamelCase( a__ ,a__ ,a__ ,a__=False):
if split_mlp_wi:
_SCREAMING_SNAKE_CASE =params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
_SCREAMING_SNAKE_CASE =params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
_SCREAMING_SNAKE_CASE =(wi_a, wi_a)
else:
_SCREAMING_SNAKE_CASE =params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
_SCREAMING_SNAKE_CASE =params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def lowerCamelCase( a__ ,a__ ,a__ ,a__):
return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def lowerCamelCase( a__ ,*, a__ ,a__):
_SCREAMING_SNAKE_CASE =traverse_util.flatten_dict(variables['''target'''])
_SCREAMING_SNAKE_CASE ={'''/'''.join(a__): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_SCREAMING_SNAKE_CASE ='''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,a__)
_SCREAMING_SNAKE_CASE =collections.OrderedDict()
# Shared embeddings.
_SCREAMING_SNAKE_CASE =old['''token_embedder/embedding''']
# Encoder.
for i in range(a__):
# Block i, layer 0 (Self Attention).
_SCREAMING_SNAKE_CASE =tax_layer_norm_lookup(a__ ,a__ ,'''encoder''' ,'''pre_attention_layer_norm''')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =tax_attention_lookup(a__ ,a__ ,'''encoder''' ,'''attention''')
_SCREAMING_SNAKE_CASE =layer_norm
_SCREAMING_SNAKE_CASE =k.T
_SCREAMING_SNAKE_CASE =o.T
_SCREAMING_SNAKE_CASE =q.T
_SCREAMING_SNAKE_CASE =v.T
# Block i, layer 1 (MLP).
_SCREAMING_SNAKE_CASE =tax_layer_norm_lookup(a__ ,a__ ,'''encoder''' ,'''pre_mlp_layer_norm''')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =tax_mlp_lookup(a__ ,a__ ,'''encoder''' ,a__)
_SCREAMING_SNAKE_CASE =layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE =wi[0].T
_SCREAMING_SNAKE_CASE =wi[1].T
else:
_SCREAMING_SNAKE_CASE =wi.T
_SCREAMING_SNAKE_CASE =wo.T
_SCREAMING_SNAKE_CASE =old[
'''encoder/relpos_bias/rel_embedding'''
].T
_SCREAMING_SNAKE_CASE =old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(a__):
# Block i, layer 0 (Self Attention).
_SCREAMING_SNAKE_CASE =tax_layer_norm_lookup(a__ ,a__ ,'''decoder''' ,'''pre_self_attention_layer_norm''')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =tax_attention_lookup(a__ ,a__ ,'''decoder''' ,'''self_attention''')
_SCREAMING_SNAKE_CASE =layer_norm
_SCREAMING_SNAKE_CASE =k.T
_SCREAMING_SNAKE_CASE =o.T
_SCREAMING_SNAKE_CASE =q.T
_SCREAMING_SNAKE_CASE =v.T
# Block i, layer 1 (Cross Attention).
_SCREAMING_SNAKE_CASE =tax_layer_norm_lookup(a__ ,a__ ,'''decoder''' ,'''pre_cross_attention_layer_norm''')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =tax_attention_lookup(a__ ,a__ ,'''decoder''' ,'''encoder_decoder_attention''')
_SCREAMING_SNAKE_CASE =layer_norm
_SCREAMING_SNAKE_CASE =k.T
_SCREAMING_SNAKE_CASE =o.T
_SCREAMING_SNAKE_CASE =q.T
_SCREAMING_SNAKE_CASE =v.T
# Block i, layer 2 (MLP).
_SCREAMING_SNAKE_CASE =tax_layer_norm_lookup(a__ ,a__ ,'''decoder''' ,'''pre_mlp_layer_norm''')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =tax_mlp_lookup(a__ ,a__ ,'''decoder''' ,a__)
_SCREAMING_SNAKE_CASE =layer_norm
if split_mlp_wi:
_SCREAMING_SNAKE_CASE =wi[0].T
_SCREAMING_SNAKE_CASE =wi[1].T
else:
_SCREAMING_SNAKE_CASE =wi.T
_SCREAMING_SNAKE_CASE =wo.T
_SCREAMING_SNAKE_CASE =old['''decoder/decoder_norm/scale''']
_SCREAMING_SNAKE_CASE =old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_SCREAMING_SNAKE_CASE =old['''decoder/logits_dense/kernel'''].T
return new
def lowerCamelCase( a__ ,a__):
_SCREAMING_SNAKE_CASE =collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_SCREAMING_SNAKE_CASE =state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_SCREAMING_SNAKE_CASE =state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''')
_SCREAMING_SNAKE_CASE =state_dict['''shared.weight''']
return state_dict
def lowerCamelCase( a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =checkpoints.load_tax_checkpoint(a__)
_SCREAMING_SNAKE_CASE =convert_tax_to_pytorch(a__ ,num_layers=config.num_layers ,is_encoder_only=a__)
_SCREAMING_SNAKE_CASE =make_state_dict(a__ ,a__)
model.load_state_dict(a__ ,strict=a__)
def lowerCamelCase( a__ ,a__ ,a__ ,a__ = False):
_SCREAMING_SNAKE_CASE =TaConfig.from_json_file(a__)
print(f"Building PyTorch model from configuration: {config}")
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_SCREAMING_SNAKE_CASE =TaEncoderModel(a__)
else:
_SCREAMING_SNAKE_CASE =TaForConditionalGeneration(a__)
# Load weights from tf checkpoint
load_tax_weights_in_ta(a__ ,a__ ,a__ ,a__)
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}")
model.save_pretrained(a__)
# Verify that we can load the checkpoint.
model.from_pretrained(a__)
print('''Done''')
if __name__ == "__main__":
snake_case_ : List[Any] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
snake_case_ : Union[str, Any] = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    ) | 691 |
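For reference, a hypothetical invocation of this converter; all paths below are placeholders of my own, not values from the source.

# Hypothetical invocation (placeholder paths, not from the source):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --is_encoder_only    # pass only for encoder-only checkpoints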
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n") | 691 | 1 |
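Scrapers like this break silently when the page layout or availability changes; a slightly hardened fetch pattern (my own suggestion, not from the source) adds a timeout and an HTTP status check before parsing.

# Hardened fetch sketch (illustrative): fail fast instead of hanging on a dead page.
import requests
from bs4 import BeautifulSoup

response = requests.get("https://www.worldometers.info/coronavirus", timeout=10)
response.raise_for_status()  # raise requests.HTTPError on 4xx/5xx instead of parsing an error page
soup = BeautifulSoup(response.text, "html.parser")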
def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}") | 691 |
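The linear recursion above makes `exponent` calls; exponentiation by squaring gets the same result in O(log n) multiplications. A sketch of that alternative (my own addition, not part of the source):

# O(log n) alternative sketch: exponentiation by squaring.
def fast_power(base: int, exponent: int) -> int:
    result = 1
    while exponent:
        if exponent & 1:       # odd exponent: fold one factor into the result
            result *= base
        base *= base           # square the base
        exponent >>= 1         # halve the exponent
    return result

assert fast_power(3, 4) == 81 == power(3, 4)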
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 691 | 1 |
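A few concrete calls make the bit positions easier to follow; the input values are my own illustrative choices, shown in binary for readability.

# Illustrative calls (assumed values, not from the source).
print(bin(set_bit(0b1101, 1)))    # 0b1111: bit 1 turned on
print(bin(clear_bit(0b1101, 2)))  # 0b1001: bit 2 turned off
print(bin(flip_bit(0b1101, 0)))   # 0b1100: bit 0 toggled
print(is_bit_set(0b1010, 3))      # True: bit 3 of 0b1010 is 1
print(get_bit(0b1010, 0))         # 0: bit 0 of 0b1010 is 0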
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
_SCREAMING_SNAKE_CASE =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
_SCREAMING_SNAKE_CASE =CLIPTextModel(_a )
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_SCREAMING_SNAKE_CASE =77
_SCREAMING_SNAKE_CASE ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __UpperCamelCase ( self : Tuple , _a : List[Any] , _a : Tuple=0 ) -> str:
"""simple docstring"""
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
_SCREAMING_SNAKE_CASE =RobertaSeriesModelWithTransformation(_a )
_SCREAMING_SNAKE_CASE =text_encoder
_SCREAMING_SNAKE_CASE =AltDiffusionPipeline(**_a )
_SCREAMING_SNAKE_CASE =alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE ='''A photo of an astronaut'''
_SCREAMING_SNAKE_CASE =alt_pipe(**_a )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array(
[0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=_a )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
_SCREAMING_SNAKE_CASE =RobertaSeriesModelWithTransformation(_a )
_SCREAMING_SNAKE_CASE =text_encoder
_SCREAMING_SNAKE_CASE =AltDiffusionPipeline(**_a )
_SCREAMING_SNAKE_CASE =alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =alt_pipe(**_a )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array(
[0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=_a )
_SCREAMING_SNAKE_CASE =alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE ='''A painting of a squirrel eating a burger'''
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =alt_pipe([prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_SCREAMING_SNAKE_CASE =np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
_SCREAMING_SNAKE_CASE =AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=_a , safety_checker=_a )
_SCREAMING_SNAKE_CASE =alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE ='''A painting of a squirrel eating a burger'''
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =alt_pipe([prompt] , generator=_a , num_inference_steps=2 , output_type='''numpy''' )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_SCREAMING_SNAKE_CASE =np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 691 |
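The tests above all follow the same slice-check pattern: render a tiny image deterministically, then compare a 3x3 corner of it against hard-coded reference values. A condensed sketch of that assertion idiom (the array and reference values are placeholders, not the real test data):

# Condensed sketch of the slice-check idiom used above (placeholder values).
import numpy as np

image = np.zeros((1, 64, 64, 3))      # stand-in for pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 patch of the last channel
expected_slice = np.zeros(9)          # reference values recorded once from a trusted run
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2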
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =8
# DPR tok
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCamelCase ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Dict ) -> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __UpperCamelCase ( self : Optional[int] , _a : bool ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dataset''' )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _a ) , )
return retriever
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_SCREAMING_SNAKE_CASE ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
            len(_a ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary. | 691 | 1 |
import math
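# Returns the n-th Proth number. Proth numbers have the form k * 2**n + 1
# with k odd and k < 2**n; the sequence starts 3, 5, 9, 13, 17, 25, ...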
def proth(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""") | 691 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
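# Fast tests for the Kandinsky image-to-image pipeline, built from tiny dummy
# components (text encoder, UNet, VQ "movq" decoder) so that they run on CPU.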
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = KandinskyImgaImgPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_SCREAMING_SNAKE_CASE =MultilingualCLIP(_a )
_SCREAMING_SNAKE_CASE =text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(**_a )
return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =self.dummy_tokenizer
_SCREAMING_SNAKE_CASE =self.dummy_unet
_SCREAMING_SNAKE_CASE =self.dummy_movq
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_SCREAMING_SNAKE_CASE =DDIMScheduler(**_a )
_SCREAMING_SNAKE_CASE ={
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self : str , _a : int , _a : int=0 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((256, 256) )
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu'''
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs(_a ) )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
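        # Regression check: compare a fixed 3x3 slice of the output against values from a reference run.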
_SCREAMING_SNAKE_CASE =np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_SCREAMING_SNAKE_CASE ='''A red cartoon frog, 4k'''
_SCREAMING_SNAKE_CASE =KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_SCREAMING_SNAKE_CASE =KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE =pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.Generator(device='''cpu''' ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_SCREAMING_SNAKE_CASE =pipeline(
_a , image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a ) | 691 | 1 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
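# "Next greatest element": for each position, find the first larger value to its
# right, or -1 if none exists. Three implementations of increasing efficiency follow.
# Brute force O(n^2): for each element, scan rightward for the first larger value.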
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result
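# Same O(n^2) idea, but iterating with enumerate() and slicing, which is faster in practice.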
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
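# O(n) solution: scan right-to-left while maintaining a stack of candidate values.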
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
) | 691 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
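# Helper that builds image-processor kwargs and random test images (PIL, NumPy, or torch).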
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : Dict , _a : Dict=7 , _a : List[str]=3 , _a : str=18 , _a : Optional[int]=30 , _a : Tuple=400 , _a : Optional[Any]=True , _a : Dict=None , _a : str=True , _a : Tuple=None , _a : Any=True , _a : Any=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _a : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _a : List[Any]=True , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 224, '''width''': 224}
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean
_SCREAMING_SNAKE_CASE =image_std
_SCREAMING_SNAKE_CASE =do_convert_rgb
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __UpperCamelCase ( self : Tuple , _a : Optional[Any]=False , _a : str=False , _a : Dict=False ) -> Dict:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
            # PIL expects the channel dimension to be the last dimension
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
if torchify:
_SCREAMING_SNAKE_CASE =[torch.from_numpy(_a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , do_center_crop=_a )
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
        # create random numpy arrays
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
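# Same suite with four-channel inputs: do_convert_rgb must reduce them to three channels.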
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_a )
_SCREAMING_SNAKE_CASE =3
@property
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 691 | 1 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
snake_case_ : List[str] = logging.getLogger(__name__)
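# Dataset readers for token-classification tasks: NER (CoNLL), chunking, and POS tagging (CoNLL-U).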
class A__ ( UpperCamelCase__ ):
def __init__( self : Dict , _a : Tuple=-1 ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =label_idx
def __UpperCamelCase ( self : Optional[Any] , _a : Any , _a : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(_a , _a ):
_SCREAMING_SNAKE_CASE =mode.value
_SCREAMING_SNAKE_CASE =os.path.join(_a , f"{mode}.txt" )
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =[]
with open(_a , encoding='''utf-8''' ) as f:
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=_a , labels=_a ) )
guid_index += 1
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
else:
_SCREAMING_SNAKE_CASE =line.split(''' ''' )
words.append(splits[0] )
if len(_a ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=_a , labels=_a ) )
return examples
def __UpperCamelCase ( self : Dict , _a : TextIO , _a : TextIO , _a : List ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(_a )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_SCREAMING_SNAKE_CASE =line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(_a )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def __UpperCamelCase ( self : int , _a : str ) -> List[str]:
"""simple docstring"""
if path:
with open(_a , '''r''' ) as f:
_SCREAMING_SNAKE_CASE =f.read().splitlines()
if "O" not in labels:
_SCREAMING_SNAKE_CASE =['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class A__ ( UpperCamelCase__ ):
def __init__( self : str ) -> Optional[int]:
"""simple docstring"""
super().__init__(label_idx=-2 )
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
if path:
with open(_a , '''r''' ) as f:
_SCREAMING_SNAKE_CASE =f.read().splitlines()
if "O" not in labels:
_SCREAMING_SNAKE_CASE =['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
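# POS task: parses CoNLL-U files and uses each token's UPOS tag as its label.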
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[Any] , _a : Any , _a : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(_a , _a ):
_SCREAMING_SNAKE_CASE =mode.value
_SCREAMING_SNAKE_CASE =os.path.join(_a , f"{mode}.txt" )
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =[]
with open(_a , encoding='''utf-8''' ) as f:
for sentence in parse_incr(_a ):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(_a ) == len(_a )
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=_a , labels=_a ) )
guid_index += 1
return examples
def __UpperCamelCase ( self : List[str] , _a : TextIO , _a : TextIO , _a : List ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
for sentence in parse_incr(_a ):
_SCREAMING_SNAKE_CASE =preds_list[example_id]
_SCREAMING_SNAKE_CASE =''''''
for token in sentence:
out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
out += "\n"
writer.write(_a )
example_id += 1
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
if path:
with open(_a , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
] | 691 |
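# AND gate: output is 1 only when neither input is 0.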
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 691 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
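# Fraction of predictions that exactly match the labels.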
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
    # Set the verbosity of the Transformers logger to info (on the main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
_SCREAMING_SNAKE_CASE =processors[data_args.task_name]()
_SCREAMING_SNAKE_CASE =processor.get_labels()
_SCREAMING_SNAKE_CASE =len(a__)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
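    # Metric: accuracy of the argmax over the choice logits.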
    def compute_metrics(p) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
# Data collator
_SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_SCREAMING_SNAKE_CASE ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''')
if trainer.is_world_master():
with open(a__ ,'''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' ,a__ ,a__)
writer.write('''%s = %s\n''' % (key, value))
results.update(a__)
return results
def lowerCamelCase( a__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 691 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
snake_case_ : Optional[int] = '''sshleifer/mar_enro_6_3_student'''
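# End-to-end tests that exercise the bundled bash training scripts: MarianMT
# fine-tuning on WMT en-ro, and no-teacher distillation of a student Marian model.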
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_a , )
_SCREAMING_SNAKE_CASE =f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
        # guard against the model hanging on generate, which can indicate a bad saved config (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
        # TODO: turn on args.do_predict when the PL bug is fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
            # assert args.gpus == gpus  # NOTE: this breaks for multi-GPU runs
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
        # TODO: turn on args.do_predict when the PL bug is fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1 | 691 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
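# Each test launches one of the bundled test scripts with torchrun across the visible GPUs.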
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =inspect.getfile(accelerate.test_utils )
_SCREAMING_SNAKE_CASE =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
_SCREAMING_SNAKE_CASE =os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
_SCREAMING_SNAKE_CASE =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
print(f"Found {torch.cuda.device_count()} devices." )
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
print(f"Found {torch.cuda.device_count()} devices." )
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
snake_case_ : List[str] = Accelerator()
snake_case_ : Optional[int] = (accelerator.state.process_index + 2, 10)
snake_case_ : Any = torch.randint(0, 10, shape).to(accelerator.device)
snake_case_ : List[str] = ''''''
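# pad_across_processes should zero-pad every rank's tensor to the largest size across ranks.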
snake_case_ : str = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
snake_case_ : Tuple = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
snake_case_ : Tuple = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 691 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
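# Verifies that dataclass-based kwargs handlers forward their fields to the objects
# they configure (GradScaler, DistributedDataParallel).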
@dataclass
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = 0
UpperCAmelCase = False
UpperCAmelCase = 3.0
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_SCREAMING_SNAKE_CASE =Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_SCREAMING_SNAKE_CASE =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
snake_case_ : Optional[Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
snake_case_ : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
snake_case_ : Dict = torch.nn.Linear(1_00, 2_00)
snake_case_ : List[Any] = accelerator.prepare(model)
# Check the values changed in kwargs
snake_case_ : Dict = ''''''
snake_case_ : str = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 691 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
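# Builds tiny BiT configurations and random pixel inputs for the model tests below.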
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self ) | 691 |
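The 4x4 and 1x1 feature-map assertions in the backbone tests above follow from the stage strides of a 32-pixel input; a small self-contained check of that arithmetic (the stride values are an assumption about the BiT stages, not taken from this file):

image_size = 32
stage_strides = {"stage2": 8, "stage3": 16, "stage4": 32}  # assumed BiT stage strides
print([image_size // s for s in stage_strides.values()])   # [4, 2, 1]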
class A__ :
def __init__( self : List[str] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE ={}
def __UpperCamelCase ( self : Any , _a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if vertex not in self.adjacency:
_SCREAMING_SNAKE_CASE ={}
self.num_vertices += 1
def __UpperCamelCase ( self : Optional[int] , _a : Tuple , _a : Tuple , _a : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.add_vertex(_a )
self.add_vertex(_a )
if head == tail:
return
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for i in range(len(_a ) ):
_SCREAMING_SNAKE_CASE =list(edges[i] )
        edges.sort(key=lambda _a : _a[2] )
for i in range(len(_a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_SCREAMING_SNAKE_CASE =edges[i][2] + 1
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __str__( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
_SCREAMING_SNAKE_CASE =self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip('''\n''' )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( _a : List[str]=None , _a : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Graph()
if vertices is None:
_SCREAMING_SNAKE_CASE =[]
if edges is None:
_SCREAMING_SNAKE_CASE =[]
for vertex in vertices:
g.add_vertex(_a )
for edge in edges:
g.add_edge(*_a )
return g
class A__ :
def __init__( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE ={}
def __len__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return len(self.parent )
def __UpperCamelCase ( self : Dict , _a : Optional[Any] ) -> int:
"""simple docstring"""
if item in self.parent:
return self.find(_a )
_SCREAMING_SNAKE_CASE =item
_SCREAMING_SNAKE_CASE =0
return item
def __UpperCamelCase ( self : str , _a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(_a )
if item != self.parent[item]:
_SCREAMING_SNAKE_CASE =self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , _a : Optional[int] , _a : List[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.find(_a )
_SCREAMING_SNAKE_CASE =self.find(_a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] < self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_SCREAMING_SNAKE_CASE =roota
return roota
return None
@staticmethod
def __UpperCamelCase ( _a : int ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =graph.num_vertices
_SCREAMING_SNAKE_CASE =Graph.UnionFind()
_SCREAMING_SNAKE_CASE =[]
while num_components > 1:
_SCREAMING_SNAKE_CASE ={}
for vertex in graph.get_vertices():
_SCREAMING_SNAKE_CASE =-1
_SCREAMING_SNAKE_CASE =graph.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =union_find.find(_a )
_SCREAMING_SNAKE_CASE =union_find.find(_a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cheap_edge[vertex]
if union_find.find(_a ) != union_find.find(_a ):
union_find.union(_a , _a )
mst_edges.append(cheap_edge[vertex] )
_SCREAMING_SNAKE_CASE =num_components - 1
_SCREAMING_SNAKE_CASE =Graph.build(edges=_a )
return mst | 691 | 1 |
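For reference, a minimal de-obfuscated sketch of the same Borůvka procedure the class above implements; all names here are illustrative, and the distinct-weight assumption mirrors the helper above:

def boruvka_mst(num_vertices, edges):
    """Edges are (u, v, weight) tuples with pairwise-distinct weights."""
    parent = list(range(num_vertices))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst, components = [], num_vertices
    while components > 1:
        cheapest = [None] * num_vertices
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                if cheapest[ru] is None or cheapest[ru][2] > w:
                    cheapest[ru] = (u, v, w)
                if cheapest[rv] is None or cheapest[rv][2] > w:
                    cheapest[rv] = (u, v, w)
        if all(e is None for e in cheapest):
            break  # graph is disconnected; no spanning tree exists
        for edge in set(e for e in cheapest if e is not None):
            u, v, w = edge
            if find(u) != find(v):
                parent[find(u)] = find(v)
                mst.append(edge)
                components -= 1
    return mst


print(boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]))
# three edges with total weight 6 (edge order may vary)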
from __future__ import annotations
snake_case_ : Dict = list[list[int]]
# assigning initial values to the grid
snake_case_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
snake_case_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase( a__ ,a__ ,a__ ,a__):
for i in range(9):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3):
for j in range(3):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCamelCase( a__):
for i in range(9):
for j in range(9):
if grid[i][j] == 0:
return i, j
return None
def lowerCamelCase( a__):
if location := find_empty_location(a__):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 ,10):
if is_safe(a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =digit
if sudoku(a__) is not None:
return grid
_SCREAMING_SNAKE_CASE =0
return None
def lowerCamelCase( a__):
for row in grid:
for cell in row:
print(a__ ,end=''' ''')
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
snake_case_ : Any = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 691 |
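The box check in the solver above hinges on (row - row % 3, column - column % 3) locating the top-left cell of a 3x3 box; a quick self-contained sanity check of that arithmetic:

for row, column in [(0, 0), (4, 7), (8, 8)]:
    top, left = row - row % 3, column - column % 3
    assert (top, left) in {(r, c) for r in (0, 3, 6) for c in (0, 3, 6)}
print([(r - r % 3, c - c % 3) for r, c in [(0, 0), (4, 7), (8, 8)]])
# [(0, 0), (3, 6), (6, 6)]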
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
def lowerCamelCase( a__ ,a__):
return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
_SCREAMING_SNAKE_CASE =processors[data_args.task_name]()
_SCREAMING_SNAKE_CASE =processor.get_labels()
_SCREAMING_SNAKE_CASE =len(a__)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(a__) -> Dict:
_SCREAMING_SNAKE_CASE =np.argmax(p.predictions ,axis=1)
return {"acc": simple_accuracy(a__ ,p.label_ids)}
# Data collator
_SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_SCREAMING_SNAKE_CASE ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''')
if trainer.is_world_master():
with open(a__ ,'''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' ,a__ ,a__)
writer.write('''%s = %s\n''' % (key, value))
results.update(a__)
return results
def lowerCamelCase( a__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 691 | 1 |
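The accuracy helper near the top of this script is just a vectorized mean of elementwise equality; a self-contained check of that behaviour:

import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
print((preds == labels).mean())  # 0.75, three of four predictions match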
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
snake_case_ : Optional[int] = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
snake_case_ : int = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,a__):
for attribute in key.split('''.'''):
_SCREAMING_SNAKE_CASE =getattr(a__ ,a__)
if weight_type is not None:
_SCREAMING_SNAKE_CASE =getattr(a__ ,a__).shape
else:
_SCREAMING_SNAKE_CASE =hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_g":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_v":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "bias":
_SCREAMING_SNAKE_CASE =value
else:
_SCREAMING_SNAKE_CASE =value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def lowerCamelCase( a__ ,a__):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =fairseq_model.state_dict()
_SCREAMING_SNAKE_CASE =hf_model.feature_extractor
_SCREAMING_SNAKE_CASE =hf_model.adapter
for name, value in fairseq_dict.items():
_SCREAMING_SNAKE_CASE =False
if "conv_layers" in name:
load_conv_layer(
a__ ,a__ ,a__ ,a__ ,hf_model.config.feat_extract_norm == '''group''' ,)
_SCREAMING_SNAKE_CASE =True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.''']):
load_adapter(a__ ,a__ ,a__ ,a__)
_SCREAMING_SNAKE_CASE =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
_SCREAMING_SNAKE_CASE =True
if "*" in mapped_key:
_SCREAMING_SNAKE_CASE =name.split(a__)[0].split('''.''')[-2]
_SCREAMING_SNAKE_CASE =mapped_key.replace('''*''' ,a__)
if "weight_g" in name:
_SCREAMING_SNAKE_CASE ='''weight_g'''
elif "weight_v" in name:
_SCREAMING_SNAKE_CASE ='''weight_v'''
elif "bias" in name:
_SCREAMING_SNAKE_CASE ='''bias'''
elif "weight" in name:
_SCREAMING_SNAKE_CASE ='''weight'''
else:
_SCREAMING_SNAKE_CASE =None
set_recursively(a__ ,a__ ,a__ ,a__ ,a__)
continue
if not is_used:
unused_weights.append(a__)
logger.warning(f"Unused weights: {unused_weights}")
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =full_name.split('''conv_layers.''')[-1]
_SCREAMING_SNAKE_CASE =name.split('''.''')
_SCREAMING_SNAKE_CASE =int(items[0])
_SCREAMING_SNAKE_CASE =int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
else:
unused_weights.append(a__)
def lowerCamelCase( a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =full_name.split('''adaptor.''')[-1]
_SCREAMING_SNAKE_CASE =name.split('''.''')
if items[1].isdigit():
_SCREAMING_SNAKE_CASE =int(items[1])
else:
_SCREAMING_SNAKE_CASE =None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
_SCREAMING_SNAKE_CASE =value
logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
_SCREAMING_SNAKE_CASE =value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
_SCREAMING_SNAKE_CASE =value
logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
_SCREAMING_SNAKE_CASE =value
logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
elif isinstance(a__ ,a__):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
_SCREAMING_SNAKE_CASE =value
logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
_SCREAMING_SNAKE_CASE =value
logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
else:
unused_weights.append(a__)
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =emb.weight.shape
_SCREAMING_SNAKE_CASE =nn.Linear(a__ ,a__ ,bias=a__)
_SCREAMING_SNAKE_CASE =emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,a__ ,):
_SCREAMING_SNAKE_CASE =WavaVecaConfig.from_pretrained(
a__ ,add_adapter=a__ ,adapter_stride=a__ ,adapter_kernel_size=a__ ,use_auth_token=a__ ,output_hidden_size=a__ ,)
_SCREAMING_SNAKE_CASE =MBartConfig.from_pretrained(a__)
# load model
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''')[:-1]),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} ,)
_SCREAMING_SNAKE_CASE =model[0].eval()
# load feature extractor
_SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained(a__ ,use_auth_token=a__)
# set weights for wav2vec2 encoder
_SCREAMING_SNAKE_CASE =WavaVecaModel(a__)
recursively_load_weights_wavaveca(model.encoder ,a__)
# load decoder weights
_SCREAMING_SNAKE_CASE =MBartForCausalLM(a__)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=a__)
logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
_SCREAMING_SNAKE_CASE =SpeechEncoderDecoderModel(encoder=a__ ,decoder=a__)
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =MBartaaTokenizer(a__)
tokenizer.save_pretrained(a__)
_SCREAMING_SNAKE_CASE =hf_wavavec.config.to_dict()
_SCREAMING_SNAKE_CASE =tokenizer.pad_token_id
_SCREAMING_SNAKE_CASE =tokenizer.bos_token_id
_SCREAMING_SNAKE_CASE =tokenizer.eos_token_id
_SCREAMING_SNAKE_CASE ='''mbart50'''
_SCREAMING_SNAKE_CASE ='''wav2vec2'''
_SCREAMING_SNAKE_CASE =tokenizer.eos_token_id
_SCREAMING_SNAKE_CASE =25_0004
_SCREAMING_SNAKE_CASE =tokenizer.eos_token_id
_SCREAMING_SNAKE_CASE =SpeechEncoderDecoderConfig.from_dict(a__)
hf_wavavec.save_pretrained(a__)
feature_extractor.save_pretrained(a__)
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=10_24, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=25_00_04, type=int, help='''`decoder_start_token_id` of model config''')
snake_case_ : Optional[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
) | 691 |
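The embedding-to-linear helper in this conversion script implements weight tying between the decoder embedding and the lm_head; a minimal self-contained sketch of the same idea (sizes are illustrative):

import torch
from torch import nn

emb = nn.Embedding(10, 4)               # vocab of 10, hidden size 4
lm_head = nn.Linear(4, 10, bias=False)  # maps hidden states back to vocab logits
lm_head.weight.data = emb.weight.data   # tie: logits = hidden @ emb.weight.T
print(lm_head(torch.zeros(1, 4)).shape)  # torch.Size([1, 10])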
def lowerCamelCase( a__ ,a__ ,a__):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(a__ ,n - 1 ,a__) * a) % mod
else:
        _SCREAMING_SNAKE_CASE =binary_exponentiation(a__ ,n // 2 ,a__)  # floor division keeps the exponent an int
return (b * b) % mod
# a prime number
snake_case_ : Union[str, Any] = 7_01
snake_case_ : int = 10_00_00_00_00
snake_case_ : str = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p) | 691 | 1 |
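The two closing checks rely on Fermat's little theorem: for a prime p, b**(p-2) mod p is the modular inverse of b. A quick verification with Python's built-in three-argument pow:

p, b = 701, 10
inv = pow(b, p - 2, p)  # modular inverse of b modulo the prime p
print((b * inv) % p)    # 1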
from typing import TYPE_CHECKING
from ....utils import _LazyModule
snake_case_ : Dict = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 691 |
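_LazyModule defers the submodule import until first attribute access; a minimal sketch of the same pattern using the module-level __getattr__ hook from PEP 562 (illustrative and self-contained, intended for a package's __init__.py, not the transformers implementation):

import importlib

_LAZY = {"TapexTokenizer": "tokenization_tapex"}

def __getattr__(name):  # called only when `name` is not found the normal way
    if name in _LAZY:
        module = importlib.import_module("." + _LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")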
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self ) | 691 | 1 |
from scipy.stats import pearsonr
import datasets
snake_case_ : List[str] = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
snake_case_ : Dict = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
snake_case_ : Union[str, Any] = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __UpperCamelCase ( self : Optional[int] , _a : Dict , _a : Optional[Any] , _a : Optional[Any]=False ) -> str:
"""simple docstring"""
if return_pvalue:
_SCREAMING_SNAKE_CASE =pearsonr(_a , _a )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(_a , _a )[0] )} | 691 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
snake_case_ : Optional[Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(UpperCamelCase__ )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "rag"
UpperCAmelCase = True
def __init__( self : Tuple , _a : List[Any]=None , _a : Tuple=True , _a : Optional[Any]=None , _a : int=None , _a : List[str]=None , _a : int=None , _a : Optional[int]=None , _a : str=" / " , _a : Any=" // " , _a : Optional[Any]=5 , _a : int=300 , _a : Optional[Any]=768 , _a : Any=8 , _a : List[str]="wiki_dpr" , _a : Dict="train" , _a : Union[str, Any]="compressed" , _a : str=None , _a : Union[str, Any]=None , _a : int=False , _a : Any=False , _a : Any=0.0 , _a : Any=True , _a : List[str]=False , _a : Optional[int]=False , _a : int=False , _a : Union[str, Any]=True , _a : Optional[int]=None , **_a : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=_a , pad_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , is_encoder_decoder=_a , prefix=_a , vocab_size=_a , **_a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_SCREAMING_SNAKE_CASE =kwargs.pop('''question_encoder''' )
_SCREAMING_SNAKE_CASE =question_encoder_config.pop('''model_type''' )
_SCREAMING_SNAKE_CASE =kwargs.pop('''generator''' )
_SCREAMING_SNAKE_CASE =decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =reduce_loss
_SCREAMING_SNAKE_CASE =label_smoothing
_SCREAMING_SNAKE_CASE =exclude_bos_score
_SCREAMING_SNAKE_CASE =do_marginalize
_SCREAMING_SNAKE_CASE =title_sep
_SCREAMING_SNAKE_CASE =doc_sep
_SCREAMING_SNAKE_CASE =n_docs
_SCREAMING_SNAKE_CASE =max_combined_length
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =dataset_split
_SCREAMING_SNAKE_CASE =index_name
_SCREAMING_SNAKE_CASE =retrieval_vector_size
_SCREAMING_SNAKE_CASE =retrieval_batch_size
_SCREAMING_SNAKE_CASE =passages_path
_SCREAMING_SNAKE_CASE =index_path
_SCREAMING_SNAKE_CASE =use_dummy_dataset
_SCREAMING_SNAKE_CASE =output_retrieved
_SCREAMING_SNAKE_CASE =do_deduplication
_SCREAMING_SNAKE_CASE =use_cache
if self.forced_eos_token_id is None:
_SCREAMING_SNAKE_CASE =getattr(self.generator , '''forced_eos_token_id''' , _a )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , _a : PretrainedConfig , _a : PretrainedConfig , **_a : Dict ) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.question_encoder.to_dict()
_SCREAMING_SNAKE_CASE =self.generator.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
return output | 691 | 1 |
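A hedged usage sketch of the composite-config pattern above; the un-obfuscated entry point is assumed to be RagConfig.from_question_encoder_generator_configs in transformers:

from transformers import AutoConfig, RagConfig  # assumes transformers is installed

question_encoder = AutoConfig.for_model("dpr")
generator = AutoConfig.for_model("bart")
config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator)
print(config.to_dict()["generator"]["model_type"])  # bart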
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "mgp-str"
def __init__( self : int , _a : Union[str, Any]=[32, 128] , _a : List[str]=4 , _a : Tuple=3 , _a : Union[str, Any]=27 , _a : Any=38 , _a : Dict=5_0257 , _a : Optional[int]=3_0522 , _a : List[str]=768 , _a : Tuple=12 , _a : str=12 , _a : Tuple=4.0 , _a : Dict=True , _a : Union[str, Any]=False , _a : List[str]=1E-5 , _a : Dict=0.0 , _a : Optional[int]=0.0 , _a : List[Any]=0.0 , _a : List[Any]=False , _a : Union[str, Any]=0.02 , **_a : Tuple , ) -> int:
"""simple docstring"""
super().__init__(**_a )
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =max_token_length
_SCREAMING_SNAKE_CASE =num_character_labels
_SCREAMING_SNAKE_CASE =num_bpe_labels
_SCREAMING_SNAKE_CASE =num_wordpiece_labels
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =mlp_ratio
_SCREAMING_SNAKE_CASE =distilled
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =drop_rate
_SCREAMING_SNAKE_CASE =qkv_bias
_SCREAMING_SNAKE_CASE =attn_drop_rate
_SCREAMING_SNAKE_CASE =drop_path_rate
_SCREAMING_SNAKE_CASE =output_aa_attentions
_SCREAMING_SNAKE_CASE =initializer_range | 691 |
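Instantiation sketch for the configuration above; MgpstrConfig is assumed to be the un-obfuscated class name behind this row:

from transformers import MgpstrConfig  # assumption about this row's source class

config = MgpstrConfig(max_token_length=27, num_character_labels=38)
print(config.max_token_length, config.num_character_labels)  # 27 38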
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
        self.wait()
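# Hedged rendering note: with the Manim community CLI installed, the scene
# above can be previewed at low quality (the module filename is a placeholder
# for wherever this file is saved):
#
#   manim -pql this_scene.py A__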
import os
from math import logaa
def lowerCamelCase( a__ = "base_exp.txt"):
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__) ,a__))):
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =list(map(int ,line.split(''',''')))
if x * logaa(a__) > largest:
_SCREAMING_SNAKE_CASE =x * logaa(a__)
_SCREAMING_SNAKE_CASE =i + 1
return result
if __name__ == "__main__":
    print(solution())
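# Hedged note: each line of base_exp.txt is expected to look like
# "519432,525806" (base,exponent), and comparing exponent * log10(base)
# avoids materialising the astronomically large power base ** exponent.
# A tiny illustration:
#
#   from math import log10
#   assert 2 * log10(10) > 3 * log10(2)   # i.e. 10**2 > 2**3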
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
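# Hedged usage sketch: the class above corresponds to the upstream
# BlenderbotTokenizerFast; the checkpoint name is taken from
# PRETRAINED_VOCAB_FILES_MAP above, and downloading it requires network access.
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained('''facebook/blenderbot-3B''')
#   ids = tokenizer('''Hello there!''').input_ids
#   print(tokenizer.decode(ids))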
from __future__ import annotations
def lowerCamelCase( a__ ,a__):
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =len(a__) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
_SCREAMING_SNAKE_CASE =i + 1
else:
_SCREAMING_SNAKE_CASE =j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""") | 691 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : int , **_a : Optional[Any] ) -> Any:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
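        # (In English, the fixture reads: "Alexandra, the price of the T-shirt is 15 pence.")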
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "naver-clova-ix/donut-base-finetuned-docvqa"
UpperCAmelCase = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
UpperCAmelCase = "document_qa"
UpperCAmelCase = AutoProcessor
UpperCAmelCase = VisionEncoderDecoderModel
UpperCAmelCase = ["image", "text"]
UpperCAmelCase = ["text"]
def __init__( self : Any , *_a : Any , **_a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*_a , **_a )
def __UpperCamelCase ( self : Any , _a : "Image" , _a : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_SCREAMING_SNAKE_CASE =task_prompt.replace('''{user_input}''' , _a )
_SCREAMING_SNAKE_CASE =self.pre_processor.tokenizer(
_a , add_special_tokens=_a , return_tensors='''pt''' ).input_ids
_SCREAMING_SNAKE_CASE =self.pre_processor(_a , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> Tuple:
"""simple docstring"""
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_a , ).sequences
def __UpperCamelCase ( self : Union[str, Any] , _a : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.pre_processor.batch_decode(_a )[0]
_SCREAMING_SNAKE_CASE =sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
_SCREAMING_SNAKE_CASE =sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
_SCREAMING_SNAKE_CASE =re.sub(R'''<.*?>''' , '''''' , _a , count=1 ).strip() # remove first task start token
_SCREAMING_SNAKE_CASE =self.pre_processor.tokenajson(_a )
return sequence["answer"] | 691 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCamelCase( a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_SCREAMING_SNAKE_CASE ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
_SCREAMING_SNAKE_CASE =f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=a__ ,exist_ok=a__)
_SCREAMING_SNAKE_CASE =os.path.join(a__ ,'''README.md''')
print(f"Generating {path}")
with open(a__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(a__)
# make sure we are under the root of the project
snake_case_ : Any = Path(__file__).resolve().parent.parent.parent
snake_case_ : Tuple = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case_ : Union[str, Any] = model_cards_dir / '''allenai''' / model_name
    write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
def lowerCamelCase( a__):
if len(a__) < 2:
return collection
def circle_sort_util(a__ ,a__ ,a__) -> bool:
_SCREAMING_SNAKE_CASE =False
if low == high:
return swapped
_SCREAMING_SNAKE_CASE =low
_SCREAMING_SNAKE_CASE =high
while left < right:
if collection[left] > collection[right]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
collection[right],
collection[left],
)
_SCREAMING_SNAKE_CASE =True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
collection[right + 1],
collection[left],
)
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =low + int((high - low) / 2)
_SCREAMING_SNAKE_CASE =circle_sort_util(a__ ,a__ ,a__)
_SCREAMING_SNAKE_CASE =circle_sort_util(a__ ,mid + 1 ,a__)
return swapped or left_swap or right_swap
_SCREAMING_SNAKE_CASE =True
while is_not_sorted is True:
_SCREAMING_SNAKE_CASE =circle_sort_util(a__ ,0 ,len(a__) - 1)
return collection
if __name__ == "__main__":
snake_case_ : Optional[Any] = input('''Enter numbers separated by a comma:\n''').strip()
snake_case_ : Tuple = [int(item) for item in user_input.split(''',''')]
    print(circle_sort(unsorted))
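# Quick sanity checks (circle sort mutates its input in place and returns it):
#
#   assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
#   assert circle_sort([]) == []
#   assert circle_sort([7]) == [7]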
from typing import TYPE_CHECKING
from ....utils import _LazyModule
snake_case_ : Dict = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
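# Hedged note: _LazyModule defers importing tokenization_tapex until an
# attribute such as TapexTokenizer is first accessed, which keeps top-level
# `import transformers` cheap; the TYPE_CHECKING branch above hands static
# type checkers the real symbol without paying that import cost at runtime.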
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : str = logging.get_logger(__name__)
def lowerCamelCase( a__ ,a__=False):
_SCREAMING_SNAKE_CASE =[]
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
_SCREAMING_SNAKE_CASE =[(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def lowerCamelCase( a__ ,a__ ,a__=False):
for i in range(config.num_hidden_layers):
if base_model:
_SCREAMING_SNAKE_CASE =''''''
else:
_SCREAMING_SNAKE_CASE ='''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE =state_dict.pop(f"blocks.{i}.attn.qkv.weight")
_SCREAMING_SNAKE_CASE =state_dict.pop(f"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE =in_proj_weight[
: config.hidden_size, :
]
_SCREAMING_SNAKE_CASE =in_proj_bias[: config.hidden_size]
_SCREAMING_SNAKE_CASE =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_SCREAMING_SNAKE_CASE =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_SCREAMING_SNAKE_CASE =in_proj_weight[
-config.hidden_size :, :
]
_SCREAMING_SNAKE_CASE =in_proj_bias[-config.hidden_size :]
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a__ ,a__)
def lowerCamelCase( a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =dct.pop(a__)
_SCREAMING_SNAKE_CASE =val
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
_SCREAMING_SNAKE_CASE =Image.open(requests.get(a__ ,stream=a__).raw)
return im
@torch.no_grad()
def lowerCamelCase( a__ ,a__ ,a__=False):
_SCREAMING_SNAKE_CASE =BitConfig(
global_padding='''same''' ,layer_type='''bottleneck''' ,depths=(3, 4, 9) ,out_features=['''stage3'''] ,embedding_dynamic_padding=a__ ,)
_SCREAMING_SNAKE_CASE =ViTHybridConfig(backbone_config=a__ ,image_size=384 ,num_labels=1000)
_SCREAMING_SNAKE_CASE =False
# load original model from timm
_SCREAMING_SNAKE_CASE =timm.create_model(a__ ,pretrained=a__)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_SCREAMING_SNAKE_CASE =timm_model.state_dict()
if base_model:
remove_classification_head_(a__)
_SCREAMING_SNAKE_CASE =create_rename_keys(a__ ,a__)
for src, dest in rename_keys:
rename_key(a__ ,a__ ,a__)
read_in_q_k_v(a__ ,a__ ,a__)
_SCREAMING_SNAKE_CASE ='''huggingface/label-files'''
_SCREAMING_SNAKE_CASE ='''imagenet-1k-id2label.json'''
_SCREAMING_SNAKE_CASE =json.load(open(hf_hub_download(a__ ,a__ ,repo_type='''dataset''') ,'''r'''))
_SCREAMING_SNAKE_CASE ={int(a__): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE =idalabel
_SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_SCREAMING_SNAKE_CASE =ViTHybridModel(a__).eval()
else:
_SCREAMING_SNAKE_CASE =ViTHybridForImageClassification(a__).eval()
model.load_state_dict(a__)
# create image processor
_SCREAMING_SNAKE_CASE =create_transform(**resolve_data_config({} ,model=a__))
_SCREAMING_SNAKE_CASE =transform.transforms
_SCREAMING_SNAKE_CASE ={
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_SCREAMING_SNAKE_CASE =ViTHybridImageProcessor(
do_resize=a__ ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=a__ ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=a__ ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =transform(a__).unsqueeze(0)
_SCREAMING_SNAKE_CASE =processor(a__ ,return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a__ ,a__)
# verify logits
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(a__)
_SCREAMING_SNAKE_CASE =outputs.logits
print('''Predicted class:''' ,logits.argmax(-1).item())
if base_model:
_SCREAMING_SNAKE_CASE =timm_model.forward_features(a__)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a__ ,outputs.pooler_output ,atol=1e-3)
else:
_SCREAMING_SNAKE_CASE =timm_model(a__)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a__ ,outputs.logits ,atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a__).mkdir(exist_ok=a__)
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a__)
print(f"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a__)
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(f"ybelkada/{vit_name}")
processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
snake_case_ : List[Any] = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
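# Example invocation (the script filename is hypothetical; the flags match the
# argparse definitions above):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base \
#       --push_to_hub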
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCamelCase( a__):
def wrapper(*a__ ,**a__):
_SCREAMING_SNAKE_CASE =timeit.default_timer()
_SCREAMING_SNAKE_CASE =func(*a__ ,**a__)
_SCREAMING_SNAKE_CASE =timeit.default_timer() - starttime
return delta
_SCREAMING_SNAKE_CASE =func.__name__
return wrapper
def lowerCamelCase( a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =seq_shapes or {}
for i in range(a__):
_SCREAMING_SNAKE_CASE ={}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(a__ ,_ArrayXD):
_SCREAMING_SNAKE_CASE =np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(a__ ,datasets.Value):
if v.dtype == "string":
_SCREAMING_SNAKE_CASE ='''The small grey turtle was surprisingly fast when challenged.'''
else:
_SCREAMING_SNAKE_CASE =np.random.randint(10 ,size=1).astype(v.dtype).item()
elif isinstance(a__ ,datasets.Sequence):
while isinstance(a__ ,datasets.Sequence):
_SCREAMING_SNAKE_CASE =v.feature
_SCREAMING_SNAKE_CASE =seq_shapes[k]
_SCREAMING_SNAKE_CASE =np.random.rand(*a__).astype(v.dtype)
_SCREAMING_SNAKE_CASE =data
dummy_data.append((i, example))
return dummy_data
def lowerCamelCase( a__ ,a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =generate_examples(a__ ,num_examples=a__ ,seq_shapes=a__)
with ArrowWriter(features=a__ ,path=a__) as writer:
for key, record in dummy_data:
_SCREAMING_SNAKE_CASE =features.encode_example(a__)
writer.write(a__)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =writer.finalize()
    if num_final_examples != num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
_SCREAMING_SNAKE_CASE =datasets.Dataset.from_file(filename=a__ ,info=datasets.DatasetInfo(features=a__))
    return dataset
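# Hedged usage sketch (the feature spec, shapes and counts are illustrative;
# the helper name follows its call site above):
#
#   import datasets
#   feats = datasets.Features({'''text''': datasets.Value('''string'''),
#                              '''vec''': datasets.Sequence(datasets.Value('''float32'''))})
#   rows = generate_examples(feats, num_examples=10, seq_shapes={'''vec''': (8,)})
#   # rows is a list of (index, example-dict) pairs ready for ArrowWriter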
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = KandinskyImgaImgPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_SCREAMING_SNAKE_CASE =MultilingualCLIP(_a )
_SCREAMING_SNAKE_CASE =text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(**_a )
return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =self.dummy_tokenizer
_SCREAMING_SNAKE_CASE =self.dummy_unet
_SCREAMING_SNAKE_CASE =self.dummy_movq
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_SCREAMING_SNAKE_CASE =DDIMScheduler(**_a )
_SCREAMING_SNAKE_CASE ={
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self : str , _a : int , _a : int=0 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((256, 256) )
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu'''
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs(_a ) )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_SCREAMING_SNAKE_CASE ='''A red cartoon frog, 4k'''
_SCREAMING_SNAKE_CASE =KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_SCREAMING_SNAKE_CASE =KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE =pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.Generator(device='''cpu''' ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_SCREAMING_SNAKE_CASE =pipeline(
_a , image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(_a , _a )
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any]=None , _a : Optional[int]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : str=None , _a : Any=None , _a : str=None , _a : Optional[Any]=None , _a : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =BertModelWithPabee(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[str] , _a : Optional[Any]=None , _a : List[Any]=None , _a : Union[str, Any]=None , _a : List[str]=None , _a : Dict=None , _a : Optional[Any]=None , _a : Optional[Any]=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE =(logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE =(total_loss / total_weights,) + outputs
        return outputs
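# Hedged note on the patience mechanism above: at inference time with
# patience > 0, the model exits early once `patience` consecutive internal
# classifiers repeat the previous layer's prediction. E.g. with patience=2 and
# per-layer argmax predictions [3, 1, 4, 4, 4, ...], the counter resets on each
# disagreement, reaches 2 at the fifth layer, and the remaining layers are
# skipped.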
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
snake_case_ : str = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_audio_spectrogram_transformer'''] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 691 |
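# Same lazy-import pattern, this time for the Table Transformer model.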
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_table_transformer'''] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 691 | 1 |
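# Script: export a BART model plus scripted beam search to an ONNX graph, then
# validate the ONNX Runtime output against the PyTorch output.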
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'''facebook/bart-base''': BartForConditionalGeneration}
tokenizer_dict = {'''facebook/bart-base''': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''')
    parser.add_argument(
        '''--validation_file''' ,type=str ,default=None ,help='''A csv or a json file containing the validation data.''')
    parser.add_argument(
        '''--max_length''' ,type=int ,default=5 ,help='''The maximum total input sequence length after tokenization.''' ,)
    parser.add_argument(
        '''--num_beams''' ,type=int ,default=None ,help=(
            '''Number of beams to use for evaluation. This argument will be '''
            '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
        ) ,)
    parser.add_argument(
        '''--model_name_or_path''' ,type=str ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,required=True ,)
    parser.add_argument(
        '''--config_name''' ,type=str ,default=None ,help='''Pretrained config name or path if not the same as model_name''' ,)
    parser.add_argument(
        '''--device''' ,type=str ,default='''cpu''' ,help='''Device where the model will be run''' ,)
    parser.add_argument('''--output_file_path''' ,type=str ,default=None ,help='''Where to store the final ONNX file.''')
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name ,device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        # Relax generation constraints that the scripted beam search does not handle.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model ,tokenizer ,onnx_file_path ,num_beams ,max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = '''My friends are cool but they eat too many carbs.'''
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors='''pt''').to(model.device)
        summary_ids = model.generate(
            inputs['''input_ids'''] ,attention_mask=inputs['''attention_mask'''] ,num_beams=num_beams ,max_length=max_length ,early_stopping=True ,decoder_start_token_id=model.config.decoder_start_token_id ,)
        torch.onnx.export(
            bart_script_model ,(
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) ,onnx_file_path ,opset_version=14 ,input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] ,output_names=['''output_ids'''] ,dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            } ,example_outputs=summary_ids ,)
        logger.info('''Model exported to {}'''.format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None ,{
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(num_beams),
                '''max_length''': np.array(max_length),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id),
            } ,)
        np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3)
        logger.info('''Model outputs from torch and ONNX Runtime are similar.''')
        logger.info('''Success.''')
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO ,)
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path ,device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''')
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = '''BART.onnx'''
    logger.info('''Exporting model to ONNX''')
    export_and_validate_model(model ,tokenizer ,output_name ,num_beams ,max_length)
if __name__ == "__main__":
main() | 691 |
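# Tests for the MGP-STR processor, which pairs a ViT image processor with a
# character-level tokenizer.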
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(3, 32, 128)
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[Any] , **_a : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Optional[int] , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
        image_input = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] ) | 691 | 1 |
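# Training arguments for sequence-to-sequence models, extending the base
# TrainingArguments with generation-specific options.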
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
snake_case_ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = field(default=UpperCamelCase__ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d | 691 |
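# Small scraper: pull the worldwide COVID-19 counters from worldometers.info.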
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url = "https://www.worldometers.info/coronavirus"):
    soup = BeautifulSoup(requests.get(url).text ,'''html.parser''')
    keys = soup.findAll('''h1''')
    values = soup.findAll('''div''' ,{'''class''': '''maincounter-number'''})
    keys += soup.findAll('''span''' ,{'''class''': '''panel-title'''})
    values += soup.findAll('''div''' ,{'''class''': '''number-table-main'''})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys ,values)}


if __name__ == "__main__":
    print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
        print(f"""{key}\n{value}\n""") | 691 | 1 |
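# Floyd-Warshall all-pairs shortest paths on a dense adjacency matrix.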
import math
class Graph:
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge( self , u , v , w ):
        self.dp[u][v] = w

    def floyd_warshall( self ):
        # classic O(n^3) relaxation over every intermediate node k
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )

    def show_min( self , u , v ):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3) | 691 |
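# Elementary single-bit operations on integers via shifting and masking.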
def set_bit(number ,position):
    return number | (1 << position)


def clear_bit(number ,position):
    return number & ~(1 << position)


def flip_bit(number ,position):
    return number ^ (1 << position)


def is_bit_set(number ,position):
    return ((number >> position) & 1) == 1


def get_bit(number ,position):
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 | 1 |
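# Tests for the fsspec-based filesystem helpers in `datasets`.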
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = '''mock-s3-bucket'''
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith('''s3://''') is False
    new_dataset_path = '''./local/path'''
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem('''file''')
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' ,COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class ,gz_file ,bz2_file ,lz4_file ,zstd_file ,xz_file ,text_file):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bz2_file, '''lz4''': lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol ,fo=input_path)
    assert isinstance(fs ,compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex('''.''')]
    assert fs.glob('''*''') == [expected_filename]
    with fs.open(expected_filename ,'''r''' ,encoding='''utf-8''') as f, open(text_file ,encoding='''utf-8''') as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' ,['''zip''', '''gzip'''])
def test_fs_isfile(protocol ,zip_jsonl_path ,jsonl_gz_path):
    compressed_file_paths = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = '''dataset.jsonl'''
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile('''non_existing_''' + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token ,hf_api ,hf_private_dataset_repo_txt_data ,text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data ,token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info ,token=hf_token)
    assert sorted(hffs.glob('''*''')) == [".gitattributes", "data"]
    assert hffs.isdir('''data''')
    assert hffs.isfile('''.gitattributes''') and hffs.isfile('''data/text_data.txt''')
    with open(text_file) as f:
        assert hffs.open('''data/text_data.txt''' ,'''r''').read() == f.read()
def test_fs_overwrites():
    protocol = '''bz2'''
    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol ,None ,clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
assert (
str(warning_info[0].message)
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
) | 691 |
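# Tests for the RAG retriever with canonical, custom, and legacy FAISS indexes.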
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =8
# DPR tok
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCamelCase ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Dict ) -> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
    def __UpperCamelCase ( self : Optional[int] , from_disk : bool ) -> Any:
        """simple docstring"""
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname , '''dataset''' )
            config.index_path = os.path.join(self.tmpdirname , '''index.faiss''' )
            dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
            dataset.drop_index('''embeddings''' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
            del dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , dataset ) , )
        return retriever
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_SCREAMING_SNAKE_CASE ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
len(_a ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary. | 691 | 1 |
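# Project Euler problem 71: the largest numerator n with n/d < 3/7 and d <= 1,000,000.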
def solution(numerator = 3 ,denominator = 7 ,limit = 1_000_000):
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 ,limit + 1):
        # largest numerator with current_numerator/current_denominator <= numerator/denominator
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # compare fractions by cross-multiplication to avoid floating-point error
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00)) | 691 |
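# Fast and slow tests for the Kandinsky image-to-image diffusion pipeline.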
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( UpperCamelCase__ , unittest.TestCase ):
    UpperCAmelCase = KandinskyImg2ImgPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_SCREAMING_SNAKE_CASE =MultilingualCLIP(_a )
_SCREAMING_SNAKE_CASE =text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =self.dummy_tokenizer
_SCREAMING_SNAKE_CASE =self.dummy_unet
_SCREAMING_SNAKE_CASE =self.dummy_movq
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_SCREAMING_SNAKE_CASE =DDIMScheduler(**_a )
_SCREAMING_SNAKE_CASE ={
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
    def __UpperCamelCase ( self : str , device : str , seed : int=0 ) -> Any:
        """simple docstring"""
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((256, 256) )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu'''
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs(_a ) )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_SCREAMING_SNAKE_CASE ='''A red cartoon frog, 4k'''
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(_a )
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.float16 )
_SCREAMING_SNAKE_CASE =pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.Generator(device='''cpu''' ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_SCREAMING_SNAKE_CASE =pipeline(
_a , image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a ) | 691 | 1 |
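# TensorFlow DeiT model tests (feature extraction, image classification, and
# masked image modeling heads).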
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A__ :
def __init__( self : Optional[Any] , _a : str , _a : Any=13 , _a : Optional[Any]=30 , _a : int=2 , _a : List[str]=3 , _a : Dict=True , _a : Optional[int]=True , _a : Union[str, Any]=32 , _a : List[str]=2 , _a : List[Any]=4 , _a : Optional[Any]=37 , _a : Tuple="gelu" , _a : Optional[int]=0.1 , _a : Optional[Any]=0.1 , _a : Optional[Any]=10 , _a : str=0.02 , _a : List[str]=3 , _a : Tuple=None , _a : str=2 , ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =type_sequence_label_size
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
_SCREAMING_SNAKE_CASE =(image_size // patch_size) ** 2
_SCREAMING_SNAKE_CASE =num_patches + 2
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCamelCase ( self : int , _a : str , _a : Union[str, Any] , _a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TFDeiTModel(config=_a )
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Optional[int] , _a : str , _a : List[str] , _a : Optional[int] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TFDeiTForMaskedImageModeling(config=_a )
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =TFDeiTForMaskedImageModeling(_a )
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCamelCase ( self : Tuple , _a : List[str] , _a : Union[str, Any] , _a : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.type_sequence_label_size
_SCREAMING_SNAKE_CASE =TFDeiTForImageClassification(_a )
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =TFDeiTForImageClassification(_a )
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TFDeiTModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_SCREAMING_SNAKE_CASE =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , tf.keras.layers.Dense ) )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_a )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __UpperCamelCase ( self : Any , _a : Dict , _a : int , _a : Tuple=False ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =TFDeiTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''tf''' )
# forward pass
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) ) | 691 |
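# Tests for the Chinese-CLIP image processor configuration and defaults.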
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : Dict , _a : Dict=7 , _a : List[str]=3 , _a : str=18 , _a : Optional[int]=30 , _a : Tuple=400 , _a : Optional[Any]=True , _a : Dict=None , _a : str=True , _a : Tuple=None , _a : Any=True , _a : Any=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _a : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _a : List[Any]=True , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 224, '''width''': 224}
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean
_SCREAMING_SNAKE_CASE =image_std
_SCREAMING_SNAKE_CASE =do_convert_rgb
  def prepare_image_processor_dict(self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
  def prepare_inputs(self , equal_resolution=False , numpify=False , torchify=False ):
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
                255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
        width , height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
        image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
      image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
if torchify:
      image_inputs = [torch.from_numpy(x ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
  image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
  def setUp(self ):
    """simple docstring"""
    self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
@property
  def image_processor_dict(self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
  def test_image_processor_properties(self ):
    """simple docstring"""
    image_processing = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
    self.assertTrue(hasattr(image_processing , '''size''' ) )
    self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
    self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
    self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
    self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
    self.assertTrue(hasattr(image_processing , '''image_std''' ) )
    self.assertTrue(hasattr(image_processing , '''do_convert_rgb''' ) )
  def test_image_processor_from_dict_with_kwargs(self ):
    """simple docstring"""
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
    self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
    self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
    self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
    self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
  def test_call_pil(self ):
    """simple docstring"""
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PIL images
    image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
    for image in image_inputs:
      self.assertIsInstance(image , Image.Image )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
    encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
  def test_call_numpy(self ):
    """simple docstring"""
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random numpy tensors
    image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , numpify=True )
    for image in image_inputs:
      self.assertIsInstance(image , np.ndarray )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
    encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
  def test_call_pytorch(self ):
    """simple docstring"""
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , torchify=True )
    for image in image_inputs:
      self.assertIsInstance(image , torch.Tensor )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
    encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin , unittest.TestCase ):
  image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
  def setUp(self ):
    """simple docstring"""
    self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
    self.expected_encoded_image_num_channels = 3
@property
  def image_processor_dict(self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
  def test_image_processor_properties(self ):
    """simple docstring"""
    image_processing = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
    self.assertTrue(hasattr(image_processing , '''size''' ) )
    self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
    self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
    self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
    self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
    self.assertTrue(hasattr(image_processing , '''image_std''' ) )
    self.assertTrue(hasattr(image_processing , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
  def test_call_pil(self ):
    """simple docstring"""
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PIL images
    image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
    for image in image_inputs:
      self.assertIsInstance(image , Image.Image )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
    encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
        ) , )
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin ):
  feature_extractor_class = "Wav2Vec2FeatureExtractor"
  tokenizer_class = "AutoTokenizer"
  def __init__(self , feature_extractor , tokenizer ):
    """simple docstring"""
    super().__init__(feature_extractor , tokenizer )
    self.current_processor = self.feature_extractor
    self._in_target_context_manager = False
@classmethod
  def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
    """simple docstring"""
    try:
      return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
    except OSError:
      warnings.warn(
          f"Loading a tokenizer inside {cls.__name__} from a config that does not"
          ''' include a `tokenizer_class` attribute is deprecated and will be '''
          '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
          ''' attribute to either your `config.json` or `tokenizer_config.json` '''
          '''file to suppress this warning: ''' , FutureWarning , )
      feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
      tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
      return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
  def __call__(self , *args , **kwargs ):
    """simple docstring"""
    if self._in_target_context_manager:
      return self.current_processor(*args , **kwargs )
    if "raw_speech" in kwargs:
      warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
      audio = kwargs.pop('''raw_speech''' )
    else:
      audio = kwargs.pop('''audio''' , None )
    sampling_rate = kwargs.pop('''sampling_rate''' , None )
    text = kwargs.pop('''text''' , None )
    if len(args ) > 0:
      audio = args[0]
      args = args[1:]
    if audio is None and text is None:
      raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
    if audio is not None:
      inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
    if text is not None:
      encodings = self.tokenizer(text , **kwargs )
    if text is None:
      return inputs
    elif audio is None:
      return encodings
    else:
      inputs['''labels'''] = encodings['''input_ids''']
      return inputs
  def pad(self , *args , **kwargs ):
    """simple docstring"""
    if self._in_target_context_manager:
      return self.current_processor.pad(*args , **kwargs )
    input_features = kwargs.pop('''input_features''' , None )
    labels = kwargs.pop('''labels''' , None )
    if len(args ) > 0:
      input_features = args[0]
      args = args[1:]
    if input_features is not None:
      input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
    if labels is not None:
      labels = self.tokenizer.pad(labels , **kwargs )
    if labels is None:
      return input_features
    elif input_features is None:
      return labels
    else:
      input_features['''labels'''] = labels['''input_ids''']
      return input_features
  def batch_decode(self , *args , **kwargs ):
    """simple docstring"""
    return self.tokenizer.batch_decode(*args , **kwargs )
  def decode(self , *args , **kwargs ):
    """simple docstring"""
    return self.tokenizer.decode(*args , **kwargs )
@contextmanager
  def as_target_processor(self ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
    self._in_target_context_manager = True
    self.current_processor = self.tokenizer
    yield
    self.current_processor = self.feature_extractor
    self._in_target_context_manager = False
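# --- Usage sketch for the processor above (illustrative only; the checkpoint
# name is an assumption, not something this module defines) ---
#
#   from transformers import Wav2Vec2Processor
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD").input_ids  # CTC target ids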
def and_gate(input_1 , input_2 ):
  """AND gate: returns 1 only when both inputs are 1, else 0."""
  return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate():
assert and_gate(0 ,0) == 0
assert and_gate(0 ,1) == 0
assert and_gate(1 ,0) == 0
assert and_gate(1 ,1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
    print(and_gate(1, 1))
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
  parser = argparse.ArgumentParser()
  parser.add_argument('''--model_ckpt''' ,type=str ,default='''microsoft/unixcoder-base-nine''')
  parser.add_argument('''--num_epochs''' ,type=int ,default=5)
  parser.add_argument('''--batch_size''' ,type=int ,default=6)
  parser.add_argument('''--gradient_accumulation_steps''' ,type=int ,default=1)
  parser.add_argument('''--freeze''' ,type=bool ,default=True)
  parser.add_argument('''--learning_rate''' ,type=float ,default=5e-4)
  parser.add_argument('''--seed''' ,type=int ,default=0)
  parser.add_argument('''--lr_scheduler_type''' ,type=str ,default='''cosine''')
  parser.add_argument('''--num_warmup_steps''' ,type=int ,default=10)
  parser.add_argument('''--weight_decay''' ,type=float ,default=0.01)
  parser.add_argument('''--output_dir''' ,type=str ,default='''./results''')
  return parser.parse_args()
metric = load('''accuracy''')
def compute_metrics(eval_pred ):
  predictions , labels = eval_pred
  predictions = np.argmax(predictions ,axis=1 )
  return metric.compute(predictions=predictions ,references=labels )
class CustomCallback(TrainerCallback ):
  def __init__(self , trainer ) -> None:
    """simple docstring"""
    super().__init__()
    self._trainer = trainer
  def on_epoch_end(self , args , state , control , **kwargs ):
    """simple docstring"""
    if control.should_evaluate:
      control_copy = deepcopy(control )
      self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
      return control_copy
def main():
  args = get_args()
  set_seed(args.seed)
  dataset = load_dataset('''codeparrot/codecomplex''' ,split='''train''')
  train_test = dataset.train_test_split(test_size=0.2)
  test_validation = train_test['''test'''].train_test_split(test_size=0.5)
  train_test_validation = DatasetDict(
      {
          '''train''': train_test['''train'''],
          '''test''': test_validation['''train'''],
          '''valid''': test_validation['''test'''],
      })
  print('''Loading tokenizer and model''')
  tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
  tokenizer.pad_token = tokenizer.eos_token
  model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt ,num_labels=7)
  model.config.pad_token_id = model.config.eos_token_id
  if args.freeze:
    for param in model.roberta.parameters():
      param.requires_grad = False
  labels = ClassLabel(num_classes=7 ,names=list(set(train_test_validation['''train''']['''complexity'''])))
  def tokenize(example ):
    inputs = tokenizer(example['''src'''] ,truncation=True ,max_length=1024)
    label = labels.str2int(example['''complexity'''])
    return {
        "input_ids": inputs["input_ids"],
        "attention_mask": inputs["attention_mask"],
        "label": label,
    }
  tokenized_datasets = train_test_validation.map(
      tokenize ,batched=True ,remove_columns=train_test_validation['''train'''].column_names ,)
  data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
  training_args = TrainingArguments(
      output_dir=args.output_dir ,learning_rate=args.learning_rate ,lr_scheduler_type=args.lr_scheduler_type ,evaluation_strategy='''epoch''' ,save_strategy='''epoch''' ,logging_strategy='''epoch''' ,per_device_train_batch_size=args.batch_size ,per_device_eval_batch_size=args.batch_size ,num_train_epochs=args.num_epochs ,gradient_accumulation_steps=args.gradient_accumulation_steps ,weight_decay=0.01 ,metric_for_best_model='''accuracy''' ,run_name='''complexity-java''' ,report_to='''wandb''' ,)
  trainer = Trainer(
      model=model ,args=training_args ,train_dataset=tokenized_datasets['''train'''] ,eval_dataset=tokenized_datasets['''valid'''] ,tokenizer=tokenizer ,data_collator=data_collator ,compute_metrics=compute_metrics ,)
  print('''Training...''')
  trainer.add_callback(CustomCallback(trainer ) )
  trainer.train()
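# Typical invocation (an illustrative guess; the script filename is hypothetical):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5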
if __name__ == "__main__":
  main()
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = '''sshleifer/mar_enro_6_3_student'''
class TestMbartCc25Enro(TestCasePlus ):
  def setUp(self ):
    """simple docstring"""
    super().setUp()
    data_cached = cached_path(
        '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=True , )
    self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
    assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # BLEU must improve over training
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
    assert len(metrics['''test'''] ) == 1
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
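# The sequence here is a(1) = 1, a(n + 1) = a(n) + digitsum(a(n)), and the goal
# is a(10**15) (Project Euler problem 551). Iterating naively is hopeless at
# that scale, so each term is kept as a little-endian digit list written as
# a(i) = b * 10**k + c; for every (digitsum(b), c) pair, `memo` caches how far
# the low k digits can be "jumped" in a single step.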
def next_term(a_i , k , i , n ):
  # ds_b = digitsum(b); c = value of the low k digits of the current term
  ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
  c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
  diff , dn = 0, 0
  max_dn = n - i
  sub_memo = memo.get(ds_b )
  if sub_memo is not None:
    jumps = sub_memo.get(c )
    if jumps is not None and len(jumps ) > 0:
      # find and make the largest jump without going over
      max_jump = -1
      for _k in range(len(jumps ) - 1 , -1 , -1 ):
        if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
          max_jump = _k
          break
      if max_jump >= 0:
        diff , dn , _kk = jumps[max_jump]
        # since the difference between jumps is cached, add c
        new_c = diff + c
        for j in range(min(k , len(a_i ) ) ):
          new_c , a_i[j] = divmod(new_c , 10 )
        if new_c > 0:
          add(a_i , k , new_c )
    else:
      sub_memo[c] = []
  else:
    sub_memo = {c: []}
    memo[ds_b] = sub_memo
  if dn >= max_dn or c + diff >= base[k]:
    return diff, dn
  if k > ks[0]:
    while True:
      # keep doing smaller jumps
      _diff , terms_jumped = next_term(a_i , k - 1 , i + dn , n )
      diff += _diff
      dn += terms_jumped
      if dn >= max_dn or c + diff >= base[k]:
        break
  else:
    # would be too small a jump, just compute sequential terms instead
    _diff , terms_jumped = compute(a_i , k , i + dn , n )
    diff += _diff
    dn += terms_jumped
  jumps = sub_memo[c]
  # keep jumps sorted by # of terms skipped
  j = 0
  while j < len(jumps ):
    if jumps[j][1] > dn:
      break
    j += 1
  # cache the jump for this value digitsum(b) and c
  sub_memo[c].insert(j , (diff, dn, k) )
  return (diff, dn)
def compute(a_i , k , i , n ):
  if i >= n:
    return 0, i
  if k > len(a_i ):
    a_i.extend([0 for _ in range(k - len(a_i ) )] )
  # note: a_i -> b * 10^k + c
  # ds_b -> digitsum(b)
  # ds_c -> digitsum(c)
  start_i = i
  ds_b , ds_c , diff = 0, 0, 0
  for j in range(len(a_i ) ):
    if j >= k:
      ds_b += a_i[j]
    else:
      ds_c += a_i[j]
  while i < n:
    i += 1
    addend = ds_c + ds_b
    diff += addend
    ds_c = 0
    for j in range(k ):
      s = a_i[j] + addend
      addend , a_i[j] = divmod(s , 10 )
      ds_c += a_i[j]
    if addend > 0:
      break
  if addend > 0:
    add(a_i , k , addend )
  return diff, i - start_i
def add(digits , k , addend ):
  # add `addend` into the little-endian digit list starting at index k,
  # i.e. digits += addend * 10**k, propagating carries upward
  for j in range(k , len(digits ) ):
    addend , digits[j] = divmod(digits[j] + addend , 10 )
    if addend == 0:
      break
  while addend > 0:
    addend , digit = divmod(addend , 10 )
    digits.append(digit )
def solution(n = 10**15 ):
  digits = [1]
  i = 1
  dn = 0
  while True:
    diff , terms_jumped = next_term(digits , 20 , i + dn , n )
    dn += terms_jumped
    if dn == n - i:
      break
  a_n = 0
  for j in range(len(digits ) ):
    a_n += digits[j] * 10**j
  return a_n
if __name__ == "__main__":
print(f"""{solution() = }""") | 691 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler ):
  a : int = 0
  b : bool = False
  c : float = 3.0
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
    scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
    AcceleratorState._reset_state()
    accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
    print(accelerator.use_fp16 )
    scaler = accelerator.scaler
# Check the kwargs have been applied
    self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
    cmd = ['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
    execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
        raise ValueError(error_msg)
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
    arr = pa.array(TypedSequence([1, 2, 3] ) )
    self.assertEqual(arr.type , pa.int64() )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(_a ):
      arr = pa.array(TypedSequence([1, 2, 3] ) , type=pa.int64() )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =pa.array(TypedSequence([1, 2, 3] , try_type=Value('''bool''' ) , type=Value('''int64''' ) ) )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
    arr = pa.array(TypedSequence([1, 2, 3] , type=Value('''int32''' ) ) )
    self.assertEqual(arr.type , pa.int32() )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_SCREAMING_SNAKE_CASE =pa.array(TypedSequence(['''foo''', '''bar'''] , type=Value('''int64''' ) ) )
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
    arr = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''int32''' ) ) )
    self.assertEqual(arr.type , pa.int32() )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
    arr = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Value('''int64''' ) ) )
    self.assertEqual(arr.type , pa.string() )
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
    arr = pa.array(TypedSequence([[[1, 2, 3]]] , type=Array2D((1, 3) , '''int64''' ) ) )
    self.assertEqual(arr.type , Array2DExtensionType((1, 3) , '''int64''' ) )
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
      arr = pa.array(TypedSequence(['''foo''', '''bar'''] , type=Array2D((1, 3) , '''int64''' ) ) )
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
    arr = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=Array2D((1, 3) , '''int64''' ) ) )
    self.assertEqual(arr.type , Array2DExtensionType((1, 3) , '''int64''' ) )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
    arr = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Array2D((1, 3) , '''int64''' ) ) )
    self.assertEqual(arr.type , pa.string() )
@require_pil
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
import PIL.Image
    pil_image = PIL.Image.fromarray(np.arange(10 , dtype=np.uint8 ).reshape(2 , 5 ) )
with patch(
'''datasets.arrow_writer.cast_to_python_objects''' , side_effect=_a ) as mock_cast_to_python_objects:
      out = pa.array(TypedSequence([{'''path''': None, '''bytes''': B'''image_bytes'''}, pil_image] , type=Image() ) )
      args , kwargs = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('''optimize_list_casting''' , _a )
self.assertFalse(kwargs['''optimize_list_casting'''] )
def _check_output(output , expected_num_chunks ):
  stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
  f = pa.ipc.open_stream(stream )
  pa_table = f.read_all()
  assert len(pa_table.to_batches()) == expected_num_chunks
  assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
  del pa_table
del pa_table
@pytest.mark.parametrize('''writer_batch_size''' ,[None, 1, 10])
@pytest.mark.parametrize(
    '''fields''' ,[None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}])
def test_write(fields , writer_batch_size ):
  output = pa.BufferOutputStream()
  schema = pa.schema(fields ) if fields else None
  with ArrowWriter(stream=output ,schema=schema ,writer_batch_size=writer_batch_size ) as writer:
    writer.write({'''col_1''': '''foo''', '''col_2''': 1})
    writer.write({'''col_1''': '''bar''', '''col_2''': 2})
    num_examples , num_bytes = writer.finalize()
  assert num_examples == 2
  assert num_bytes > 0
  if not fields:
    fields = {'''col_1''': pa.string(), '''col_2''': pa.int64()}
  assert writer._schema == pa.schema(fields ,metadata=writer._schema.metadata )
  _check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
  output = pa.BufferOutputStream()
  features = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} )
  with ArrowWriter(stream=output ,features=features ) as writer:
    writer.write({'''labels''': 0})
    writer.write({'''labels''': 1})
    num_examples , num_bytes = writer.finalize()
  assert num_examples == 2
  assert num_bytes > 0
  assert writer._schema == features.arrow_schema
  assert writer._schema.metadata == features.arrow_schema.metadata
  stream = pa.BufferReader(output.getvalue() )
  f = pa.ipc.open_stream(stream )
  pa_table = f.read_all()
  schema = pa_table.schema
  assert pa_table.num_rows == 2
  assert schema == features.arrow_schema
  assert schema.metadata == features.arrow_schema.metadata
  assert features == Features.from_arrow_schema(schema )
@pytest.mark.parametrize('''writer_batch_size''' ,[None, 1, 10])
def test_key_datatype(writer_batch_size ):
  output = pa.BufferOutputStream()
  with ArrowWriter(
      stream=output ,writer_batch_size=writer_batch_size ,hash_salt='''split_name''' ,check_duplicates=True ,) as writer:
    with pytest.raises(InvalidKeyError ):
      writer.write({'''col_1''': '''foo''', '''col_2''': 1} ,key=[1, 2] )
    num_examples , num_bytes = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' ,[None, 2, 10])
def test_duplicate_keys(writer_batch_size ):
  output = pa.BufferOutputStream()
  with ArrowWriter(
      stream=output ,writer_batch_size=writer_batch_size ,hash_salt='''split_name''' ,check_duplicates=True ,) as writer:
    with pytest.raises(DuplicatedKeysError ):
      writer.write({'''col_1''': '''foo''', '''col_2''': 1} ,key=10 )
      writer.write({'''col_1''': '''bar''', '''col_2''': 2} ,key=10 )
    num_examples , num_bytes = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' ,[None, 2, 10])
def test_write_with_keys(writer_batch_size ):
  output = pa.BufferOutputStream()
  with ArrowWriter(
      stream=output ,writer_batch_size=writer_batch_size ,hash_salt='''split_name''' ,check_duplicates=True ,) as writer:
    writer.write({'''col_1''': '''foo''', '''col_2''': 1} ,key=1 )
    writer.write({'''col_1''': '''bar''', '''col_2''': 2} ,key=2 )
    num_examples , num_bytes = writer.finalize()
  assert num_examples == 2
  assert num_bytes > 0
  _check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' ,[None, 1, 10])
@pytest.mark.parametrize(
    '''fields''' ,[None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}])
def test_write_batch(fields , writer_batch_size ):
  output = pa.BufferOutputStream()
  schema = pa.schema(fields ) if fields else None
  with ArrowWriter(stream=output ,schema=schema ,writer_batch_size=writer_batch_size ) as writer:
    writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]})
    writer.write_batch({'''col_1''': [], '''col_2''': []})
    num_examples , num_bytes = writer.finalize()
  assert num_examples == 2
  assert num_bytes > 0
  if not fields:
    fields = {'''col_1''': pa.string(), '''col_2''': pa.int64()}
  assert writer._schema == pa.schema(fields ,metadata=writer._schema.metadata )
  _check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' ,[None, 1, 10])
@pytest.mark.parametrize(
    '''fields''' ,[None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}])
def test_write_table(fields , writer_batch_size ):
  output = pa.BufferOutputStream()
  schema = pa.schema(fields ) if fields else None
  with ArrowWriter(stream=output ,schema=schema ,writer_batch_size=writer_batch_size ) as writer:
    writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]}))
    num_examples , num_bytes = writer.finalize()
  assert num_examples == 2
  assert num_bytes > 0
  if not fields:
    fields = {'''col_1''': pa.string(), '''col_2''': pa.int64()}
  assert writer._schema == pa.schema(fields ,metadata=writer._schema.metadata )
  _check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' ,[None, 1, 10])
@pytest.mark.parametrize(
    '''fields''' ,[None, {'''col_1''': pa.string(), '''col_2''': pa.int64()}, {'''col_1''': pa.string(), '''col_2''': pa.int32()}])
def test_write_row(fields , writer_batch_size ):
  output = pa.BufferOutputStream()
  schema = pa.schema(fields ) if fields else None
  with ArrowWriter(stream=output ,schema=schema ,writer_batch_size=writer_batch_size ) as writer:
    writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]}))
    writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]}))
    num_examples , num_bytes = writer.finalize()
  assert num_examples == 2
  assert num_bytes > 0
  if not fields:
    fields = {'''col_1''': pa.string(), '''col_2''': pa.int64()}
  assert writer._schema == pa.schema(fields ,metadata=writer._schema.metadata )
  _check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
  with tempfile.TemporaryDirectory() as tmp_dir:
    fields = {'''col_1''': pa.string(), '''col_2''': pa.int64()}
    output = os.path.join(tmp_dir ,'''test.arrow''')
    with ArrowWriter(path=output ,schema=pa.schema(fields ) ) as writer:
      writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]})
      num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == pa.schema(fields ,metadata=writer._schema.metadata )
    _check_output(output ,1 )
def get_base_dtype(arr_type ):
  if pa.types.is_list(arr_type ):
    return get_base_dtype(arr_type.value_type )
  else:
    return arr_type
def change_first_primitive_element_in_list(lst , value ):
  if isinstance(lst[0] , list ):
    change_first_primitive_element_in_list(lst[0] , value )
  else:
    lst[0] = value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''' ,[(None, pa.int64()), (Value('''int32'''), pa.int32())])
@pytest.mark.parametrize('''sequence''' ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence , optimized_int_type , expected_dtype ):
  arr = pa.array(TypedSequence(sequence , optimized_int_type=optimized_int_type ) )
  assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    '''col, expected_dtype''' ,[
        ('''attention_mask''', pa.int8()),
        ('''special_tokens_mask''', pa.int8()),
        ('''token_type_ids''', pa.int8()),
        ('''input_ids''', pa.int32()),
        ('''other''', pa.int64()),
    ] ,)
@pytest.mark.parametrize('''sequence''' ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence , col , expected_dtype ):
  # in range
  arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
  assert get_base_dtype(arr.type ) == expected_dtype
  # not in range
  if col != "other":
    # avoids errors due to in-place modifications
    sequence = copy.deepcopy(sequence )
    value = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
    change_first_primitive_element_in_list(sequence , value )
    arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
    assert get_base_dtype(arr.type ) == pa.int64()
@pytest.mark.parametrize('''raise_exception''' ,[False, True])
def test_arrow_writer_closes_stream(raise_exception , tmp_path ):
  path = str(tmp_path / '''dataset-train.arrow''')
  try:
    with ArrowWriter(path=path ) as writer:
      if raise_exception:
        raise pa.lib.ArrowInvalid()
      else:
        writer.stream.close()
  except pa.lib.ArrowInvalid:
    pass
  finally:
    assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs ):
  path = '''mock://dataset-train.arrow'''
  with ArrowWriter(path=path ,storage_options=mockfs.storage_options ) as writer:
    assert isinstance(writer._fs ,type(mockfs ) )
    assert writer._fs.storage_options == mockfs.storage_options
    writer.write({'''col_1''': '''foo''', '''col_2''': 1})
    writer.write({'''col_1''': '''bar''', '''col_2''': 2})
    num_examples , num_bytes = writer.finalize()
  assert num_examples == 2
  assert num_bytes > 0
  assert mockfs.exists(path )
def test_parquet_write():
  output = pa.BufferOutputStream()
  with ParquetWriter(stream=output ) as writer:
    writer.write({'''col_1''': '''foo''', '''col_2''': 1})
    writer.write({'''col_1''': '''bar''', '''col_2''': 2})
    num_examples , num_bytes = writer.finalize()
  assert num_examples == 2
  assert num_bytes > 0
  stream = pa.BufferReader(output.getvalue() )
  pa_table = pq.read_table(stream )
  assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''' ,[False, True])
def test_writer_embed_local_files(tmp_path , embed_local_files ):
  import PIL.Image
  image_path = str(tmp_path / '''test_image_rgb.jpg''')
  PIL.Image.fromarray(np.zeros((5, 5) ,dtype=np.uint8 ) ).save(image_path ,format='''png''')
  output = pa.BufferOutputStream()
  with ParquetWriter(
      stream=output ,features=Features({'''image''': Image()}) ,embed_local_files=embed_local_files ) as writer:
    writer.write({'''image''': image_path})
    writer.finalize()
  stream = pa.BufferReader(output.getvalue() )
  pa_table = pq.read_table(stream )
  out = pa_table.to_pydict()
  if embed_local_files:
    assert isinstance(out['''image'''][0]['''path'''] ,str )
    with open(image_path ,'''rb''') as f:
      assert out["image"][0]["bytes"] == f.read()
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def test_always_nullable():
  non_nullable_schema = pa.schema([pa.field('''col_1''' ,pa.string() ,nullable=False )] )
  output = pa.BufferOutputStream()
  with ArrowWriter(stream=output ) as writer:
    writer._build_writer(inferred_schema=non_nullable_schema )
  assert writer._schema == pa.schema([pa.field('''col_1''' ,pa.string() )] )
class Graph :
def __init__( self : List[str] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE ={}
  def add_vertex(self , vertex ):
    """simple docstring"""
    if vertex not in self.adjacency:
      self.adjacency[vertex] = {}
      self.num_vertices += 1
  def add_edge(self , head , tail , weight ):
    """simple docstring"""
    self.add_vertex(head )
    self.add_vertex(tail )
    if head == tail:
      return
    self.adjacency[head][tail] = weight
    self.adjacency[tail][head] = weight
  def distinct_weight(self ):
    """simple docstring"""
    edges = self.get_edges()
    for edge in edges:
      head , tail , weight = edge
      edges.remove((tail, head, weight) )
    for i in range(len(edges ) ):
      edges[i] = list(edges[i] )
    edges.sort(key=lambda e : e[2] )
    for i in range(len(edges ) - 1 ):
      if edges[i][2] >= edges[i + 1][2]:
        edges[i + 1][2] = edges[i][2] + 1
    for edge in edges:
      head , tail , weight = edge
      self.adjacency[head][tail] = weight
      self.adjacency[tail][head] = weight
def __str__( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
        weight = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip('''\n''' )
  def get_edges(self ):
    """simple docstring"""
    output = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
  def get_vertices(self ):
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
  def build(vertices=None , edges=None ):
    """simple docstring"""
    g = Graph()
    if vertices is None:
      vertices = []
    if edges is None:
      edges = []
for vertex in vertices:
g.add_vertex(_a )
for edge in edges:
g.add_edge(*_a )
return g
class UnionFind :
def __init__( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
    self.parent = {}
    self.rank = {}
def __len__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return len(self.parent )
  def make_set(self , item ):
    """simple docstring"""
    if item in self.parent:
      return self.find(item )
    self.parent[item] = item
    self.rank[item] = 0
    return item
  def find(self , item ):
    """simple docstring"""
    if item not in self.parent:
      return self.make_set(item )
    if item != self.parent[item]:
      self.parent[item] = self.find(self.parent[item] )
    return self.parent[item]
  def union(self , item1 , item2 ):
    """simple docstring"""
    root1 = self.find(item1 )
    root2 = self.find(item2 )
    if root1 == root2:
      return root1
    if self.rank[root1] > self.rank[root2]:
      self.parent[root2] = root1
      return root1
    if self.rank[root1] < self.rank[root2]:
      self.parent[root1] = root2
      return root2
    if self.rank[root1] == self.rank[root2]:
      self.rank[root1] += 1
      self.parent[root2] = root1
      return root1
    return None
@staticmethod
  def boruvka(graph ):
    """simple docstring"""
    num_components = graph.num_vertices
    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
      cheap_edge = {}
      for vertex in graph.get_vertices():
        cheap_edge[vertex] = -1
      edges = graph.get_edges()
      for edge in edges:
        head , tail , weight = edge
        edges.remove((tail, head, weight) )
      for edge in edges:
        head , tail , weight = edge
        set1 = union_find.find(head )
        set2 = union_find.find(tail )
        if set1 != set2:
          if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
            cheap_edge[set1] = [head, tail, weight]
          if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
            cheap_edge[set2] = [head, tail, weight]
      for vertex in cheap_edge:
        if cheap_edge[vertex] != -1:
          head , tail , weight = cheap_edge[vertex]
          if union_find.find(head ) != union_find.find(tail ):
            union_find.union(head , tail )
            mst_edges.append(cheap_edge[vertex] )
            num_components = num_components - 1
    mst = Graph.build(edges=mst_edges )
    return mst
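# --- Usage sketch (illustrative; not part of the original module) ---
# A tiny demo of Boruvka's algorithm; the vertices and weights are made up.
if __name__ == "__main__":
  g = Graph.build(
      vertices=[1, 2, 3, 4],
      edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)],
  )
  g.distinct_weight()  # Boruvka assumes distinct edge weights
  mst = UnionFind.boruvka(g )
  print(mst )  # one "head -> tail == weight" line per MST edge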
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : int = logging.get_logger(__name__)
def create_rename_keys(encoder_config , decoder_config ):
_SCREAMING_SNAKE_CASE =[]
for i in range(encoder_config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
])
return rename_keys
def read_in_q_k_v(state_dict , encoder_config ):
  for i in range(encoder_config.num_hidden_layers ):
    # queries, keys and values (only weights, no biases)
    in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
    state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
        : encoder_config.hidden_size, :
    ]
    state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
        encoder_config.hidden_size : encoder_config.hidden_size * 2, :
    ]
    state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
        -encoder_config.hidden_size :, :
    ]
def rename_key(dct , old , new ):
  val = dct.pop(old )
  dct[new] = val
def prepare_img(checkpoint_url ):
if "handwritten" in checkpoint_url:
_SCREAMING_SNAKE_CASE ='''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_SCREAMING_SNAKE_CASE ='''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
  im = Image.open(requests.get(checkpoint_url ,stream=True ).raw ).convert('''RGB''')
return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
_SCREAMING_SNAKE_CASE =ViTConfig(image_size=384 ,qkv_bias=a__)
_SCREAMING_SNAKE_CASE =TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_SCREAMING_SNAKE_CASE =768
elif "large" in checkpoint_url:
# use ViT-large encoder
_SCREAMING_SNAKE_CASE =1024
_SCREAMING_SNAKE_CASE =4096
_SCREAMING_SNAKE_CASE =24
_SCREAMING_SNAKE_CASE =16
_SCREAMING_SNAKE_CASE =1024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''')
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE ='''relu'''
_SCREAMING_SNAKE_CASE =1024
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =False
# load HuggingFace model
_SCREAMING_SNAKE_CASE =ViTModel(a__ ,add_pooling_layer=a__)
_SCREAMING_SNAKE_CASE =TrOCRForCausalLM(a__)
_SCREAMING_SNAKE_CASE =VisionEncoderDecoderModel(encoder=a__ ,decoder=a__)
model.eval()
# load state_dict of original model, rename some keys
_SCREAMING_SNAKE_CASE =torch.hub.load_state_dict_from_url(a__ ,map_location='''cpu''' ,check_hash=a__)['''model''']
_SCREAMING_SNAKE_CASE =create_rename_keys(a__ ,a__)
for src, dest in rename_keys:
rename_key(a__ ,a__ ,a__)
read_in_q_k_v(a__ ,a__)
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_SCREAMING_SNAKE_CASE =state_dict.pop(a__)
if key.startswith('''decoder''') and "output_projection" not in key:
_SCREAMING_SNAKE_CASE =val
else:
_SCREAMING_SNAKE_CASE =val
# load state dict
model.load_state_dict(a__)
# Check outputs on an image
_SCREAMING_SNAKE_CASE =ViTImageProcessor(size=encoder_config.image_size)
_SCREAMING_SNAKE_CASE =RobertaTokenizer.from_pretrained('''roberta-large''')
_SCREAMING_SNAKE_CASE =TrOCRProcessor(a__ ,a__)
_SCREAMING_SNAKE_CASE =processor(images=prepare_img(a__) ,return_tensors='''pt''').pixel_values
# verify logits
_SCREAMING_SNAKE_CASE =torch.tensor([[model.config.decoder.decoder_start_token_id]])
_SCREAMING_SNAKE_CASE =model(pixel_values=a__ ,decoder_input_ids=a__)
_SCREAMING_SNAKE_CASE =outputs.logits
_SCREAMING_SNAKE_CASE =torch.Size([1, 1, 5_0265])
if "trocr-base-handwritten" in checkpoint_url:
_SCREAMING_SNAKE_CASE =torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311])
elif "trocr-large-handwritten" in checkpoint_url:
_SCREAMING_SNAKE_CASE =torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170])
elif "trocr-base-printed" in checkpoint_url:
_SCREAMING_SNAKE_CASE =torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210])
elif "trocr-large-printed" in checkpoint_url:
_SCREAMING_SNAKE_CASE =torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535])
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] ,a__ ,atol=1e-3), "First elements of logits not as expected"
Path(a__).mkdir(exist_ok=a__)
print(f"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a__)
print(f"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a__)
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
snake_case_ : List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 691 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
def lowerCamelCase( a__ ,a__):
return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
_SCREAMING_SNAKE_CASE =processors[data_args.task_name]()
_SCREAMING_SNAKE_CASE =processor.get_labels()
_SCREAMING_SNAKE_CASE =len(a__)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(a__) -> Dict:
_SCREAMING_SNAKE_CASE =np.argmax(p.predictions ,axis=1)
return {"acc": simple_accuracy(a__ ,p.label_ids)}
# Data collator
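    # pad_to_multiple_of=8 aligns sequence lengths with fp16 tensor-core tile
    # sizes, which is presumably why the collator is only enabled for fp16 runs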
_SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_SCREAMING_SNAKE_CASE ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''')
if trainer.is_world_master():
with open(a__ ,'''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' ,a__ ,a__)
writer.write('''%s = %s\n''' % (key, value))
results.update(a__)
return results
def lowerCamelCase( a__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 691 | 1 |
def lowerCamelCase( a__ ,a__):
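    # AND: the tuple of inputs contains a 0 iff at least one input is 0,
    # so count(0) == 0 exactly when both inputs are truthy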
return int((input_a, input_a).count(0) == 0)
def lowerCamelCase( ):
assert and_gate(0 ,0) == 0
assert and_gate(0 ,1) == 0
assert and_gate(1 ,0) == 0
assert and_gate(1 ,1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 691 |
def lowerCamelCase( a__ ,a__ ,a__):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(a__ ,n - 1 ,a__) * a) % mod
else:
        _SCREAMING_SNAKE_CASE =binary_exponentiation(a__ ,n // 2 ,a__)  # integer division keeps the exponent an int
return (b * b) % mod
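# By Fermat's little theorem, for a prime p and b not divisible by p,
# b**(p - 1) % p == 1, so b**(p - 2) % p is the modular inverse of b.
# The checks below use this to compute (a / b) mod p without real division.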
# a prime number
snake_case_ : Union[str, Any] = 7_01
snake_case_ : int = 10_00_00_00_00
snake_case_ : str = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p) | 691 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
snake_case_ : Optional[int] = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
snake_case_ : List[Any] = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
snake_case_ : List[str] = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
snake_case_ : Union[str, Any] = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
snake_case_ : int = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def lowerCamelCase( a__ ,a__):
for tf_name, hf_name in patterns:
_SCREAMING_SNAKE_CASE =k.replace(a__ ,a__)
return k
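# convert_bigbird_pegasus: instantiates the HF model, then walks the TF weight
# dict in two passes (decoder weights first, then the rest), renaming keys with
# the pattern tables above and transposing dense/query/key/value kernels, which
# TF stores transposed relative to torch.nn.Linear.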
def lowerCamelCase( a__ ,a__):
_SCREAMING_SNAKE_CASE =BigBirdPegasusConfig(**a__)
_SCREAMING_SNAKE_CASE =BigBirdPegasusForConditionalGeneration(a__)
_SCREAMING_SNAKE_CASE =torch_model.state_dict()
_SCREAMING_SNAKE_CASE ={}
# separating decoder weights
_SCREAMING_SNAKE_CASE ={k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''')}
_SCREAMING_SNAKE_CASE ={k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''')}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion'''):
_SCREAMING_SNAKE_CASE =[k.endswith(a__) for ending in KEYS_TO_IGNORE]
if any(a__):
continue
_SCREAMING_SNAKE_CASE =DECODER_PATTERNS
_SCREAMING_SNAKE_CASE =rename_state_dict_key(a__ ,a__)
if new_k not in state_dict:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value''']):
_SCREAMING_SNAKE_CASE =v.T
_SCREAMING_SNAKE_CASE =torch.from_numpy(a__)
assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion'''):
_SCREAMING_SNAKE_CASE =[k.endswith(a__) for ending in KEYS_TO_IGNORE]
if any(a__):
continue
_SCREAMING_SNAKE_CASE =REMAINING_PATTERNS
_SCREAMING_SNAKE_CASE =rename_state_dict_key(a__ ,a__)
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value''']):
_SCREAMING_SNAKE_CASE =v.T
_SCREAMING_SNAKE_CASE =torch.from_numpy(a__)
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
_SCREAMING_SNAKE_CASE =mapping['''model.embed_positions.weight''']
_SCREAMING_SNAKE_CASE =mapping.pop('''model.embed_positions.weight''')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =torch_model.load_state_dict(a__ ,strict=a__)
_SCREAMING_SNAKE_CASE =[
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =tf.train.list_variables(a__)
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =['''global_step''']
for name, shape in tqdm(a__ ,desc='''converting tf checkpoint to dict'''):
_SCREAMING_SNAKE_CASE =any(pat in name for pat in ignore_name)
if skip_key:
continue
_SCREAMING_SNAKE_CASE =tf.train.load_variable(a__ ,a__)
_SCREAMING_SNAKE_CASE =array
return tf_weights
def lowerCamelCase( a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =get_tf_weights_as_numpy(a__)
_SCREAMING_SNAKE_CASE =convert_bigbird_pegasus(a__ ,a__)
torch_model.save_pretrained(a__)
if __name__ == "__main__":
snake_case_ : str = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
snake_case_ : Any = parser.parse_args()
snake_case_ : str = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 691 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
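            # the stem downsamples by a factor of 4, hence image_size // 4 below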
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self ) | 691 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class A__ ( pl.LightningModule ):
def __init__( self : List[str] , _a : Dict ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
_SCREAMING_SNAKE_CASE =model
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =nn.Linear(self.model.config.hidden_size , self.num_labels )
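        # two outputs per token: start and end logits for extractive QA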
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCamelCase( a__ ,a__ ,a__):
# load longformer model from model identifier
_SCREAMING_SNAKE_CASE =LongformerModel.from_pretrained(a__)
_SCREAMING_SNAKE_CASE =LightningModel(a__)
_SCREAMING_SNAKE_CASE =torch.load(a__ ,map_location=torch.device('''cpu'''))
lightning_model.load_state_dict(ckpt['''state_dict'''])
# init longformer question answering model
_SCREAMING_SNAKE_CASE =LongformerForQuestionAnswering.from_pretrained(a__)
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(a__)
print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
snake_case_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
snake_case_ : Dict = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
) | 691 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
snake_case_ : Optional[Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
        [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
    index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
    use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(UpperCamelCase__ )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "rag"
UpperCAmelCase = True
def __init__( self : Tuple , _a : List[Any]=None , _a : Tuple=True , _a : Optional[Any]=None , _a : int=None , _a : List[str]=None , _a : int=None , _a : Optional[int]=None , _a : str=" / " , _a : Any=" // " , _a : Optional[Any]=5 , _a : int=300 , _a : Optional[Any]=768 , _a : Any=8 , _a : List[str]="wiki_dpr" , _a : Dict="train" , _a : Union[str, Any]="compressed" , _a : str=None , _a : Union[str, Any]=None , _a : int=False , _a : Any=False , _a : Any=0.0 , _a : Any=True , _a : List[str]=False , _a : Optional[int]=False , _a : int=False , _a : Union[str, Any]=True , _a : Optional[int]=None , **_a : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=_a , pad_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , is_encoder_decoder=_a , prefix=_a , vocab_size=_a , **_a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_SCREAMING_SNAKE_CASE =kwargs.pop('''question_encoder''' )
_SCREAMING_SNAKE_CASE =question_encoder_config.pop('''model_type''' )
_SCREAMING_SNAKE_CASE =kwargs.pop('''generator''' )
_SCREAMING_SNAKE_CASE =decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
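        # RAG is a composite model: the question encoder and the generator each
        # carry their own sub-config, re-hydrated here from the nested dicts
        # popped out of the kwargs above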
_SCREAMING_SNAKE_CASE =reduce_loss
_SCREAMING_SNAKE_CASE =label_smoothing
_SCREAMING_SNAKE_CASE =exclude_bos_score
_SCREAMING_SNAKE_CASE =do_marginalize
_SCREAMING_SNAKE_CASE =title_sep
_SCREAMING_SNAKE_CASE =doc_sep
_SCREAMING_SNAKE_CASE =n_docs
_SCREAMING_SNAKE_CASE =max_combined_length
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =dataset_split
_SCREAMING_SNAKE_CASE =index_name
_SCREAMING_SNAKE_CASE =retrieval_vector_size
_SCREAMING_SNAKE_CASE =retrieval_batch_size
_SCREAMING_SNAKE_CASE =passages_path
_SCREAMING_SNAKE_CASE =index_path
_SCREAMING_SNAKE_CASE =use_dummy_dataset
_SCREAMING_SNAKE_CASE =output_retrieved
_SCREAMING_SNAKE_CASE =do_deduplication
_SCREAMING_SNAKE_CASE =use_cache
if self.forced_eos_token_id is None:
_SCREAMING_SNAKE_CASE =getattr(self.generator , '''forced_eos_token_id''' , _a )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , _a : PretrainedConfig , _a : PretrainedConfig , **_a : Dict ) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.question_encoder.to_dict()
_SCREAMING_SNAKE_CASE =self.generator.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
return output | 691 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self : Dict , _a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =RobertaEmbeddings(_a )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self : List[Any] , _a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =config.num_hidden_layers
_SCREAMING_SNAKE_CASE =DeeRobertaModel(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Dict=None , _a : Optional[Any]=None , _a : int=None , _a : Union[str, Any]=None , _a : int=None , _a : Tuple=None , _a : Union[str, Any]=None , _a : str=-1 , _a : Dict=False , ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_layers
try:
_SCREAMING_SNAKE_CASE =self.roberta(
_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , )
_SCREAMING_SNAKE_CASE =outputs[1]
_SCREAMING_SNAKE_CASE =self.dropout(_a )
_SCREAMING_SNAKE_CASE =self.classifier(_a )
_SCREAMING_SNAKE_CASE =(logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_SCREAMING_SNAKE_CASE =e.message
_SCREAMING_SNAKE_CASE =e.exit_layer
_SCREAMING_SNAKE_CASE =outputs[0]
if not self.training:
_SCREAMING_SNAKE_CASE =entropy(_a )
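            # DeeBERT records the entropy of the final logits at inference time;
            # low entropy at an intermediate highway exit is the early-exit signal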
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_SCREAMING_SNAKE_CASE =[]
for highway_exit in outputs[-1]:
_SCREAMING_SNAKE_CASE =highway_exit[0]
if not self.training:
highway_logits_all.append(_a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_a )
if train_highway:
_SCREAMING_SNAKE_CASE =(sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_SCREAMING_SNAKE_CASE =(loss,) + outputs
if not self.training:
_SCREAMING_SNAKE_CASE =outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_SCREAMING_SNAKE_CASE =(
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy | 691 |
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
self.wait() | 691 | 1 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A__ ( unittest.TestCase ):
def __init__( self : Tuple , _a : Tuple , _a : Union[str, Any]=13 , _a : Union[str, Any]=7 , _a : Optional[int]=True , _a : Any=True , _a : List[str]=True , _a : Any=True , _a : List[str]=99 , _a : str=32 , _a : Dict=5 , _a : int=4 , _a : List[str]=37 , _a : Any="gelu" , _a : Tuple=0.1 , _a : Tuple=0.1 , _a : str=512 , _a : str=16 , _a : Optional[int]=2 , _a : Union[str, Any]=0.02 , _a : Optional[Any]=4 , ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =seq_length
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_attention_mask
_SCREAMING_SNAKE_CASE =use_token_type_ids
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =type_vocab_size
_SCREAMING_SNAKE_CASE =type_sequence_label_size
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =num_choices
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE =None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE =None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
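        # the decoder variant additionally returns encoder hidden states and an
        # encoder attention mask so the causal-LM cross-attention path is exercised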
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = True
UpperCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =FlaxRobertaModelTester(self )
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class_name.from_pretrained('''roberta-base''' , from_pt=_a )
_SCREAMING_SNAKE_CASE =model(np.ones((1, 1) ) )
self.assertIsNotNone(_a ) | 691 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
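        # mirror the same surgery onto the serialized post-processor: it is
        # rebuilt below only if add_prefix_space or trim_offsets actually differ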
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix the text with a space, as is done within Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 691 | 1 |
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
snake_case_ : List[Any] = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
    snake_case_ : List[Any] = requests.get(url, headers={'''User-Agent''': UserAgent().random})  # 'User-Agent' is the standard header name
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
snake_case_ : str = BeautifulSoup(res.text, '''html.parser''')
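    # '.eZt8xd' was the CSS class Google used for result links at the time of
    # writing; Google rotates these generated class names, so the selector may
    # need updating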
snake_case_ : Tuple = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(f"""https://google.com{link.get("href")}""") | 691 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : int , **_a : Optional[Any] ) -> Any:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 691 | 1 |
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word as Baconian-cipher groups of five A/B characters."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher string back into plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
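    # Illustrative usage sketch: round-trip a phrase through the cipher above.
    secret = encode("flee at once")
    print(secret)          # Baconian A/B groups, word spaces preserved
    print(decode(secret))  # -> "flee at once"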
| 0 |
# Usage:
# ./gen-card-allenai-wmt16.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
from math import pi, sqrt


def gamma(num: float) -> float:
    """
    Compute Gamma(num) for positive integers and half-integers, using the
    recurrence Gamma(n) = (n - 1) * Gamma(n - 1) with Gamma(1/2) = sqrt(pi).
    """
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num == 0:  # entering 0 exits instead of raising a domain error
            break
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 1 |
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
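# Illustrative note (module path is an assumption and may vary by version):
# _LazyModule replaces this module in sys.modules, so tokenization_tapex is only
# imported the first time an attribute is actually touched, e.g.
#
#     from transformers import TapexTokenizer  # resolves lazily on first access
#
# which keeps the top-level `import transformers` cheap.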
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    """Configuration class for REALM models (embedder, encoder, scorer, reader and open-QA variants)."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
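
# Illustrative usage sketch (assumes the class above; shown values are the defaults):
#
#     config = RealmConfig(num_candidates=4)
#     config.hidden_size     # -> 768
#     config.num_candidates  # -> 4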
| 2 |
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator that makes the wrapped function return its own running time in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    """Generate `num_examples` dummy rows matching the given `datasets.Features` spec."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write dummy examples to an Arrow file and load them back as a `datasets.Dataset`."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
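

# Illustrative usage sketch (assumes the helpers above): time how long writing a
# tiny string/int dataset takes via the @get_duration decorator.
if __name__ == "__main__":
    import os
    import tempfile

    @get_duration
    def write_tiny_dataset(path, features):
        generate_example_dataset(path, features, num_examples=10)

    features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
    with tempfile.TemporaryDirectory() as tmp_dir:
        seconds = write_tiny_dataset(os.path.join(tmp_dir, "bench.arrow"), features)
        print(f"wrote 10 examples in {seconds:.4f}s")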
"""PyTorch - Flax general utilities."""

from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
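

# Illustrative usage sketch (class and path are placeholders): convert a Flax
# checkpoint file into an already-constructed PyTorch model of the same architecture.
#
#     pt_model = MyUNetModel(config)
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")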
| 3 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
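

# Illustrative usage sketch (checkpoint name is a placeholder):
#
#     model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased")
#     model.bert.set_patience(3)   # exit early once 3 consecutive heads agree
#     model.bert.reset_stats()
#     # ... run inference ...
#     model.bert.log_stats()       # prints the measured layer count / speed-up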
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , )
lowerCAmelCase = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ):
if not len(_UpperCAmelCase ) == rows * cols:
raise ValueError('The specified number of rows and columns are not correct.' )
lowerCAmelCase ,lowerCAmelCase = imgs[0].size
lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) )
lowerCAmelCase ,lowerCAmelCase = grid.size
for i, img in enumerate(_UpperCAmelCase ):
grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ):
lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase )
lowerCAmelCase = pipeline(
_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images
lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) )
lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__UpperCamelCase : Optional[Any] = parse_args()
# Load models and create wrapper for stable diffusion
__UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id))
__UpperCamelCase : Optional[Any] = pipeline.to(unet.device)
__UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 4 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Generic utilities."""

import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


if is_flax_available():
    import jax.numpy as jnp


class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jnp.ndarray` or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call without tensorflow installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj


class ModelOutput(OrderedDict):
    """
    Base class for model outputs defined as dataclasses. Supports both dict-style (string keys) and
    tuple-style (integer) indexing; attributes set to `None` are skipped.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Return a tuple of all non-`None` attributes/keys, in order."""
        return tuple(self[k] for k in self.keys())


class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)


def can_return_loss(model_class):
    """Check if a given model can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the labels used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """Infer the framework (pt, tf or flax) of a model class from its MRO."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
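

# Illustrative usage sketch (assumes the helpers above): a dataclass-style
# ModelOutput behaves both like a tuple and like a dict.
#
#     from dataclasses import dataclass
#     from typing import Optional
#
#     @dataclass
#     class ToyOutput(ModelOutput):
#         loss: Optional[float] = None
#         logits: Optional[list] = None
#
#     out = ToyOutput(logits=[1, 2, 3])
#     out.logits == out["logits"] == out[0]  # all equivalent; `loss` is None, so skipped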
| 5 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single PIL image for the processor tests."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_checking(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs)) | 6 |
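A minimal sketch of a Tool subclass that the mixin above could exercise. The class name and behavior here are hypothetical illustrations, assuming the transformers.tools Tool base class (the import path may vary across transformers versions):

from transformers.tools import Tool


class UpperCaseTool(Tool):
    # attribute names follow the contract that ToolTesterMixin checks above
    name = "upper_case"
    description = "This is a tool that upper-cases a text input."
    inputs = ["text"]
    outputs = ["text"]
    default_checkpoint = None  # hypothetical: this toy tool needs no model checkpoint

    def __call__(self, text: str) -> str:
        return text.upper()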
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n") | 691 | 0 |
"""simple docstring"""
def _snake_case ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(100, 0.2_5) = }''')
print(F'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
| 7 |
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 691 | 0 |
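A quick worked example of the helpers above on the 8-bit value 0b10100 (decimal 20); the expected outputs are spelled out in the comments:

number = 0b10100  # 20
print(bin(set_bit(number, 0)))    # 0b10101 -> bit 0 turned on (21)
print(bin(clear_bit(number, 2)))  # 0b10000 -> bit 2 turned off (16)
print(bin(flip_bit(number, 4)))   # 0b100   -> bit 4 toggled off (4)
print(is_bit_set(number, 2))      # True
print(get_bit(number, 1))         # 0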
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b] (1-indexed, inclusive) in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True

        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b] (1-indexed, inclusive) in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]

        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt) | 8 |
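Lazy propagation is easy to get subtly wrong, so a cheap randomized cross-check against a brute-force list is worth having. A sketch using the SegmentTree class above (range updates assign a value, so the list mirror does the same):

import random


def brute_force_check(trials: int = 100, size: int = 15) -> None:
    data = [random.randint(-50, 50) for _ in range(size)]
    tree = SegmentTree(size)
    tree.build(1, 1, size, data)
    for _ in range(trials):
        left, right = sorted(random.sample(range(1, size + 1), 2))
        if random.random() < 0.5:
            val = random.randint(-50, 50)
            tree.update(1, 1, size, left, right, val)
            data[left - 1 : right] = [val] * (right - left + 1)
        else:
            assert tree.query(1, 1, size, left, right) == max(data[left - 1 : right])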
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
    def tearDown(self):
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
len(_a ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary. | 691 | 0 |
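As a reading aid, the retrieve() contract exercised repeatedly above boils down to: float32 question embeddings in, (doc embeddings, doc ids, doc dicts) out. A minimal sketch, assuming a RagRetriever built like the factory helpers above:

import numpy as np


def show_retrieve_contract(retriever, vector_size=8):
    # `retriever` is assumed to be a RagRetriever like the ones built above
    question_embeddings = np.ones((2, vector_size), dtype=np.float32)
    doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_embeddings, n_docs=1)
    assert doc_embeds.shape == (2, 1, vector_size)  # (batch, n_docs, vector size)
    assert len(doc_dicts) == 2                      # one dict of document fields per query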
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """The function run by each worker process: repeatedly swap with neighbors."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 9 |
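For comparison, the same odd-even transposition idea without processes: a plain sequential version that alternates even- and odd-indexed neighbor comparisons for n rounds (O(n^2), like the n parallel rounds above):

def odd_even_transposition_sequential(arr: list) -> list:
    arr = list(arr)
    for round_number in range(len(arr)):
        # even rounds compare pairs (0,1), (2,3), ...; odd rounds compare (1,2), (3,4), ...
        start = round_number % 2
        for i in range(start, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


print(odd_even_transposition_sequential(list(range(10, 0, -1))))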
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb,
            generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np",
        )

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image) | 691 | 0 |
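One detail worth noting about the strength=0.2 settings above: in typical diffusers img2img pipelines, strength controls how far the init image is noised, so only roughly strength * num_inference_steps denoising steps actually run. A hedged sketch of that bookkeeping (a common convention, not the exact Kandinsky source):

def effective_img2img_steps(num_inference_steps: int, strength: float):
    # start denoising at timestep index num_inference_steps - init_timestep
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start, num_inference_steps - t_start


print(effective_img2img_steps(10, 0.2))   # -> (8, 2): only 2 of 10 steps run
print(effective_img2img_steps(100, 0.2))  # -> (80, 20)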
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as the sum of
    a prime and twice a square (counterexamples to Goldbach's other conjecture)."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest odd composite with no such decomposition."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 10 |
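A quick sanity check on the result: the solution is the smallest odd composite n with no decomposition n = p + 2*k^2. A brute-force verifier for any single candidate (a sketch that reuses is_prime from the file above):

def violates_conjecture(n: int) -> bool:
    # True if odd composite n cannot be written as prime + 2 * k^2
    if n % 2 == 0 or is_prime(n):
        return False
    k = 0
    while 2 * k * k <= n:
        if is_prime(n - 2 * k * k):
            return False
        k += 1
    return True


print(next(n for n in range(3, 10_000, 2) if violates_conjecture(n)))  # 5777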
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a batch of images as PIL images, numpy arrays, or PyTorch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 691 | 0 |
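The four-channel test class above hinges on do_convert_rgb: RGBA inputs collapse to 3 channels before normalization, which is why expected_encoded_image_num_channels is 3. A small standalone sketch of that conversion with PIL:

import numpy as np
from PIL import Image

rgba = Image.fromarray(np.random.randint(0, 255, (18, 18, 4), dtype=np.uint8), mode="RGBA")
rgb = rgba.convert("RGB")  # alpha channel dropped, matching the 3-channel expectation
print(len(rgba.getbands()), len(rgb.getbands()))  # 4 3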
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
lowercase_ = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """
    Construct an ALBERT tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by adding [CLS] and [SEP] special tokens."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 11 |
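A minimal usage sketch of the tokenizer above, assuming the public albert-base-v2 checkpoint and a sentencepiece install:

from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
pieces = tokenizer.tokenize("Hello, lowercase world!")
print(pieces)                 # SentencePiece pieces, prefixed with the "▁" marker
ids = tokenizer.encode("Hello, lowercase world!")
print(tokenizer.decode(ids))  # round-trips through [CLS] ... [SEP]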
def and_gate(input_1: int, input_2: int) -> int:
    """
    Calculate AND of the input values.

    >>> and_gate(0, 0)
    0
    >>> and_gate(1, 1)
    1
    """
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Tests the and_gate function."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1)) | 691 | 0 |
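Other gates follow directly from and_gate by composition; for instance a NAND sketch, printed with its full truth table:

def nand_gate(input_1: int, input_2: int) -> int:
    # NAND is NOT(AND(a, b))
    return int(not and_gate(input_1, input_2))


for a in (0, 1):
    for b in (0, 1):
        print(a, b, nand_gate(a, b))  # 1 1 -> 0, everything else -> 1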
import unittest

import numpy as np


def schur_complement(mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, pseudo_inv=None) -> np.ndarray:
    """
    Schur complement of the symmetric block matrix [[A, B], [B^T, C]],
    i.e. C - B^T @ A^{-1} @ B. A precomputed (pseudo-)inverse of A may be
    passed via pseudo_inv.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        # b deliberately has a different number of rows than a (2 vs. 3)
        b = np.array([[0, 3], [3, 0]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        # c deliberately has a different number of columns than b (3 vs. 2)
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
| 12 |
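The first test relies on the block-determinant identity det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B). A standalone numeric check on a tiny example:

import numpy as np

a = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0]])
c = np.array([[5.0]])
m = np.block([[a, b], [b.T, c]])
s = c - b.T @ np.linalg.inv(a) @ b  # Schur complement of a
print(np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s)))  # True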
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so later timing isn't dominated by the download."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self) -> None:
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            '''--fp16_opt_level=O1''': '''''',
            '''$MAX_LEN''': 128,
            '''$BS''': 16,
            '''$GAS''': 1,
            '''$ENRO_DIR''': data_dir,
            '''$m''': '''sshleifer/student_marian_en_ro_6_1''',
            '''val_check_interval=0.25''': '''val_check_interval=1.0''',
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''')[1].strip()
        )
        bash_script = bash_script.replace('''\\\n''', '''''').strip().replace('''"$@"''', '''''')
        bash_script = bash_script.replace('''--fp16 ''', ''' ''')

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace('''--fp16''', '''''')
        epochs = 6
        testargs = (
            ['''distillation.py''']
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                '''--gpus=1''',
                '''--learning_rate=1e-3''',
                f"--num_train_epochs={epochs}",
                '''--warmup_steps=10''',
                '''--val_check_interval=1.0''',
                '''--do_predict''',
            ]
        )
        with patch.object(sys, '''argv''', testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics['''val'''][0]
        last_step_stats = metrics['''val'''][-1]
        assert len(metrics['''val''']) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # else the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_name = [x for x in contents if x.endswith('''.ckpt''')][0]
        ckpt_path = os.path.join(args.output_dir, ckpt_name)
        ckpt = torch.load(ckpt_path, map_location='''cpu''')
        expected_key = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['''test''']) == 1 | 691 | 0 |
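Editor's note: the substitution mechanism both tests above share is plain string replacement over a shell snippet. A minimal standalone sketch of that pattern (the flag names below are illustrative, not read from the real scripts):

bash_snippet = "--max_source_length $MAX_LEN --train_batch_size $BS --data_dir $ENRO_DIR"
replacements = {"$MAX_LEN": 64, "$BS": 16, "$ENRO_DIR": "/tmp/wmt_en_ro"}
for placeholder, value in replacements.items():
    # Each placeholder is replaced with its concrete value before the args are split.
    bash_snippet = bash_snippet.replace(placeholder, str(value))
assert bash_snippet == "--max_source_length 64 --train_batch_size 16 --data_dir /tmp/wmt_en_ro"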
"""Project Euler Problem 75: https://projecteuler.net/problem=75

Count the perimeters up to the limit that can be formed by exactly one
integer-sided right triangle, generating every primitive Pythagorean
triple (and its multiples) with Euclid's formula.
"""
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 13 |
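A brute-force cross-check for small limits can validate the Euclid-formula version above; this sketch recomputes the same count directly from a² + b² = c² (slow, tiny limits only):

from collections import defaultdict
from math import isqrt


def brute_force_solution(limit: int) -> int:
    frequencies = defaultdict(int)
    for a in range(1, limit // 3 + 1):
        for b in range(a, (limit - a) // 2 + 1):
            c = isqrt(a * a + b * b)
            if c * c == a * a + b * b and a + b + c <= limit:
                frequencies[a + b + c] += 1
    return sum(1 for f in frequencies.values() if f == 1)


# brute_force_solution(1000) should agree with solution(1000).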
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {'''a''': 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'''a''': 2, '''b''': True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {'''a''': 2, '''c''': 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
raise ValueError(error_msg) | 691 | 0 |
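The to_kwargs() behaviour exercised above fits in a few lines; this is a local sketch of the idea, not accelerate's actual implementation:

from dataclasses import dataclass


@dataclass
class MiniKwargsHandler:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        # Return only the fields that differ from the dataclass defaults.
        default = self.__class__()
        return {k: v for k, v in self.__dict__.items() if getattr(default, k) != v}


assert MiniKwargsHandler().to_kwargs() == {}
assert MiniKwargsHandler(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}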
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when all underlying optimizers actually
    stepped (e.g. optimizer steps skipped because of inf/nan gradients in mixed precision do not count).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, '''total_steps'''):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler's standard API
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
| 14 |
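A hedged usage sketch for the wrapper above (it assumes the class and a working torch install; the wrapped calls are left commented out because they need accelerate's process state at runtime):

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
# wrapped = AcceleratedScheduler(scheduler, optimizer)
# wrapped.step()  # only advances the LR when the optimizer actually stepped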
class Graph:
    """Weighted undirected graph with an adjacency-map representation."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected edge between head and tail with the given weight."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct so Borůvka's algorithm is well defined."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip('''\n''')

    def get_edges(self):
        """Return a list of (tail, head, weight) tuples, one per direction."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from iterables of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            # Equal ranks: pick root1 as the new root and bump its rank.
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of `graph` computed with Borůvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
return mst | 691 | 0 |
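A small usage sketch for the Borůvka implementation above (names as restored in this file):

g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (1, 4, 3)])
g.distinct_weight()  # the algorithm assumes distinct edge weights
mst = Graph.boruvka_mst(g)
print(mst)  # one "head -> tail == weight" line per stored edge direction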
def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of ``number`` and return their sum."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Project Euler Problem 20: sum of the digits of num! (100! by default)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
| 15 |
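Quick sanity checks for the helpers above (10! = 3628800 and its digit sum is 27):

assert factorial(10) == 3628800
assert split_and_add(3628800) == 27
assert solution(10) == 27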
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fp16 ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('''.ckpt''' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')
        if trainer.is_world_master():
            with open(output_eval_file, '''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key, value in result.items():
                    logger.info(''' %s = %s''', key, value)
                    writer.write('''%s = %s\n''' % (key, value))

                results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 691 | 0 |
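The argument-parsing pattern at the top of main() can be tried in isolation; here is a minimal, self-contained sketch with an illustrative dataclass (not one of the script's own argument classes):

from dataclasses import dataclass, field
from transformers import HfArgumentParser


@dataclass
class DemoArguments:
    learning_rate: float = field(default=5e-5, metadata={"help": "Initial learning rate."})


parser = HfArgumentParser(DemoArguments)
(demo_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-4"])
assert demo_args.learning_rate == 3e-4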
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265, max_position_embeddings=512,
        encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16,
        decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        decoder_start_token_id=1, scale_embedding=False,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def _snake_case ( self : Any ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE = {0: "batch"}
SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.num_layers
for i in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE = {0: "batch", 2: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _snake_case ( self : Optional[int] ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE = super().outputs
else:
SCREAMING_SNAKE_CASE = super(__lowerCamelCase , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.num_layers
for i in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _snake_case ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Generate decoder inputs
SCREAMING_SNAKE_CASE = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE = dict(**__lowerCamelCase , **__lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE = common_inputs["decoder_input_ids"].shape[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.num_attention_heads
SCREAMING_SNAKE_CASE = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE = decoder_seq_length + 3
SCREAMING_SNAKE_CASE = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 )
SCREAMING_SNAKE_CASE = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.num_layers
SCREAMING_SNAKE_CASE = min(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers
SCREAMING_SNAKE_CASE = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__lowerCamelCase , __lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) )
return common_inputs
def _snake_case ( self : Optional[Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE = seqlen + 2
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.num_layers
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.num_attention_heads
SCREAMING_SNAKE_CASE = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE = common_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE = torch.cat(
[common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
SCREAMING_SNAKE_CASE = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase )
]
return common_inputs
def _snake_case ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE = tokenizer.num_special_tokens_to_add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) )
return common_inputs
def _snake_case ( self : Optional[Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_causal_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
return common_inputs
def _snake_case ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : int ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
SCREAMING_SNAKE_CASE = super(__lowerCamelCase , self )._flatten_past_key_values_(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) | 16 |
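A hedged usage sketch for the configuration class above: a tiny config, checking the attribute_map indirection.

config = BlenderbotSmallConfig(encoder_layers=2, decoder_layers=2, d_model=64)
assert config.hidden_size == config.d_model == 64  # "hidden_size" resolves via attribute_map
assert config.num_attention_heads == config.encoder_attention_heads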
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p) | 691 | 0 |
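The same modular inverse can be checked against Python's built-in three-argument pow, which accepts a negative exponent for inverses on 3.8+:

assert binary_exponentiation(b, p - 2, p) == pow(b, -1, p)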
import math


def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"""0o{int(octal)}"""


def main() -> None:
    """Print octal conversions of a few sample numbers."""
    print("""\n2 in octal is:""")
    print(decimal_to_octal(2))  # = 2
    print("""\n8 in octal is:""")
    print(decimal_to_octal(8))  # = 10
    print("""\n65 in octal is:""")
    print(decimal_to_octal(65))  # = 101
    print("""\n216 in octal is:""")
    print(decimal_to_octal(216))  # = 330
    print("""\n512 in octal is:""")
    print(decimal_to_octal(512))  # = 1000
    print("""\n""")


if __name__ == "__main__":
    main()
| 17 |
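Cross-check against Python's built-in formatter; decimal_to_octal returns the same "0o" form as oct():

for n in (2, 8, 65, 216, 512):
    assert decimal_to_octal(n) == oct(n)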
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self ) | 691 | 0 |
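A quick, hedged configuration sketch in the spirit of the tester above (small sizes so nothing heavy is built):

from transformers import BitConfig

tiny = BitConfig(hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1])
assert tiny.hidden_sizes[-1] == 64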
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
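The lazy-import trick above can be demonstrated without transformers; this standalone sketch defers the real import until the attribute is first resolved:

import importlib


class LazyAttribute:
    def __init__(self, module_name: str, attribute: str):
        self._module_name = module_name
        self._attribute = attribute

    def resolve(self):
        # The underlying module is imported only here, on first use.
        return getattr(importlib.import_module(self._module_name), self._attribute)


sqrt = LazyAttribute("math", "sqrt").resolve()
assert sqrt(9) == 3.0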
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(
        self,
        vocab_size=None, is_encoder_decoder=True, prefix=None,
        bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None,
        title_sep=''' / ''', doc_sep=''' // ''', n_docs=5, max_combined_length=300,
        retrieval_vector_size=768, retrieval_batch_size=8,
        dataset='''wiki_dpr''', dataset_split='''train''', index_name='''compressed''',
        index_path=None, passages_path=None, use_dummy_dataset=False,
        reduce_loss=False, label_smoothing=0.0, do_deduplication=True,
        exclude_bos_score=False, do_marginalize=False, output_retrieved=False,
        use_cache=True, forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('''question_encoder''')
        question_encoder_model_type = question_encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''generator''')
        decoder_model_type = decoder_config.pop('''model_type''')

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, '''forced_eos_token_id''', None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        r"""Instantiate a [`RagConfig`] from a question-encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
return output | 691 | 0 |
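A hedged usage sketch for the composition classmethod above, pairing a DPR question-encoder config with a BART generator config:

from transformers import BartConfig, DPRConfig

rag_config = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig(), n_docs=3)
assert rag_config.n_docs == 3
assert rag_config.generator.model_type == "bart"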
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version('''<''', '''2.0.0''') or not hasattr(torch, '''_dynamo'''):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, '''forward''')
        original_forward = model.__dict__.pop('''_original_forward''', None)
        if original_forward is not None:
            while hasattr(forward, '''__wrapped__'''):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, '''_converted_to_transformer_engine''', False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it before continuing."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()` so only the main process writes."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """A context manager that adds each keyword argument to `os.environ` (upper-cased) and removes it on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty (qualified) name for an object, function or class."""
    if not hasattr(obj, '''__qualname__''') and not hasattr(obj, '''__name__'''):
        obj = getattr(obj, '''__class__''', obj)
    if hasattr(obj, '''__qualname__'''):
        return obj.__qualname__
    if hasattr(obj, '''__name__'''):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether `port` (default 29500, torch's distributed default) is already in use."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('''localhost''', port)) == 0
return s.connect_ex(('''localhost''', port) ) == 0
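# Self-contained sketch of the recursive-merge behaviour implemented by the
# dict-merging helper above (re-implemented inline, since the placeholder
# function names in this dump all collide); the helper appears under the name
# `merge_dicts` in the accelerate codebase.
def _merge_dicts_demo() -> None:
    def merge_dicts(source: dict, destination: dict) -> dict:
        # Recursively fold `source` into `destination`, mirroring the logic above.
        for key, value in source.items():
            if isinstance(value, dict):
                node = destination.setdefault(key, {})
                merge_dicts(value, node)
            else:
                destination[key] = value
        return destination

    base = {"optimizer": {"lr": 1e-3}, "seed": 42}
    override = {"optimizer": {"weight_decay": 0.01}}
    print(merge_dicts(override, base))
    # -> {'optimizer': {'lr': 0.001, 'weight_decay': 0.01}, 'seed': 42}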
| 19 |
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
self.wait() | 691 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_lowerCAmelCase: Optional[Any] = datasets.logging.get_logger(__name__)
_lowerCAmelCase: int = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_lowerCAmelCase: Tuple = '\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_lowerCAmelCase: List[Any] = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
_lowerCAmelCase: List[Any] = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
def __UpperCamelCase ( self) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )
def __UpperCamelCase ( self , lowercase_) -> int:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').')
a__ ='bleurt-base-128'
if self.config_name.lower() in CHECKPOINT_URLS:
a__ =self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
a__ =self.config_name.upper()
else:
raise KeyError(
F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""")
# download the model checkpoint specified by self.config_name and set up the scorer
a__ =dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
a__ =score.BleurtScorer(os.path.join(lowercase_ , lowercase_))
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =self.scorer.score(references=lowercase_ , candidates=lowercase_)
return {"scores": scores}
| 20 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 691 | 0 |
import argparse
import os
import re
UpperCAmelCase_ : Union[str, Any] = "src/transformers"
# Pattern that looks at the indentation in a line.
UpperCAmelCase_ : str = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCAmelCase_ : Dict = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCAmelCase_ : List[Any] = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCAmelCase_ : str = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCAmelCase_ : Tuple = re.compile(R"\[([^\]]+)\]")
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : str =_re_indent.search(lowerCamelCase )
return "" if search is None else search.groups()[0]
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase="" , lowerCamelCase=None , lowerCamelCase=None ):
__magic_name__ : List[str] =0
__magic_name__ : Dict =code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase ):
index += 1
__magic_name__ : Optional[Any] =["""\n""".join(lines[:index] )]
else:
__magic_name__ : str =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__magic_name__ : Dict =[lines[index]]
index += 1
while index < len(lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(lowerCamelCase ) )
if index < len(lowerCamelCase ) - 1:
__magic_name__ : Optional[Any] =[lines[index + 1]]
index += 1
else:
__magic_name__ : Optional[Any] =[]
else:
blocks.append("""\n""".join(lowerCamelCase ) )
__magic_name__ : Optional[int] =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase ) > 0:
blocks.append("""\n""".join(lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def lowerCAmelCase_ ( lowerCamelCase ):
def _inner(lowerCamelCase ):
return key(lowerCamelCase ).lower().replace("""_""" , """""" )
return _inner
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None ):
# If no key is provided, we use a noop.
def noop(lowerCamelCase ):
return x
if key is None:
__magic_name__ : Optional[int] =noop
# Constants are all uppercase, they go first.
__magic_name__ : List[Any] =[obj for obj in objects if key(lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__magic_name__ : int =[obj for obj in objects if key(lowerCamelCase )[0].isupper() and not key(lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
__magic_name__ : Tuple =[obj for obj in objects if not key(lowerCamelCase )[0].isupper()]
__magic_name__ : int =ignore_underscore(lowerCamelCase )
return sorted(lowerCamelCase , key=lowerCamelCase ) + sorted(lowerCamelCase , key=lowerCamelCase ) + sorted(lowerCamelCase , key=lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase ):
# This inner function sort imports between [ ].
def _replace(lowerCamelCase ):
__magic_name__ : Optional[Any] =match.groups()[0]
if "," not in imports:
return F"[{imports}]"
__magic_name__ : Optional[int] =[part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__magic_name__ : Tuple =keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(lowerCamelCase )] ) + "]"
__magic_name__ : Any =import_statement.split("""\n""" )
if len(lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__magic_name__ : Dict =2 if lines[1].strip() == """[""" else 1
__magic_name__ : Optional[int] =[(i, _re_strip_line.search(lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__magic_name__ : Union[str, Any] =sort_objects(lowerCamelCase , key=lambda lowerCamelCase : x[1] )
__magic_name__ : Dict =[lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__magic_name__ : Union[str, Any] =_re_bracket_content.sub(_replace , lines[1] )
else:
__magic_name__ : Optional[int] =[part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__magic_name__ : Tuple =keys[:-1]
__magic_name__ : List[Any] =get_indent(lines[1] ) + """, """.join([F"\"{k}\"" for k in sort_objects(lowerCamelCase )] )
return "\n".join(lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
__magic_name__ : Dict =_re_bracket_content.sub(_replace , lowerCamelCase )
return import_statement
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=True ):
with open(lowerCamelCase , encoding="""utf-8""" ) as f:
__magic_name__ : Union[str, Any] =f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__magic_name__ : Dict =split_code_in_indented_blocks(
lowerCamelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__magic_name__ : Optional[Any] =main_blocks[block_idx]
__magic_name__ : Optional[Any] =block.split("""\n""" )
# Get to the start of the imports.
__magic_name__ : Tuple =0
while line_idx < len(lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__magic_name__ : List[Any] =len(lowerCamelCase )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
__magic_name__ : Dict ="""\n""".join(block_lines[line_idx:-1] )
__magic_name__ : int =get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
__magic_name__ : Dict =split_code_in_indented_blocks(lowerCamelCase , indent_level=lowerCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
__magic_name__ : str =_re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__magic_name__ : Optional[int] =[(pattern.search(lowerCamelCase ).groups()[0] if pattern.search(lowerCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__magic_name__ : List[Any] =[(i, key) for i, key in enumerate(lowerCamelCase ) if key is not None]
__magic_name__ : Dict =[x[0] for x in sorted(lowerCamelCase , key=lambda lowerCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__magic_name__ : Any =0
__magic_name__ : List[Any] =[]
for i in range(len(lowerCamelCase ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
__magic_name__ : Optional[Any] =sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase )
count += 1
# And we put our main block back together with its first and last line.
__magic_name__ : Union[str, Any] ="""\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write("""\n""".join(lowerCamelCase ) )
def lowerCAmelCase_ ( lowerCamelCase=True ):
__magic_name__ : Union[str, Any] =[]
for root, _, files in os.walk(lowerCamelCase ):
if "__init__.py" in files:
__magic_name__ : Dict =sort_imports(os.path.join(lowerCamelCase , """__init__.py""" ) , check_only=lowerCamelCase )
if result:
__magic_name__ : Any =[os.path.join(lowerCamelCase , """__init__.py""" )]
if len(lowerCamelCase ) > 0:
raise ValueError(F"Would overwrite {len(lowerCamelCase )} files, run `make style`." )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
UpperCAmelCase_ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
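
# Illustration of the ordering convention implemented by `sort_objects` above:
# constants first, then classes, then functions, each group sorted
# case-insensitively with underscores ignored (re-implemented inline so the
# sketch is self-contained).
def _sort_objects_demo() -> None:
    def norm(name: str) -> str:
        return name.lower().replace("_", "")

    objs = ["load_model", "MODEL_MAPPING", "AutoModel", "Trainer", "pipeline"]
    constants = [o for o in objs if o.isupper()]
    classes = [o for o in objs if o[0].isupper() and not o.isupper()]
    functions = [o for o in objs if not o[0].isupper()]
    print(sorted(constants, key=norm) + sorted(classes, key=norm) + sorted(functions, key=norm))
    # -> ['MODEL_MAPPING', 'AutoModel', 'Trainer', 'load_model', 'pipeline']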
| 21 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : int , **_a : Optional[Any] ) -> Any:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 691 | 0 |
'''simple docstring'''
import baseaa  # NOTE: `baseaa`/`baaencode` look like placeholder names for the stdlib base64 helpers
def baseaa_encode(string: str) -> bytes:
    '''simple docstring'''
    return baseaa.baaencode(string.encode('''utf-8''' ) )
def baseaa_decode(encoded: bytes) -> str:
    '''simple docstring'''
    return baseaa.baadecode(encoded ).decode('''utf-8''' )
if __name__ == "__main__":
    test = 'Hello World!'
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
print(decoded)
| 22 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCamelCase( a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_SCREAMING_SNAKE_CASE ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
_SCREAMING_SNAKE_CASE =f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=a__ ,exist_ok=a__)
_SCREAMING_SNAKE_CASE =os.path.join(a__ ,'''README.md''')
print(f"Generating {path}")
with open(a__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(a__)
# make sure we are under the root of the project
snake_case_ : Any = Path(__file__).resolve().parent.parent.parent
snake_case_ : Tuple = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case_ : Union[str, Any] = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name) | 691 | 0 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
snake_case__ : Dict = TypeVar("""T""")
class _a ( Generic[T] ):
"""simple docstring"""
A_ = 42 # Cache store of keys
A_ = 42 # References of the keys in cache
A_ = 10 # Maximum capacity of cache
def __init__( self , _UpperCAmelCase ) -> None:
UpperCamelCase_ = deque()
UpperCamelCase_ = set()
if not n:
UpperCamelCase_ = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
UpperCamelCase_ = n
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCamelCase_ = self.dq_store.pop()
self.key_reference.remove(_UpperCAmelCase )
else:
self.dq_store.remove(_UpperCAmelCase )
self.dq_store.appendleft(_UpperCAmelCase )
self.key_reference.add(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> None:
for k in self.dq_store:
print(_UpperCAmelCase )
def __repr__( self ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : LRUCache[str | int] = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 23 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
snake_case_ : Dict = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 691 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : Tuple = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCamelCase( a__):
def wrapper(*a__ ,**a__):
_SCREAMING_SNAKE_CASE =timeit.default_timer()
_SCREAMING_SNAKE_CASE =func(*a__ ,**a__)
_SCREAMING_SNAKE_CASE =timeit.default_timer() - starttime
return delta
_SCREAMING_SNAKE_CASE =func.__name__
return wrapper
def lowerCamelCase( a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =seq_shapes or {}
for i in range(a__):
_SCREAMING_SNAKE_CASE ={}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(a__ ,_ArrayXD):
_SCREAMING_SNAKE_CASE =np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(a__ ,datasets.Value):
if v.dtype == "string":
_SCREAMING_SNAKE_CASE ='''The small grey turtle was surprisingly fast when challenged.'''
else:
_SCREAMING_SNAKE_CASE =np.random.randint(10 ,size=1).astype(v.dtype).item()
elif isinstance(a__ ,datasets.Sequence):
while isinstance(a__ ,datasets.Sequence):
_SCREAMING_SNAKE_CASE =v.feature
_SCREAMING_SNAKE_CASE =seq_shapes[k]
_SCREAMING_SNAKE_CASE =np.random.rand(*a__).astype(v.dtype)
_SCREAMING_SNAKE_CASE =data
dummy_data.append((i, example))
return dummy_data
def lowerCamelCase( a__ ,a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =generate_examples(a__ ,num_examples=a__ ,seq_shapes=a__)
with ArrowWriter(features=a__ ,path=a__) as writer:
for key, record in dummy_data:
_SCREAMING_SNAKE_CASE =features.encode_example(a__)
writer.write(a__)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
_SCREAMING_SNAKE_CASE =datasets.Dataset.from_file(filename=a__ ,info=datasets.DatasetInfo(features=a__))
return dataset | 691 | 0 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
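    # Hedged demo in addition to the doctest run below: slowsort sorts in
    # place; it is a deliberately inefficient "multiply and surrender"
    # teaching algorithm, so keep inputs tiny.
    _demo = [5, 2, 4, 1, 3]
    slowsort(_demo)
    print(_demo)  # expected: [1, 2, 3, 4, 5]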
testmod() | 25 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any]=None , _a : Optional[int]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : str=None , _a : Any=None , _a : str=None , _a : Optional[Any]=None , _a : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
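
# The patience mechanism above, in isolation: inference stops early once the
# intermediate classifiers agree for `patience` consecutive layers (this is
# the classification branch; the regression branch compares successive outputs
# against a threshold instead). A minimal sketch with plain Python values
# standing in for per-layer argmax predictions:
def _pabee_exit_layer(per_layer_predictions: list, patience: int) -> int:
    counter, previous = 0, None
    for layer, prediction in enumerate(per_layer_predictions, start=1):
        if previous is not None and prediction == previous:
            counter += 1
        else:
            counter = 0
        previous = prediction
        if counter == patience:
            return layer  # early exit: `patience` consecutive agreements
    return len(per_layer_predictions)  # fell through to the last layer

# _pabee_exit_layer([0, 1, 1, 1, 2, 2], patience=2) -> 4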
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =BertModelWithPabee(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[str] , _a : Optional[Any]=None , _a : List[Any]=None , _a : Union[str, Any]=None , _a : List[str]=None , _a : Dict=None , _a : Optional[Any]=None , _a : Optional[Any]=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE =(logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE =(total_loss / total_weights,) + outputs
return outputs | 691 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ : str = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
snake_case_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 691 | 0 |
def _print_dist(dist, v) -> None:
    """simple docstring"""
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('inf' ):
                print(int(dist[i][j] ) , end='\t' )
            else:
                print('INF' , end='\t' )
        print()
def floyd_warshall(graph, v) -> tuple:
    """simple docstring"""
    dist = [[float('inf' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('inf' )
                    and dist[k][j] != float('inf' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
    # # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
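    # Scripted (non-interactive) check of the expected output above, using the
    # same 3-vertex example as a fixed graph; a minimal sketch, independent of
    # the input() session.
    _inf = float("inf")
    _demo_graph = [[0.0, _inf, _inf], [_inf, 0.0, 2.0], [_inf, 1.0, 0.0]]
    floyd_warshall(_demo_graph, 3)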
| 27 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(3, 32, 128)
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[Any] , **_a : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Optional[int] , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
_SCREAMING_SNAKE_CASE =Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] ) | 691 | 0 |
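# Illustrative companion to the processor tests above: the character-vocab
# round trip they depend on, written with nothing but the standard library.
# The vocab.json file name mirrors the test fixture, not a library guarantee.
import json as _json
import os as _os
import tempfile as _tempfile

_chars = ["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz")
_vocab = {tok: idx for idx, tok in enumerate(_chars)}
_tmpdir = _tempfile.mkdtemp()
_vocab_path = _os.path.join(_tmpdir, "vocab.json")
with open(_vocab_path, "w", encoding="utf-8") as _fp:
    _fp.write(_json.dumps(_vocab) + "\n")
with open(_vocab_path, encoding="utf-8") as _fp:
    assert _json.load(_fp) == _vocab  # the mapping survives the round trip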
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    """Return the sum of all natural numbers below n that are multiples of 3 or 5."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
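# Cross-check (illustrative addition): the inclusion-exclusion closed form
# should agree with the brute-force generator expression above.
def _sum_of_multiples_below(k: int, n: int) -> int:
    m = (n - 1) // k
    return k * m * (m + 1) // 2


assert solution(1000) == (
    _sum_of_multiples_below(3, 1000)
    + _sum_of_multiples_below(5, 1000)
    - _sum_of_multiples_below(15, 1000)
)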
if __name__ == "__main__":
print(F"""{solution() = }""")
| 28 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the headline counters from the Worldometers coronavirus page."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
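# Offline sketch of the same extraction logic against a static HTML snippet,
# so the parsing can be exercised without a network call. The markup below is
# a made-up stand-in, not the live page's real structure.
_SAMPLE_HTML = """
<h1>Coronavirus Cases:</h1>
<div class="maincounter-number">704,753,890</div>
"""
_soup = BeautifulSoup(_SAMPLE_HTML, "html.parser")
_keys = _soup.findAll("h1")
_values = _soup.findAll("div", {"class": "maincounter-number"})
assert {k.text.strip(): v.text.strip() for k, v in zip(_keys, _values)} == {
    "Coronavirus Cases:": "704,753,890"
}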
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
print(f"""{key}\n{value}\n""") | 691 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) != 2 or len(a[0] ) != 2 or len(lowerCAmelCase__ ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
lowerCamelCase_ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCAmelCase__ ) )
]
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCAmelCase__ ) )
]
def lowercase ( lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
lowerCamelCase_ = len(lowerCAmelCase__ )
lowerCamelCase_ = matrix_length // 2
lowerCamelCase_ = [[a[i][j] for j in range(lowerCAmelCase__ ,lowerCAmelCase__ )] for i in range(lowerCAmelCase__ )]
lowerCamelCase_ = [
[a[i][j] for j in range(lowerCAmelCase__ ,lowerCAmelCase__ )] for i in range(lowerCAmelCase__ ,lowerCAmelCase__ )
]
lowerCamelCase_ = [[a[i][j] for j in range(lowerCAmelCase__ )] for i in range(lowerCAmelCase__ )]
lowerCamelCase_ = [[a[i][j] for j in range(lowerCAmelCase__ )] for i in range(lowerCAmelCase__ ,lowerCAmelCase__ )]
return top_left, top_right, bot_left, bot_right
def lowercase ( lowerCAmelCase__ ):
return len(lowerCAmelCase__ ), len(matrix[0] )
def lowercase ( lowerCAmelCase__ ):
print('''\n'''.join(str(lowerCAmelCase__ ) for line in matrix ) )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
if matrix_dimensions(lowerCAmelCase__ ) == (2, 2):
return default_matrix_multiplication(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = split_matrix(lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = split_matrix(lowerCAmelCase__ )
lowerCamelCase_ = actual_strassen(lowerCAmelCase__ ,matrix_subtraction(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCamelCase_ = actual_strassen(matrix_addition(lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ )
lowerCamelCase_ = actual_strassen(matrix_addition(lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ )
lowerCamelCase_ = actual_strassen(lowerCAmelCase__ ,matrix_subtraction(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCamelCase_ = actual_strassen(matrix_addition(lowerCAmelCase__ ,lowerCAmelCase__ ) ,matrix_addition(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCamelCase_ = actual_strassen(matrix_subtraction(lowerCAmelCase__ ,lowerCAmelCase__ ) ,matrix_addition(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCamelCase_ = actual_strassen(matrix_subtraction(lowerCAmelCase__ ,lowerCAmelCase__ ) ,matrix_addition(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCamelCase_ = matrix_addition(matrix_subtraction(matrix_addition(lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) ,lowerCAmelCase__ )
lowerCamelCase_ = matrix_addition(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = matrix_addition(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ) ,lowerCAmelCase__ )
# construct the new matrix from our 4 quadrants
lowerCamelCase_ = []
for i in range(len(lowerCAmelCase__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCAmelCase__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
if matrix_dimensions(lowerCAmelCase__ )[1] != matrix_dimensions(lowerCAmelCase__ )[0]:
lowerCamelCase_ = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
f"Matrix A: {matrixa}\n"
f"Matrix B: {matrixa}"
)
raise Exception(lowerCAmelCase__ )
lowerCamelCase_ = matrix_dimensions(lowerCAmelCase__ )
lowerCamelCase_ = matrix_dimensions(lowerCAmelCase__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
lowerCamelCase_ = max(*lowerCAmelCase__ ,*lowerCAmelCase__ )
lowerCamelCase_ = int(math.pow(2 ,math.ceil(math.loga(lowerCAmelCase__ ) ) ) )
lowerCamelCase_ = matrixa
lowerCamelCase_ = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 ,lowerCAmelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] ,lowerCAmelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] ,lowerCAmelCase__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
lowerCamelCase_ = actual_strassen(lowerCAmelCase__ ,lowerCAmelCase__ )
# Removing the additional zeros
for i in range(0 ,lowerCAmelCase__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] ,lowerCAmelCase__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
A_ = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
A_ = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
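    # Sanity check (illustrative addition, not in the original file): Strassen
    # should agree with a naive triple-loop product. Fresh inputs are used
    # because strassen() pads its arguments in place.
    import copy

    _x = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
    _y = [[1, 0], [0, 1], [1, 1], [2, 3]]
    _expected = [[sum(r * c for r, c in zip(row, col)) for col in zip(*_y)] for row in _x]
    assert strassen(copy.deepcopy(_x), copy.deepcopy(_y)) == _expected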
| 29 |
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
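# Worked examples (illustrative addition) on the 4-bit value 0b0101 == 5.
assert set_bit(0b0101, 1) == 0b0111  # turn bit 1 on
assert clear_bit(0b0101, 2) == 0b0001  # turn bit 2 off
assert flip_bit(0b0101, 0) == 0b0100  # toggle bit 0
assert is_bit_set(0b0101, 2) is True  # bit 2 is already on
assert get_bit(0b0101, 1) == 0  # bit 1 is off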
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 | 0 |
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data) | 30 |
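# Self-contained sketch of the chaining idea above: each bucket holds a deque
# and new entries go to the front. A plain dict of deques stands in for the
# HashTable base class, which lives in the sibling hash_table module.
from collections import deque as _deque

_buckets = {}
for _value in (10, 15, 3):
    _slot = _value % 7  # 10 and 3 collide in bucket 3
    _buckets.setdefault(_slot, _deque()).appendleft(_value)
assert list(_buckets[3]) == [3, 10]
assert list(_buckets[1]) == [15]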
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A__ ( TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =8
# DPR tok
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCamelCase ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Dict ) -> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __UpperCamelCase ( self : Optional[int] , _a : bool ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dataset''' )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _a ) , )
return retriever
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_SCREAMING_SNAKE_CASE ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
len(_a ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary. | 691 | 0 |
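# Minimal sketch of the retrieval pattern exercised above: maximum
# inner-product search over two tiny "document" embeddings. Plain numpy
# stands in for the faiss index so the sketch has no optional dependency.
import numpy as _np

_doc_embeds = _np.stack([_np.ones(8), 2 * _np.ones(8)])  # docs "0" and "1"
_question = _np.ones((1, 8), dtype=_np.float32)
_scores = _question @ _doc_embeds.T  # inner products: [[8., 16.]]
assert int(_scores.argmax(axis=1)[0]) == 1  # doc 1 wins, as the tests assert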
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
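# Worked values (illustrative addition): the sequence starts 2, 3, 7, 43, and
# each term is one more than the product of all previous terms.
assert [sylvester(i) for i in range(1, 5)] == [2, 3, 7, 43]
assert sylvester(5) == 2 * 3 * 7 * 43 + 1  # == 1807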
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''') | 31 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase ):
UpperCAmelCase = KandinskyImg2ImgPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_SCREAMING_SNAKE_CASE =MultilingualCLIP(_a )
_SCREAMING_SNAKE_CASE =text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_SCREAMING_SNAKE_CASE =UNet2DConditionModel(**_a )
return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =self.dummy_tokenizer
_SCREAMING_SNAKE_CASE =self.dummy_unet
_SCREAMING_SNAKE_CASE =self.dummy_movq
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_SCREAMING_SNAKE_CASE =DDIMScheduler(**_a )
_SCREAMING_SNAKE_CASE ={
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self : str , _a : int , _a : int=0 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((256, 256) )
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu'''
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs(_a ) )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_SCREAMING_SNAKE_CASE ='''A red cartoon frog, 4k'''
_SCREAMING_SNAKE_CASE =KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_SCREAMING_SNAKE_CASE =KandinskyImg2ImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE =pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.Generator(device='''cpu''' ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_SCREAMING_SNAKE_CASE =pipeline(
_a , image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a ) | 691 | 0 |
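# Standalone sketch of the tensor-to-PIL pattern used in get_dummy_inputs
# above: channel-first float array -> channel-last uint8 image. The scaling
# by 255 is illustrative; the tests feed their arrays through np.uint8 as-is.
import numpy as _np
from PIL import Image as _Image

_arr = _np.random.rand(3, 64, 64)  # CHW floats in [0, 1)
_img = _Image.fromarray(_np.uint8(255 * _np.transpose(_arr, (1, 2, 0)))).convert("RGB")
assert _img.size == (64, 64)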
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 |
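# Minimal sketch of the lazy-import idea used above: resolve a symbol's home
# module only on first attribute access. This toy class is illustrative and
# far simpler than transformers' real _LazyModule.
import importlib as _importlib
import types as _types


class _ToyLazyModule(_types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, name):
        module = _importlib.import_module(self._symbol_to_module[name])
        return getattr(module, name)


_toy = _ToyLazyModule("toy", {"json": ["dumps", "loads"]})
assert _toy.dumps({"a": 1}) == '{"a": 1}'  # resolved lazily from the json module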
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : Dict , _a : Dict=7 , _a : List[str]=3 , _a : str=18 , _a : Optional[int]=30 , _a : Tuple=400 , _a : Optional[Any]=True , _a : Dict=None , _a : str=True , _a : Tuple=None , _a : Any=True , _a : Any=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _a : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _a : List[Any]=True , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 224, '''width''': 224}
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean
_SCREAMING_SNAKE_CASE =image_std
_SCREAMING_SNAKE_CASE =do_convert_rgb
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __UpperCamelCase ( self : Tuple , _a : Optional[Any]=False , _a : str=False , _a : Dict=False ) -> Dict:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
if torchify:
_SCREAMING_SNAKE_CASE =[torch.from_numpy(_a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , do_center_crop=_a )
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_a )
_SCREAMING_SNAKE_CASE =3
@property
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 691 | 0 |
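# Quick illustration of the 4-channel case tested above: converting an RGBA
# image down to 3 channels, which is what do_convert_rgb arranges for. Plain
# PIL is used here rather than the image processor itself.
import numpy as _np
from PIL import Image as _Image

_rgba = _Image.fromarray(_np.zeros((18, 18, 4), dtype=_np.uint8))  # inferred RGBA
assert len(_rgba.getbands()) == 4
assert len(_rgba.convert("RGB").getbands()) == 3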
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path) -> None:
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
lowerCamelCase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
lowerCamelCase__ : Any = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
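# Example invocation (hypothetical paths, for illustration only; the script
# filename below is assumed, use whatever name this module is saved under):
#
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai/checkpoint \
#       --pytorch_dump_folder_path /path/to/output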
| 33 |
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
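# Exhaustive check (illustrative addition): the gate matches bitwise AND on
# every input pair.
for _a_in in (0, 1):
    for _b_in in (0, 1):
        assert and_gate(_a_in, _b_in) == (_a_in & _b_in)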
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 691 | 0 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 |
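# Standalone sketch of the greedy longest-match loop in WordpieceTokenizer
# above, run against a toy vocabulary (illustrative, not CPM-Ant's real vocab;
# the max_input_chars_per_word guard is omitted for brevity).
def _greedy_wordpiece(text, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(text):
        end = len(text)
        while end > start and text[start:end] not in vocab:
            end -= 1
        if end == start:  # no prefix matched: emit unk and move one character
            tokens.append(unk)
            start += 1
        else:
            tokens.append(text[start:end])
            start = end
    return tokens


assert _greedy_wordpiece("unhappy", {"un", "happy", "hap", "py"}) == ["un", "happy"]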
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
snake_case_ : Optional[int] = '''sshleifer/mar_enro_6_3_student'''
class A__ ( TestCasePlus ):
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_a , )
_SCREAMING_SNAKE_CASE =f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
        env_vars_to_replace ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
        bash_script =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
        bash_script =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        for k, v in env_vars_to_replace.items():
            bash_script =bash_script.replace(k , str(v ) )
        output_dir =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
        args =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs =['''finetune.py'''] + bash_script.split() + args
        with patch.object(sys , '''argv''' , testargs ):
            parser =argparse.ArgumentParser()
            parser =pl.Trainer.add_argparse_args(parser )
            parser =SummarizationModule.add_model_specific_args(parser , os.getcwd() )
            args =parser.parse_args()
            model =main(args )
# Check metrics
        metrics =load_json(model.metrics_save_path )
        first_step_stats =metrics['''val'''][0]
        last_step_stats =metrics['''val'''][-1]
        self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , float )
        self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents =os.listdir(output_dir )
        ckpt_name =[x for x in contents if x.endswith('''.ckpt''' )][0]
        full_path =os.path.join(args.output_dir , ckpt_name )
        ckpt =torch.load(full_path , map_location='''cpu''' )
        expected_key ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents ={os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        data_dir =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
        bash_script =(
            (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
        )
        bash_script =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        bash_script =bash_script.replace('''--fp16 ''' , ''' ''' )
        for k, v in env_vars_to_replace.items():
            bash_script =bash_script.replace(k , str(v ) )
        output_dir =self.get_auto_remove_tmp_dir()
        bash_script =bash_script.replace('''--fp16''' , '''''' )
        epochs =6
        testargs =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
        with patch.object(sys , '''argv''' , testargs ):
            parser =argparse.ArgumentParser()
            parser =pl.Trainer.add_argparse_args(parser )
            parser =SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
            args =parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model =distill_main(args )
        # Check metrics
        metrics =load_json(model.metrics_save_path )
        first_step_stats =metrics['''val'''][0]
        last_step_stats =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , float )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents =os.listdir(output_dir )
        ckpt_name =[x for x in contents if x.endswith('''.ckpt''' )][0]
        full_path =os.path.join(args.output_dir , ckpt_name )
        ckpt =torch.load(full_path , map_location='''cpu''' )
        expected_key ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents ={os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1 | 691 | 0 |
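# Hedged sketch (added): the script-to-argv pattern both tests above rely on, shown
# standalone. The script text and replacement table here are invented for illustration.
script_text = "finetune.py --data_dir $ENRO_DIR --max_len $MAX_LEN \\\n --bs $BS"
replacements = {"$ENRO_DIR": "/tmp/enro", "$MAX_LEN": 64, "$BS": 32}
cli = script_text.split("finetune.py")[1].strip().replace("\\\n", "")
for key, value in replacements.items():
    cli = cli.replace(key, str(value))
argv = ["finetune.py"] + cli.split()
assert argv == ["finetune.py", "--data_dir", "/tmp/enro", "--max_len", "64", "--bs", "32"]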
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
    'facebook/dpr-ctx_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
    ),
    'facebook/dpr-ctx_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
    ),
},
'tokenizer_file': {
    'facebook/dpr-ctx_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
    ),
    'facebook/dpr-ctx_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
    ),
},
}
a_ :Dict = {
'vocab_file': {
    'facebook/dpr-question_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
    ),
    'facebook/dpr-question_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
    ),
},
'tokenizer_file': {
    'facebook/dpr-question_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
    ),
    'facebook/dpr-question_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
    ),
},
}
a_ :Any = {
'vocab_file': {
    'facebook/dpr-reader-single-nq-base': (
        'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
    ),
    'facebook/dpr-reader-multiset-base': (
        'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
    ),
},
'tokenizer_file': {
    'facebook/dpr-reader-single-nq-base': (
        'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
    ),
    'facebook/dpr-reader-multiset-base': (
        'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
    ),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = (
    r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. '
    r'Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
)
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
    def __call__( self : List[Any] , questions : Any , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs : str , ):
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                f"""There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.""" )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans ( self : List[Any] , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ):
        input_ids = reader_input['''input_ids''']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions : List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans ( self : Dict , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x: x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
| 35 |
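# Hedged, standalone sketch (added) of the span-scoring loop in _get_best_spans above:
# every span up to max_answer_length is scored as start_logit + end_logit, then the
# highest-scoring spans are kept, skipping any span nested with one already chosen.
# The toy logits below are invented for illustration.
def best_spans(start_logits, end_logits, max_answer_length, top_spans):
    scores = []
    for start, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), s_score + e_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue  # nested with an already chosen span
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

assert best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 3.0], max_answer_length=2, top_spans=1) == [(1, 2)]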
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = 0
UpperCAmelCase = False
UpperCAmelCase = 3.0
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        scaler_handler =GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator =Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
        cmd =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_00, 2_00)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 691 | 0 |
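# Hedged sketch (added): the to_kwargs() contract the first test above exercises —
# only fields that differ from their dataclass defaults are emitted. The Mock class
# below is an illustrative stand-in, not accelerate's implementation.
from dataclasses import dataclass, fields

@dataclass
class Mock:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        default = Mock()
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(default, f.name)
        }

assert Mock().to_kwargs() == {}
assert Mock(a=2, b=True).to_kwargs() == {"a": 2, "b": True}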
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = 1
snake_case : str = 3
snake_case : Optional[Any] = (32, 32)
snake_case : int = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
return image
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : str = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(SCREAMING_SNAKE_CASE_ )
@property
def snake_case_ ( self ):
'''simple docstring'''
        def extract(*args ,**kwargs ):
class _A :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
snake_case : Optional[Any] = torch.ones([0] )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
self.pixel_values.to(SCREAMING_SNAKE_CASE_ )
return self
return Out()
return extract
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : Optional[Any] = self.dummy_cond_unet
snake_case : Optional[int] = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule="""scaled_linear""" ,clip_sample=SCREAMING_SNAKE_CASE_ ,set_alpha_to_one=SCREAMING_SNAKE_CASE_ ,)
snake_case : Any = self.dummy_vae
snake_case : List[str] = self.dummy_text_encoder
snake_case : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
snake_case : Dict = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ ,vae=SCREAMING_SNAKE_CASE_ ,text_encoder=SCREAMING_SNAKE_CASE_ ,tokenizer=SCREAMING_SNAKE_CASE_ ,safety_checker=SCREAMING_SNAKE_CASE_ ,feature_extractor=self.dummy_extractor ,)
snake_case : int = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : str = """A painting of a squirrel eating a burger"""
snake_case : List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
snake_case : Tuple = sd_pipe([prompt] ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
snake_case : Any = output.images
snake_case : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
snake_case : Dict = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=SCREAMING_SNAKE_CASE_ ,)[0]
snake_case : Dict = image[0, -3:, -3:, -1]
snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case : Any = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : Optional[int] = self.dummy_cond_unet
snake_case : Any = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
snake_case : str = self.dummy_vae
snake_case : Dict = self.dummy_text_encoder
snake_case : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
snake_case : Dict = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ ,vae=SCREAMING_SNAKE_CASE_ ,text_encoder=SCREAMING_SNAKE_CASE_ ,tokenizer=SCREAMING_SNAKE_CASE_ ,safety_checker=SCREAMING_SNAKE_CASE_ ,feature_extractor=self.dummy_extractor ,)
snake_case : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = """A painting of a squirrel eating a burger"""
snake_case : Dict = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
snake_case : Optional[int] = sd_pipe([prompt] ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" )
snake_case : str = output.images
snake_case : Dict = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
snake_case : str = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=SCREAMING_SNAKE_CASE_ ,)[0]
snake_case : int = image[0, -3:, -3:, -1]
snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case : int = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" ,safety_checker=SCREAMING_SNAKE_CASE_ )
assert isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
assert isinstance(pipe.scheduler ,SCREAMING_SNAKE_CASE_ )
assert pipe.safety_checker is None
snake_case : str = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case : Any = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
snake_case : int = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Any = self.dummy_cond_unet
snake_case : Tuple = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
snake_case : int = self.dummy_vae
snake_case : int = self.dummy_text_encoder
snake_case : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
snake_case : Dict = unet.half()
snake_case : Optional[int] = vae.half()
snake_case : Any = bert.half()
# make sure here that pndm scheduler skips prk
snake_case : Any = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ ,vae=SCREAMING_SNAKE_CASE_ ,text_encoder=SCREAMING_SNAKE_CASE_ ,tokenizer=SCREAMING_SNAKE_CASE_ ,safety_checker=SCREAMING_SNAKE_CASE_ ,feature_extractor=self.dummy_extractor ,)
snake_case : int = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = """A painting of a squirrel eating a burger"""
snake_case : int = sd_pipe([prompt] ,num_inference_steps=2 ,output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
snake_case : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
snake_case : List[Any] = 4003660346
snake_case : Optional[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
snake_case : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
snake_case : int = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=SCREAMING_SNAKE_CASE_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
snake_case : Optional[Any] = output.images
snake_case : Union[str, Any] = image[0, -3:, -3:, -1]
snake_case : Optional[int] = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
snake_case : int = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=SCREAMING_SNAKE_CASE_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
snake_case : Dict = output.images
snake_case : Optional[int] = image[0, -3:, -3:, -1]
snake_case : str = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" ,safety_checker=SCREAMING_SNAKE_CASE_ )
snake_case : int = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
snake_case : List[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = """padme amidala taking a bath artwork, safe for work, no nudity"""
snake_case : Optional[Any] = 2734971755
snake_case : Dict = 7
snake_case : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
snake_case : int = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=SCREAMING_SNAKE_CASE_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
snake_case : Union[str, Any] = output.images
snake_case : Union[str, Any] = image[0, -3:, -3:, -1]
snake_case : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
snake_case : str = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=SCREAMING_SNAKE_CASE_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
snake_case : str = output.images
snake_case : Optional[Any] = image[0, -3:, -3:, -1]
snake_case : Any = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
snake_case : str = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
snake_case : List[Any] = 1044355234
snake_case : Optional[Any] = 12
snake_case : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=SCREAMING_SNAKE_CASE_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
snake_case : str = output.images
snake_case : Optional[int] = image[0, -3:, -3:, -1]
snake_case : Tuple = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
snake_case : str = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
snake_case : str = sd_pipe(
[prompt] ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=SCREAMING_SNAKE_CASE_ ,num_inference_steps=50 ,output_type="""np""" ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
snake_case : Dict = output.images
snake_case : Optional[Any] = image[0, -3:, -3:, -1]
snake_case : Optional[Any] = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 36 |
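# Hedged sketch (added): the slice-comparison pattern the tests above repeat — check a
# 3x3 corner of the generated image against reference pixel values. Arrays are dummies.
import numpy as np
image = np.zeros((1, 64, 64, 3))
image_slice = image[0, -3:, -3:, -1]          # 3x3 corner of the last channel
expected_slice = np.zeros(9)
assert image.shape == (1, 64, 64, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2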
class Graph :
    def __init__( self ):
        """simple docstring"""
        self.num_vertices =0
        self.num_edges =0
        self.adjacency ={}
    def add_vertex ( self , vertex ):
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] ={}
            self.num_vertices += 1
    def add_edge ( self , head , tail , weight ):
        """simple docstring"""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] =weight
        self.adjacency[tail][head] =weight
    def distinct_weight ( self ):
        """simple docstring"""
        edges =self.get_edges()
        for edge in edges:
            head , tail , weight =edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] =list(edges[i] )
        edges.sort(key=lambda e: e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] =edges[i][2] + 1
        for edge in edges:
            head , tail , weight =edge
            self.adjacency[head][tail] =weight
            self.adjacency[tail][head] =weight
    def __str__( self ):
        """simple docstring"""
        string =''''''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight =self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip('''\n''' )
    def get_edges ( self ):
        """simple docstring"""
        output =[]
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices ( self ):
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build ( vertices=None , edges=None ):
        """simple docstring"""
        g =Graph()
        if vertices is None:
            vertices =[]
        if edges is None:
            edges =[]
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
class UnionFind :
    def __init__( self ):
        """simple docstring"""
        self.parent ={}
        self.rank ={}
    def __len__( self ):
        """simple docstring"""
        return len(self.parent )
    def make_set ( self , item ):
        """simple docstring"""
        if item in self.parent:
            return self.find(item )
        self.parent[item] =item
        self.rank[item] =0
        return item
    def find ( self , item ):
        """simple docstring"""
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] =self.find(self.parent[item] )
        return self.parent[item]
    def union ( self , item_a , item_b ):
        """simple docstring"""
        root_a =self.find(item_a )
        root_b =self.find(item_b )
        if root_a == root_b:
            return root_a
        if self.rank[root_a] > self.rank[root_b]:
            self.parent[root_b] =root_a
            return root_a
        if self.rank[root_a] < self.rank[root_b]:
            self.parent[root_a] =root_b
            return root_b
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1
            self.parent[root_b] =root_a
            return root_a
        return None
    @staticmethod
    def boruvka ( graph ):
        """simple docstring"""
        num_components =graph.num_vertices
        union_find =UnionFind()  # UnionFind is defined at module level above
        mst_edges =[]
        while num_components > 1:
            cheap_edge ={}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] =-1
            edges =graph.get_edges()
            for edge in edges:
                head , tail , weight =edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head , tail , weight =edge
                set_a =union_find.find(head )
                set_b =union_find.find(tail )
                if set_a != set_b:
                    if cheap_edge[set_a] == -1 or cheap_edge[set_a][2] > weight:
                        cheap_edge[set_a] =[head, tail, weight]
                    if cheap_edge[set_b] == -1 or cheap_edge[set_b][2] > weight:
                        cheap_edge[set_b] =[head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight =cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components =num_components - 1
        mst =Graph.build(edges=mst_edges )
        return mst | 691 | 0 |
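# Hedged usage sketch (added): build a small graph and extract its MST with the
# Borůvka routine above. The edge list is invented; distinct weights avoid ties.
g = Graph.build(vertices=["a", "b", "c", "d"], edges=[["a", "b", 1], ["b", "c", 2], ["c", "d", 3], ["a", "d", 4]])
mst = UnionFind.boruvka(g)
print(mst)  # expected MST weight: 1 + 2 + 3 = 6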
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
def list_field ( default=None , metadata=None ) -> List[str]:
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class A__ :
"""simple docstring"""
_lowercase = list_field(
default=[] , metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
} , )
_lowercase = list_field(
default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
_lowercase = list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
_lowercase = field(
default=A__ , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
_lowercase = field(
default=A__ , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
_lowercase = field(
default=A__ , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
_lowercase = field(default=A__ , metadata={'help': 'Use FP16 to accelerate inference.'} )
_lowercase = field(default=A__ , metadata={'help': 'Benchmark training of model'} )
_lowercase = field(default=A__ , metadata={'help': 'Verbose memory tracing'} )
_lowercase = field(
default=A__ , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
_lowercase = field(
default=A__ , metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
} , )
_lowercase = field(default=A__ , metadata={'help': 'Trace memory line by line'} )
_lowercase = field(default=A__ , metadata={'help': 'Save result to a CSV file'} )
_lowercase = field(default=A__ , metadata={'help': 'Save all print statements in a log file'} )
_lowercase = field(default=A__ , metadata={'help': 'Whether to print environment information'} )
_lowercase = field(
default=A__ , metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
} , )
_lowercase = field(
default=F'inference_time_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
_lowercase = field(
default=F'inference_memory_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
_lowercase = field(
default=F'train_time_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
_lowercase = field(
default=F'train_memory_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
_lowercase = field(
default=F'env_info_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving environment information.'} , )
_lowercase = field(
default=F'log_{round(time() )}.csv' , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
_lowercase = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
_lowercase = field(
default=A__ , metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
} , )
def _UpperCamelCase( self : Optional[int] ):
warnings.warn(
f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." , lowerCamelCase__ , )
def _UpperCamelCase( self : Union[str, Any] ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def _UpperCamelCase( self : List[str] ):
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased']." )
return self.models
@property
def _UpperCamelCase( self : Any ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
| 37 |
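# Hedged sketch (added): why list_field wraps a default_factory — dataclass fields
# may not use a mutable list as a plain default. Names below are illustrative.
from dataclasses import dataclass, field

def make_list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)

@dataclass
class ToyArgs:
    batch_sizes: list = make_list_field(default=[8], metadata={"help": "batch sizes"})

assert ToyArgs().batch_sizes == [8]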
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
def simple_accuracy( preds ,labels):
    return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args , data_args , training_args =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fp16 ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' ,training_args)
# Set seed
set_seed(training_args.seed)
try:
        processor =processors[data_args.task_name]()
        label_list =processor.get_labels()
        num_labels =len(label_list)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config =AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=num_labels ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
    tokenizer =AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    model =AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=config ,cache_dir=model_args.cache_dir ,)
# Get datasets
    train_dataset =(
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,tokenizer=tokenizer ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
        if training_args.do_train
        else None
    )
    eval_dataset =(
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,tokenizer=tokenizer ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
        if training_args.do_eval
        else None
    )
    def compute_metrics(p) -> Dict:
        preds =np.argmax(p.predictions ,axis=1)
        return {"acc": simple_accuracy(preds ,p.label_ids)}
    # Data collator
    data_collator =DataCollatorWithPadding(tokenizer ,pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer =Trainer(
        model=model ,args=training_args ,train_dataset=train_dataset ,eval_dataset=eval_dataset ,compute_metrics=compute_metrics ,data_collator=data_collator ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results ={}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        result =trainer.evaluate()
        output_eval_file =os.path.join(training_args.output_dir ,'''eval_results.txt''')
        if trainer.is_world_master():
            with open(output_eval_file ,'''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key, value in result.items():
                    logger.info(''' %s = %s''' ,key ,value)
                    writer.write('''%s = %s\n''' % (key, value))
        results.update(result)
    return results
return results
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 691 | 0 |
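# Hedged sketch (added): the argmax-then-compare metric used in compute_metrics above,
# checked on toy arrays.
import numpy as np
logits = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4], [0.3, 0.7]])
preds = np.argmax(logits, axis=1)          # -> [0, 1, 0, 1]
labels = np.array([0, 1, 1, 1])
assert (preds == labels).mean() == 0.75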
'''simple docstring'''
def nor_gate( input_a : int , input_b : int ) -> int:
    '''simple docstring'''
    return int(input_a == input_b == 0 )
def main( ) -> None:
    '''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f"| 0 | 0 | {nor_gate(0 , 0 )} |" )
print(f"| 0 | 1 | {nor_gate(0 , 1 )} |" )
print(f"| 1 | 0 | {nor_gate(1 , 0 )} |" )
print(f"| 1 | 1 | {nor_gate(1 , 1 )} |" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
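# Hedged sketch (added): NOR is functionally complete — NOT and OR can be derived
# from the nor_gate defined above. The derived-gate names are illustrative.
def not_gate(a: int) -> int:
    return nor_gate(a, a)

def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))

assert [not_gate(x) for x in (0, 1)] == [1, 0]
assert [or_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 1, 1, 1]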
| 38 |
def binary_exponentiation( a ,n ,mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a ,n - 1 ,mod) * a) % mod
    else:
        b =binary_exponentiation(a ,n // 2 ,mod)
        return (b * b) % mod
# a prime number
p = 7_01
a = 10_00_00_00_00
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p) | 691 | 0 |
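# Hedged note (added): both prints above rely on Fermat's little theorem — for prime p
# and b not divisible by p, b**(p - 2) % p is the modular inverse of b. Quick check:
inverse = pow(10, 7_01 - 2, 7_01)   # built-in three-argument pow does modular exponentiation
assert (10 * inverse) % 7_01 == 1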
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowerCAmelCase_ = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->Tuple:
snake_case_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
snake_case_ = self.transformer_dir
shutil.copy(
os.path.join(_UpperCamelCase , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def snake_case__( self : Dict ) ->Tuple:
snake_case_ = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample) | 39 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
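# Descriptive note: the helper class above follows the usual transformers test
# layout. A plain tester builds a tiny config and random inputs, and the
# unittest classes below drive it through the shared ModelTesterMixin and
# PipelineTesterMixin checks.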
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
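# Standalone usage sketch (illustrative; assumes Hub access, and that
# "google/bit-50" is the first entry of BIT_PRETRAINED_MODEL_ARCHIVE_LIST):
#
#   processor = BitImageProcessor.from_pretrained("google/bit-50")
#   model = BitForImageClassification.from_pretrained("google/bit-50")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])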
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self) | 691 | 0 |